Add multi-database discovery and grouping features
Some checks are pending
PostgreSQL Compatibility Matrix / PG14 smoke (push) Waiting to run
PostgreSQL Compatibility Matrix / PG15 smoke (push) Successful in 28s
PostgreSQL Compatibility Matrix / PG16 smoke (push) Successful in 7s
PostgreSQL Compatibility Matrix / PG17 smoke (push) Successful in 7s
PostgreSQL Compatibility Matrix / PG18 smoke (push) Successful in 6s

This update introduces optional automatic discovery and onboarding of all databases on a PostgreSQL instance. It also enhances the frontend UI with grouped target display and navigation, making it easier to view and manage related databases. New backend endpoints and supporting logic connect the discovery workflow to the grouped UI end to end.
This commit is contained in:
2026-02-12 16:54:22 +01:00
parent 1b12c01366
commit fa8958934f
7 changed files with 307 additions and 28 deletions

View File

@@ -1,4 +1,5 @@
from datetime import datetime
from uuid import uuid4
import asyncpg
from fastapi import APIRouter, Depends, HTTPException, Query, status
@@ -60,6 +61,47 @@ async def _set_target_owners(db: AsyncSession, target_id: int, user_ids: list[in
db.add(TargetOwner(target_id=target_id, user_id=user_id, assigned_by_user_id=assigned_by_user_id))
async def _discover_databases(payload: TargetCreate) -> list[str]:
    """Enumerate connectable, non-template databases on the target instance.

    Opens a short-lived asyncpg connection using the credentials in
    *payload* and queries ``pg_database``.

    Returns:
        Sorted list of database names (empty names filtered out).

    Raises:
        HTTPException: 400 when the connection or the query fails, with the
            underlying error message in the detail.
    """
    # asyncpg takes a boolean-ish ``ssl`` flag: opt in unless explicitly disabled.
    ssl = payload.sslmode != "disable"
    conn = None
    try:
        conn = await asyncpg.connect(
            host=payload.host,
            port=payload.port,
            database=payload.dbname,
            user=payload.username,
            password=payload.password,
            ssl=ssl,
            timeout=8,  # keep discovery snappy; unreachable hosts fail fast
        )
        rows = await conn.fetch(
            """
            SELECT datname
            FROM pg_database
            WHERE datallowconn
            AND NOT datistemplate
            ORDER BY datname
            """
        )
        return [row["datname"] for row in rows if row["datname"]]
    except Exception as exc:
        # Chain the original error so logs keep the real cause (PEP 3134).
        raise HTTPException(status_code=400, detail=f"Database discovery failed: {exc}") from exc
    finally:
        if conn:
            await conn.close()
async def _next_unique_target_name(db: AsyncSession, base_name: str) -> str:
    """Return *base_name* (trimmed), or the first free ``"name (N)"`` variant.

    Probes the targets table for name collisions and appends " (2)", " (3)", …
    until an unused name is found.

    Bug fix: the original stripped only the first candidate, so suffixed
    variants of a name with surrounding whitespace would not match the base.
    Strip once and reuse the normalized value everywhere.
    """
    base = base_name.strip()
    candidate = base
    suffix = 2
    while True:
        exists = await db.scalar(select(Target.id).where(Target.name == candidate))
        if exists is None:
            return candidate
        candidate = f"{base} ({suffix})"
        suffix += 1
@router.get("", response_model=list[TargetOut])
async def list_targets(user: User = Depends(get_current_user), db: AsyncSession = Depends(get_db)) -> list[TargetOut]:
_ = user
@@ -101,13 +143,78 @@ async def create_target(
user: User = Depends(require_roles("admin", "operator")),
db: AsyncSession = Depends(get_db),
) -> TargetOut:
owner_ids = sorted(set(payload.owner_user_ids or []))
if owner_ids:
owners_exist = (await db.scalars(select(User.id).where(User.id.in_(owner_ids)))).all()
if len(set(owners_exist)) != len(owner_ids):
raise HTTPException(status_code=400, detail="One or more owner users were not found")
encrypted_password = encrypt_secret(payload.password)
created_targets: list[Target] = []
if payload.discover_all_databases:
databases = await _discover_databases(payload)
if not databases:
raise HTTPException(status_code=400, detail="No databases discovered on target")
group_id = str(uuid4())
base_tags = payload.tags or {}
for dbname in databases:
duplicate = await db.scalar(
select(Target.id).where(
Target.host == payload.host,
Target.port == payload.port,
Target.dbname == dbname,
Target.username == payload.username,
)
)
if duplicate is not None:
continue
target_name = await _next_unique_target_name(db, f"{payload.name} / {dbname}")
tags = {
**base_tags,
"monitor_mode": "all_databases",
"monitor_group_id": group_id,
"monitor_group_name": payload.name,
}
target = Target(
name=target_name,
host=payload.host,
port=payload.port,
dbname=dbname,
username=payload.username,
encrypted_password=encrypted_password,
sslmode=payload.sslmode,
use_pg_stat_statements=payload.use_pg_stat_statements,
tags=tags,
)
db.add(target)
await db.flush()
created_targets.append(target)
if owner_ids:
await _set_target_owners(db, target.id, owner_ids, user.id)
if not created_targets:
raise HTTPException(status_code=400, detail="All discovered databases already exist as targets")
await db.commit()
for item in created_targets:
await db.refresh(item)
await write_audit_log(
db,
"target.create.all_databases",
user.id,
{"base_name": payload.name, "created_count": len(created_targets), "host": payload.host, "port": payload.port},
)
owner_map = await _owners_by_target_ids(db, [created_targets[0].id])
return _target_out_with_owners(created_targets[0], owner_map.get(created_targets[0].id, []))
target_name = await _next_unique_target_name(db, payload.name)
target = Target(
name=payload.name,
name=target_name,
host=payload.host,
port=payload.port,
dbname=payload.dbname,
username=payload.username,
encrypted_password=encrypt_secret(payload.password),
encrypted_password=encrypted_password,
sslmode=payload.sslmode,
use_pg_stat_statements=payload.use_pg_stat_statements,
tags=payload.tags,
@@ -116,11 +223,8 @@ async def create_target(
await db.commit()
await db.refresh(target)
if payload.owner_user_ids:
owners_exist = (await db.scalars(select(User.id).where(User.id.in_(payload.owner_user_ids)))).all()
if len(set(owners_exist)) != len(set(payload.owner_user_ids)):
raise HTTPException(status_code=400, detail="One or more owner users were not found")
await _set_target_owners(db, target.id, payload.owner_user_ids, user.id)
if owner_ids:
await _set_target_owners(db, target.id, owner_ids, user.id)
await db.commit()
await write_audit_log(db, "target.create", user.id, {"target_id": target.id, "name": target.name})

View File

@@ -16,6 +16,7 @@ class TargetBase(BaseModel):
class TargetCreate(TargetBase):
    """Request payload for creating a monitoring target.

    Extends TargetBase with the connection secret and discovery options.
    """

    # Plaintext connection password; encrypted before persistence (see the
    # targets router, which stores it via encrypt_secret).
    password: str
    # When True, the backend discovers every connectable database on the host
    # and creates one target per database instead of a single target.
    discover_all_databases: bool = False
class TargetConnectionTestRequest(BaseModel):