Compare commits
30 Commits
89d3a39679
...
developmen
| Author | SHA1 | Date | |
|---|---|---|---|
| f0076ff1f4 | |||
| 8d47c0c378 | |||
| 7f7cf9179f | |||
| 3e317abda8 | |||
| 597579376f | |||
| f25792b8d8 | |||
| 6093c5dea8 | |||
| 84bc7b0384 | |||
| 3932aa56f7 | |||
| 9657bd7a36 | |||
| 574e2eb9a5 | |||
| 21a8023bf1 | |||
| 328f69ea5e | |||
| c0077e3dd8 | |||
| af6ea11079 | |||
| 5a7f32541f | |||
| dd3f18bb06 | |||
| f4b18b6cf1 | |||
| a220e5de99 | |||
| a5ffafaf9e | |||
| d17752b611 | |||
| fe05c40426 | |||
| 5a0478f47d | |||
| 1cea82f5d9 | |||
| 418034f639 | |||
| 489dde812f | |||
| c2e4e614e0 | |||
| 344071193c | |||
| 03118e59d7 | |||
| 15fea78505 |
@@ -12,6 +12,7 @@ LOG_LEVEL=INFO
|
|||||||
# Core Database (internal metadata DB)
|
# Core Database (internal metadata DB)
|
||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Database that stores users, targets, metrics, query stats, and audit logs.
|
# Database that stores users, targets, metrics, query stats, and audit logs.
|
||||||
|
# DEV default only. Use strong unique credentials in production.
|
||||||
DB_NAME=nexapg
|
DB_NAME=nexapg
|
||||||
DB_USER=nexapg
|
DB_USER=nexapg
|
||||||
DB_PASSWORD=nexapg
|
DB_PASSWORD=nexapg
|
||||||
@@ -23,7 +24,7 @@ DB_PORT=5433
|
|||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Host port mapped to backend container port 8000.
|
# Host port mapped to backend container port 8000.
|
||||||
BACKEND_PORT=8000
|
BACKEND_PORT=8000
|
||||||
# JWT signing secret. Change this in every non-local environment.
|
# JWT signing secret. Never hardcode in source. Rotate regularly.
|
||||||
JWT_SECRET_KEY=change_this_super_secret
|
JWT_SECRET_KEY=change_this_super_secret
|
||||||
JWT_ALGORITHM=HS256
|
JWT_ALGORITHM=HS256
|
||||||
# Access token lifetime in minutes.
|
# Access token lifetime in minutes.
|
||||||
@@ -31,6 +32,7 @@ JWT_ACCESS_TOKEN_MINUTES=15
|
|||||||
# Refresh token lifetime in minutes (10080 = 7 days).
|
# Refresh token lifetime in minutes (10080 = 7 days).
|
||||||
JWT_REFRESH_TOKEN_MINUTES=10080
|
JWT_REFRESH_TOKEN_MINUTES=10080
|
||||||
# Key used to encrypt monitored target passwords at rest.
|
# Key used to encrypt monitored target passwords at rest.
|
||||||
|
# Never hardcode in source. Rotate with re-encryption plan.
|
||||||
# Generate with:
|
# Generate with:
|
||||||
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
|
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
|
||||||
ENCRYPTION_KEY=REPLACE_WITH_FERNET_KEY
|
ENCRYPTION_KEY=REPLACE_WITH_FERNET_KEY
|
||||||
@@ -56,5 +58,5 @@ INIT_ADMIN_PASSWORD=ChangeMe123!
|
|||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Frontend
|
# Frontend
|
||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Host port mapped to frontend container port 80.
|
# Host port mapped to frontend container port 8080.
|
||||||
FRONTEND_PORT=5173
|
FRONTEND_PORT=5173
|
||||||
|
|||||||
112
.github/workflows/container-cve-scan-development.yml
vendored
Normal file
112
.github/workflows/container-cve-scan-development.yml
vendored
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
name: Container CVE Scan (development)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ["development"]
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
cve-scan:
|
||||||
|
name: Scan backend/frontend images for CVEs
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Docker Hub login (for Scout)
|
||||||
|
if: ${{ secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Prepare Docker auth config for Scout container
|
||||||
|
if: ${{ secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
|
||||||
|
run: |
|
||||||
|
mkdir -p "$RUNNER_TEMP/scout-docker-config"
|
||||||
|
cp "$HOME/.docker/config.json" "$RUNNER_TEMP/scout-docker-config/config.json"
|
||||||
|
chmod 600 "$RUNNER_TEMP/scout-docker-config/config.json"
|
||||||
|
|
||||||
|
- name: Build backend image (local)
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
context: ./backend
|
||||||
|
file: ./backend/Dockerfile
|
||||||
|
push: false
|
||||||
|
load: true
|
||||||
|
tags: nexapg-backend:dev-scan
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
|
||||||
|
- name: Build frontend image (local)
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
context: ./frontend
|
||||||
|
file: ./frontend/Dockerfile
|
||||||
|
push: false
|
||||||
|
load: true
|
||||||
|
tags: nexapg-frontend:dev-scan
|
||||||
|
build-args: |
|
||||||
|
VITE_API_URL=/api/v1
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
|
||||||
|
- name: Docker Scout scan (backend)
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.DOCKERHUB_USERNAME }}" ] || [ -z "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
|
||||||
|
echo "Docker Hub Scout scan skipped: DOCKERHUB_USERNAME/DOCKERHUB_TOKEN not set." > scout-backend.txt
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
docker run --rm \
|
||||||
|
-u root \
|
||||||
|
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||||
|
-v "$RUNNER_TEMP/scout-docker-config:/root/.docker" \
|
||||||
|
-e DOCKER_CONFIG=/root/.docker \
|
||||||
|
-e DOCKER_SCOUT_HUB_USER="${{ secrets.DOCKERHUB_USERNAME }}" \
|
||||||
|
-e DOCKER_SCOUT_HUB_PASSWORD="${{ secrets.DOCKERHUB_TOKEN }}" \
|
||||||
|
docker/scout-cli:latest cves nexapg-backend:dev-scan \
|
||||||
|
--only-severity critical,high,medium,low > scout-backend.txt 2>&1 || {
|
||||||
|
echo "" >> scout-backend.txt
|
||||||
|
echo "Docker Scout backend scan failed (non-blocking)." >> scout-backend.txt
|
||||||
|
}
|
||||||
|
|
||||||
|
- name: Docker Scout scan (frontend)
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.DOCKERHUB_USERNAME }}" ] || [ -z "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
|
||||||
|
echo "Docker Hub Scout scan skipped: DOCKERHUB_USERNAME/DOCKERHUB_TOKEN not set." > scout-frontend.txt
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
docker run --rm \
|
||||||
|
-u root \
|
||||||
|
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||||
|
-v "$RUNNER_TEMP/scout-docker-config:/root/.docker" \
|
||||||
|
-e DOCKER_CONFIG=/root/.docker \
|
||||||
|
-e DOCKER_SCOUT_HUB_USER="${{ secrets.DOCKERHUB_USERNAME }}" \
|
||||||
|
-e DOCKER_SCOUT_HUB_PASSWORD="${{ secrets.DOCKERHUB_TOKEN }}" \
|
||||||
|
docker/scout-cli:latest cves nexapg-frontend:dev-scan \
|
||||||
|
--only-severity critical,high,medium,low > scout-frontend.txt 2>&1 || {
|
||||||
|
echo "" >> scout-frontend.txt
|
||||||
|
echo "Docker Scout frontend scan failed (non-blocking)." >> scout-frontend.txt
|
||||||
|
}
|
||||||
|
|
||||||
|
- name: Print scan summary
|
||||||
|
run: |
|
||||||
|
echo "===== Docker Scout backend ====="
|
||||||
|
test -f scout-backend.txt && cat scout-backend.txt || echo "scout-backend.txt not available"
|
||||||
|
echo
|
||||||
|
echo "===== Docker Scout frontend ====="
|
||||||
|
test -f scout-frontend.txt && cat scout-frontend.txt || echo "scout-frontend.txt not available"
|
||||||
|
|
||||||
|
- name: Upload scan reports
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: container-cve-scan-reports
|
||||||
|
path: |
|
||||||
|
scout-backend.txt
|
||||||
|
scout-frontend.txt
|
||||||
34
.github/workflows/docker-release.yml
vendored
34
.github/workflows/docker-release.yml
vendored
@@ -27,6 +27,20 @@ jobs:
|
|||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.13"
|
||||||
|
|
||||||
|
- name: Dependency security gate (pip-audit)
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install pip-audit
|
||||||
|
pip-audit -r backend/requirements.txt --format json --aliases --output pip-audit-backend.json || true
|
||||||
|
python backend/scripts/pip_audit_gate.py \
|
||||||
|
--report pip-audit-backend.json \
|
||||||
|
--allowlist ops/security/pip-audit-allowlist.json
|
||||||
|
|
||||||
- name: Resolve version/tag
|
- name: Resolve version/tag
|
||||||
id: ver
|
id: ver
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -51,10 +65,28 @@ jobs:
|
|||||||
if [ -z "$NS" ]; then
|
if [ -z "$NS" ]; then
|
||||||
NS="${{ secrets.DOCKERHUB_USERNAME }}"
|
NS="${{ secrets.DOCKERHUB_USERNAME }}"
|
||||||
fi
|
fi
|
||||||
if [ -z "$NS" ]; then
|
|
||||||
|
# Normalize accidental input like spaces or uppercase.
|
||||||
|
NS="$(echo "$NS" | tr '[:upper:]' '[:lower:]' | xargs)"
|
||||||
|
|
||||||
|
# Reject clearly invalid placeholders/config mistakes early.
|
||||||
|
if [ -z "$NS" ] || [ "$NS" = "-" ]; then
|
||||||
echo "Missing Docker Hub namespace. Set repo var DOCKERHUB_NAMESPACE or secret DOCKERHUB_USERNAME."
|
echo "Missing Docker Hub namespace. Set repo var DOCKERHUB_NAMESPACE or secret DOCKERHUB_USERNAME."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Namespace must be a single Docker Hub account/org name, not a path/url.
|
||||||
|
if [[ "$NS" == *"/"* ]] || [[ "$NS" == *":"* ]]; then
|
||||||
|
echo "Invalid Docker Hub namespace '$NS'. Use only the account/org name (e.g. 'nesterovicit')."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! [[ "$NS" =~ ^[a-z0-9]+([._-][a-z0-9]+)*$ ]]; then
|
||||||
|
echo "Invalid Docker Hub namespace '$NS'. Allowed: lowercase letters, digits, ., _, -"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Using Docker Hub namespace: $NS"
|
||||||
echo "value=$NS" >> "$GITHUB_OUTPUT"
|
echo "value=$NS" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
|
|||||||
110
.github/workflows/e2e-api-smoke.yml
vendored
Normal file
110
.github/workflows/e2e-api-smoke.yml
vendored
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
name: E2E API Smoke
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ["main", "master", "development"]
|
||||||
|
paths:
|
||||||
|
- "backend/**"
|
||||||
|
- ".github/workflows/e2e-api-smoke.yml"
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- "backend/**"
|
||||||
|
- ".github/workflows/e2e-api-smoke.yml"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
e2e-smoke:
|
||||||
|
name: Core API E2E Smoke
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
env:
|
||||||
|
APP_NAME: NexaPG Monitor
|
||||||
|
ENVIRONMENT: test
|
||||||
|
LOG_LEVEL: INFO
|
||||||
|
DB_HOST: 127.0.0.1
|
||||||
|
DB_PORT: 5432
|
||||||
|
DB_NAME: nexapg
|
||||||
|
DB_USER: nexapg
|
||||||
|
DB_PASSWORD: nexapg
|
||||||
|
JWT_SECRET_KEY: smoke_jwt_secret_for_ci_only
|
||||||
|
JWT_ALGORITHM: HS256
|
||||||
|
JWT_ACCESS_TOKEN_MINUTES: 15
|
||||||
|
JWT_REFRESH_TOKEN_MINUTES: 10080
|
||||||
|
ENCRYPTION_KEY: 5fLf8HSTbEUeo1c4DnWnvkXxU6v8XJ8iW58wNw5vJ8s=
|
||||||
|
CORS_ORIGINS: http://localhost:5173
|
||||||
|
POLL_INTERVAL_SECONDS: 30
|
||||||
|
INIT_ADMIN_EMAIL: admin@example.com
|
||||||
|
INIT_ADMIN_PASSWORD: ChangeMe123!
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.13"
|
||||||
|
|
||||||
|
- name: Start PostgreSQL container
|
||||||
|
run: |
|
||||||
|
docker rm -f nexapg-e2e-pg >/dev/null 2>&1 || true
|
||||||
|
docker run -d \
|
||||||
|
--name nexapg-e2e-pg \
|
||||||
|
-e POSTGRES_DB=nexapg \
|
||||||
|
-e POSTGRES_USER=nexapg \
|
||||||
|
-e POSTGRES_PASSWORD=nexapg \
|
||||||
|
-p 5432:5432 \
|
||||||
|
postgres:16
|
||||||
|
|
||||||
|
- name: Install backend dependencies + test tooling
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install -r backend/requirements.txt
|
||||||
|
pip install pytest
|
||||||
|
|
||||||
|
- name: Wait for PostgreSQL service
|
||||||
|
run: |
|
||||||
|
python - <<'PY'
|
||||||
|
import asyncio
|
||||||
|
import asyncpg
|
||||||
|
|
||||||
|
async def wait_for_db():
|
||||||
|
dsn = "postgresql://nexapg:nexapg@127.0.0.1:5432/nexapg?sslmode=disable"
|
||||||
|
last_err = None
|
||||||
|
for attempt in range(1, 61):
|
||||||
|
try:
|
||||||
|
conn = await asyncpg.connect(dsn=dsn, timeout=3)
|
||||||
|
try:
|
||||||
|
await conn.execute("SELECT 1")
|
||||||
|
finally:
|
||||||
|
await conn.close()
|
||||||
|
print(f"PostgreSQL ready after {attempt} attempt(s).")
|
||||||
|
return
|
||||||
|
except Exception as exc:
|
||||||
|
last_err = exc
|
||||||
|
await asyncio.sleep(2)
|
||||||
|
raise RuntimeError(f"PostgreSQL not ready after retries: {last_err}")
|
||||||
|
|
||||||
|
asyncio.run(wait_for_db())
|
||||||
|
PY
|
||||||
|
|
||||||
|
- name: Show PostgreSQL container status
|
||||||
|
if: ${{ always() }}
|
||||||
|
run: |
|
||||||
|
docker ps -a --filter "name=nexapg-e2e-pg"
|
||||||
|
docker logs --tail=80 nexapg-e2e-pg || true
|
||||||
|
|
||||||
|
- name: Run Alembic migrations
|
||||||
|
working-directory: backend
|
||||||
|
run: alembic upgrade head
|
||||||
|
|
||||||
|
- name: Run core API smoke suite
|
||||||
|
env:
|
||||||
|
PYTHONPATH: backend
|
||||||
|
run: pytest -q backend/tests/e2e/test_api_smoke.py
|
||||||
|
|
||||||
|
- name: Cleanup PostgreSQL container
|
||||||
|
if: ${{ always() }}
|
||||||
|
run: docker rm -f nexapg-e2e-pg >/dev/null 2>&1 || true
|
||||||
65
.github/workflows/pg-compat-matrix.yml
vendored
65
.github/workflows/pg-compat-matrix.yml
vendored
@@ -11,6 +11,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
max-parallel: 3
|
||||||
matrix:
|
matrix:
|
||||||
pg_version: ["14", "15", "16", "17", "18"]
|
pg_version: ["14", "15", "16", "17", "18"]
|
||||||
|
|
||||||
@@ -32,6 +33,8 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
@@ -67,65 +70,3 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
PG_DSN_CANDIDATES: postgresql://postgres:postgres@postgres:5432/compatdb?sslmode=disable,postgresql://postgres:postgres@127.0.0.1:5432/compatdb?sslmode=disable
|
PG_DSN_CANDIDATES: postgresql://postgres:postgres@postgres:5432/compatdb?sslmode=disable,postgresql://postgres:postgres@127.0.0.1:5432/compatdb?sslmode=disable
|
||||||
run: python backend/scripts/pg_compat_smoke.py
|
run: python backend/scripts/pg_compat_smoke.py
|
||||||
|
|
||||||
backend-alpine-smoke:
|
|
||||||
name: Backend Alpine smoke (PG16)
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
services:
|
|
||||||
postgres:
|
|
||||||
image: postgres:16
|
|
||||||
env:
|
|
||||||
POSTGRES_DB: compatdb
|
|
||||||
POSTGRES_USER: postgres
|
|
||||||
POSTGRES_PASSWORD: postgres
|
|
||||||
ports:
|
|
||||||
- 5432:5432
|
|
||||||
options: >-
|
|
||||||
--health-cmd "pg_isready -U postgres -d compatdb"
|
|
||||||
--health-interval 5s
|
|
||||||
--health-timeout 5s
|
|
||||||
--health-retries 20
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Enable pg_stat_statements in service container
|
|
||||||
run: |
|
|
||||||
PG_CID="$(docker ps --filter "ancestor=postgres:16" --format "{{.ID}}" | head -n1)"
|
|
||||||
if [ -z "$PG_CID" ]; then
|
|
||||||
echo "Could not find postgres service container for version 16"
|
|
||||||
docker ps -a
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Using postgres container: $PG_CID"
|
|
||||||
docker exec "$PG_CID" psql -U postgres -d compatdb -c "ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';"
|
|
||||||
docker restart "$PG_CID"
|
|
||||||
|
|
||||||
for i in $(seq 1 40); do
|
|
||||||
if docker exec "$PG_CID" pg_isready -U postgres -d compatdb; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
docker exec "$PG_CID" psql -U postgres -d compatdb -c "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;"
|
|
||||||
|
|
||||||
- name: Build backend image with Alpine base
|
|
||||||
run: |
|
|
||||||
docker build \
|
|
||||||
-f backend/Dockerfile \
|
|
||||||
--build-arg PYTHON_BASE_IMAGE=python:3.13-alpine \
|
|
||||||
-t nexapg-backend-alpine-smoke:ci \
|
|
||||||
./backend
|
|
||||||
|
|
||||||
- name: Run smoke checks in backend Alpine image
|
|
||||||
env:
|
|
||||||
PG_DSN_CANDIDATES: postgresql://postgres:postgres@127.0.0.1:5432/compatdb?sslmode=disable
|
|
||||||
run: |
|
|
||||||
docker run --rm --network host \
|
|
||||||
-e PG_DSN_CANDIDATES="${PG_DSN_CANDIDATES}" \
|
|
||||||
nexapg-backend-alpine-smoke:ci \
|
|
||||||
python /app/scripts/pg_compat_smoke.py
|
|
||||||
|
|||||||
35
.github/workflows/proxy-profile-validation.yml
vendored
Normal file
35
.github/workflows/proxy-profile-validation.yml
vendored
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
name: Proxy Profile Validation
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ["main", "master", "development"]
|
||||||
|
paths:
|
||||||
|
- "frontend/**"
|
||||||
|
- "ops/profiles/prod/**"
|
||||||
|
- "ops/scripts/validate_proxy_profile.sh"
|
||||||
|
- ".github/workflows/proxy-profile-validation.yml"
|
||||||
|
- "README.md"
|
||||||
|
- ".env.example"
|
||||||
|
- "ops/.env.example"
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- "frontend/**"
|
||||||
|
- "ops/profiles/prod/**"
|
||||||
|
- "ops/scripts/validate_proxy_profile.sh"
|
||||||
|
- ".github/workflows/proxy-profile-validation.yml"
|
||||||
|
- "README.md"
|
||||||
|
- ".env.example"
|
||||||
|
- "ops/.env.example"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
validate:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Validate proxy profile and mixed-content guardrails
|
||||||
|
run: bash ops/scripts/validate_proxy_profile.sh
|
||||||
53
.github/workflows/python-dependency-security.yml
vendored
Normal file
53
.github/workflows/python-dependency-security.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
name: Python Dependency Security
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ["main", "master", "development"]
|
||||||
|
paths:
|
||||||
|
- "backend/**"
|
||||||
|
- ".github/workflows/python-dependency-security.yml"
|
||||||
|
- "ops/security/pip-audit-allowlist.json"
|
||||||
|
- "docs/security/dependency-exceptions.md"
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- "backend/**"
|
||||||
|
- ".github/workflows/python-dependency-security.yml"
|
||||||
|
- "ops/security/pip-audit-allowlist.json"
|
||||||
|
- "docs/security/dependency-exceptions.md"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
pip-audit:
|
||||||
|
name: pip-audit (block high/critical)
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.13"
|
||||||
|
|
||||||
|
- name: Install pip-audit
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install pip-audit
|
||||||
|
|
||||||
|
- name: Run pip-audit (JSON report)
|
||||||
|
run: |
|
||||||
|
pip-audit -r backend/requirements.txt --format json --aliases --output pip-audit-backend.json || true
|
||||||
|
|
||||||
|
- name: Enforce vulnerability policy
|
||||||
|
run: |
|
||||||
|
python backend/scripts/pip_audit_gate.py \
|
||||||
|
--report pip-audit-backend.json \
|
||||||
|
--allowlist ops/security/pip-audit-allowlist.json
|
||||||
|
|
||||||
|
- name: Upload pip-audit report
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: pip-audit-security-report
|
||||||
|
path: pip-audit-backend.json
|
||||||
62
README.md
62
README.md
@@ -20,7 +20,11 @@ It combines FastAPI, React, and PostgreSQL in a Docker Compose stack with RBAC,
|
|||||||
- [API Error Format](#api-error-format)
|
- [API Error Format](#api-error-format)
|
||||||
- [`pg_stat_statements` Requirement](#pg_stat_statements-requirement)
|
- [`pg_stat_statements` Requirement](#pg_stat_statements-requirement)
|
||||||
- [Reverse Proxy / SSL Guidance](#reverse-proxy--ssl-guidance)
|
- [Reverse Proxy / SSL Guidance](#reverse-proxy--ssl-guidance)
|
||||||
|
- [Production Proxy Profile](#production-proxy-profile)
|
||||||
- [PostgreSQL Compatibility Smoke Test](#postgresql-compatibility-smoke-test)
|
- [PostgreSQL Compatibility Smoke Test](#postgresql-compatibility-smoke-test)
|
||||||
|
- [E2E API Smoke Test](#e2e-api-smoke-test)
|
||||||
|
- [Dependency Exception Flow](#dependency-exception-flow)
|
||||||
|
- [Secret Management (Production)](#secret-management-production)
|
||||||
- [Troubleshooting](#troubleshooting)
|
- [Troubleshooting](#troubleshooting)
|
||||||
- [Security Notes](#security-notes)
|
- [Security Notes](#security-notes)
|
||||||
|
|
||||||
@@ -206,7 +210,7 @@ Note: Migrations run automatically when the backend container starts (`entrypoin
|
|||||||
|
|
||||||
| Variable | Description |
|
| Variable | Description |
|
||||||
|---|---|
|
|---|---|
|
||||||
| `FRONTEND_PORT` | Host port mapped to frontend container port `80` |
|
| `FRONTEND_PORT` | Host port mapped to frontend container port `8080` |
|
||||||
|
|
||||||
## Core Functional Areas
|
## Core Functional Areas
|
||||||
|
|
||||||
@@ -371,6 +375,21 @@ For production, serve frontend and API under the same public origin via reverse
|
|||||||
|
|
||||||
This prevents mixed-content and CORS issues.
|
This prevents mixed-content and CORS issues.
|
||||||
|
|
||||||
|
## Production Proxy Profile
|
||||||
|
|
||||||
|
A secure, repeatable production profile is included:
|
||||||
|
|
||||||
|
- `ops/profiles/prod/.env.production.example`
|
||||||
|
- `ops/profiles/prod/nginx/nexapg.conf`
|
||||||
|
- `docs/deployment/proxy-production-profile.md`
|
||||||
|
|
||||||
|
Highlights:
|
||||||
|
|
||||||
|
- explicit CORS recommendations per environment (`dev`, `staging`, `prod`)
|
||||||
|
- required reverse-proxy header forwarding for backend context
|
||||||
|
- API path forwarding (`/api/` -> backend)
|
||||||
|
- mixed-content prevention guidance for HTTPS deployments
|
||||||
|
|
||||||
## PostgreSQL Compatibility Smoke Test
|
## PostgreSQL Compatibility Smoke Test
|
||||||
|
|
||||||
Run manually against one DSN:
|
Run manually against one DSN:
|
||||||
@@ -387,6 +406,45 @@ PG_DSN_CANDIDATES='postgresql://postgres:postgres@postgres:5432/compatdb?sslmode
|
|||||||
python backend/scripts/pg_compat_smoke.py
|
python backend/scripts/pg_compat_smoke.py
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## E2E API Smoke Test
|
||||||
|
|
||||||
|
Core API smoke suite covers:
|
||||||
|
|
||||||
|
- auth login + `/me`
|
||||||
|
- targets CRUD
|
||||||
|
- metrics access
|
||||||
|
- alerts status
|
||||||
|
- admin users CRUD
|
||||||
|
|
||||||
|
Run locally (with backend env vars set and DB migrated):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
PYTHONPATH=backend pytest -q backend/tests/e2e/test_api_smoke.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dependency Exception Flow
|
||||||
|
|
||||||
|
Python dependency vulnerabilities are enforced by CI via `pip-audit`.
|
||||||
|
|
||||||
|
- CI blocks unresolved `HIGH` and `CRITICAL` findings.
|
||||||
|
- Missing severity metadata is treated conservatively as `HIGH`.
|
||||||
|
- Temporary exceptions must be declared in `ops/security/pip-audit-allowlist.json`.
|
||||||
|
- Full process and required metadata are documented in:
|
||||||
|
- `docs/security/dependency-exceptions.md`
|
||||||
|
|
||||||
|
## Secret Management (Production)
|
||||||
|
|
||||||
|
Secret handling guidance is documented in:
|
||||||
|
|
||||||
|
- `docs/security/secret-management.md`
|
||||||
|
|
||||||
|
It includes:
|
||||||
|
|
||||||
|
- secure handling for `JWT_SECRET_KEY`, `ENCRYPTION_KEY`, `DB_PASSWORD`, and SMTP credentials
|
||||||
|
- clear **Do / Don't** rules
|
||||||
|
- recommended secret provider patterns (Vault/cloud/orchestrator/CI injection)
|
||||||
|
- practical rotation basics and operational checklist
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### Backend container keeps restarting during `make migrate`
|
### Backend container keeps restarting during `make migrate`
|
||||||
@@ -421,3 +479,5 @@ Set target `sslmode` to `disable` (or correct SSL config on target DB).
|
|||||||
- RBAC enforced on protected endpoints
|
- RBAC enforced on protected endpoints
|
||||||
- Audit logs for critical actions
|
- Audit logs for critical actions
|
||||||
- Collector error logging includes throttling to reduce repeated noise
|
- Collector error logging includes throttling to reduce repeated noise
|
||||||
|
- Production secret handling and rotation guidance:
|
||||||
|
- `docs/security/secret-management.md`
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
ARG PYTHON_BASE_IMAGE=python:3.13-slim
|
ARG PYTHON_BASE_IMAGE=python:3.13-alpine
|
||||||
FROM ${PYTHON_BASE_IMAGE} AS base
|
FROM ${PYTHON_BASE_IMAGE} AS base
|
||||||
|
|
||||||
ENV PYTHONDONTWRITEBYTECODE=1
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ from functools import lru_cache
|
|||||||
from pydantic import field_validator
|
from pydantic import field_validator
|
||||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||||
|
|
||||||
NEXAPG_VERSION = "0.2.0"
|
NEXAPG_VERSION = "0.2.5"
|
||||||
|
|
||||||
|
|
||||||
class Settings(BaseSettings):
|
class Settings(BaseSettings):
|
||||||
|
|||||||
192
backend/scripts/pip_audit_gate.py
Normal file
192
backend/scripts/pip_audit_gate.py
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Gate pip-audit results with an auditable allowlist policy.
|
||||||
|
|
||||||
|
Policy:
|
||||||
|
- Block unresolved HIGH/CRITICAL vulnerabilities.
|
||||||
|
- If severity is missing, treat as HIGH by default.
|
||||||
|
- Allow temporary exceptions via allowlist with expiry metadata.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime as dt
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
SEVERITY_ORDER = {"unknown": 0, "low": 1, "medium": 2, "high": 3, "critical": 4}
|
||||||
|
BLOCKING_SEVERITIES = {"high", "critical"}
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_date(s: str) -> dt.date:
|
||||||
|
return dt.date.fromisoformat(s)
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_severity(value: object) -> str:
|
||||||
|
"""Normalize various pip-audit/osv-style severity payloads."""
|
||||||
|
if isinstance(value, str):
|
||||||
|
v = value.strip().lower()
|
||||||
|
if v in SEVERITY_ORDER:
|
||||||
|
return v
|
||||||
|
try:
|
||||||
|
# CVSS numeric string fallback
|
||||||
|
score = float(v)
|
||||||
|
if score >= 9.0:
|
||||||
|
return "critical"
|
||||||
|
if score >= 7.0:
|
||||||
|
return "high"
|
||||||
|
if score >= 4.0:
|
||||||
|
return "medium"
|
||||||
|
return "low"
|
||||||
|
except ValueError:
|
||||||
|
return "unknown"
|
||||||
|
|
||||||
|
if isinstance(value, (int, float)):
|
||||||
|
score = float(value)
|
||||||
|
if score >= 9.0:
|
||||||
|
return "critical"
|
||||||
|
if score >= 7.0:
|
||||||
|
return "high"
|
||||||
|
if score >= 4.0:
|
||||||
|
return "medium"
|
||||||
|
return "low"
|
||||||
|
|
||||||
|
if isinstance(value, list):
|
||||||
|
# OSV sometimes returns a list of dicts. Pick the max-known severity.
|
||||||
|
best = "unknown"
|
||||||
|
for item in value:
|
||||||
|
if isinstance(item, dict):
|
||||||
|
sev = _normalize_severity(item.get("severity"))
|
||||||
|
if SEVERITY_ORDER.get(sev, 0) > SEVERITY_ORDER.get(best, 0):
|
||||||
|
best = sev
|
||||||
|
return best
|
||||||
|
|
||||||
|
if isinstance(value, dict):
|
||||||
|
return _normalize_severity(value.get("severity"))
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
|
||||||
|
|
||||||
|
def _load_allowlist(path: Path) -> tuple[list[dict], list[str]]:
|
||||||
|
if not path.exists():
|
||||||
|
return [], []
|
||||||
|
|
||||||
|
data = json.loads(path.read_text(encoding="utf-8"))
|
||||||
|
entries = data.get("entries", [])
|
||||||
|
today = dt.date.today()
|
||||||
|
active: list[dict] = []
|
||||||
|
errors: list[str] = []
|
||||||
|
|
||||||
|
required = {"id", "reason", "approved_by", "issue", "expires_on"}
|
||||||
|
for idx, entry in enumerate(entries, start=1):
|
||||||
|
missing = required - set(entry.keys())
|
||||||
|
if missing:
|
||||||
|
errors.append(f"allowlist entry #{idx} missing keys: {', '.join(sorted(missing))}")
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
expires = _parse_date(str(entry["expires_on"]))
|
||||||
|
except ValueError:
|
||||||
|
errors.append(f"allowlist entry #{idx} has invalid expires_on: {entry['expires_on']}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if expires < today:
|
||||||
|
errors.append(
|
||||||
|
f"allowlist entry #{idx} ({entry['id']}) expired on {entry['expires_on']}"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
active.append(entry)
|
||||||
|
|
||||||
|
return active, errors
|
||||||
|
|
||||||
|
|
||||||
|
def _iter_findings(report: object):
|
||||||
|
# pip-audit JSON can be list[dep] or dict with dependencies.
|
||||||
|
deps = report if isinstance(report, list) else report.get("dependencies", [])
|
||||||
|
for dep in deps:
|
||||||
|
package = dep.get("name", "unknown")
|
||||||
|
version = dep.get("version", "unknown")
|
||||||
|
for vuln in dep.get("vulns", []):
|
||||||
|
vuln_id = vuln.get("id", "unknown")
|
||||||
|
aliases = vuln.get("aliases", []) or []
|
||||||
|
severity = _normalize_severity(vuln.get("severity"))
|
||||||
|
if severity == "unknown":
|
||||||
|
severity = "high" # conservative default for policy safety
|
||||||
|
yield {
|
||||||
|
"package": package,
|
||||||
|
"version": version,
|
||||||
|
"id": vuln_id,
|
||||||
|
"aliases": aliases,
|
||||||
|
"severity": severity,
|
||||||
|
"fix_versions": vuln.get("fix_versions", []),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _is_allowlisted(finding: dict, allowlist: list[dict]) -> bool:
|
||||||
|
ids = {finding["id"], *finding["aliases"]}
|
||||||
|
pkg = finding["package"]
|
||||||
|
for entry in allowlist:
|
||||||
|
entry_pkg = entry.get("package")
|
||||||
|
if entry["id"] in ids and (not entry_pkg or entry_pkg == pkg):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """CI gate entry point: return 1 on unresolved HIGH/CRITICAL findings.

    Reads a pip-audit JSON report and an exception allowlist, prints a
    severity summary for the CI log, and returns the process exit code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--report", required=True, help="Path to pip-audit JSON report")
    parser.add_argument("--allowlist", required=True, help="Path to allowlist JSON")
    args = parser.parse_args()

    report_path = Path(args.report)
    allowlist_path = Path(args.allowlist)
    if not report_path.exists():
        print(f"[pip-audit-gate] Missing report: {report_path}")
        return 1

    report = json.loads(report_path.read_text(encoding="utf-8"))
    allowlist, allowlist_errors = _load_allowlist(allowlist_path)
    # Structural or expiry problems in the allowlist itself fail the gate:
    # a broken exception file must never silently disable the policy.
    if allowlist_errors:
        print("[pip-audit-gate] Allowlist validation failed:")
        for err in allowlist_errors:
            print(f"  - {err}")
        return 1

    unresolved_blocking: list[dict] = []
    # Per-severity counters. "unknown" is tracked for completeness even
    # though _iter_findings upgrades unknown severities to "high" before
    # yielding, so it normally stays 0.
    summary = {"critical": 0, "high": 0, "medium": 0, "low": 0, "unknown": 0}
    ignored = 0  # findings suppressed by an active allowlist entry

    for finding in _iter_findings(report):
        sev = finding["severity"]
        summary[sev] = summary.get(sev, 0) + 1
        # Allowlisted findings are counted but never block the build.
        if _is_allowlisted(finding, allowlist):
            ignored += 1
            continue
        if sev in BLOCKING_SEVERITIES:
            unresolved_blocking.append(finding)

    print("[pip-audit-gate] Summary:")
    print(
        f"  CRITICAL={summary['critical']} HIGH={summary['high']} "
        f"MEDIUM={summary['medium']} LOW={summary['low']} ALLOWLISTED={ignored}"
    )

    if unresolved_blocking:
        # One line per blocking finding so the CI log is actionable.
        print("[pip-audit-gate] Blocking vulnerabilities found:")
        for f in unresolved_blocking:
            aliases = ", ".join(f["aliases"]) if f["aliases"] else "-"
            fixes = ", ".join(f["fix_versions"]) if f["fix_versions"] else "-"
            print(
                f"  - {f['severity'].upper()} {f['package']}=={f['version']} "
                f"id={f['id']} aliases=[{aliases}] fixes=[{fixes}]"
            )
        return 1

    print("[pip-audit-gate] No unresolved HIGH/CRITICAL vulnerabilities.")
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Propagate the gate result as the process exit code for CI.
    sys.exit(main())
|
||||||
153
backend/tests/e2e/test_api_smoke.py
Normal file
153
backend/tests/e2e/test_api_smoke.py
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
import asyncio
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timedelta, timezone
|
||||||
|
from uuid import uuid4
|
||||||
|
|
||||||
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
|
from app.core.db import SessionLocal
|
||||||
|
from app.main import app
|
||||||
|
from app.models.models import Metric
|
||||||
|
|
||||||
|
|
||||||
|
def _admin_credentials() -> tuple[str, str]:
|
||||||
|
return (
|
||||||
|
os.getenv("INIT_ADMIN_EMAIL", "admin@example.com"),
|
||||||
|
os.getenv("INIT_ADMIN_PASSWORD", "ChangeMe123!"),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _auth_headers(access_token: str) -> dict[str, str]:
|
||||||
|
return {"Authorization": f"Bearer {access_token}"}
|
||||||
|
|
||||||
|
|
||||||
|
async def _insert_metric(target_id: int, metric_name: str, value: float) -> None:
    """Insert one metric sample for a target directly through the DB session.

    Seeds data for the metrics read API without waiting for a real polling
    cycle. Labels are left empty; the timestamp is the current UTC time.
    """
    sample = Metric(
        target_id=target_id,
        ts=datetime.now(timezone.utc),
        metric_name=metric_name,
        value=value,
        labels={},
    )
    async with SessionLocal() as db:
        db.add(sample)
        await db.commit()
|
||||||
|
|
||||||
|
|
||||||
|
def test_core_api_smoke_suite() -> None:
    """End-to-end smoke test over the core API surface.

    Covers auth (login/me), target CRUD, metric reads, alert status, and
    admin user CRUD. Names are made unique per run so the suite can execute
    repeatedly against the same database.

    Fix: created fixtures (target, user) are now tracked and removed in a
    ``finally`` block — previously cleanup only ran when every assertion
    passed, so any mid-test failure leaked rows into the shared database.
    """
    admin_email, admin_password = _admin_credentials()
    unique = uuid4().hex[:8]
    target_name = f"smoke-target-{unique}"
    user_email = f"smoke-user-{unique}@example.com"

    with TestClient(app) as client:
        # Auth: login
        login_res = client.post(
            "/api/v1/auth/login",
            json={"email": admin_email, "password": admin_password},
        )
        assert login_res.status_code == 200, login_res.text
        tokens = login_res.json()
        assert tokens.get("access_token")
        assert tokens.get("refresh_token")
        headers = _auth_headers(tokens["access_token"])

        # Auth: me
        me_res = client.get("/api/v1/me", headers=headers)
        assert me_res.status_code == 200, me_res.text
        assert me_res.json()["email"] == admin_email

        # Track created fixtures so a mid-test failure does not leak them.
        target_id = None
        created_user_id = None
        try:
            # Targets: create
            create_target_res = client.post(
                "/api/v1/targets",
                headers=headers,
                json={
                    "name": target_name,
                    "host": "127.0.0.1",
                    "port": 5432,
                    "dbname": "postgres",
                    "username": "postgres",
                    "password": "postgres",
                    "sslmode": "disable",
                    "use_pg_stat_statements": False,
                    "owner_user_ids": [],
                    "tags": {"suite": "e2e-smoke"},
                },
            )
            assert create_target_res.status_code == 201, create_target_res.text
            target = create_target_res.json()
            target_id = target["id"]

            # Targets: list/get/update
            list_targets_res = client.get("/api/v1/targets", headers=headers)
            assert list_targets_res.status_code == 200, list_targets_res.text
            assert any(item["id"] == target_id for item in list_targets_res.json())

            get_target_res = client.get(f"/api/v1/targets/{target_id}", headers=headers)
            assert get_target_res.status_code == 200, get_target_res.text

            update_target_res = client.put(
                f"/api/v1/targets/{target_id}",
                headers=headers,
                json={"name": f"{target_name}-updated"},
            )
            assert update_target_res.status_code == 200, update_target_res.text
            assert update_target_res.json()["name"].endswith("-updated")

            # Metrics access: seed one sample, then read it back via the API.
            asyncio.run(_insert_metric(target_id, "connections_total", 7.0))
            now = datetime.now(timezone.utc)
            from_ts = (now - timedelta(minutes=5)).isoformat()
            to_ts = (now + timedelta(minutes=5)).isoformat()
            metrics_res = client.get(
                f"/api/v1/targets/{target_id}/metrics",
                headers=headers,
                params={"metric": "connections_total", "from": from_ts, "to": to_ts},
            )
            assert metrics_res.status_code == 200, metrics_res.text
            assert isinstance(metrics_res.json(), list)
            assert len(metrics_res.json()) >= 1

            # Alerts status
            alerts_status_res = client.get("/api/v1/alerts/status", headers=headers)
            assert alerts_status_res.status_code == 200, alerts_status_res.text
            payload = alerts_status_res.json()
            assert "warnings" in payload
            assert "alerts" in payload

            # Admin users: list/create/update/delete
            users_res = client.get("/api/v1/admin/users", headers=headers)
            assert users_res.status_code == 200, users_res.text
            assert isinstance(users_res.json(), list)

            create_user_res = client.post(
                "/api/v1/admin/users",
                headers=headers,
                json={
                    "email": user_email,
                    "first_name": "Smoke",
                    "last_name": "User",
                    "password": "SmokePass123!",
                    "role": "viewer",
                },
            )
            assert create_user_res.status_code == 201, create_user_res.text
            created_user_id = create_user_res.json()["id"]

            update_user_res = client.put(
                f"/api/v1/admin/users/{created_user_id}",
                headers=headers,
                json={"role": "operator", "first_name": "SmokeUpdated"},
            )
            assert update_user_res.status_code == 200, update_user_res.text
            assert update_user_res.json()["role"] == "operator"

            delete_user_res = client.delete(
                f"/api/v1/admin/users/{created_user_id}", headers=headers
            )
            assert delete_user_res.status_code == 200, delete_user_res.text
            assert delete_user_res.json().get("status") == "deleted"
            created_user_id = None  # deleted via the API; no cleanup needed

            # Cleanup target (asserted as part of the API surface)
            delete_target_res = client.delete(f"/api/v1/targets/{target_id}", headers=headers)
            assert delete_target_res.status_code == 200, delete_target_res.text
            assert delete_target_res.json().get("status") == "deleted"
            target_id = None  # deleted via the API; no cleanup needed
        finally:
            # Best-effort removal of anything a failed run left behind.
            # Responses are intentionally not asserted here so cleanup does
            # not mask the original test failure.
            if created_user_id is not None:
                client.delete(f"/api/v1/admin/users/{created_user_id}", headers=headers)
            if target_id is not None:
                client.delete(f"/api/v1/targets/{target_id}", headers=headers)
|
||||||
@@ -54,7 +54,7 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
- backend
|
- backend
|
||||||
ports:
|
ports:
|
||||||
- "${FRONTEND_PORT}:80"
|
- "${FRONTEND_PORT}:8080"
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
pg_data:
|
pg_data:
|
||||||
|
|||||||
78
docs/deployment/proxy-production-profile.md
Normal file
78
docs/deployment/proxy-production-profile.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
# Production Proxy Profile (HTTPS)
|
||||||
|
|
||||||
|
This profile defines a secure and repeatable NexaPG deployment behind a reverse proxy.
|
||||||
|
|
||||||
|
## Included Profile Files
|
||||||
|
|
||||||
|
- `ops/profiles/prod/.env.production.example`
|
||||||
|
- `ops/profiles/prod/nginx/nexapg.conf`
|
||||||
|
|
||||||
|
## CORS Recommendations by Environment
|
||||||
|
|
||||||
|
| Environment | Recommended `CORS_ORIGINS` | Notes |
|
||||||
|
|---|---|---|
|
||||||
|
| `dev` | `*` or local explicit origins | `*` is acceptable only for local/dev usage. |
|
||||||
|
| `staging` | Exact staging UI origins | Example: `https://staging-monitor.example.com` |
|
||||||
|
| `prod` | Exact production UI origin(s) only | No wildcard; use comma-separated HTTPS origins if needed. |
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
```env
|
||||||
|
# dev only
|
||||||
|
CORS_ORIGINS=*
|
||||||
|
|
||||||
|
# staging
|
||||||
|
CORS_ORIGINS=https://staging-monitor.example.com
|
||||||
|
|
||||||
|
# prod
|
||||||
|
CORS_ORIGINS=https://monitor.example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
## Reverse Proxy Requirements
|
||||||
|
|
||||||
|
For stable auth, CORS, and request context handling, forward these headers to backend:
|
||||||
|
|
||||||
|
- `Host`
|
||||||
|
- `X-Real-IP`
|
||||||
|
- `X-Forwarded-For`
|
||||||
|
- `X-Forwarded-Proto`
|
||||||
|
- `X-Forwarded-Host`
|
||||||
|
- `X-Forwarded-Port`
|
||||||
|
|
||||||
|
Also forward API paths:
|
||||||
|
|
||||||
|
- `/api/` -> backend service (`:8000`)
|
||||||
|
|
||||||
|
## Mixed-Content Prevention
|
||||||
|
|
||||||
|
NexaPG frontend is designed to avoid mixed-content in HTTPS mode:
|
||||||
|
|
||||||
|
- Build/runtime default API base is relative (`/api/v1`)
|
||||||
|
- `frontend/src/api.js` upgrades `http` API URL to `https` when page runs on HTTPS
|
||||||
|
|
||||||
|
Recommended production setting:
|
||||||
|
|
||||||
|
```env
|
||||||
|
VITE_API_URL=/api/v1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Validation Checklist
|
||||||
|
|
||||||
|
1. Open app over HTTPS and verify:
|
||||||
|
- login request is `https://.../api/v1/auth/login`
|
||||||
|
- no browser mixed-content errors in console
|
||||||
|
2. Verify CORS behavior:
|
||||||
|
- allowed origin works
|
||||||
|
- unknown origin is blocked
|
||||||
|
3. Verify backend receives forwarded protocol:
|
||||||
|
- proxied responses succeed with no redirect/proto issues
|
||||||
|
|
||||||
|
## CI Validation
|
||||||
|
|
||||||
|
`Proxy Profile Validation` workflow runs static guardrail checks:
|
||||||
|
|
||||||
|
- relative `VITE_API_URL` default
|
||||||
|
- required API proxy path in frontend NGINX config
|
||||||
|
- required forwarded headers
|
||||||
|
- HTTPS mixed-content guard in frontend API resolver
|
||||||
|
- production profile forbids wildcard CORS
|
||||||
53
docs/security/dependency-exceptions.md
Normal file
53
docs/security/dependency-exceptions.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# Dependency Security Exception Flow (pip-audit)
|
||||||
|
|
||||||
|
This document defines the auditable exception process for Python dependency vulnerabilities.
|
||||||
|
|
||||||
|
## Policy
|
||||||
|
|
||||||
|
- CI blocks unresolved `HIGH` and `CRITICAL` dependency vulnerabilities.
|
||||||
|
- If a vulnerability does not provide severity metadata, it is treated as `HIGH` by policy.
|
||||||
|
- Temporary exceptions are allowed only through `ops/security/pip-audit-allowlist.json`.
|
||||||
|
|
||||||
|
## Allowlist Location
|
||||||
|
|
||||||
|
- File: `ops/security/pip-audit-allowlist.json`
|
||||||
|
- Format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"entries": [
|
||||||
|
{
|
||||||
|
"id": "CVE-2026-12345",
|
||||||
|
"package": "example-package",
|
||||||
|
"reason": "Upstream fix not released yet",
|
||||||
|
"approved_by": "security-owner",
|
||||||
|
"issue": "NX-202",
|
||||||
|
"expires_on": "2026-12-31"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Required Fields
|
||||||
|
|
||||||
|
- `id`: Vulnerability ID (`CVE-*`, `GHSA-*`, or advisory ID)
|
||||||
|
- `reason`: Why exception is necessary
|
||||||
|
- `approved_by`: Approver identity
|
||||||
|
- `issue`: Tracking issue/ticket
|
||||||
|
- `expires_on`: Expiry date in `YYYY-MM-DD`
|
||||||
|
|
||||||
|
Optional:
|
||||||
|
|
||||||
|
- `package`: Restrict exception to one dependency package
|
||||||
|
|
||||||
|
## Rules
|
||||||
|
|
||||||
|
- Expired allowlist entries fail CI.
|
||||||
|
- Missing required fields fail CI.
|
||||||
|
- Exceptions must be time-limited and linked to a tracking issue.
|
||||||
|
- Removing an exception is required once an upstream fix is available.
|
||||||
|
|
||||||
|
## Auditability
|
||||||
|
|
||||||
|
- Every exception change is tracked in Git history and code review.
|
||||||
|
- CI logs include blocked vulnerabilities and allowlisted findings counts.
|
||||||
74
docs/security/secret-management.md
Normal file
74
docs/security/secret-management.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# Secret Management (Production)
|
||||||
|
|
||||||
|
This guide defines secure handling for NexaPG secrets in production deployments.
|
||||||
|
|
||||||
|
## In Scope Secrets
|
||||||
|
|
||||||
|
- `JWT_SECRET_KEY`
|
||||||
|
- `ENCRYPTION_KEY`
|
||||||
|
- `DB_PASSWORD`
|
||||||
|
- SMTP credentials (configured in Admin Settings, encrypted at rest)
|
||||||
|
|
||||||
|
## Do / Don't
|
||||||
|
|
||||||
|
## Do
|
||||||
|
|
||||||
|
- Use an external secret source (Vault, cloud secret manager, orchestrator secrets, or CI/CD secret injection).
|
||||||
|
- Keep secrets out of Git history and out of image layers.
|
||||||
|
- Use strong random values:
|
||||||
|
  - JWT secret: at least 32 random bytes generated by a CSPRNG
|
||||||
|
- Fernet key: generated via `Fernet.generate_key()`
|
||||||
|
- Restrict access to runtime secrets (least privilege).
|
||||||
|
- Rotate secrets on schedule and on incident.
|
||||||
|
- Store production `.env` with strict permissions if file-based injection is used:
|
||||||
|
- owner-only read/write (e.g., `chmod 600 .env`)
|
||||||
|
- Audit who can read/update secrets in your deployment platform.
|
||||||
|
|
||||||
|
## Don't
|
||||||
|
|
||||||
|
- Do **not** hardcode secrets in source code.
|
||||||
|
- Do **not** commit `.env` with real values.
|
||||||
|
- Do **not** bake production secrets into Dockerfiles or image build args.
|
||||||
|
- Do **not** share secrets in tickets, chat logs, or CI console output.
|
||||||
|
- Do **not** reuse the same secrets between environments.
|
||||||
|
|
||||||
|
## Recommended Secret Providers
|
||||||
|
|
||||||
|
Pick one of these models:
|
||||||
|
|
||||||
|
1. Platform/Cloud secrets
|
||||||
|
- AWS Secrets Manager
|
||||||
|
- Azure Key Vault
|
||||||
|
- Google Secret Manager
|
||||||
|
2. HashiCorp Vault
|
||||||
|
3. CI/CD secret injection
|
||||||
|
- Inject as runtime env vars during deployment
|
||||||
|
4. Docker/Kubernetes secrets
|
||||||
|
- Prefer secret mounts or orchestrator-native secret stores
|
||||||
|
|
||||||
|
If you use plain `.env` files, treat them as sensitive artifacts and protect at OS and backup level.
|
||||||
|
|
||||||
|
## Rotation Basics
|
||||||
|
|
||||||
|
Minimum baseline:
|
||||||
|
|
||||||
|
1. `JWT_SECRET_KEY`
|
||||||
|
- Rotate on schedule (e.g., quarterly) and immediately after compromise.
|
||||||
|
- Expect existing sessions/tokens to become invalid after rotation.
|
||||||
|
2. `ENCRYPTION_KEY`
|
||||||
|
- Rotate with planned maintenance.
|
||||||
|
- Re-encrypt stored encrypted values (target passwords, SMTP password) during key transition.
|
||||||
|
3. `DB_PASSWORD`
|
||||||
|
- Rotate service account credentials regularly.
|
||||||
|
- Apply password changes in DB and deployment config atomically.
|
||||||
|
4. SMTP credentials
|
||||||
|
- Use dedicated sender account/app password.
|
||||||
|
- Rotate regularly and after provider-side security alerts.
|
||||||
|
|
||||||
|
## Operational Checklist
|
||||||
|
|
||||||
|
- [ ] No production secret in repository files.
|
||||||
|
- [ ] No production secret in container image metadata or build args.
|
||||||
|
- [ ] Runtime secret source documented for your environment.
|
||||||
|
- [ ] Secret rotation owner and schedule defined.
|
||||||
|
- [ ] Incident runbook includes emergency rotation steps.
|
||||||
@@ -7,9 +7,14 @@ ARG VITE_API_URL=/api/v1
|
|||||||
ENV VITE_API_URL=${VITE_API_URL}
|
ENV VITE_API_URL=${VITE_API_URL}
|
||||||
RUN npm run build
|
RUN npm run build
|
||||||
|
|
||||||
FROM nginx:1.29-alpine-slim
|
FROM nginx:1-alpine-slim
|
||||||
RUN apk upgrade --no-cache
|
RUN apk upgrade --no-cache \
|
||||||
|
&& mkdir -p /var/cache/nginx /var/run /var/log/nginx /tmp/nginx \
|
||||||
|
&& chown -R nginx:nginx /var/cache/nginx /var/run /var/log/nginx /tmp/nginx \
|
||||||
|
&& sed -i 's#pid[[:space:]]\+/run/nginx.pid;#pid /tmp/nginx/nginx.pid;#' /etc/nginx/nginx.conf \
|
||||||
|
&& sed -i 's#pid[[:space:]]\+/var/run/nginx.pid;#pid /tmp/nginx/nginx.pid;#' /etc/nginx/nginx.conf
|
||||||
COPY nginx.conf /etc/nginx/conf.d/default.conf
|
COPY nginx.conf /etc/nginx/conf.d/default.conf
|
||||||
COPY --from=build /app/dist /usr/share/nginx/html
|
COPY --from=build /app/dist /usr/share/nginx/html
|
||||||
EXPOSE 80
|
USER 101
|
||||||
|
EXPOSE 8080
|
||||||
HEALTHCHECK --interval=30s --timeout=3s --retries=5 CMD nginx -t || exit 1
|
HEALTHCHECK --interval=30s --timeout=3s --retries=5 CMD nginx -t || exit 1
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
server {
|
server {
|
||||||
listen 80;
|
listen 8080;
|
||||||
server_name _;
|
server_name _;
|
||||||
|
|
||||||
root /usr/share/nginx/html;
|
root /usr/share/nginx/html;
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ LOG_LEVEL=INFO
|
|||||||
# Core Database (internal metadata DB)
|
# Core Database (internal metadata DB)
|
||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Database that stores users, targets, metrics, query stats, and audit logs.
|
# Database that stores users, targets, metrics, query stats, and audit logs.
|
||||||
|
# DEV default only. Use strong unique credentials in production.
|
||||||
DB_NAME=nexapg
|
DB_NAME=nexapg
|
||||||
DB_USER=nexapg
|
DB_USER=nexapg
|
||||||
DB_PASSWORD=nexapg
|
DB_PASSWORD=nexapg
|
||||||
@@ -23,7 +24,7 @@ DB_PORT=5433
|
|||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Host port mapped to backend container port 8000.
|
# Host port mapped to backend container port 8000.
|
||||||
BACKEND_PORT=8000
|
BACKEND_PORT=8000
|
||||||
# JWT signing secret. Change this in every non-local environment.
|
# JWT signing secret. Never hardcode in source. Rotate regularly.
|
||||||
JWT_SECRET_KEY=change_this_super_secret
|
JWT_SECRET_KEY=change_this_super_secret
|
||||||
JWT_ALGORITHM=HS256
|
JWT_ALGORITHM=HS256
|
||||||
# Access token lifetime in minutes.
|
# Access token lifetime in minutes.
|
||||||
@@ -31,6 +32,7 @@ JWT_ACCESS_TOKEN_MINUTES=15
|
|||||||
# Refresh token lifetime in minutes (10080 = 7 days).
|
# Refresh token lifetime in minutes (10080 = 7 days).
|
||||||
JWT_REFRESH_TOKEN_MINUTES=10080
|
JWT_REFRESH_TOKEN_MINUTES=10080
|
||||||
# Key used to encrypt monitored target passwords at rest.
|
# Key used to encrypt monitored target passwords at rest.
|
||||||
|
# Never hardcode in source. Rotate with re-encryption plan.
|
||||||
# Generate with:
|
# Generate with:
|
||||||
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
|
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
|
||||||
ENCRYPTION_KEY=REPLACE_WITH_FERNET_KEY
|
ENCRYPTION_KEY=REPLACE_WITH_FERNET_KEY
|
||||||
@@ -49,7 +51,7 @@ INIT_ADMIN_PASSWORD=ChangeMe123!
|
|||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Frontend
|
# Frontend
|
||||||
# ------------------------------
|
# ------------------------------
|
||||||
# Host port mapped to frontend container port 80.
|
# Host port mapped to frontend container port 8080.
|
||||||
FRONTEND_PORT=5173
|
FRONTEND_PORT=5173
|
||||||
# Base API URL used at frontend build time.
|
# Base API URL used at frontend build time.
|
||||||
# For reverse proxy + SSL, keep this relative to avoid mixed-content issues.
|
# For reverse proxy + SSL, keep this relative to avoid mixed-content issues.
|
||||||
|
|||||||
48
ops/profiles/prod/.env.production.example
Normal file
48
ops/profiles/prod/.env.production.example
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# NexaPG production profile (reverse proxy + HTTPS)
|
||||||
|
# Copy to .env and adjust values for your environment.
|
||||||
|
|
||||||
|
# ------------------------------
|
||||||
|
# Application
|
||||||
|
# ------------------------------
|
||||||
|
APP_NAME=NexaPG Monitor
|
||||||
|
ENVIRONMENT=prod
|
||||||
|
LOG_LEVEL=INFO
|
||||||
|
|
||||||
|
# ------------------------------
|
||||||
|
# Core Database
|
||||||
|
# ------------------------------
|
||||||
|
DB_NAME=nexapg
|
||||||
|
DB_USER=nexapg
|
||||||
|
DB_PASSWORD=change_me
|
||||||
|
DB_PORT=5433
|
||||||
|
|
||||||
|
# ------------------------------
|
||||||
|
# Backend
|
||||||
|
# ------------------------------
|
||||||
|
BACKEND_PORT=8000
|
||||||
|
JWT_SECRET_KEY=replace_with_long_random_secret
|
||||||
|
JWT_ALGORITHM=HS256
|
||||||
|
JWT_ACCESS_TOKEN_MINUTES=15
|
||||||
|
JWT_REFRESH_TOKEN_MINUTES=10080
|
||||||
|
ENCRYPTION_KEY=REPLACE_WITH_FERNET_KEY
|
||||||
|
|
||||||
|
# Production CORS:
|
||||||
|
# - no wildcard
|
||||||
|
# - set exact public UI origin(s)
|
||||||
|
CORS_ORIGINS=https://monitor.example.com
|
||||||
|
|
||||||
|
POLL_INTERVAL_SECONDS=30
|
||||||
|
ALERT_ACTIVE_CONNECTION_RATIO_MIN_TOTAL_CONNECTIONS=5
|
||||||
|
ALERT_ROLLBACK_RATIO_WINDOW_MINUTES=15
|
||||||
|
ALERT_ROLLBACK_RATIO_MIN_TOTAL_TRANSACTIONS=100
|
||||||
|
ALERT_ROLLBACK_RATIO_MIN_ROLLBACKS=10
|
||||||
|
|
||||||
|
INIT_ADMIN_EMAIL=admin@example.com
|
||||||
|
INIT_ADMIN_PASSWORD=ChangeMe123!
|
||||||
|
|
||||||
|
# ------------------------------
|
||||||
|
# Frontend
|
||||||
|
# ------------------------------
|
||||||
|
# Keep frontend API base relative to avoid HTTPS mixed-content.
|
||||||
|
FRONTEND_PORT=5173
|
||||||
|
VITE_API_URL=/api/v1
|
||||||
49
ops/profiles/prod/nginx/nexapg.conf
Normal file
49
ops/profiles/prod/nginx/nexapg.conf
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# NGINX reverse proxy profile for NexaPG (HTTPS).
|
||||||
|
# Replace monitor.example.com and certificate paths.
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name monitor.example.com;
|
||||||
|
return 301 https://$host$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
server_name monitor.example.com;
|
||||||
|
|
||||||
|
ssl_certificate /etc/letsencrypt/live/monitor.example.com/fullchain.pem;
|
||||||
|
ssl_certificate_key /etc/letsencrypt/live/monitor.example.com/privkey.pem;
|
||||||
|
ssl_session_timeout 1d;
|
||||||
|
ssl_session_cache shared:SSL:10m;
|
||||||
|
ssl_session_tickets off;
|
||||||
|
|
||||||
|
# Baseline security headers
|
||||||
|
add_header X-Content-Type-Options "nosniff" always;
|
||||||
|
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||||
|
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||||
|
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||||
|
|
||||||
|
# Frontend app
|
||||||
|
location / {
|
||||||
|
proxy_pass http://127.0.0.1:5173;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
|
proxy_set_header X-Forwarded-Port $server_port;
|
||||||
|
}
|
||||||
|
|
||||||
|
# API forwarding to backend
|
||||||
|
location /api/ {
|
||||||
|
proxy_pass http://127.0.0.1:8000;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
|
proxy_set_header X-Forwarded-Port $server_port;
|
||||||
|
}
|
||||||
|
}
|
||||||
38
ops/scripts/validate_proxy_profile.sh
Normal file
38
ops/scripts/validate_proxy_profile.sh
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/usr/bin/env bash
# Static guardrail checks for the reverse-proxy production profile.
# Fails fast on the first file that is missing a required pattern.
set -euo pipefail

echo "[proxy-profile] validating reverse-proxy and mixed-content guardrails"
|
||||||
|
|
||||||
|
#######################################
# Assert that a file contains an extended-regex pattern.
# Arguments:
#   $1 - file to scan
#   $2 - grep -E pattern that must match
#   $3 - human-readable failure message
# Outputs:
#   Diagnostics to stderr on failure.
# Returns:
#   0 when the pattern matches; exits 1 otherwise.
#######################################
require_pattern() {
  local file="$1"
  local pattern="$2"
  local message="$3"
  # Distinguish a missing file from a missing pattern so the CI log points
  # at the real problem instead of reporting a false pattern failure.
  if [[ ! -f "$file" ]]; then
    echo "[proxy-profile] FAIL: file not found ($file)" >&2
    exit 1
  fi
  # '--' protects patterns that begin with a dash from option parsing.
  if ! grep -Eq -- "$pattern" "$file"; then
    echo "[proxy-profile] FAIL: $message ($file)" >&2
    exit 1
  fi
}
|
||||||
|
|
||||||
|
# Frontend should default to relative API base in container builds.
require_pattern "frontend/Dockerfile" "ARG VITE_API_URL=/api/v1" \
  "VITE_API_URL default must be relative (/api/v1)"

# Frontend runtime proxy should forward /api with forward headers.
require_pattern "frontend/nginx.conf" "location /api/" \
  "frontend nginx must proxy /api/"
require_pattern "frontend/nginx.conf" "proxy_set_header X-Forwarded-Proto" \
  "frontend nginx must set X-Forwarded-Proto"
require_pattern "frontend/nginx.conf" "proxy_set_header X-Forwarded-For" \
  "frontend nginx must set X-Forwarded-For"
require_pattern "frontend/nginx.conf" "proxy_set_header Host" \
  "frontend nginx must forward Host"

# Mixed-content guard in frontend API client.
# NOTE(review): this regex assumes the guard condition sits on a single
# line of frontend/src/api.js -- confirm it survives a reformat of that file.
require_pattern "frontend/src/api.js" "window\\.location\\.protocol === \"https:\".*parsed\\.protocol === \"http:\"" \
  "frontend api client must contain HTTPS mixed-content protection"

# Production profile must not use wildcard CORS.
require_pattern "ops/profiles/prod/.env.production.example" "^CORS_ORIGINS=https://[^*]+$" \
  "production profile must use explicit HTTPS CORS origins"

echo "[proxy-profile] PASS"
|
||||||
3
ops/security/pip-audit-allowlist.json
Normal file
3
ops/security/pip-audit-allowlist.json
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
"entries": []
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user