Some checks failed
- Backend: FastAPI + SQLite (WAL mode), 22 tables, ~40 API endpoints - Frontend: Vue 3 + Vite + Pinia + Vue Router, 8 views, 3 stores - Database: migrate from JSON file to SQLite with proper schema - Dockerfile: multi-stage build (node + python) - Deploy: K8s manifests (namespace, deployment, service, ingress, pvc, backup) - CI/CD: Gitea Actions (test, deploy, PR preview at pr-$id.planner.oci.euphon.net) - Tests: 20 Cypress E2E test files, 196 test cases, ~85% coverage - Doc: test-coverage.md with full feature coverage report Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
260 lines
7.9 KiB
Python
260 lines
7.9 KiB
Python
#!/usr/bin/env python3
|
|
"""Deploy or teardown a PR preview environment on local k3s.
|
|
|
|
Runs directly on the oci server (where k3s and docker are local).
|
|
|
|
Usage:
|
|
python3 scripts/deploy-preview.py deploy <PR_ID>
|
|
python3 scripts/deploy-preview.py teardown <PR_ID>
|
|
python3 scripts/deploy-preview.py deploy-prod
|
|
"""
|
|
|
|
import subprocess
|
|
import sys
|
|
import json
|
|
import tempfile
|
|
import textwrap
|
|
from pathlib import Path
|
|
|
|
# Private Docker registry that preview and production images are pushed to.
REGISTRY = "registry.oci.euphon.net"
# Production hostname; previews are served at pr-<id>.<BASE_DOMAIN>.
BASE_DOMAIN = "planner.oci.euphon.net"
# Kubernetes namespace of the production deployment.
PROD_NS = "planner"
# Shared name for the image repo and the Deployment/Service/Ingress resources.
APP_NAME = "planner"
|
|
|
|
|
|
def run(cmd: list[str] | str, *, check=True, capture=False) -> subprocess.CompletedProcess:
|
|
if isinstance(cmd, str):
|
|
cmd = ["sh", "-c", cmd]
|
|
display = " ".join(cmd) if isinstance(cmd, list) else cmd
|
|
print(f" $ {display}")
|
|
r = subprocess.run(cmd, text=True, capture_output=capture)
|
|
if capture and r.stdout.strip():
|
|
for line in r.stdout.strip().splitlines()[:5]:
|
|
print(f" {line}")
|
|
if check and r.returncode != 0:
|
|
print(f" FAILED (exit {r.returncode})")
|
|
if capture and r.stderr.strip():
|
|
print(f" {r.stderr.strip()[:200]}")
|
|
sys.exit(1)
|
|
return r
|
|
|
|
|
|
def kubectl(*args, capture=False, check=True) -> subprocess.CompletedProcess:
    """Invoke ``sudo k3s kubectl`` with the given arguments via run()."""
    full_cmd = ["sudo", "k3s", "kubectl"]
    full_cmd.extend(args)
    return run(full_cmd, capture=capture, check=check)
|
|
|
|
|
|
def docker(*args, check=True) -> subprocess.CompletedProcess:
    """Invoke the local docker CLI with the given arguments via run()."""
    return run(["docker"] + list(args), check=check)
|
|
|
|
|
|
def write_temp(content: str, suffix=".yaml") -> Path:
    """Write *content* to a fresh temporary file and return its path.

    The file is NOT deleted automatically; callers unlink it when done.
    """
    with tempfile.NamedTemporaryFile(mode="w", suffix=suffix, delete=False) as tmp:
        tmp.write(content)
        tmp_name = tmp.name
    return Path(tmp_name)
|
|
|
|
|
|
# ─── Deploy Preview ─────────────────────────────────────
|
|
|
|
def deploy(pr_id: str):
    """Build and deploy a PR preview environment at pr-<PR_ID>.<BASE_DOMAIN>.

    Five steps: snapshot the production DB into the build context, build and
    push a per-PR image, create an isolated namespace (plus registry secret),
    apply Deployment/Service/Ingress manifests, then wait for the rollout.
    """
    ns = f"planner-pr-{pr_id}"
    host = f"pr-{pr_id}.{BASE_DOMAIN}"
    image = f"{REGISTRY}/{APP_NAME}:pr-{pr_id}"

    print(f"\n{'='*60}")
    print(f" Deploying: https://{host}")
    print(f" Namespace: {ns}")
    print(f"{'='*60}\n")

    # 1. Copy production DB into build context
    print("[1/5] Copying production database...")
    Path("data").mkdir(exist_ok=True)
    # Pick one Running production pod to copy the live SQLite file from;
    # check=False so "no pods" yields an empty string instead of exiting.
    prod_pod = kubectl(
        "get", "pods", "-n", PROD_NS,
        "-l", f"app={APP_NAME}",
        "--field-selector=status.phase=Running",
        "-o", "jsonpath={.items[0].metadata.name}",
        capture=True, check=False
    ).stdout.strip()

    if prod_pod:
        # NOTE(review): this copies only planner.db; if the DB runs in WAL
        # mode, a -wal/-shm pair may hold unflushed writes — confirm a
        # checkpoint happens before relying on this snapshot being complete.
        kubectl("cp", f"{PROD_NS}/{prod_pod}:/data/planner.db", "data/planner.db")
    else:
        # Fall back to an empty file so the Docker COPY step still succeeds.
        print(" WARNING: No running prod pod, using empty DB")
        Path("data/planner.db").touch()

    # 2. Build and push image
    print("[2/5] Building Docker image...")
    # Multi-stage Dockerfile: build the frontend with node, then serve the
    # built assets (and the copied DB) from a slim Python image.
    dockerfile = textwrap.dedent("""\
        FROM node:20-slim AS frontend-build
        WORKDIR /build
        COPY frontend/package.json frontend/package-lock.json ./
        RUN npm ci
        COPY frontend/ ./
        RUN npm run build

        FROM python:3.12-slim
        WORKDIR /app
        COPY backend/requirements.txt .
        RUN pip install --no-cache-dir -r requirements.txt
        COPY backend/ ./backend/
        COPY --from=frontend-build /build/dist ./frontend/
        COPY data/planner.db /data/planner.db
        ENV DB_PATH=/data/planner.db
        ENV FRONTEND_DIR=/app/frontend
        ENV DATA_DIR=/data
        EXPOSE 8000
        CMD ["uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "8000"]
        """)
    df = write_temp(dockerfile, suffix=".Dockerfile")
    docker("build", "-f", str(df), "-t", image, ".")
    df.unlink()
    docker("push", image)

    # 3. Create namespace + regcred
    print("[3/5] Creating namespace...")
    # create --dry-run piped into apply makes namespace creation idempotent.
    run(f"sudo k3s kubectl create namespace {ns} --dry-run=client -o yaml | sudo k3s kubectl apply -f -")

    # Copy regcred from prod namespace
    r = kubectl("get", "secret", "regcred", "-n", PROD_NS, "-o", "json", capture=True, check=False)
    if r.returncode == 0 and r.stdout.strip():
        secret = json.loads(r.stdout)
        # Replace metadata wholesale: drops prod-specific fields (uid,
        # resourceVersion, ...) so the secret can be applied in the new ns.
        secret["metadata"] = {"name": "regcred", "namespace": ns}
        p = write_temp(json.dumps(secret), suffix=".json")
        kubectl("apply", "-f", str(p))
        p.unlink()

    # 4. Apply manifests
    print("[4/5] Applying K8s resources...")
    # Deployment + Service + Ingress in one multi-document YAML string.
    manifests = textwrap.dedent(f"""\
        apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: {APP_NAME}
          namespace: {ns}
        spec:
          replicas: 1
          selector:
            matchLabels:
              app: {APP_NAME}
          template:
            metadata:
              labels:
                app: {APP_NAME}
            spec:
              imagePullSecrets:
                - name: regcred
              containers:
                - name: app
                  image: {image}
                  imagePullPolicy: Always
                  ports:
                    - containerPort: 8000
                  resources:
                    requests:
                      cpu: 50m
                      memory: 64Mi
                    limits:
                      cpu: 500m
                      memory: 256Mi
        ---
        apiVersion: v1
        kind: Service
        metadata:
          name: {APP_NAME}
          namespace: {ns}
        spec:
          selector:
            app: {APP_NAME}
          ports:
            - port: 80
              targetPort: 8000
        ---
        apiVersion: networking.k8s.io/v1
        kind: Ingress
        metadata:
          name: {APP_NAME}
          namespace: {ns}
          annotations:
            traefik.ingress.kubernetes.io/router.tls.certresolver: le
        spec:
          ingressClassName: traefik
          tls:
            - hosts:
                - {host}
          rules:
            - host: {host}
              http:
                paths:
                  - path: /
                    pathType: Prefix
                    backend:
                      service:
                        name: {APP_NAME}
                        port:
                          number: 80
        """)
    p = write_temp(manifests)
    kubectl("apply", "-f", str(p))
    p.unlink()

    # 5. Restart and wait
    print("[5/5] Restarting deployment...")
    # Restart forces a fresh image pull (imagePullPolicy: Always) on re-deploys.
    kubectl("rollout", "restart", f"deploy/{APP_NAME}", "-n", ns)
    kubectl("rollout", "status", f"deploy/{APP_NAME}", "-n", ns, "--timeout=120s")

    # Cleanup
    # Remove the DB snapshot from the build context; best-effort (check=False).
    run("rm -rf data/planner.db", check=False)

    print(f"\n{'='*60}")
    print(f" Preview live: https://{host}")
    print(f"{'='*60}\n")
|
|
|
|
|
|
# ─── Teardown ────────────────────────────────────────────
|
|
|
|
def teardown(pr_id: str):
    """Remove a PR preview: its namespace and the locally cached image."""
    preview_ns = f"planner-pr-{pr_id}"
    preview_image = f"{REGISTRY}/{APP_NAME}:pr-{pr_id}"

    print(f"\n Tearing down: {preview_ns}")
    # Deleting the namespace tears down every resource deployed into it.
    kubectl("delete", "namespace", preview_ns, "--ignore-not-found")
    # Best effort: the image may already have been removed.
    docker("rmi", preview_image, check=False)
    print(" Done.\n")
|
|
|
|
|
|
# ─── Deploy Production ───────────────────────────────────
|
|
|
|
def deploy_prod():
    """Build, push and roll out the production image tagged :latest."""
    prod_image = f"{REGISTRY}/{APP_NAME}:latest"

    banner = "=" * 60
    print(f"\n{banner}")
    print(f" Deploying production: https://{BASE_DOMAIN}")
    print(f"{banner}\n")

    # Build from the repo root, push, then bounce the prod deployment so it
    # pulls the fresh :latest image, waiting for the rollout to settle.
    docker("build", "-t", prod_image, ".")
    docker("push", prod_image)
    kubectl("rollout", "restart", f"deploy/{APP_NAME}", "-n", PROD_NS)
    kubectl("rollout", "status", f"deploy/{APP_NAME}", "-n", PROD_NS, "--timeout=120s")

    print(f"\n Production deployed: https://{BASE_DOMAIN}\n")
|
|
|
|
|
|
# ─── Main ────────────────────────────────────────────────
|
|
|
|
if __name__ == "__main__":
|
|
if len(sys.argv) < 2:
|
|
print(__doc__)
|
|
sys.exit(1)
|
|
|
|
action = sys.argv[1]
|
|
if action == "deploy" and len(sys.argv) >= 3:
|
|
deploy(sys.argv[2])
|
|
elif action == "teardown" and len(sys.argv) >= 3:
|
|
teardown(sys.argv[2])
|
|
elif action == "deploy-prod":
|
|
deploy_prod()
|
|
else:
|
|
print(__doc__)
|
|
sys.exit(1)
|