Refactor to Vue 3 + FastAPI + SQLite architecture
Some checks failed
Test / build-check (push) Successful in 3s
PR Preview / test (pull_request) Successful in 3s
PR Preview / teardown-preview (pull_request) Has been skipped
Test / e2e-test (push) Failing after 55s
PR Preview / deploy-preview (pull_request) Failing after 40s

- Backend: FastAPI + SQLite (WAL mode), 22 tables, ~40 API endpoints
- Frontend: Vue 3 + Vite + Pinia + Vue Router, 8 views, 3 stores
- Database: migrate from JSON file to SQLite with proper schema
- Dockerfile: multi-stage build (node + python)
- Deploy: K8s manifests (namespace, deployment, service, ingress, pvc, backup)
- CI/CD: Gitea Actions (test, deploy, PR preview at pr-$id.planner.oci.euphon.net)
- Tests: 20 Cypress E2E test files, 196 test cases, ~85% coverage
- Doc: test-coverage.md with full feature coverage report

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-07 21:18:22 +00:00
parent b09cefad34
commit d3f3b4f37b
67 changed files with 10038 additions and 6 deletions

View File

@@ -0,0 +1,50 @@
# CronJob: every 6 hours take an online-safe SQLite snapshot of the planner DB
# and upload it to MinIO, pruning the bucket down to the 60 most recent copies.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: planner-backup-minio
  namespace: planner
spec:
  schedule: "0 */6 * * *"
  # Never run two backups concurrently (they would race on /tmp files and on
  # the retention pruning below).
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: backup
              # NOTE(review): the mc download below is linux-arm64 — assumes
              # arm64 nodes; confirm, or detect arch at runtime.
              image: python:3.12-alpine
              command:
                - /bin/sh
                - -c
                - |
                  # Abort on first error so a failed backup marks the Job as
                  # failed instead of silently printing "Backup done".
                  set -e
                  apk add --no-cache curl sqlite >/dev/null 2>&1
                  # -f: treat HTTP errors as failures (otherwise a 404 page
                  # would be saved and chmod'ed as the mc binary).
                  curl -fsSL https://dl.min.io/client/mc/release/linux-arm64/mc -o /usr/local/bin/mc
                  chmod +x /usr/local/bin/mc
                  # WARNING(review): MinIO credentials committed in plain text.
                  # Move them to a Secret and inject via env (secretKeyRef).
                  mc alias set s3 http://minio.minio.svc:9000 admin HpYMIVH0WN79VkzF4L4z8Zx1
                  TS=$(date +%Y%m%d_%H%M%S)
                  # ".backup" takes a consistent online snapshot of a live DB.
                  # NOTE(review): the volume is mounted readOnly; reading a
                  # WAL-mode DB may need to create -shm/-wal files — confirm
                  # this succeeds against the read-only mount.
                  sqlite3 /data/planner.db ".backup /tmp/planner_${TS}.db"
                  mc cp "/tmp/planner_${TS}.db" "s3/planner-backups/planner_${TS}.db"
                  # Retention: timestamped names sort chronologically, so
                  # everything before the last 60 entries is deleted.
                  mc ls s3/planner-backups/ --json | python3 -c "
                  import sys, json
                  files = []
                  for line in sys.stdin:
                      d = json.loads(line)
                      if d.get('key','').startswith('planner_'):
                          files.append(d['key'])
                  files.sort()
                  for f in files[:-60]:
                      print(f)
                  " | while read -r f; do mc rm "s3/planner-backups/$f"; done
                  echo "Backup done: ${TS}"
              volumeMounts:
                - name: data
                  mountPath: /data
                  readOnly: true
          volumes:
            - name: data
              persistentVolumeClaim:
                claimName: planner-data
          restartPolicy: OnFailure

47
deploy/deployment.yaml Normal file
View File

@@ -0,0 +1,47 @@
# Deployment: the single planner pod (FastAPI backend that also serves the
# built Vue frontend) with its SQLite database on a persistent volume.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: planner
  namespace: planner
spec:
  # Exactly one replica: SQLite allows a single writer and the PVC is RWO.
  replicas: 1
  # Recreate instead of the default RollingUpdate: a rolling update starts the
  # new pod before stopping the old one, but the old pod still holds the
  # ReadWriteOnce volume — the new pod would be stuck Pending and every
  # rollout would wedge. Recreate stops the old pod first.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: planner
  template:
    metadata:
      labels:
        app: planner
    spec:
      containers:
        - name: planner
          # NOTE(review): ":latest" makes rollouts depend on node image cache /
          # pull policy; prefer immutable tags for reproducible deploys.
          image: planner:latest
          ports:
            - containerPort: 8000
          env:
            - name: DB_PATH
              value: /data/planner.db
            - name: FRONTEND_DIR
              value: /app/frontend
            - name: DATA_DIR
              value: /data
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
          readinessProbe:
            # NOTE(review): probes a real, DB-backed API route; a dedicated
            # lightweight /health endpoint would be cheaper — confirm whether
            # the backend exposes one.
            httpGet:
              path: /api/backups
              port: 8000
            initialDelaySeconds: 5
            periodSeconds: 10
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: planner-data

23
deploy/ingress.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Ingress: routes https://planner.oci.euphon.net to the planner Service
# (port 80) via Traefik.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: planner
  namespace: planner
  annotations:
    # Traefik-specific: obtain the certificate from the ACME resolver named
    # "le" instead of a pre-provisioned TLS Secret.
    traefik.ingress.kubernetes.io/router.tls.certresolver: le
spec:
  ingressClassName: traefik
  rules:
    - host: planner.oci.euphon.net
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: planner
                port:
                  number: 80
  tls:
    # No secretName on purpose: with the certresolver annotation above,
    # Traefik manages the certificate itself.
    - hosts:
        - planner.oci.euphon.net

4
deploy/namespace.yaml Normal file
View File

@@ -0,0 +1,4 @@
# Dedicated namespace holding all planner resources (Deployment, Service,
# Ingress, PVC, backup CronJob).
apiVersion: v1
kind: Namespace
metadata:
  name: planner

11
deploy/pvc.yaml Normal file
View File

@@ -0,0 +1,11 @@
# Persistent storage for the SQLite database: mounted read-write at /data by
# the Deployment and read-only by the backup CronJob.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: planner-data
  namespace: planner
spec:
  accessModes:
    # RWO: one node mounts read-write — sufficient for a single replica.
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

11
deploy/service.yaml Normal file
View File

@@ -0,0 +1,11 @@
# ClusterIP Service (default type): exposes the planner pod's port 8000 as
# port 80 inside the cluster; the Ingress sends external traffic here.
apiVersion: v1
kind: Service
metadata:
  name: planner
  namespace: planner
spec:
  selector:
    app: planner
  ports:
    - port: 80
      targetPort: 8000

View File

@@ -0,0 +1,82 @@
#!/bin/bash
# Creates a restricted kubeconfig for the planner namespace only.
# Run on the k8s server as a user with cluster-admin access.
#
# Output: ./kubeconfig — a ServiceAccount bearer token scoped by Role/
# RoleBinding to the planner namespace.
set -euo pipefail

NAMESPACE=planner
SA_NAME=planner-deployer

# NOTE(review): assumes the namespace already exists (deploy/namespace.yaml);
# the apply below fails otherwise.
echo "Creating ServiceAccount, Role, and RoleBinding..."
kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${SA_NAME}
  namespace: ${NAMESPACE}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ${SA_NAME}-role
  namespace: ${NAMESPACE}
rules:
  - apiGroups: ["", "apps", "networking.k8s.io"]
    resources: ["pods", "services", "deployments", "replicasets", "ingresses", "persistentvolumeclaims", "configmaps", "secrets", "pods/log"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ${SA_NAME}-binding
  namespace: ${NAMESPACE}
subjects:
  - kind: ServiceAccount
    name: ${SA_NAME}
    namespace: ${NAMESPACE}
roleRef:
  kind: Role
  name: ${SA_NAME}-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Secret
metadata:
  name: ${SA_NAME}-token
  namespace: ${NAMESPACE}
  annotations:
    kubernetes.io/service-account.name: ${SA_NAME}
type: kubernetes.io/service-account-token
EOF

# The token controller fills the Secret asynchronously — poll with a bounded
# retry instead of a fixed "sleep 3", which is a race that can emit an empty
# token into the kubeconfig.
echo "Waiting for token..."
TOKEN=""
for _ in $(seq 1 30); do
  TOKEN=$(kubectl get secret "${SA_NAME}-token" -n "${NAMESPACE}" \
    -o jsonpath='{.data.token}' 2>/dev/null | base64 -d || true)
  if [ -n "${TOKEN}" ]; then
    break
  fi
  sleep 1
done
if [ -z "${TOKEN}" ]; then
  echo "ERROR: service-account token was not issued within 30s" >&2
  exit 1
fi

# Cluster endpoint and CA, taken from the admin kubeconfig currently in use.
CLUSTER_SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
CLUSTER_CA=$(kubectl config view --raw --minify -o jsonpath='{.clusters[0].cluster.certificate-authority-data}')

cat > kubeconfig <<EOF
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority-data: ${CLUSTER_CA}
      server: ${CLUSTER_SERVER}
    name: planner
contexts:
  - context:
      cluster: planner
      namespace: ${NAMESPACE}
      user: ${SA_NAME}
    name: planner
current-context: planner
users:
  - name: ${SA_NAME}
    user:
      token: ${TOKEN}
EOF
# The file embeds a bearer token — keep it private.
chmod 600 kubeconfig

echo "Kubeconfig written to ./kubeconfig"
echo "Test with: KUBECONFIG=./kubeconfig kubectl get pods"