Merge pull request 'feature/next_project' (#2) from feature/next_project into main
Some checks are pending
Deploy Home Assistant / deploy (push) Waiting to run
Build juntekim.com / Push-to-juntekim-to-docker-hub (push) Waiting to run
Build juntekim.com / run-on-k8s (push) Blocked by required conditions
Deploy n8n / deploy (push) Waiting to run
Build & Deploy stripe-to-invoice (with DB secrets + migrations) / build (push) Waiting to run
Build & Deploy stripe-to-invoice (with DB secrets + migrations) / Deploy Postgres (PV + PVC + Deployment) (push) Blocked by required conditions
Build & Deploy stripe-to-invoice (with DB secrets + migrations) / Apply runtime secrets (push) Blocked by required conditions
Build & Deploy stripe-to-invoice (with DB secrets + migrations) / Run DB migrations (Atlas) (push) Blocked by required conditions
Build & Deploy stripe-to-invoice (with DB secrets + migrations) / deploy (push) Blocked by required conditions
Terraform Apply / Terraform Apply (push) Waiting to run
Terraform Apply / Terraform Apply - SES (push) Blocked by required conditions

Reviewed-on: #2
This commit is contained in:
kimjunte 2026-03-12 08:03:24 +00:00
commit 1488009f7f
10 changed files with 261 additions and 44 deletions

View file

@ -1,12 +1,14 @@
## Forgejo Backup (TODO)
- [ ] Set up restic CronJob to back up forgejo-pvc (/data) to S3 weekly
- Mount forgejo-pvc read-only in CronJob
- Use restic to snapshot to S3 bucket and put it under repos (need: S3_BUCKET, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, RESTIC_PASSWORD)
- Schedule: daily
- [ ] Forgejo postgres backup via databasus — same bucket, but the key would be `postgres`
- [ ] Test a restore from a restic snapshot
# Lets deploy databasus first since i'm using it to sort my back up strategy first
## Set up runners
## set up docker image registry
## Migrate everything
# Migrate hyprland

View file

@ -14,3 +14,18 @@ output "forgejo_backup_secret_access_key" {
  value     = module.forgejo_backup.iam_secret_access_key
  sensitive = true
}
# S3 bucket + dedicated IAM user consumed by the Databasus backup sidecar
# (see databasus/databasus.yaml), mirroring the forgejo_backup wiring above.
module "databasus_backup" {
  source      = "./modules/databasus_backup"
  bucket_name = "juntekim-databasus-backup"
}

output "databasus_backup_access_key_id" {
  description = "Access key ID for the databasus-backup IAM user"
  value       = module.databasus_backup.iam_access_key_id
}

output "databasus_backup_secret_access_key" {
  description = "Secret access key for the databasus-backup IAM user"
  value       = module.databasus_backup.iam_secret_access_key
  sensitive   = true
}

View file

@ -0,0 +1,39 @@
# S3 bucket that stores Databasus backups. Versioning lets overwritten or
# deleted backup objects be recovered; retention expires objects after 90
# days to cap storage cost.
module "bucket" {
  source             = "../s3_bucket"
  bucket_name        = var.bucket_name
  versioning_enabled = true
  retention_days     = 90
}

# Dedicated IAM user so the backup sidecar's credentials are scoped to this
# bucket only and can be rotated independently of anything else.
resource "aws_iam_user" "databasus_backup" {
  name = "databasus-backup"
}

resource "aws_iam_access_key" "databasus_backup" {
  user = aws_iam_user.databasus_backup.name
}

# Least-privilege inline policy. s3:ListBucket is a bucket-level action and
# applies to the bucket ARN; the object-level actions apply only to keys
# inside the bucket (ARN/*). Splitting the statements avoids granting
# actions against resources they can never match.
resource "aws_iam_user_policy" "databasus_backup" {
  name = "databasus-backup-s3"
  user = aws_iam_user.databasus_backup.name

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid      = "ListBucket"
        Effect   = "Allow"
        Action   = ["s3:ListBucket"]
        Resource = [module.bucket.bucket_arn]
      },
      {
        Sid    = "ObjectReadWrite"
        Effect = "Allow"
        Action = [
          "s3:PutObject",
          "s3:GetObject",
          "s3:DeleteObject"
        ]
        Resource = ["${module.bucket.bucket_arn}/*"]
      }
    ]
  })
}

View file

@ -0,0 +1,16 @@
# Module outputs consumed by the root module (and surfaced as root outputs).
output "bucket_name" {
  description = "Name of the Databasus backup S3 bucket"
  value       = module.bucket.bucket_name
}

output "bucket_arn" {
  description = "ARN of the Databasus backup S3 bucket"
  value       = module.bucket.bucket_arn
}

output "iam_access_key_id" {
  description = "Access key ID for the databasus-backup IAM user"
  value       = aws_iam_access_key.databasus_backup.id
}

output "iam_secret_access_key" {
  description = "Secret access key for the databasus-backup IAM user"
  value       = aws_iam_access_key.databasus_backup.secret
  sensitive   = true
}

View file

@ -0,0 +1,5 @@
# Overridable bucket name; the default matches the value the root module
# passes in, so omitting the argument yields the same bucket.
variable "bucket_name" {
  description = "Name of the S3 bucket for Databasus backups"
  type        = string
  default     = "juntekim-databasus-backup"
}

39
databasus/TODO.md Normal file
View file

@ -0,0 +1,39 @@
# TODO
## Databasus — Migrate to Ceph + AWS Backups
> Files: `databasus/`
### Step 3 — Migrate PVC data (local → ceph)
```bash
# Scale down the app
kubectl scale deployment databasus --replicas=0
# Create new ceph PVC
kubectl apply -f databasus/databasus-storage.yaml
# Run migration job (copies /databasus-data from local PV → ceph PVC)
kubectl apply -f databasus/databasus-migration-job.yaml
kubectl wait --for=condition=complete job/databasus-migration --timeout=120s
# Verify data was copied
kubectl logs job/databasus-migration
```
### Step 4 — Deploy updated app
```bash
kubectl apply -f databasus/databasus-backup-secret.yaml
kubectl apply -f databasus/databasus.yaml
```
### Step 5 — Cleanup old local resources
```bash
kubectl delete pvc databasus-pvc-local
kubectl delete pv databasus-pv
kubectl delete job databasus-migration
```
### Step 6 — Verify
- Check app is running: `kubectl get pods -l app=databasus`
- Check backup sidecar logs: `kubectl logs -l app=databasus -c backup`
- Visit https://databasus.juntekim.com

View file

@ -0,0 +1,9 @@
# SECURITY WARNING (review): this manifest commits long-lived AWS
# credentials in plaintext. Anyone with read access to the repository (or
# its history) can use them. These keys should be rotated immediately and
# the manifest migrated to an encrypted form (SealedSecrets / SOPS /
# external-secrets) before being committed again.
apiVersion: v1
kind: Secret
metadata:
  name: databasus-backup-secret
  namespace: default
type: Opaque
# stringData accepts plain values; Kubernetes base64-encodes them on write.
stringData:
  AWS_ACCESS_KEY_ID: AKIAQL67W6HIVFQBZTFF
  AWS_SECRET_ACCESS_KEY: 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

View file

@ -0,0 +1,85 @@
# ================================
# DATABASUS - ONE-TIME MIGRATION JOB
# Copies data from old local PVC → new ceph PVC
#
# Steps:
#   1. Scale down databasus deployment:
#        kubectl scale deployment databasus --replicas=0
#   2. Apply databasus-storage.yaml (creates new ceph PVC):
#        kubectl apply -f databasus-storage.yaml
#   3. Apply this file:
#        kubectl apply -f databasus-migration-job.yaml
#   4. Wait for job to complete:
#        kubectl wait --for=condition=complete job/databasus-migration --timeout=120s
#   5. Verify data in new PVC:
#        kubectl logs job/databasus-migration
#   6. Apply updated databasus.yaml (uses ceph PVC, drops local PV/PVC):
#        kubectl apply -f databasus.yaml
#   7. Delete the old local PV and its PVC:
#        kubectl delete pvc databasus-pvc-local
#        kubectl delete pv databasus-pv
#   8. Delete this job:
#        kubectl delete job databasus-migration
# ================================
---
# Rename the old local PVC binding so both can coexist during migration.
# Since the old PVC is named databasus-pvc, the new ceph one uses the same
# name — so you must delete or rename the old one first, OR use this job
# which mounts the old PV directly via a temporary PVC alias below.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: databasus-pvc-local
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: databasus-local-storage
  resources:
    requests:
      storage: 500Mi
  # Bind to the existing local PV explicitly
  volumeName: databasus-pv
---
apiVersion: batch/v1
kind: Job
metadata:
  name: databasus-migration
spec:
  # NOTE(review): no backoffLimit is set, so a failing copy retries up to
  # the default 6 times; consider backoffLimit: 1 for a one-shot migration.
  template:
    spec:
      # The old local PV lives on node "mist", so the copy pod must be
      # scheduled there to see its path.
      nodeSelector:
        kubernetes.io/hostname: mist
      restartPolicy: Never
      containers:
        - name: migrate
          image: busybox
          command:
            - sh
            - -c
            # "/old-data/." (trailing dot) copies the directory's contents
            # including dotfiles; -a preserves mode/ownership/timestamps.
            - |
              echo "=== Starting migration ==="
              echo "Source contents:"
              ls -lah /old-data/
              echo ""
              echo "Copying data..."
              cp -av /old-data/. /new-data/
              echo ""
              echo "=== Migration complete ==="
              echo "New data contents:"
              ls -lah /new-data/
          volumeMounts:
            - name: old-data
              mountPath: /old-data
              # Source is mounted read-only so the migration can never
              # corrupt the original data.
              readOnly: true
            - name: new-data
              mountPath: /new-data
      volumes:
        - name: old-data
          persistentVolumeClaim:
            claimName: databasus-pvc-local
        - name: new-data
          persistentVolumeClaim:
            claimName: databasus-pvc

View file

@ -0,0 +1,16 @@
# ================================
# DATABASUS PERSISTENT STORAGE
# Apply once — do NOT delete
# ================================
---
# Ceph-backed volume (rook-ceph-block StorageClass) replacing the old
# node-local PV, so the workload is no longer pinned to a single node.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: databasus-pvc
spec:
  accessModes:
    # RBD block volumes attach to one node at a time.
    - ReadWriteOnce
  storageClassName: rook-ceph-block
  resources:
    requests:
      # Same capacity as the old local PV (500Mi).
      storage: 500Mi

View file

@ -4,43 +4,6 @@
# Open-source DB backup management UI
# ================================
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: databasus-pv
spec:
capacity:
storage: 500Mi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: databasus-local-storage
local:
path: /home/kimjunte/k8s_storage/databasus
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- mist
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: databasus-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: databasus-local-storage
resources:
requests:
storage: 500Mi
---
apiVersion: apps/v1
kind: Deployment
@ -50,6 +13,8 @@ metadata:
    app: databasus
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: databasus
@ -58,8 +23,6 @@ spec:
      labels:
        app: databasus
    spec:
      nodeSelector:
        kubernetes.io/hostname: mist
      containers:
        - name: databasus
          image: databasus/databasus:latest
@ -80,6 +43,34 @@ spec:
          volumeMounts:
            - name: databasus-data
              mountPath: /databasus-data
# Backup sidecar: runs cron inside the pod and streams a gzipped tar of
# /databasus-data straight to S3 (no local temp file).
- name: backup
  # NOTE(review): python:3-alpine appears to be used only for its alpine
  # base (dcron + aws-cli are installed via apk); confirm nothing needs
  # python, otherwise plain alpine would suffice.
  image: python:3-alpine
  env:
    # Credentials come from the databasus-backup-secret Secret (created by
    # databasus-backup-secret.yaml, keys provisioned by terraform).
    - name: AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: databasus-backup-secret
          key: AWS_ACCESS_KEY_ID
    - name: AWS_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: databasus-backup-secret
          key: AWS_SECRET_ACCESS_KEY
    - name: AWS_DEFAULT_REGION
      value: eu-west-2
  # Schedule "0 2 * * 0" = 02:00 every Sunday. '%' is written '\%' because
  # cron treats an unescaped '%' in the command field as a newline.
  # NOTE(review): the bucket name is hard-coded and must stay in sync with
  # the terraform `bucket_name` variable ("juntekim-databasus-backup").
  command:
    - /bin/sh
    - -c
    - |
      apk add --no-cache dcron aws-cli
      echo "0 2 * * 0 TIMESTAMP=\$(date +\%Y-\%m-\%d) && tar -czf - /databasus-data | aws s3 cp - s3://juntekim-databasus-backup/databasus-backup-\${TIMESTAMP}.tar.gz" | crontab -
      crond -f -l 2
  volumeMounts:
    - name: databasus-data
      mountPath: /databasus-data
      # The sidecar only reads the data volume; mount read-only.
      readOnly: true
      volumes:
        - name: databasus-data
          persistentVolumeClaim: