diff --git a/README.md b/README.md index 40278eb..3668597 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,14 @@ ## Forgejo Backup (TODO) -- [ ] Set up restic CronJob to back up forgejo-pvc (/data) to S3 weekly - - Mount forgejo-pvc read-only in CronJob - - Use restic to snapshot to S3 bucket and put it under repos (need: S3_BUCKET, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, RESTIC_PASSWORD) - - Schedule: daily - [ ] Forgejo postgres backup via databasus same bucket but key would be postgres - [ ] Test restore from restic snapshot up -# Lets deploy databasus first since i'm using it to sort my back up strategy first + +## Set up runners + +## set up docker image registry + +## Migrate everything + # Migrate hyprland diff --git a/aws_infra/main.tf b/aws_infra/main.tf index 0f1b428..93ef34c 100644 --- a/aws_infra/main.tf +++ b/aws_infra/main.tf @@ -14,3 +14,18 @@ output "forgejo_backup_secret_access_key" { value = module.forgejo_backup.iam_secret_access_key sensitive = true } + +module "databasus_backup" { + source = "./modules/databasus_backup" + + bucket_name = "juntekim-databasus-backup" +} + +output "databasus_backup_access_key_id" { + value = module.databasus_backup.iam_access_key_id +} + +output "databasus_backup_secret_access_key" { + value = module.databasus_backup.iam_secret_access_key + sensitive = true +} diff --git a/aws_infra/modules/databasus_backup/main.tf b/aws_infra/modules/databasus_backup/main.tf new file mode 100644 index 0000000..46e5fc9 --- /dev/null +++ b/aws_infra/modules/databasus_backup/main.tf @@ -0,0 +1,39 @@ +module "bucket" { + source = "../s3_bucket" + + bucket_name = var.bucket_name + versioning_enabled = true + retention_days = 90 +} + +resource "aws_iam_user" "databasus_backup" { + name = "databasus-backup" +} + +resource "aws_iam_access_key" "databasus_backup" { + user = aws_iam_user.databasus_backup.name +} + +resource "aws_iam_user_policy" "databasus_backup" { + name = "databasus-backup-s3" + user = 
aws_iam_user.databasus_backup.name + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:ListBucket" + ] + Resource = [ + module.bucket.bucket_arn, + "${module.bucket.bucket_arn}/*" + ] + } + ] + }) +} diff --git a/aws_infra/modules/databasus_backup/outputs.tf b/aws_infra/modules/databasus_backup/outputs.tf new file mode 100644 index 0000000..f0bea09 --- /dev/null +++ b/aws_infra/modules/databasus_backup/outputs.tf @@ -0,0 +1,16 @@ +output "bucket_name" { + value = module.bucket.bucket_name +} + +output "bucket_arn" { + value = module.bucket.bucket_arn +} + +output "iam_access_key_id" { + value = aws_iam_access_key.databasus_backup.id +} + +output "iam_secret_access_key" { + value = aws_iam_access_key.databasus_backup.secret + sensitive = true +} diff --git a/aws_infra/modules/databasus_backup/variables.tf b/aws_infra/modules/databasus_backup/variables.tf new file mode 100644 index 0000000..982544f --- /dev/null +++ b/aws_infra/modules/databasus_backup/variables.tf @@ -0,0 +1,5 @@ +variable "bucket_name" { + description = "Name of the S3 bucket for Databasus backups" + type = string + default = "juntekim-databasus-backup" +} diff --git a/databasus/TODO.md b/databasus/TODO.md new file mode 100644 index 0000000..0dc0cd7 --- /dev/null +++ b/databasus/TODO.md @@ -0,0 +1,39 @@ +# TODO + +## Databasus — Migrate to Ceph + AWS Backups + +> Files: `databasus/` + +### Step 3 — Migrate PVC data (local → ceph) +```bash +# Scale down the app +kubectl scale deployment databasus --replicas=0 + +# Create new ceph PVC +kubectl apply -f databasus/databasus-storage.yaml + +# Run migration job (copies /databasus-data from local PV → ceph PVC) +kubectl apply -f databasus/databasus-migration-job.yaml +kubectl wait --for=condition=complete job/databasus-migration --timeout=120s + +# Verify data was copied +kubectl logs job/databasus-migration +``` + +### Step 4 — Deploy updated 
app +```bash +kubectl apply -f databasus/databasus-backup-secret.yaml +kubectl apply -f databasus/databasus.yaml +``` + +### Step 5 — Cleanup old local resources +```bash +kubectl delete pvc databasus-pvc-local +kubectl delete pv databasus-pv +kubectl delete job databasus-migration +``` + +### Step 6 — Verify +- Check app is running: `kubectl get pods -l app=databasus` +- Check backup sidecar logs: `kubectl logs -l app=databasus -c backup` +- Visit https://databasus.juntekim.com diff --git a/databasus/databasus-backup-secret.yaml b/databasus/databasus-backup-secret.yaml new file mode 100644 index 0000000..ddbc1af --- /dev/null +++ b/databasus/databasus-backup-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: databasus-backup-secret + namespace: default +type: Opaque +stringData: + AWS_ACCESS_KEY_ID: REPLACE_WITH_ROTATED_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY: REPLACE_WITH_ROTATED_SECRET_ACCESS_KEY diff --git a/databasus/databasus-migration-job.yaml b/databasus/databasus-migration-job.yaml new file mode 100644 index 0000000..f95cb69 --- /dev/null +++ b/databasus/databasus-migration-job.yaml @@ -0,0 +1,85 @@ +# ================================ +# DATABASUS - ONE-TIME MIGRATION JOB +# Copies data from old local PVC → new ceph PVC +# +# Steps: +# 1. Scale down databasus deployment: +# kubectl scale deployment databasus --replicas=0 +# 2. Apply databasus-storage.yaml (creates new ceph PVC): +# kubectl apply -f databasus-storage.yaml +# 3. Apply this file: +# kubectl apply -f databasus-migration-job.yaml +# 4. Wait for job to complete: +# kubectl wait --for=condition=complete job/databasus-migration --timeout=120s +# 5. Verify data in new PVC: +# kubectl logs job/databasus-migration +# 6. Apply updated databasus.yaml (uses ceph PVC, drops local PV/PVC): +# kubectl apply -f databasus.yaml +# 7. Delete the old local PV and its PVC: +# kubectl delete pvc databasus-pvc-local +# kubectl delete pv databasus-pv +# 8. 
Delete this job: +# kubectl delete job databasus-migration +# ================================ + +--- +# Rename the old local PVC binding so both can coexist during migration. +# Since the old PVC is named databasus-pvc, the new ceph one uses the same +# name — so you must delete or rename the old one first, OR use this job +# which mounts the old PV directly via a temporary PVC alias below. + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: databasus-pvc-local +spec: + accessModes: + - ReadWriteOnce + storageClassName: databasus-local-storage + resources: + requests: + storage: 500Mi + # Bind to the existing local PV explicitly + volumeName: databasus-pv + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: databasus-migration +spec: + template: + spec: + nodeSelector: + kubernetes.io/hostname: mist + restartPolicy: Never + containers: + - name: migrate + image: busybox + command: + - sh + - -c + - | + echo "=== Starting migration ===" + echo "Source contents:" + ls -lah /old-data/ + echo "" + echo "Copying data..." + cp -av /old-data/. 
/new-data/ + echo "" + echo "=== Migration complete ===" + echo "New data contents:" + ls -lah /new-data/ + volumeMounts: + - name: old-data + mountPath: /old-data + readOnly: true + - name: new-data + mountPath: /new-data + volumes: + - name: old-data + persistentVolumeClaim: + claimName: databasus-pvc-local + - name: new-data + persistentVolumeClaim: + claimName: databasus-pvc diff --git a/databasus/databasus-storage.yaml b/databasus/databasus-storage.yaml new file mode 100644 index 0000000..8a0ee8e --- /dev/null +++ b/databasus/databasus-storage.yaml @@ -0,0 +1,16 @@ +# ================================ +# DATABASUS PERSISTENT STORAGE +# Apply once — do NOT delete +# ================================ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: databasus-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: rook-ceph-block + resources: + requests: + storage: 500Mi diff --git a/databasus/databasus.yaml b/databasus/databasus.yaml index 718f887..cd2776d 100644 --- a/databasus/databasus.yaml +++ b/databasus/databasus.yaml @@ -4,43 +4,6 @@ # Open-source DB backup management UI # ================================ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: databasus-pv -spec: - capacity: - storage: 500Mi - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - storageClassName: databasus-local-storage - local: - path: /home/kimjunte/k8s_storage/databasus - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - mist - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: databasus-pvc -spec: - accessModes: - - ReadWriteOnce - storageClassName: databasus-local-storage - resources: - requests: - storage: 500Mi - --- apiVersion: apps/v1 kind: Deployment @@ -50,6 +13,8 @@ metadata: app: databasus spec: replicas: 1 + strategy: + type: Recreate selector: matchLabels: app: databasus @@ -58,8 
+23,6 @@ spec: labels: app: databasus spec: - nodeSelector: - kubernetes.io/hostname: mist containers: - name: databasus image: databasus/databasus:latest @@ -80,6 +43,34 @@ spec: volumeMounts: - name: databasus-data mountPath: /databasus-data + + - name: backup + image: python:3-alpine + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: databasus-backup-secret + key: AWS_ACCESS_KEY_ID + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: databasus-backup-secret + key: AWS_SECRET_ACCESS_KEY + - name: AWS_DEFAULT_REGION + value: eu-west-2 + command: + - /bin/sh + - -c + - | + apk add --no-cache dcron aws-cli + echo "0 2 * * 0 TIMESTAMP=\$(date +\%Y-\%m-\%d) && tar -czf - /databasus-data | aws s3 cp - s3://juntekim-databasus-backup/databasus-backup-\${TIMESTAMP}.tar.gz" | crontab - + crond -f -l 2 + volumeMounts: + - name: databasus-data + mountPath: /databasus-data + readOnly: true + volumes: - name: databasus-data persistentVolumeClaim: