back up setup

Jun-te Kim 2026-02-15 23:14:46 +00:00
parent e544b098f8
commit 160e02f03f
3 changed files with 77 additions and 234 deletions

View file

@@ -1,56 +0,0 @@
name: Weekly K8s Storage Backup

on:
  schedule:
    # Sunday 02:30 UTC (quiet time, predictable)
    - cron: "30 2 * * 0"
  workflow_dispatch:

jobs:
  backup:
    name: Backup /k8s_storage → S3
    runs-on: mealcraft-runners
    timeout-minutes: 180
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      # Install kubectl
      - name: Install kubectl
        run: |
          sudo apt-get update
          sudo apt-get install -y curl ca-certificates
          curl -LO "https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          sudo install -m 0755 kubectl /usr/local/bin/kubectl

      - name: Sanity check mount
        run: |
          echo "Listing /k8s_storage:"
          ls -lah /k8s_storage
          ls -la

      - name: Install AWS CLI (user-local)
        run: |
          if ! command -v aws >/dev/null 2>&1; then
            echo "Installing AWS CLI locally..."
            curl -s https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o awscliv2.zip
            unzip -q awscliv2.zip
            ./aws/install \
              --install-dir "$HOME/.aws-cli" \
              --bin-dir "$HOME/bin"
            echo "$HOME/bin" >> "$GITHUB_PATH"
          fi

      - name: Verify AWS identity
        run: |
          aws sts get-caller-identity

      - name: Run K8s backup for DEV and PROD
        run: bash mist_infra/scripts/backup_k8s_storage_to_s3.sh

# Example of restoring a backup:
#   aws s3 cp s3://mist-backups/2025-03-09/k8s_storage_mist_2025-03-09_02-30-01.tar.gz .
#   sudo tar -xzf k8s_storage_*.tar.gz -C /home/kimjunte/k8s_storage
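A note on the restore example above: GNU tar strips the leading "/" when creating an archive, so a backup of /k8s_storage stores its members as k8s_storage/..., and extracting with -C /home/kimjunte/k8s_storage would nest a second k8s_storage/ directory inside the target. A minimal sketch of a restore that avoids the nesting (same paths as the comment above; --strip-components is standard GNU tar and drops the leading path component on extract):

# Fetch the archive, then extract its contents directly into the storage root.
aws s3 cp s3://mist-backups/2025-03-09/k8s_storage_mist_2025-03-09_02-30-01.tar.gz .
sudo tar -xzf k8s_storage_mist_2025-03-09_02-30-01.tar.gz \
  --strip-components=1 \
  -C /home/kimjunte/k8s_storage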

View file

@@ -1,5 +1,7 @@
 # ================================
-# databasus - ALL IN ONE
+# DATABASUS - ALL IN ONE
+# https://databasus.com
+# Open-source DB backup management UI
 # ================================
 ---
@@ -12,7 +14,7 @@ spec:
     storage: 500Mi
   volumeMode: Filesystem
   accessModes:
-    - ReadWriteMany
+    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: databasus-local-storage
  local:
@@ -33,11 +35,11 @@ metadata:
   name: databasus-pvc
 spec:
   accessModes:
-    - ReadWriteMany
+    - ReadWriteOnce
   storageClassName: databasus-local-storage
   resources:
     requests:
-      storage: 1Gi
+      storage: 500Mi
 ---
 apiVersion: apps/v1
@@ -62,7 +64,7 @@ spec:
       - name: databasus
         image: databasus/databasus:latest
         ports:
-          - containerPort: 3000
+          - containerPort: 4005
         env:
           - name: NODE_ENV
             value: "production"
@@ -77,7 +79,7 @@ spec:
             memory: "512Mi"
         volumeMounts:
           - name: databasus-data
-            mountPath: /app/data
+            mountPath: /databasus-data
       volumes:
         - name: databasus-data
          persistentVolumeClaim:
@@ -92,8 +94,8 @@ spec:
   selector:
     app: databasus
   ports:
-    - port: 3000
-      targetPort: 3000
+    - port: 4005
+      targetPort: 4005
 ---
 apiVersion: traefik.io/v1alpha1
@@ -108,7 +110,7 @@ spec:
       kind: Rule
       services:
         - name: databasus
-          port: 3000
+          port: 4005
   tls:
     certResolver: myresolver
     domains:
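The port move from 3000 to 4005 has to stay in sync across three objects: the containerPort, the Service, and the IngressRoute. A quick post-apply check, as a sketch (it assumes the objects live in the current namespace and that databasus answers plain HTTP on that port):

# Endpoints should list a pod IP on 4005 once the pod is Ready.
kubectl get endpoints databasus
# Probe the Service directly, bypassing Traefik.
kubectl port-forward svc/databasus 4005:4005 &
curl -I http://localhost:4005/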

View file

@@ -2,182 +2,79 @@
 set -euo pipefail
 # ==================================================
-# GLOBAL CONFIG
+# CONFIG
 # ==================================================
-K8S_STORAGE_ROOT="/k8s_storage"
+K8S_STORAGE_ROOT="/home/kimjunte/k8s_storage"
 BACKUP_ROOT="/tmp/k8s-backups"
-# NEVER touch raw Postgres data
-TAR_EXCLUDES=(
-  "$K8S_STORAGE_ROOT/postgres"
-  "$K8S_STORAGE_ROOT/lost+found"
-)
-# ==================================================
-# BACKUP FUNCTION
-# ==================================================
-run_backup() {
-  local ENVIRONMENT="$1"
-  local DATE="$(date -u +%Y-%m-%d_%H-%M-%S)"
-  local BACKUP_DIR="$BACKUP_ROOT/$DATE"
-  mkdir -p "$BACKUP_DIR"
+S3_BUCKET="s3://mist-backups"
+AWS_PROFILE_NAME="personal"
+AWS_CONFIG_FILE="/home/kimjunte/.aws/config"
+AWS_SHARED_CREDENTIALS_FILE="/home/kimjunte/.aws/credentials"
+export AWS_PROFILE="$AWS_PROFILE_NAME"
+export AWS_CONFIG_FILE
+export AWS_SHARED_CREDENTIALS_FILE
+DATE="$(date -u +%Y-%m-%d_%H-%M-%S)"
+BACKUP_DIR="$BACKUP_ROOT/$DATE"
+ARCHIVE_NAME="k8s_storage_$DATE.tar.gz"
+ARCHIVE_PATH="$BACKUP_DIR/$ARCHIVE_NAME"
+mkdir -p "$BACKUP_DIR"
-  # ==================================================
-  # ENVIRONMENT SWITCH
-  # ==================================================
-  case "$ENVIRONMENT" in
-    dev)
-      PG_SECRET_NAME="postgres-dev"
-      PG_POD_SELECTOR="app=postgres"
-      S3_PREFIX="dev"
-      NAMESPACE="dev"
-      ;;
-    prod)
-      if [[ "${I_UNDERSTAND_THIS_IS_PROD:-}" != "true" ]]; then
-        echo "❌ Refusing to run PROD backup without confirmation"
-        echo "   Re-run with: I_UNDERSTAND_THIS_IS_PROD=true"
-        return 1
-      fi
-      PG_SECRET_NAME="postgres-prod"
-      PG_POD_SELECTOR="app=postgres"
-      S3_PREFIX="prod"
-      NAMESPACE="default"
-      ;;
-    *)
-      echo "❌ Invalid ENVIRONMENT: $ENVIRONMENT (must be dev or prod)"
-      return 1
-      ;;
-  esac
-  echo ""
-  echo "=== Backup started ($(date -u)) ==="
-  echo "Environment: $ENVIRONMENT"
-  echo "Namespace: $NAMESPACE"
-  # ==================================================
-  # LOCATE POSTGRES POD
-  # ==================================================
-  POSTGRES_POD=$(kubectl get pods \
-    -n "$NAMESPACE" \
-    -l "$PG_POD_SELECTOR" \
-    -o jsonpath='{.items[0].metadata.name}')
-  if [[ -z "$POSTGRES_POD" ]]; then
-    echo "❌ No Postgres pod found for selector: $PG_POD_SELECTOR"
-    kubectl get pods -n "$NAMESPACE"
-    return 1
-  fi
-  echo "Using Postgres pod: $POSTGRES_POD"
-  # ==================================================
-  # READ DATABASE_URL FROM SECRET
-  # ==================================================
-  DATABASE_URL=$(kubectl get secret "$PG_SECRET_NAME" \
-    -n "$NAMESPACE" \
-    -o jsonpath='{.data.DATABASE_URL}' | base64 -d)
-  if [[ -z "$DATABASE_URL" ]]; then
-    echo "❌ DATABASE_URL missing in secret $PG_SECRET_NAME"
-    return 1
-  fi
-  # Parse DATABASE_URL
-  POSTGRES_USER="$(echo "$DATABASE_URL" | sed -E 's|.*://([^:]+):.*|\1|')"
-  POSTGRES_DB="$(echo "$DATABASE_URL" | sed -E 's|.*/([^?]+).*|\1|')"
-  if [[ -z "$POSTGRES_USER" || -z "$POSTGRES_DB" ]]; then
-    echo "❌ Failed to parse DATABASE_URL"
-    return 1
-  fi
-  echo "Dumping database: $POSTGRES_DB (user: $POSTGRES_USER)"
-  # ==================================================
-  # POSTGRES LOGICAL DUMP (SAFE)
-  # ==================================================
-  kubectl exec -n "$NAMESPACE" "$POSTGRES_POD" -- \
-    pg_dump "$POSTGRES_DB" \
-    > "$BACKUP_DIR/postgres.sql"
-  echo "✔ pg_dump complete ($(du -h "$BACKUP_DIR/postgres.sql" | cut -f1))"
-  # ==================================================
-  # NORMALISE PERMISSIONS (EXCLUDING POSTGRES)
-  # ==================================================
-  echo "Normalising permissions (excluding Postgres data)..."
-  sudo find "$K8S_STORAGE_ROOT" \
-    -mindepth 1 \
-    -maxdepth 1 \
-    ! -name postgres \
-    -exec chmod -R a+rX {} \; || true
-  # ==================================================
-  # ARCHIVE K8S STORAGE (SAFE)
-  # ==================================================
-  TAR_EXCLUDE_ARGS=()
-  for path in "${TAR_EXCLUDES[@]}"; do
-    TAR_EXCLUDE_ARGS+=(--exclude="$path")
-  done
-  tar \
-    --ignore-failed-read \
-    --warning=no-file-changed \
-    -czf "$BACKUP_DIR/k8s_storage_$DATE.tar.gz" \
-    "${TAR_EXCLUDE_ARGS[@]}" \
-    "$K8S_STORAGE_ROOT"
-  echo "✔ k8s_storage archived ($(du -h "$BACKUP_DIR/k8s_storage_$DATE.tar.gz" | cut -f1))"
-  # ==================================================
-  # UPLOAD TO S3
-  # ==================================================
-  S3_BUCKET="s3://mist-backups/$S3_PREFIX/$DATE"
-  aws s3 cp "$BACKUP_DIR" "$S3_BUCKET" --recursive
-  echo "✔ Uploaded to $S3_BUCKET"
-  # ==================================================
-  # RESTORE GUIDE
-  # ==================================================
-  echo ""
-  echo "========================================"
-  echo "=== RESTORE GUIDE ($ENVIRONMENT)"
-  echo "========================================"
-  echo ""
-  echo "Restore volumes:"
-  echo "  sudo tar -xzf k8s_storage_$DATE.tar.gz -C /"
-  echo ""
-  echo "Restore Postgres:"
-  echo "  kubectl exec -n $NAMESPACE -i $POSTGRES_POD -- \\"
-  echo "    psql $POSTGRES_DB < postgres.sql"
-  echo ""
-  echo "=== Backup completed successfully ==="
-}
-# ==================================================
-# RUN BACKUPS FOR BOTH DEV AND PROD
-# ==================================================
-echo "Starting backup process for dev and prod environments..."
-FAILED=0
-# Backup dev
-run_backup "dev" || FAILED=$((FAILED + 1))
-# Backup prod (requires explicit confirmation)
-I_UNDERSTAND_THIS_IS_PROD=true run_backup "prod" || FAILED=$((FAILED + 1))
 echo ""
 echo "========================================"
-if [[ $FAILED -eq 0 ]]; then
-  echo "✅ All backups completed successfully"
-else
-  echo "⚠️ $FAILED backup(s) failed"
-  exit 1
-fi
+echo "=== K8S STORAGE BACKUP STARTED"
+echo "=== $(date -u)"
 echo "========================================"
+echo ""
+# ==================================================
+# VERIFY AWS IDENTITY
+# ==================================================
+echo "Using AWS profile: $AWS_PROFILE"
+aws sts get-caller-identity
+# ==================================================
+# CREATE ARCHIVE (EXCLUDE POSTGRES)
+# ==================================================
+echo "Creating archive..."
+sudo tar \
+  --ignore-failed-read \
+  --warning=no-file-changed \
+  --exclude="$K8S_STORAGE_ROOT/postgres" \
+  -czf "$ARCHIVE_PATH" \
+  -C /home/kimjunte \
+  k8s_storage
+echo "✔ Archive created:"
+du -h "$ARCHIVE_PATH"
+# ==================================================
+# GENERATE CHECKSUM
+# ==================================================
+sha256sum "$ARCHIVE_PATH" > "$ARCHIVE_PATH.sha256"
+echo "✔ Checksum generated"
+# ==================================================
+# UPLOAD TO S3
+# ==================================================
+echo "Uploading to S3..."
+aws s3 cp "$ARCHIVE_PATH" "$S3_BUCKET/$DATE/"
+aws s3 cp "$ARCHIVE_PATH.sha256" "$S3_BUCKET/$DATE/"
+echo "✔ Uploaded to $S3_BUCKET/$DATE/"
+# ==================================================
+# CLEAN LOCAL TEMP FILES (optional but recommended)
+# ==================================================
+rm -rf "$BACKUP_DIR"
+echo ""
+echo "========================================"
+echo "=== BACKUP COMPLETED SUCCESSFULLY"
+echo "========================================"
+echo ""