Merge pull request #22 from MealCraft/feature/off_from_main

Feature/off from main
This commit is contained in:
Jun-te Kim 2025-12-28 13:43:15 +00:00 committed by GitHub
commit ee0cc2a768
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
17 changed files with 331 additions and 87 deletions

View file

@ -1,19 +1,16 @@
name: Deploy DB Infrastructure
name: Deploy DEV DB Infrastructure
on:
push:
branches:
- main
- "feature/*"
jobs:
deploy:
runs-on: mealcraft-runners
steps:
- name: Checkout repo
uses: actions/checkout@v4
- uses: actions/checkout@v4
# Install kubectl
- name: Install kubectl
run: |
sudo apt-get update
@ -21,56 +18,30 @@ jobs:
curl -LO "https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -m 0755 kubectl /usr/local/bin/kubectl
# Configure kubeconfig (ARC in-cluster)
- name: Configure kubeconfig
- name: Configure kubeconfig (in-cluster)
run: |
KUBE_HOST="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"
SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
CA_CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
kubectl config set-cluster microk8s \
--server="$KUBE_HOST" \
--certificate-authority="$CA_CERT"
kubectl config set-credentials runner \
--token="$SA_TOKEN"
kubectl config set-context runner-context \
--cluster=microk8s \
--user=runner \
--namespace="$NAMESPACE"
kubectl config set-cluster microk8s --server="$KUBE_HOST" --certificate-authority="$CA_CERT"
kubectl config set-credentials runner --token="$SA_TOKEN"
kubectl config set-context runner-context --cluster=microk8s --user=runner --namespace="$NAMESPACE"
kubectl config use-context runner-context
# 1⃣ Secrets
- name: Apply DB secrets
run: |
kubectl apply -f db/k8s/secrets/
- name: Apply DEV secrets
run: kubectl apply -f db/k8s/secrets/
# 2⃣ PostgreSQL
- name: Deploy Postgres
run: |
kubectl apply -f db/k8s/postgres/
# 3⃣ Backups (CronJob)
- name: Deploy Postgres backups
run: |
kubectl apply -f db/k8s/backups/
- name: Deploy DEV Postgres
run: kubectl apply -f db/k8s/postgres/postgres-dev-stripe-to-invoice.yaml
migrate:
runs-on: mealcraft-runners
needs: deploy
steps:
- name: Checkout repo
uses: actions/checkout@v4
# Install Atlas
- name: debug
run: |
ls -la
pwd
- uses: actions/checkout@v4
- name: Install kubectl
run: |
@ -79,14 +50,22 @@ jobs:
curl -LO "https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -m 0755 kubectl /usr/local/bin/kubectl
# Install Atlas
- name: Install Atlas CLI
- name: Configure kubeconfig (in-cluster)
run: |
curl -sSf https://atlasgo.sh | sh
KUBE_HOST="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"
SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
CA_CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# Load DB creds from Kubernetes secret
- name: Load Postgres credentials
kubectl config set-cluster microk8s --server="$KUBE_HOST" --certificate-authority="$CA_CERT"
kubectl config set-credentials runner --token="$SA_TOKEN"
kubectl config set-context runner-context --cluster=microk8s --user=runner --namespace="$NAMESPACE"
kubectl config use-context runner-context
- name: Install Atlas
run: curl -sSf https://atlasgo.sh | sh
- name: Load DEV DB creds
run: |
export POSTGRES_USER=$(kubectl get secret postgres-secret -o jsonpath='{.data.POSTGRES_USER}' | base64 -d)
export POSTGRES_PASSWORD=$(kubectl get secret postgres-secret -o jsonpath='{.data.POSTGRES_PASSWORD}' | base64 -d)
@ -94,10 +73,8 @@ jobs:
echo "POSTGRES_USER=$POSTGRES_USER" >> $GITHUB_ENV
echo "POSTGRES_PASSWORD=$POSTGRES_PASSWORD" >> $GITHUB_ENV
- name: Run Atlas migrations (dev)
- name: Run Atlas migrations (DEV)
run: |
atlas migrate apply \
--config file://./db/atlas/atlas.hcl \
--env stripe_invoice_dev

View file

@ -0,0 +1,66 @@
name: Deploy PROD DB Infrastructure
on:
push:
branches:
- main
workflow_dispatch:
jobs:
deploy:
runs-on: mealcraft-runners
steps:
- uses: actions/checkout@v4
- name: Install kubectl
run: |
sudo apt-get update
sudo apt-get install -y curl ca-certificates
curl -LO "https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -m 0755 kubectl /usr/local/bin/kubectl
- name: Configure kubeconfig (in-cluster)
run: |
KUBE_HOST="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"
SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
CA_CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
kubectl config set-cluster microk8s --server="$KUBE_HOST" --certificate-authority="$CA_CERT"
kubectl config set-credentials runner --token="$SA_TOKEN"
kubectl config set-context runner-context --cluster=microk8s --user=runner --namespace="$NAMESPACE"
kubectl config use-context runner-context
- name: Apply PROD secrets
run: kubectl apply -f db/k8s/secrets/
- name: Deploy PROD Postgres
run: kubectl apply -f db/k8s/postgres/
- name: Deploy PROD backups
run: kubectl apply -f db/k8s/backups/
migrate:
runs-on: mealcraft-runners
needs: deploy
steps:
- uses: actions/checkout@v4
- name: Install Atlas
run: curl -sSf https://atlasgo.sh | sh
- name: Load PROD DB creds
run: |
export POSTGRES_USER=$(kubectl get secret postgres-prod-secret -o jsonpath='{.data.POSTGRES_USER}' | base64 -d)
export POSTGRES_PASSWORD=$(kubectl get secret postgres-prod-secret -o jsonpath='{.data.POSTGRES_PASSWORD}' | base64 -d)
echo "POSTGRES_USER=$POSTGRES_USER" >> $GITHUB_ENV
echo "POSTGRES_PASSWORD=$POSTGRES_PASSWORD" >> $GITHUB_ENV
- name: Run Atlas migrations (PROD)
run: |
atlas migrate apply \
--config file://./db/atlas/atlas.hcl \
--env stripe_invoice_prod

View file

@ -19,7 +19,8 @@
"<C-S-e>": false,
"<C-b>": false,
"<C-j>": false,
"<C-S-c>": false
"<C-S-c>": false,
"<C-k>": false
},
// Terminal copy/paste via Ctrl+Shift+C / Ctrl+Shift+V

1
db/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
.env

View file

@ -1,5 +1,5 @@
env "stripe_invoice_dev" {
url = "postgres://${getenv("POSTGRES_USER")}:${getenv("POSTGRES_PASSWORD")}@postgres.default.svc.cluster.local:5432/stripe_invoice?sslmode=disable"
url = "postgres://${getenv("POSTGRES_USER")}:${getenv("POSTGRES_PASSWORD")}@postgres-dev.default.svc.cluster.local:5432/stripe_invoice?sslmode=disable"
migration {
dir = "file://./db/atlas/stripe_invoice/migrations"
@ -7,9 +7,11 @@ env "stripe_invoice_dev" {
}
env "stripe_invoice_prod" {
url = "postgres://${getenv("POSTGRES_USER")}:${getenv("POSTGRES_PASSWORD")}@postgres.default.svc.cluster.local:5432/stripe_invoice_prod?sslmode=disable"
url = "postgres://${getenv("POSTGRES_USER")}:${getenv("POSTGRES_PASSWORD")}@postgres-prod.default.svc.cluster.local:5432/stripe_invoice_prod?sslmode=disable"
migration {
dir = "file://./db/atlas/stripe_invoice/migrations"
}
}

View file

@ -0,0 +1,7 @@
CREATE TABLE login_tokens (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
token TEXT NOT NULL UNIQUE,
expires_at TIMESTAMPTZ NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);

View file

@ -1,4 +1,5 @@
h1:R/iRQ2a7u+QeDRyEVRvl8TcvAzRjWXbjBpn4dNpyMAA=
h1:ELwFHTBDb63mdRBhmjXMMSpy05pUSVxH03zuUuHYAto=
0001_init.sql h1:gzb02ZbjrrJkXOC+2qIZsngnj7A+29O2/b4awScPlPs=
0002_auth.sql h1:4NhBu26dIBMy9gxMxM3tf6Z2CS2kfKlGjFBj07T/aBw=
0003_stripe_xero.sql h1:E2bcdUDnondsXwbdIwVlZqR4DQwzcoDiyeRFJwVxXwg=
0004_login_tokens.sql h1:rj1KcWu/0znh2YvtI7JV8Z2nwtL5rZzONbPwX1P+/PI=

View file

@ -1,9 +1,10 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: postgres-backup
name: postgres-backup-dev
namespace: default
spec:
schedule: "30 18 * * 5" # weekly on friday at 18:30
schedule: "30 18 * * 5" # weekly Friday 18:30
jobTemplate:
spec:
template:
@ -16,11 +17,47 @@ spec:
- /bin/sh
- -c
- |
pg_dump stripe_invoice \
pg_dump \
-h postgres-dev.default.svc.cluster.local \
-U $POSTGRES_USER \
stripe_invoice \
| gzip \
| aws s3 cp - s3://$S3_BUCKET/stripe_invoice/$(date +%F).sql.gz
| aws s3 cp - s3://$S3_BUCKET/dev/stripe_invoice/$(date +%F).sql.gz
envFrom:
- secretRef:
name: postgres-secret
name: postgres-secret # DEV DB creds
- secretRef:
name: aws-backup-secret
name: aws-backup-secret # shared AWS creds
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: postgres-backup-prod
namespace: default
spec:
schedule: "30 01 * * *" # daily at 01:30 (recommended for prod)
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: backup
image: postgres:16
command:
- /bin/sh
- -c
- |
pg_dump \
-h postgres-prod.default.svc.cluster.local \
-U $POSTGRES_USER \
stripe_invoice_prod \
| gzip \
| aws s3 cp - s3://$S3_BUCKET/prod/stripe_invoice/$(date +%F).sql.gz
envFrom:
- secretRef:
name: postgres-prod-secret # PROD DB creds
- secretRef:
name: aws-backup-secret # shared AWS creds

View file

@ -0,0 +1,23 @@
apiVersion: batch/v1
kind: Job
metadata:
name: atlas-migrate-dev
spec:
template:
spec:
restartPolicy: Never
containers:
- name: atlas
image: arigaio/atlas:latest
command: ["/atlas"]
args: ["migrate", "apply", "--env", "stripe_invoice_dev"]
envFrom:
- secretRef:
name: postgres-secret
# You can run this:
# kubectl apply -f k8s/migrations/atlas-job.yaml
# Or later from CI.

View file

@ -1,7 +1,7 @@
apiVersion: batch/v1
kind: Job
metadata:
name: atlas-migrate
name: atlas-migrate-dev
spec:
template:
spec:
@ -9,12 +9,14 @@ spec:
containers:
- name: atlas
image: arigaio/atlas:latest
command: ["atlas", "migrate", "apply", "--env", "stripe_invoice"]
command: ["migrate", "apply", "--env", "stripe_invoice_prod"]
envFrom:
- secretRef:
name: postgres-secret
# You can run this:
# kubectl apply -f k8s/migrations/atlas-job.yaml
# Or later from CI.

View file

@ -84,7 +84,7 @@ spec:
apiVersion: v1
kind: Service
metadata:
name: postgres
name: postgres-dev
namespace: default
spec:
type: ClusterIP
@ -93,3 +93,15 @@ spec:
ports:
- port: 5432
targetPort: 5432
---
apiVersion: v1
kind: Secret
metadata:
name: postgres-secret
namespace: default
type: Opaque
stringData:
POSTGRES_USER: stripe_invoice
POSTGRES_PASSWORD: averysecretpasswordPersonAppleWinter938
POSTGRES_DB: stripe_invoice

View file

@ -0,0 +1,111 @@
# --------------------------------------------------
# PersistentVolume (local disk on mist) — PROD
# --------------------------------------------------
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-prod-pv
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
hostPath:
path: /home/kimjunte/k8s_storage/postgres/stripe_invoice_prod
---
# --------------------------------------------------
# PersistentVolumeClaim — PROD
# --------------------------------------------------
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-prod-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: local-storage
---
# --------------------------------------------------
# PostgreSQL Secret — PROD
# (DO NOT COMMIT real values)
# --------------------------------------------------
apiVersion: v1
kind: Secret
metadata:
name: postgres-prod-secret
namespace: default
type: Opaque
stringData:
POSTGRES_USER: stripe_invoice_prod
POSTGRES_PASSWORD: productionPassword1142M@ke!tH@rd2Br3akWith$ymb0ls
POSTGRES_DB: stripe_invoice_prod
---
# --------------------------------------------------
# PostgreSQL Deployment — PROD
# --------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres-prod
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: postgres-prod
template:
metadata:
labels:
app: postgres-prod
spec:
containers:
- name: postgres
image: postgres:16
ports:
- containerPort: 5432
envFrom:
- secretRef:
name: postgres-prod-secret
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
readinessProbe:
tcpSocket:
port: 5432
initialDelaySeconds: 10
periodSeconds: 5
livenessProbe:
tcpSocket:
port: 5432
initialDelaySeconds: 30
periodSeconds: 10
volumes:
- name: postgres-data
persistentVolumeClaim:
claimName: postgres-prod-pvc
---
# --------------------------------------------------
# PostgreSQL Service (cluster-internal only) — PROD
# --------------------------------------------------
apiVersion: v1
kind: Service
metadata:
name: postgres-prod
namespace: default
spec:
type: ClusterIP
selector:
app: postgres-prod
ports:
- port: 5432
targetPort: 5432

View file

@ -0,0 +1,12 @@
# This might be needed for runner permission.
# Unsure at the moment
apiVersion: v1
kind: Secret
metadata:
name: postgres-secret
namespace: arc-systems
type: Opaque
stringData:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: averysecretpasswordPersonAppleWinter938
POSTGRES_DB: stripe_invoice

View file

@ -1,21 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: postgres-secret
namespace: arc-systems
type: Opaque
stringData:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: averysecretpasswordPersonAppleWinter938
POSTGRES_DB: stripe_invoice
---
apiVersion: v1
kind: Secret
metadata:
name: postgres-secret
namespace: arc-systems
type: Opaque
stringData:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: averysecretpasswordPersonAppleWinter938
POSTGRES_DB: stripe_invoice

View file

View file

View file

@ -1,4 +1,3 @@
- Engineering management for the rest of us
- Next.js Pages Router Tutorial — <https://nextjs.org/learn/pages-router>
- Next.js Dashboard App Tutorial — <https://nextjs.org/learn/dashboard-app>
- MDN: Using Promises — <https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises>
@ -17,8 +16,22 @@
- RealPython: Async IO — <https://realpython.com/async-io-python/>
- AWS Documentation — <https://docs.aws.amazon.com/>
- SVG Animations (Book) — <https://www.amazon.com/SVG-Animations-Implementations-Responsive-Animation/dp/1491939702>
- Antifrafile
- Re read pragmatic automator
- Look up TrueNas to help with my storage problems
- https://www.truenas.com/docs/
- Video editing over network?
- DB with k8s back up?
- plex in the future?
- Antifragile
- Re read pragmatic programmer
- unix and linux system administration handbook
- pandas textbook I bought
- Home assistant docs on dashboard etc
- Engineering management for the rest of us
If I am stuck:
- Go for shower/coldplunge/run/walk
- talk out loud to someone
- have i asked chat gpt
- have i asked google
- have i read a book on it
- can I make touch points any smaller and notice the wins in the struggle?