Add R2 image storage, upload endpoint, and CDN support

- Backend: R2StorageService, upload controller (POST /api/upload; usage sketched below)
- Frontend: CDN URL helper, NEXT_PUBLIC_CDN_URL build arg
- Deploy: pass R2 secrets from Woodpecker CI to containers via .env
- Docs: update CLAUDE.md with CDN and upload conventions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-10 02:34:27 +02:00
parent a30fe60414
commit f0f769c5e8
17 changed files with 407 additions and 4 deletions
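
The upload flow added in this commit can be smoke-tested from a shell. The sketch below is illustrative only: the POST /api/upload route and NEXT_PUBLIC_CDN_URL come from the commit message, while the multipart field name ("file"), the JSON "key" field in the response, and the dev backend address (127.0.0.1:5200, taken from the health check in replicate-db.sh) are assumptions.

# Hypothetical smoke test for the new endpoint; field name and response shape
# are assumptions, only the route itself is confirmed by this commit.
curl -sf -X POST http://127.0.0.1:5200/api/upload \
    -F "file=@./example.jpg" \
    -o /tmp/upload-response.json

# The frontend CDN helper composes public URLs from NEXT_PUBLIC_CDN_URL plus
# the stored object key; the same composition in shell (assumes a "key" field):
KEY=$(jq -r '.key' /tmp/upload-response.json)
echo "${NEXT_PUBLIC_CDN_URL%/}/${KEY}"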

scripts/replicate-db.sh Normal file

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
#
# replicate-db.sh — Nightly prod_db -> dev_db replication with backups
# Runs via cron as user "deploy" at 04:00 Kyiv time
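#
# Example crontab entry for the deploy user (path is illustrative; assumes the
# host clock runs on Europe/Kyiv time, otherwise adjust the hour or use CRON_TZ):
#   0 4 * * * /srv/apps/gb-site/scripts/replicate-db.sh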
#
set -euo pipefail
# --- Lock (prevent parallel runs) ---
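# FD 200 stays open for the lifetime of the script and holds the lock; with -n,
# flock fails immediately instead of queueing when a previous run is still active.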
LOCK_FILE="/tmp/replicate-db.lock"
exec 200>"$LOCK_FILE"
flock -n 200 || { echo "[$(date)] Another instance is already running. Exiting."; exit 1; }
# --- Configuration ---
POSTGRES_CONTAINER="postgres"
SUPERUSER="app"
SUPERUSER_PASS="zYWT5JWu3iAbbW7mOyd1"
SOURCE_DB="prod_db"
TARGET_DB="dev_db"
TARGET_USER="dev_user"
DEV_BACKEND_CONTAINER="gb-dev-backend"
DEV_COMPOSE_DIR="/srv/apps/gb-site-dev/deploy"
DEV_COMPOSE_FILE="docker-compose.dev.yml"
BACKUP_DIR="/srv/backups"
BACKUP_RETENTION_DAYS=14
LOG_DIR="/srv/logs"
TIMESTAMP=$(date '+%Y%m%d-%H%M%S')
LOG_FILE="${LOG_DIR}/replicate-db-${TIMESTAMP}.log"
LOG_RETENTION_DAYS=30
CONTAINER_DUMP="/tmp/prod_dump.dump"
# --- Logging ---
mkdir -p "$LOG_DIR" "$BACKUP_DIR"
log() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] $1" | tee -a "$LOG_FILE"
}
log_error() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] ERROR: $1" | tee -a "$LOG_FILE" >&2
}
# --- Cleanup on exit ---
cleanup() {
    local exit_code=$?
    # Remove dump from container
    log "Cleaning up dump file inside container..."
    docker exec "$POSTGRES_CONTAINER" rm -f "$CONTAINER_DUMP" 2>/dev/null || true
    # Make sure the dev backend is running again, even if replication failed
    if ! docker ps --format '{{.Names}}' | grep -q "^${DEV_BACKEND_CONTAINER}$"; then
        log "Restarting dev backend..."
        cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend 2>>"$LOG_FILE" || true
    fi
    # Clean old logs
    find "$LOG_DIR" -name "replicate-db-*.log" -mtime +${LOG_RETENTION_DAYS} -delete 2>/dev/null || true
    if [ $exit_code -eq 0 ]; then
        log "=== Replication completed successfully ==="
    else
        log_error "=== Replication FAILED with exit code $exit_code ==="
    fi
}
trap cleanup EXIT
# ===== MAIN =====
log "=== Starting prod_db -> dev_db replication ==="
# 1. Check postgres container
log "Step 1: Checking postgres container..."
if ! docker ps --format '{{.Names}}' | grep -q "^${POSTGRES_CONTAINER}$"; then
    log_error "PostgreSQL container is not running!"
    exit 1
fi
# 2. Backup prod_db to host filesystem
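# --format=custom produces a compressed archive that pg_restore can read
# (including selective restore), unlike a plain-SQL dump.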
log "Step 2: Backing up prod_db..."
BACKUP_FILE="${BACKUP_DIR}/prod_db_${TIMESTAMP}.dump"
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \
    --format=custom \
    --file=/tmp/prod_backup.dump \
    2>>"$LOG_FILE"
docker cp "${POSTGRES_CONTAINER}:/tmp/prod_backup.dump" "$BACKUP_FILE" 2>>"$LOG_FILE"
docker exec "$POSTGRES_CONTAINER" rm -f /tmp/prod_backup.dump 2>/dev/null || true
BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
log "Backup saved: $BACKUP_FILE ($BACKUP_SIZE)"
# 3. Rotate old backups
log "Step 3: Rotating backups older than ${BACKUP_RETENTION_DAYS} days..."
DELETED_COUNT=$(find "$BACKUP_DIR" -name "prod_db_*.dump" -mtime +${BACKUP_RETENTION_DAYS} -delete -print | wc -l)
log "Deleted $DELETED_COUNT old backup(s)."
# 4. Stop dev backend
log "Step 4: Stopping dev backend..."
docker stop "$DEV_BACKEND_CONTAINER" 2>>"$LOG_FILE" || log "Warning: dev backend was already stopped."
sleep 2
# 5. Dump prod_db for replication (inside container)
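# --no-owner/--no-acl strip prod ownership and GRANT/REVOKE statements from the
# dump; ownership is reassigned to dev_user via pg_restore --role in step 8.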
log "Step 5: Dumping prod_db for replication..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \
    --format=custom \
    --no-owner \
    --no-acl \
    --file="$CONTAINER_DUMP" \
    2>>"$LOG_FILE"
log "Dump completed."
# 6. Terminate connections to dev_db
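# DROP DATABASE fails while any session is connected to dev_db, so terminate
# existing connections first; pg_backend_pid() excludes the current session.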
log "Step 6: Terminating connections to dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c \
    "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${TARGET_DB}' AND pid <> pg_backend_pid();" \
    >>"$LOG_FILE" 2>&1 || true
# 7. Drop and recreate dev_db
log "Step 7: Recreating dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c "DROP DATABASE IF EXISTS ${TARGET_DB};" \
    >>"$LOG_FILE" 2>&1
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c "CREATE DATABASE ${TARGET_DB} OWNER ${TARGET_USER};" \
    >>"$LOG_FILE" 2>&1
log "Database recreated."
# 8. Restore dump into dev_db
log "Step 8: Restoring into dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_restore -U "$SUPERUSER" -d "$TARGET_DB" \
    --no-owner \
    --no-acl \
    --role="$TARGET_USER" \
    "$CONTAINER_DUMP" \
    2>>"$LOG_FILE"
log "Restore completed."
# 9. Grant privileges to dev_user
# Note: --role=dev_user in pg_restore already sets object ownership
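# ALTER DEFAULT PRIVILEGES below covers objects the superuser creates later in
# this database (e.g. migrations run as "app"), so dev_user keeps access to them.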
log "Step 9: Granting privileges to dev_user..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d "$TARGET_DB" -c "
        ALTER SCHEMA public OWNER TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON DATABASE ${TARGET_DB} TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO ${TARGET_USER};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO ${TARGET_USER};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO ${TARGET_USER};
    " >>"$LOG_FILE" 2>&1
log "Privileges granted."
# 10. Start dev backend
log "Step 10: Starting dev backend..."
cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend \
2>>"$LOG_FILE"
log "Dev backend started."
# 11. Health check
log "Step 11: Health check..."
sleep 5
HEALTH=$(curl -sf http://127.0.0.1:5200/api/health/ping 2>/dev/null || echo "FAILED")
if [ "$HEALTH" = "FAILED" ]; then
log "Warning: Health check did not respond (backend may still be starting)."
else
log "Health check OK: $HEALTH"
fi
log "=== Replication finished ==="