diff --git a/.woodpecker/deploy.yml b/.woodpecker/deploy.yml index 0c204a0..9d525ec 100644 --- a/.woodpecker/deploy.yml +++ b/.woodpecker/deploy.yml @@ -7,13 +7,30 @@ steps: image: appleboy/drone-ssh when: branch: main + environment: + R2_ACCOUNT_ID: + from_secret: r2_account_id + R2_ACCESS_KEY_ID: + from_secret: r2_access_key_id + R2_SECRET_ACCESS_KEY: + from_secret: r2_secret_access_key settings: host: 31.131.18.254 username: deploy key: from_secret: ssh_key + envs: + - R2_ACCOUNT_ID + - R2_ACCESS_KEY_ID + - R2_SECRET_ACCESS_KEY script: - cd /srv/apps/gb-site && git pull origin main + - | + cat > /srv/apps/gb-site/deploy/.env << EOF + R2_ACCOUNT_ID=$R2_ACCOUNT_ID + R2_ACCESS_KEY_ID=$R2_ACCESS_KEY_ID + R2_SECRET_ACCESS_KEY=$R2_SECRET_ACCESS_KEY + EOF - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.prod.yml build --no-cache - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.prod.yml up -d @@ -21,12 +38,29 @@ steps: image: appleboy/drone-ssh when: branch: dev + environment: + R2_ACCOUNT_ID: + from_secret: r2_account_id + R2_ACCESS_KEY_ID: + from_secret: r2_access_key_id + R2_SECRET_ACCESS_KEY: + from_secret: r2_secret_access_key settings: host: 31.131.18.254 username: deploy key: from_secret: ssh_key + envs: + - R2_ACCOUNT_ID + - R2_ACCESS_KEY_ID + - R2_SECRET_ACCESS_KEY script: - - cd /srv/apps/gb-site && git pull origin dev - - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.dev.yml build --no-cache - - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.dev.yml up -d + - cd /srv/apps/gb-site-dev && git pull origin dev + - | + cat > /srv/apps/gb-site-dev/deploy/.env << EOF + R2_ACCOUNT_ID=$R2_ACCOUNT_ID + R2_ACCESS_KEY_ID=$R2_ACCESS_KEY_ID + R2_SECRET_ACCESS_KEY=$R2_SECRET_ACCESS_KEY + EOF + - cd /srv/apps/gb-site-dev/deploy && docker compose -f docker-compose.dev.yml build --no-cache + - cd /srv/apps/gb-site-dev/deploy && docker compose -f docker-compose.dev.yml up -d diff --git a/CLAUDE.md b/CLAUDE.md 
index de442c4..278a8ab 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -130,6 +130,7 @@ cd backend && dotnet build - Controllers use attribute routing: `[Route("api/[controller]")]` - Health check: `GET /api/health` — returns `{ status, database, environment }` - Health ping: `GET /api/health/ping` — returns `{ status: "pong" }` +- Upload: `POST /api/upload` — multipart form, fields: `file` (required), `path` (optional folder prefix) ## Frontend Conventions @@ -168,6 +169,20 @@ Host=postgres;Port=5432;Database={db};Username={user};Password={pass} Configured via env var `ConnectionStrings__Default` in docker-compose files. Locally via `appsettings.Local.json` with SSH tunnel (`localhost:5433`). +## CDN & Image Storage + +- **Storage:** Cloudflare R2 (S3-compatible) +- **CDN domain:** https://cdn.goodbrick.com.ua +- **Bucket:** goodbrick +- **Frontend:** `NEXT_PUBLIC_CDN_URL` env var, `cdnUrl()` helper in `src/lib/cdn.ts` +- **Backend:** `R2__*` env vars, `R2StorageService` for uploads +- **Upload endpoint:** `POST /api/upload` — multipart/form-data, max 10MB, images only (jpeg/png/webp/avif) + +### Folder Convention +- `products/<id>/main.jpg` — product photos +- `catalog/<slug>/cover.jpg` — collection covers +- `site/<name>.jpg` — site-wide images (hero, about, etc.) 
+ ## Key Rules - Never commit `.env` files or secrets — credentials are in docker-compose env vars on server diff --git a/backend/src/GBSite.Api/Configuration/R2Settings.cs b/backend/src/GBSite.Api/Configuration/R2Settings.cs new file mode 100644 index 0000000..342178c --- /dev/null +++ b/backend/src/GBSite.Api/Configuration/R2Settings.cs @@ -0,0 +1,14 @@ +namespace GBSite.Api.Configuration; + +public class R2Settings +{ + public const string SectionName = "R2"; + + public required string AccountId { get; set; } + public required string AccessKeyId { get; set; } + public required string SecretAccessKey { get; set; } + public required string BucketName { get; set; } + public string PublicUrl { get; set; } = "https://cdn.goodbrick.com.ua"; + + public string ServiceUrl => $"https://{AccountId}.r2.cloudflarestorage.com"; +} diff --git a/backend/src/GBSite.Api/Controllers/UploadController.cs b/backend/src/GBSite.Api/Controllers/UploadController.cs new file mode 100644 index 0000000..8674b94 --- /dev/null +++ b/backend/src/GBSite.Api/Controllers/UploadController.cs @@ -0,0 +1,44 @@ +using GBSite.Api.Services; +using Microsoft.AspNetCore.Mvc; + +namespace GBSite.Api.Controllers; + +[ApiController] +[Route("api/[controller]")] +public class UploadController : ControllerBase +{ + private readonly R2StorageService? _storage; + + public UploadController(R2StorageService? storage = null) + { + _storage = storage; + } + + [HttpPost] + [RequestSizeLimit(10 * 1024 * 1024)] + public async Task Upload( + IFormFile file, + [FromForm] string? 
path, + CancellationToken ct) + { + if (_storage is null) + return StatusCode(503, new { error = "Storage service not configured" }); + + if (file.Length == 0) + return BadRequest(new { error = "File is empty" }); + + var allowedTypes = new[] { "image/jpeg", "image/png", "image/webp", "image/avif" }; + if (!allowedTypes.Contains(file.ContentType)) + return BadRequest(new { error = $"File type '{file.ContentType}' not allowed" }); + + var fileName = Path.GetFileName(file.FileName); + var key = string.IsNullOrEmpty(path) + ? fileName + : $"{path.Trim('/')}/{fileName}"; + + await using var stream = file.OpenReadStream(); + var cdnUrl = await _storage.UploadAsync(stream, key, file.ContentType, ct); + + return Ok(new { url = cdnUrl, key }); + } +} diff --git a/backend/src/GBSite.Api/GBSite.Api.csproj b/backend/src/GBSite.Api/GBSite.Api.csproj index 8a30b20..93c2a8d 100644 --- a/backend/src/GBSite.Api/GBSite.Api.csproj +++ b/backend/src/GBSite.Api/GBSite.Api.csproj @@ -4,9 +4,11 @@ net9.0 enable enable + 805cad54-8a19-4713-b893-a1ec63696146 + diff --git a/backend/src/GBSite.Api/Program.cs b/backend/src/GBSite.Api/Program.cs index 31ce2d3..f047e1d 100644 --- a/backend/src/GBSite.Api/Program.cs +++ b/backend/src/GBSite.Api/Program.cs @@ -1,9 +1,32 @@ +using Amazon.S3; +using GBSite.Api.Configuration; +using GBSite.Api.Services; + var builder = WebApplication.CreateBuilder(args); builder.Configuration.AddJsonFile("appsettings.Local.json", optional: true, reloadOnChange: true); builder.Services.AddControllers(); builder.Services.AddOpenApi(); +// R2 Storage +var r2Section = builder.Configuration.GetSection(R2Settings.SectionName); +builder.Services.Configure(r2Section); + +var r2Settings = r2Section.Get(); +if (r2Settings is not null && !string.IsNullOrEmpty(r2Settings.AccountId)) +{ + builder.Services.AddSingleton(_ => + { + var config = new AmazonS3Config + { + ServiceURL = r2Settings.ServiceUrl, + ForcePathStyle = true + }; + return new 
AmazonS3Client(r2Settings.AccessKeyId, r2Settings.SecretAccessKey, config); + }); + builder.Services.AddSingleton(); +} + var app = builder.Build(); if (app.Environment.IsDevelopment()) diff --git a/backend/src/GBSite.Api/Services/R2StorageService.cs b/backend/src/GBSite.Api/Services/R2StorageService.cs new file mode 100644 index 0000000..35f5751 --- /dev/null +++ b/backend/src/GBSite.Api/Services/R2StorageService.cs @@ -0,0 +1,43 @@ +using Amazon.S3; +using Amazon.S3.Model; +using GBSite.Api.Configuration; +using Microsoft.Extensions.Options; + +namespace GBSite.Api.Services; + +public class R2StorageService +{ + private readonly IAmazonS3 _s3; + private readonly R2Settings _settings; + + public R2StorageService(IAmazonS3 s3, IOptions settings) + { + _s3 = s3; + _settings = settings.Value; + } + + public async Task UploadAsync( + Stream stream, + string key, + string contentType, + CancellationToken ct = default) + { + var request = new PutObjectRequest + { + BucketName = _settings.BucketName, + Key = key, + InputStream = stream, + ContentType = contentType, + DisablePayloadSigning = true + }; + + await _s3.PutObjectAsync(request, ct); + + return $"{_settings.PublicUrl.TrimEnd('/')}/{key}"; + } + + public async Task DeleteAsync(string key, CancellationToken ct = default) + { + await _s3.DeleteObjectAsync(_settings.BucketName, key, ct); + } +} diff --git a/backend/src/GBSite.Api/appsettings.json b/backend/src/GBSite.Api/appsettings.json index 10f68b8..95dae36 100644 --- a/backend/src/GBSite.Api/appsettings.json +++ b/backend/src/GBSite.Api/appsettings.json @@ -5,5 +5,12 @@ "Microsoft.AspNetCore": "Warning" } }, - "AllowedHosts": "*" + "AllowedHosts": "*", + "R2": { + "AccountId": "", + "AccessKeyId": "", + "SecretAccessKey": "", + "BucketName": "", + "PublicUrl": "https://cdn.goodbrick.com.ua" + } } diff --git a/deploy/docker-compose.dev.yml b/deploy/docker-compose.dev.yml index 3103d0f..0b5a67a 100644 --- a/deploy/docker-compose.dev.yml +++ 
b/deploy/docker-compose.dev.yml @@ -5,6 +5,7 @@ services: args: - NEXT_PUBLIC_POSTHOG_KEY=phc_pe0mP58n724h9eFxanbGIUsfMyS14gnAmr5tYez9V3Q - NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com + - NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua container_name: gb-dev-frontend restart: unless-stopped ports: @@ -24,6 +25,11 @@ services: environment: - ASPNETCORE_ENVIRONMENT=Development - ConnectionStrings__Default=Host=postgres;Port=5432;Database=dev_db;Username=dev_user;Password=dev_pass_vB6nM3qP8yW2rT9k + - R2__AccountId=${R2_ACCOUNT_ID} + - R2__AccessKeyId=${R2_ACCESS_KEY_ID} + - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY} + - R2__BucketName=${R2_BUCKET_NAME:-goodbrick} + - R2__PublicUrl=https://cdn.goodbrick.com.ua networks: - app-network diff --git a/deploy/docker-compose.local.yml b/deploy/docker-compose.local.yml index 5e46709..e5972dc 100644 --- a/deploy/docker-compose.local.yml +++ b/deploy/docker-compose.local.yml @@ -7,3 +7,8 @@ services: environment: - ASPNETCORE_ENVIRONMENT=Development - ConnectionStrings__Default=Host=host.docker.internal;Port=5433;Database=dev_db;Username=dev_user;Password=dev_pass_vB6nM3qP8yW2rT9k + - R2__AccountId=${R2_ACCOUNT_ID} + - R2__AccessKeyId=${R2_ACCESS_KEY_ID} + - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY} + - R2__BucketName=${R2_BUCKET_NAME:-goodbrick} + - R2__PublicUrl=https://cdn.goodbrick.com.ua diff --git a/deploy/docker-compose.prod.yml b/deploy/docker-compose.prod.yml index 9cd476a..c77b5d9 100644 --- a/deploy/docker-compose.prod.yml +++ b/deploy/docker-compose.prod.yml @@ -5,6 +5,7 @@ services: args: - NEXT_PUBLIC_POSTHOG_KEY=phc_pe0mP58n724h9eFxanbGIUsfMyS14gnAmr5tYez9V3Q - NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com + - NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua container_name: gb-prod-frontend restart: unless-stopped ports: @@ -24,6 +25,11 @@ services: environment: - ASPNETCORE_ENVIRONMENT=Production - 
ConnectionStrings__Default=Host=postgres;Port=5432;Database=prod_db;Username=prod_user;Password=prod_pass_kL9mN2pQ7xR8sT4v + - R2__AccountId=${R2_ACCOUNT_ID} + - R2__AccessKeyId=${R2_ACCESS_KEY_ID} + - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY} + - R2__BucketName=${R2_BUCKET_NAME:-goodbrick} + - R2__PublicUrl=https://cdn.goodbrick.com.ua networks: - app-network diff --git a/frontend/.env.example b/frontend/.env.example index 16d99ca..07554b9 100644 --- a/frontend/.env.example +++ b/frontend/.env.example @@ -3,3 +3,6 @@ INTERNAL_API_URL=http://gb-prod-backend:5000 # URL бэкенда для проксирования /api/* в локальной разработке (не задавать на сервере!) # LOCAL_API_URL=http://localhost:5000 + +# CDN URL для изображений (Cloudflare R2) +NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 67ae0ab..42451cc 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -7,6 +7,7 @@ FROM node:20-alpine AS builder WORKDIR /app ARG NEXT_PUBLIC_POSTHOG_KEY ARG NEXT_PUBLIC_POSTHOG_HOST +ARG NEXT_PUBLIC_CDN_URL COPY --from=deps /app/node_modules ./node_modules COPY . . 
RUN npm run build diff --git a/frontend/next.config.ts b/frontend/next.config.ts index a38131f..c5bb244 100644 --- a/frontend/next.config.ts +++ b/frontend/next.config.ts @@ -2,6 +2,11 @@ import type { NextConfig } from "next"; const nextConfig: NextConfig = { output: "standalone", + images: { + remotePatterns: [ + { protocol: "https", hostname: "cdn.goodbrick.com.ua" }, + ], + }, async rewrites() { const apiUrl = process.env.LOCAL_API_URL; if (!apiUrl) return []; diff --git a/frontend/public/robots.txt b/frontend/public/robots.txt new file mode 100644 index 0000000..f7e7c7c --- /dev/null +++ b/frontend/public/robots.txt @@ -0,0 +1,5 @@ +User-agent: * +Allow: / +Allow: /_next/image + +Sitemap: https://new.goodbrick.com.ua/sitemap.xml diff --git a/frontend/src/lib/cdn.ts b/frontend/src/lib/cdn.ts new file mode 100644 index 0000000..0332748 --- /dev/null +++ b/frontend/src/lib/cdn.ts @@ -0,0 +1,6 @@ +const CDN_BASE_URL = + process.env.NEXT_PUBLIC_CDN_URL || "https://cdn.goodbrick.com.ua"; + +export function cdnUrl(path: string): string { + return `${CDN_BASE_URL.replace(/\/+$/, "")}/${path.replace(/^\/+/, "")}`; +} diff --git a/scripts/replicate-db.sh b/scripts/replicate-db.sh new file mode 100644 index 0000000..38a8307 --- /dev/null +++ b/scripts/replicate-db.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash +# +# replicate-db.sh — Nightly prod_db -> dev_db replication with backups +# Runs via cron as user "deploy" at 04:00 Kyiv time +# + +set -euo pipefail + +# --- Lock (prevent parallel runs) --- +LOCK_FILE="/tmp/replicate-db.lock" +exec 200>"$LOCK_FILE" +flock -n 200 || { echo "[$(date)] Another instance is already running. 
Exiting."; exit 1; } + +# --- Configuration --- +POSTGRES_CONTAINER="postgres" +SUPERUSER="app" +SUPERUSER_PASS="zYWT5JWu3iAbbW7mOyd1" + +SOURCE_DB="prod_db" +TARGET_DB="dev_db" +TARGET_USER="dev_user" + +DEV_BACKEND_CONTAINER="gb-dev-backend" +DEV_COMPOSE_DIR="/srv/apps/gb-site-dev/deploy" +DEV_COMPOSE_FILE="docker-compose.dev.yml" + +BACKUP_DIR="/srv/backups" +BACKUP_RETENTION_DAYS=14 + +LOG_DIR="/srv/logs" +TIMESTAMP=$(date '+%Y%m%d-%H%M%S') +LOG_FILE="${LOG_DIR}/replicate-db-${TIMESTAMP}.log" +LOG_RETENTION_DAYS=30 + +CONTAINER_DUMP="/tmp/prod_dump.dump" + +# --- Logging --- +mkdir -p "$LOG_DIR" "$BACKUP_DIR" + +log() { + local ts + ts=$(date '+%Y-%m-%d %H:%M:%S') + echo "[$ts] $1" | tee -a "$LOG_FILE" +} + +log_error() { + local ts + ts=$(date '+%Y-%m-%d %H:%M:%S') + echo "[$ts] ERROR: $1" | tee -a "$LOG_FILE" >&2 +} + +# --- Cleanup on exit --- +cleanup() { + local exit_code=$? + + # Remove dump from container + log "Cleaning up dump file inside container..." + docker exec "$POSTGRES_CONTAINER" rm -f "$CONTAINER_DUMP" 2>/dev/null || true + + # Always restart dev backend + if ! docker ps --format '{{.Names}}' | grep -q "^${DEV_BACKEND_CONTAINER}$"; then + log "Restarting dev backend..." + cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend 2>>"$LOG_FILE" || true + fi + + # Clean old logs + find "$LOG_DIR" -name "replicate-db-*.log" -mtime +${LOG_RETENTION_DAYS} -delete 2>/dev/null || true + + if [ $exit_code -eq 0 ]; then + log "=== Replication completed successfully ===" + else + log_error "=== Replication FAILED with exit code $exit_code ===" + fi +} +trap cleanup EXIT + +# ===== MAIN ===== +log "=== Starting prod_db -> dev_db replication ===" + +# 1. Check postgres container +log "Step 1: Checking postgres container..." +if ! docker ps --format '{{.Names}}' | grep -q "^${POSTGRES_CONTAINER}$"; then + log_error "PostgreSQL container is not running!" + exit 1 +fi + +# 2. 
Backup prod_db to host filesystem +log "Step 2: Backing up prod_db..." +BACKUP_FILE="${BACKUP_DIR}/prod_db_${TIMESTAMP}.dump" + +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \ + --format=custom \ + --file=/tmp/prod_backup.dump \ + 2>>"$LOG_FILE" + +docker cp "${POSTGRES_CONTAINER}:/tmp/prod_backup.dump" "$BACKUP_FILE" 2>>"$LOG_FILE" +docker exec "$POSTGRES_CONTAINER" rm -f /tmp/prod_backup.dump 2>/dev/null || true + +BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1) +log "Backup saved: $BACKUP_FILE ($BACKUP_SIZE)" + +# 3. Rotate old backups +log "Step 3: Rotating backups older than ${BACKUP_RETENTION_DAYS} days..." +DELETED_COUNT=$(find "$BACKUP_DIR" -name "prod_db_*.dump" -mtime +${BACKUP_RETENTION_DAYS} -delete -print | wc -l) +log "Deleted $DELETED_COUNT old backup(s)." + +# 4. Stop dev backend +log "Step 4: Stopping dev backend..." +docker stop "$DEV_BACKEND_CONTAINER" 2>>"$LOG_FILE" || log "Warning: dev backend was already stopped." +sleep 2 + +# 5. Dump prod_db for replication (inside container) +log "Step 5: Dumping prod_db for replication..." +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \ + --format=custom \ + --no-owner \ + --no-acl \ + --file="$CONTAINER_DUMP" \ + 2>>"$LOG_FILE" +log "Dump completed." + +# 6. Terminate connections to dev_db +log "Step 6: Terminating connections to dev_db..." +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + psql -U "$SUPERUSER" -d postgres -c \ + "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${TARGET_DB}' AND pid <> pg_backend_pid();" \ + >>"$LOG_FILE" 2>&1 || true + +# 7. Drop and recreate dev_db +log "Step 7: Recreating dev_db..." 
+docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + psql -U "$SUPERUSER" -d postgres -c "DROP DATABASE IF EXISTS ${TARGET_DB};" \ + >>"$LOG_FILE" 2>&1 + +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + psql -U "$SUPERUSER" -d postgres -c "CREATE DATABASE ${TARGET_DB} OWNER ${TARGET_USER};" \ + >>"$LOG_FILE" 2>&1 +log "Database recreated." + +# 8. Restore dump into dev_db +log "Step 8: Restoring into dev_db..." +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + pg_restore -U "$SUPERUSER" -d "$TARGET_DB" \ + --no-owner \ + --no-acl \ + --role="$TARGET_USER" \ + "$CONTAINER_DUMP" \ + 2>>"$LOG_FILE" +log "Restore completed." + +# 9. Grant privileges to dev_user +# Note: --role=dev_user in pg_restore already sets object ownership +log "Step 9: Granting privileges to dev_user..." +docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \ + psql -U "$SUPERUSER" -d "$TARGET_DB" -c " + ALTER SCHEMA public OWNER TO ${TARGET_USER}; + GRANT ALL PRIVILEGES ON DATABASE ${TARGET_DB} TO ${TARGET_USER}; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO ${TARGET_USER}; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO ${TARGET_USER}; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO ${TARGET_USER}; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO ${TARGET_USER}; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO ${TARGET_USER}; + " >>"$LOG_FILE" 2>&1 +log "Privileges granted." + +# 10. Start dev backend +log "Step 10: Starting dev backend..." +cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend \ + 2>>"$LOG_FILE" +log "Dev backend started." + +# 11. Health check +log "Step 11: Health check..." +sleep 5 +HEALTH=$(curl -sf http://127.0.0.1:5200/api/health/ping 2>/dev/null || echo "FAILED") +if [ "$HEALTH" = "FAILED" ]; then + log "Warning: Health check did not respond (backend may still be starting)." 
+else + log "Health check OK: $HEALTH" +fi + +log "=== Replication finished ==="