Add R2 image storage, upload endpoint, and CDN support

- Backend: R2StorageService, upload controller (POST /api/upload)
- Frontend: CDN URL helper, NEXT_PUBLIC_CDN_URL build arg
- Deploy: pass R2 secrets from Woodpecker CI to containers via .env
- Docs: update CLAUDE.md with CDN and upload conventions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
commit f0f769c5e8 (parent a30fe60414)
2026-02-10 02:34:27 +02:00
17 changed files with 407 additions and 4 deletions


@@ -7,13 +7,30 @@ steps:
    image: appleboy/drone-ssh
    when:
      branch: main
    environment:
      R2_ACCOUNT_ID:
        from_secret: r2_account_id
      R2_ACCESS_KEY_ID:
        from_secret: r2_access_key_id
      R2_SECRET_ACCESS_KEY:
        from_secret: r2_secret_access_key
    settings:
      host: 31.131.18.254
      username: deploy
      key:
        from_secret: ssh_key
      envs:
        - R2_ACCOUNT_ID
        - R2_ACCESS_KEY_ID
        - R2_SECRET_ACCESS_KEY
      script:
        - cd /srv/apps/gb-site && git pull origin main
        - |
          cat > /srv/apps/gb-site/deploy/.env << EOF
          R2_ACCOUNT_ID=$R2_ACCOUNT_ID
          R2_ACCESS_KEY_ID=$R2_ACCESS_KEY_ID
          R2_SECRET_ACCESS_KEY=$R2_SECRET_ACCESS_KEY
          EOF
        - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.prod.yml build --no-cache
        - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.prod.yml up -d
@@ -21,12 +38,29 @@ steps:
    image: appleboy/drone-ssh
    when:
      branch: dev
    environment:
      R2_ACCOUNT_ID:
        from_secret: r2_account_id
      R2_ACCESS_KEY_ID:
        from_secret: r2_access_key_id
      R2_SECRET_ACCESS_KEY:
        from_secret: r2_secret_access_key
    settings:
      host: 31.131.18.254
      username: deploy
      key:
        from_secret: ssh_key
      envs:
        - R2_ACCOUNT_ID
        - R2_ACCESS_KEY_ID
        - R2_SECRET_ACCESS_KEY
      script:
        - cd /srv/apps/gb-site && git pull origin dev
        - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.dev.yml build --no-cache
        - cd /srv/apps/gb-site/deploy && docker compose -f docker-compose.dev.yml up -d
        - cd /srv/apps/gb-site-dev && git pull origin dev
        - |
          cat > /srv/apps/gb-site-dev/deploy/.env << EOF
          R2_ACCOUNT_ID=$R2_ACCOUNT_ID
          R2_ACCESS_KEY_ID=$R2_ACCESS_KEY_ID
          R2_SECRET_ACCESS_KEY=$R2_SECRET_ACCESS_KEY
          EOF
        - cd /srv/apps/gb-site-dev/deploy && docker compose -f docker-compose.dev.yml build --no-cache
        - cd /srv/apps/gb-site-dev/deploy && docker compose -f docker-compose.dev.yml up -d
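Docker Compose automatically loads `deploy/.env` from the project directory, which is how the `${R2_*}` substitutions in the compose files pick up the secrets written above. A quick post-deploy check that the variables actually reached the backend container (a sketch; the compose service name `backend` is an assumption):

    # run on the server after a deploy; "backend" service name is an assumption
    cd /srv/apps/gb-site/deploy
    docker compose -f docker-compose.prod.yml exec backend printenv | grep '^R2__'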


@@ -130,6 +130,7 @@ cd backend && dotnet build
- Controllers use attribute routing: `[Route("api/[controller]")]`
- Health check: `GET /api/health` — returns `{ status, database, environment }`
- Health ping: `GET /api/health/ping` — returns `{ status: "pong" }`
- Upload: `POST /api/upload` — multipart form, fields: `file` (required), `path` (optional folder prefix)
## Frontend Conventions
@@ -168,6 +169,20 @@ Host=postgres;Port=5432;Database={db};Username={user};Password={pass}
Configured via env var `ConnectionStrings__Default` in docker-compose files.
Locally via `appsettings.Local.json` with SSH tunnel (`localhost:5433`).
## CDN & Image Storage
- **Storage:** Cloudflare R2 (S3-compatible)
- **CDN domain:** https://cdn.goodbrick.com.ua
- **Bucket:** goodbrick
- **Frontend:** `NEXT_PUBLIC_CDN_URL` env var, `cdnUrl()` helper in `src/lib/cdn.ts`
- **Backend:** `R2__*` env vars, `R2StorageService` for uploads
- **Upload endpoint:** `POST /api/upload` — multipart/form-data, max 10MB, images only (jpeg/png/webp/avif)
### Folder Convention
- `products/<slug>/main.jpg` — product photos
- `catalog/<slug>/cover.jpg` — collection covers
- `site/<name>.jpg` — site-wide images (hero, about, etc.)
## Key Rules
- Never commit `.env` files or secrets — credentials are in docker-compose env vars on server
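A minimal smoke test of the upload conventions above (hypothetical host and slug; adjust to the target environment):

    curl -X POST https://new.goodbrick.com.ua/api/upload \
      -F 'file=@main.jpg;type=image/jpeg' \
      -F 'path=products/red-brick'
    # expected: {"url":"https://cdn.goodbrick.com.ua/products/red-brick/main.jpg","key":"products/red-brick/main.jpg"}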


@@ -0,0 +1,14 @@
namespace GBSite.Api.Configuration;

public class R2Settings
{
    public const string SectionName = "R2";

    public required string AccountId { get; set; }
    public required string AccessKeyId { get; set; }
    public required string SecretAccessKey { get; set; }
    public required string BucketName { get; set; }
    public string PublicUrl { get; set; } = "https://cdn.goodbrick.com.ua";

    public string ServiceUrl => $"https://{AccountId}.r2.cloudflarestorage.com";
}


@@ -0,0 +1,44 @@
using GBSite.Api.Services;
using Microsoft.AspNetCore.Mvc;

namespace GBSite.Api.Controllers;

[ApiController]
[Route("api/[controller]")]
public class UploadController : ControllerBase
{
    private readonly R2StorageService? _storage;

    public UploadController(R2StorageService? storage = null)
    {
        _storage = storage;
    }

    [HttpPost]
    [RequestSizeLimit(10 * 1024 * 1024)]
    public async Task<IActionResult> Upload(
        IFormFile file,
        [FromForm] string? path,
        CancellationToken ct)
    {
        if (_storage is null)
            return StatusCode(503, new { error = "Storage service not configured" });

        if (file.Length == 0)
            return BadRequest(new { error = "File is empty" });

        var allowedTypes = new[] { "image/jpeg", "image/png", "image/webp", "image/avif" };
        if (!allowedTypes.Contains(file.ContentType))
            return BadRequest(new { error = $"File type '{file.ContentType}' not allowed" });

        var fileName = Path.GetFileName(file.FileName);
        var key = string.IsNullOrEmpty(path)
            ? fileName
            : $"{path.Trim('/')}/{fileName}";

        await using var stream = file.OpenReadStream();
        var cdnUrl = await _storage.UploadAsync(stream, key, file.ContentType, ct);

        return Ok(new { url = cdnUrl, key });
    }
}
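The controller rejects unknown content types before touching storage, so a negative test needs no R2 credentials (hypothetical host and file):

    curl -i -X POST https://new.goodbrick.com.ua/api/upload \
      -F 'file=@notes.txt;type=text/plain'
    # expected: HTTP 400 with {"error":"File type 'text/plain' not allowed"}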


@@ -4,9 +4,11 @@
    <TargetFramework>net9.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
    <UserSecretsId>805cad54-8a19-4713-b893-a1ec63696146</UserSecretsId>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="AWSSDK.S3" Version="3.7.*" />
    <PackageReference Include="Microsoft.AspNetCore.OpenApi" Version="9.0.4" />
    <PackageReference Include="Npgsql" Version="10.0.1" />
  </ItemGroup>


@@ -1,9 +1,32 @@
using Amazon.S3;
using GBSite.Api.Configuration;
using GBSite.Api.Services;

var builder = WebApplication.CreateBuilder(args);
builder.Configuration.AddJsonFile("appsettings.Local.json", optional: true, reloadOnChange: true);

builder.Services.AddControllers();
builder.Services.AddOpenApi();

// R2 Storage
var r2Section = builder.Configuration.GetSection(R2Settings.SectionName);
builder.Services.Configure<R2Settings>(r2Section);

var r2Settings = r2Section.Get<R2Settings>();
if (r2Settings is not null && !string.IsNullOrEmpty(r2Settings.AccountId))
{
    builder.Services.AddSingleton<IAmazonS3>(_ =>
    {
        var config = new AmazonS3Config
        {
            ServiceURL = r2Settings.ServiceUrl,
            ForcePathStyle = true
        };
        return new AmazonS3Client(r2Settings.AccessKeyId, r2Settings.SecretAccessKey, config);
    });
    builder.Services.AddSingleton<R2StorageService>();
}

var app = builder.Build();

if (app.Environment.IsDevelopment())
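Because registration is conditional on `R2:AccountId`, the API still boots without credentials and the upload endpoint degrades to a 503. For a local run with R2 enabled, the double-underscore env vars map onto the `R2` section (placeholder values; real ones come from Woodpecker secrets or user-secrets):

    # placeholders only — do not commit real values
    export R2__AccountId=<account-id>
    export R2__AccessKeyId=<access-key>
    export R2__SecretAccessKey=<secret-key>
    export R2__BucketName=goodbrick
    cd backend && dotnet run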


@@ -0,0 +1,43 @@
using Amazon.S3;
using Amazon.S3.Model;
using GBSite.Api.Configuration;
using Microsoft.Extensions.Options;

namespace GBSite.Api.Services;

public class R2StorageService
{
    private readonly IAmazonS3 _s3;
    private readonly R2Settings _settings;

    public R2StorageService(IAmazonS3 s3, IOptions<R2Settings> settings)
    {
        _s3 = s3;
        _settings = settings.Value;
    }

    public async Task<string> UploadAsync(
        Stream stream,
        string key,
        string contentType,
        CancellationToken ct = default)
    {
        var request = new PutObjectRequest
        {
            BucketName = _settings.BucketName,
            Key = key,
            InputStream = stream,
            ContentType = contentType,
            // R2 does not accept AWS-style signed streaming payloads
            DisablePayloadSigning = true
        };
        await _s3.PutObjectAsync(request, ct);
        return $"{_settings.PublicUrl.TrimEnd('/')}/{key}";
    }

    public async Task DeleteAsync(string key, CancellationToken ct = default)
    {
        await _s3.DeleteObjectAsync(_settings.BucketName, key, ct);
    }
}
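Since R2 is S3-compatible, the bucket can also be inspected with the stock AWS CLI pointed at the account endpoint. A hedged sketch, assuming the R2 keys are exported and using the `auto` region R2 expects (`<ACCOUNT_ID>` is a placeholder):

    AWS_ACCESS_KEY_ID=$R2_ACCESS_KEY_ID \
    AWS_SECRET_ACCESS_KEY=$R2_SECRET_ACCESS_KEY \
    AWS_DEFAULT_REGION=auto \
      aws s3 ls s3://goodbrick/products/ \
      --endpoint-url "https://<ACCOUNT_ID>.r2.cloudflarestorage.com"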


@@ -5,5 +5,12 @@
      "Microsoft.AspNetCore": "Warning"
    }
  },
  "AllowedHosts": "*"
  "AllowedHosts": "*",
  "R2": {
    "AccountId": "",
    "AccessKeyId": "",
    "SecretAccessKey": "",
    "BucketName": "",
    "PublicUrl": "https://cdn.goodbrick.com.ua"
  }
}


@@ -5,6 +5,7 @@ services:
      args:
        - NEXT_PUBLIC_POSTHOG_KEY=phc_pe0mP58n724h9eFxanbGIUsfMyS14gnAmr5tYez9V3Q
        - NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com
        - NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua
    container_name: gb-dev-frontend
    restart: unless-stopped
    ports:
@@ -24,6 +25,11 @@ services:
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - ConnectionStrings__Default=Host=postgres;Port=5432;Database=dev_db;Username=dev_user;Password=dev_pass_vB6nM3qP8yW2rT9k
      - R2__AccountId=${R2_ACCOUNT_ID}
      - R2__AccessKeyId=${R2_ACCESS_KEY_ID}
      - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY}
      - R2__BucketName=${R2_BUCKET_NAME:-goodbrick}
      - R2__PublicUrl=https://cdn.goodbrick.com.ua
    networks:
      - app-network
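`${R2_BUCKET_NAME:-goodbrick}` is shell-style default expansion: Compose substitutes `goodbrick` unless `R2_BUCKET_NAME` is set in `deploy/.env`. The same syntax can be sanity-checked in a plain shell:

    unset R2_BUCKET_NAME
    echo "${R2_BUCKET_NAME:-goodbrick}"    # -> goodbrick
    R2_BUCKET_NAME=custom-bucket
    echo "${R2_BUCKET_NAME:-goodbrick}"    # -> custom-bucket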


@@ -7,3 +7,8 @@ services:
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - ConnectionStrings__Default=Host=host.docker.internal;Port=5433;Database=dev_db;Username=dev_user;Password=dev_pass_vB6nM3qP8yW2rT9k
      - R2__AccountId=${R2_ACCOUNT_ID}
      - R2__AccessKeyId=${R2_ACCESS_KEY_ID}
      - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY}
      - R2__BucketName=${R2_BUCKET_NAME:-goodbrick}
      - R2__PublicUrl=https://cdn.goodbrick.com.ua


@@ -5,6 +5,7 @@ services:
      args:
        - NEXT_PUBLIC_POSTHOG_KEY=phc_pe0mP58n724h9eFxanbGIUsfMyS14gnAmr5tYez9V3Q
        - NEXT_PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com
        - NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua
    container_name: gb-prod-frontend
    restart: unless-stopped
    ports:
@@ -24,6 +25,11 @@ services:
    environment:
      - ASPNETCORE_ENVIRONMENT=Production
      - ConnectionStrings__Default=Host=postgres;Port=5432;Database=prod_db;Username=prod_user;Password=prod_pass_kL9mN2pQ7xR8sT4v
      - R2__AccountId=${R2_ACCOUNT_ID}
      - R2__AccessKeyId=${R2_ACCESS_KEY_ID}
      - R2__SecretAccessKey=${R2_SECRET_ACCESS_KEY}
      - R2__BucketName=${R2_BUCKET_NAME:-goodbrick}
      - R2__PublicUrl=https://cdn.goodbrick.com.ua
    networks:
      - app-network


@@ -3,3 +3,6 @@ INTERNAL_API_URL=http://gb-prod-backend:5000
# Backend URL for proxying /api/* in local development (do not set on the server!)
# LOCAL_API_URL=http://localhost:5000
# CDN URL for images (Cloudflare R2)
NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua


@@ -7,6 +7,7 @@ FROM node:20-alpine AS builder
WORKDIR /app
ARG NEXT_PUBLIC_POSTHOG_KEY
ARG NEXT_PUBLIC_POSTHOG_HOST
ARG NEXT_PUBLIC_CDN_URL
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN npm run build
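`NEXT_PUBLIC_*` values are inlined into the client bundle at `npm run build`, so they must be passed as build args rather than runtime env. The compose files above do this; an equivalent manual build might look like the sketch below (the `frontend/` context path is an assumption):

    docker build \
      --build-arg NEXT_PUBLIC_CDN_URL=https://cdn.goodbrick.com.ua \
      -t gb-frontend frontend/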


@@ -2,6 +2,11 @@ import type { NextConfig } from "next";
const nextConfig: NextConfig = {
  output: "standalone",
  images: {
    remotePatterns: [
      { protocol: "https", hostname: "cdn.goodbrick.com.ua" },
    ],
  },
  async rewrites() {
    const apiUrl = process.env.LOCAL_API_URL;
    if (!apiUrl) return [];


@@ -0,0 +1,5 @@
User-agent: *
Allow: /
Allow: /_next/image
Sitemap: https://new.goodbrick.com.ua/sitemap.xml

frontend/src/lib/cdn.ts

@@ -0,0 +1,6 @@
const CDN_BASE_URL =
  process.env.NEXT_PUBLIC_CDN_URL || "https://cdn.goodbrick.com.ua";

export function cdnUrl(path: string): string {
  return `${CDN_BASE_URL.replace(/\/+$/, "")}/${path.replace(/^\/+/, "")}`;
}

scripts/replicate-db.sh

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
#
# replicate-db.sh — Nightly prod_db -> dev_db replication with backups
# Runs via cron as user "deploy" at 04:00 Kyiv time
#
set -euo pipefail
# --- Lock (prevent parallel runs) ---
LOCK_FILE="/tmp/replicate-db.lock"
exec 200>"$LOCK_FILE"
flock -n 200 || { echo "[$(date)] Another instance is already running. Exiting."; exit 1; }
# --- Configuration ---
POSTGRES_CONTAINER="postgres"
SUPERUSER="app"
SUPERUSER_PASS="zYWT5JWu3iAbbW7mOyd1"
SOURCE_DB="prod_db"
TARGET_DB="dev_db"
TARGET_USER="dev_user"
DEV_BACKEND_CONTAINER="gb-dev-backend"
DEV_COMPOSE_DIR="/srv/apps/gb-site-dev/deploy"
DEV_COMPOSE_FILE="docker-compose.dev.yml"
BACKUP_DIR="/srv/backups"
BACKUP_RETENTION_DAYS=14
LOG_DIR="/srv/logs"
TIMESTAMP=$(date '+%Y%m%d-%H%M%S')
LOG_FILE="${LOG_DIR}/replicate-db-${TIMESTAMP}.log"
LOG_RETENTION_DAYS=30
CONTAINER_DUMP="/tmp/prod_dump.dump"
# --- Logging ---
mkdir -p "$LOG_DIR" "$BACKUP_DIR"
log() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] $1" | tee -a "$LOG_FILE"
}

log_error() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] ERROR: $1" | tee -a "$LOG_FILE" >&2
}
# --- Cleanup on exit ---
cleanup() {
    local exit_code=$?

    # Remove dump from container
    log "Cleaning up dump file inside container..."
    docker exec "$POSTGRES_CONTAINER" rm -f "$CONTAINER_DUMP" 2>/dev/null || true

    # Restart the dev backend if it is not already running
    if ! docker ps --format '{{.Names}}' | grep -q "^${DEV_BACKEND_CONTAINER}$"; then
        log "Restarting dev backend..."
        cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend 2>>"$LOG_FILE" || true
    fi

    # Clean old logs
    find "$LOG_DIR" -name "replicate-db-*.log" -mtime +${LOG_RETENTION_DAYS} -delete 2>/dev/null || true

    if [ $exit_code -eq 0 ]; then
        log "=== Replication completed successfully ==="
    else
        log_error "=== Replication FAILED with exit code $exit_code ==="
    fi
}
trap cleanup EXIT
# ===== MAIN =====
log "=== Starting prod_db -> dev_db replication ==="
# 1. Check postgres container
log "Step 1: Checking postgres container..."
if ! docker ps --format '{{.Names}}' | grep -q "^${POSTGRES_CONTAINER}$"; then
    log_error "PostgreSQL container is not running!"
    exit 1
fi

# 2. Backup prod_db to host filesystem
log "Step 2: Backing up prod_db..."
BACKUP_FILE="${BACKUP_DIR}/prod_db_${TIMESTAMP}.dump"
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \
    --format=custom \
    --file=/tmp/prod_backup.dump \
    2>>"$LOG_FILE"
docker cp "${POSTGRES_CONTAINER}:/tmp/prod_backup.dump" "$BACKUP_FILE" 2>>"$LOG_FILE"
docker exec "$POSTGRES_CONTAINER" rm -f /tmp/prod_backup.dump 2>/dev/null || true
BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
log "Backup saved: $BACKUP_FILE ($BACKUP_SIZE)"
# 3. Rotate old backups
log "Step 3: Rotating backups older than ${BACKUP_RETENTION_DAYS} days..."
DELETED_COUNT=$(find "$BACKUP_DIR" -name "prod_db_*.dump" -mtime +${BACKUP_RETENTION_DAYS} -delete -print | wc -l)
log "Deleted $DELETED_COUNT old backup(s)."
# 4. Stop dev backend
log "Step 4: Stopping dev backend..."
docker stop "$DEV_BACKEND_CONTAINER" 2>>"$LOG_FILE" || log "Warning: dev backend was already stopped."
sleep 2
# 5. Dump prod_db for replication (inside container)
log "Step 5: Dumping prod_db for replication..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_dump -U "$SUPERUSER" -d "$SOURCE_DB" \
    --format=custom \
    --no-owner \
    --no-acl \
    --file="$CONTAINER_DUMP" \
    2>>"$LOG_FILE"
log "Dump completed."

# 6. Terminate connections to dev_db
log "Step 6: Terminating connections to dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c \
    "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${TARGET_DB}' AND pid <> pg_backend_pid();" \
    >>"$LOG_FILE" 2>&1 || true
# 7. Drop and recreate dev_db
log "Step 7: Recreating dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c "DROP DATABASE IF EXISTS ${TARGET_DB};" \
    >>"$LOG_FILE" 2>&1
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d postgres -c "CREATE DATABASE ${TARGET_DB} OWNER ${TARGET_USER};" \
    >>"$LOG_FILE" 2>&1
log "Database recreated."

# 8. Restore dump into dev_db
log "Step 8: Restoring into dev_db..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    pg_restore -U "$SUPERUSER" -d "$TARGET_DB" \
    --no-owner \
    --no-acl \
    --role="$TARGET_USER" \
    "$CONTAINER_DUMP" \
    2>>"$LOG_FILE"
log "Restore completed."
# 9. Grant privileges to dev_user
# Note: --role=dev_user in pg_restore already sets object ownership
log "Step 9: Granting privileges to dev_user..."
docker exec -e PGPASSWORD="$SUPERUSER_PASS" "$POSTGRES_CONTAINER" \
    psql -U "$SUPERUSER" -d "$TARGET_DB" -c "
        ALTER SCHEMA public OWNER TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON DATABASE ${TARGET_DB} TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO ${TARGET_USER};
        GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO ${TARGET_USER};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO ${TARGET_USER};
        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO ${TARGET_USER};
    " >>"$LOG_FILE" 2>&1
log "Privileges granted."
# 10. Start dev backend
log "Step 10: Starting dev backend..."
cd "$DEV_COMPOSE_DIR" && docker compose -f "$DEV_COMPOSE_FILE" up -d gb-dev-backend \
2>>"$LOG_FILE"
log "Dev backend started."
# 11. Health check
log "Step 11: Health check..."
sleep 5
HEALTH=$(curl -sf http://127.0.0.1:5200/api/health/ping 2>/dev/null || echo "FAILED")
if [ "$HEALTH" = "FAILED" ]; then
log "Warning: Health check did not respond (backend may still be starting)."
else
log "Health check OK: $HEALTH"
fi
log "=== Replication finished ==="