#!/bin/bash
# Gravl Deployment Script
#
# Purpose:
# Automates the deployment of Gravl services to production/staging.
# Ensures fresh builds and verifies service health after startup.
#
# Prevents stale containers by always building fresh with --no-cache:
# The --no-cache flag rebuilds all Docker layers from scratch.
# This prevents stale application code, assets, or dependencies
# from being cached and deployed. Essential for reliable deployments.
#
# Workflow:
# 1. Pull latest code from git
# 2. Capture build metadata (commit hash, timestamp)
# 3. Build Docker images (--no-cache for freshness)
# 4. Start containers with new images
# 5. Health check: wait for backend to respond
#
# Exit codes:
# 0 = Success (deployment complete, services healthy)
# 1 = Failure (see error message in logs)
#
# Usage:
# ./scripts/deploy.sh
#
# Logs:
# All output is appended to logs/deploy.log (use `tail -f logs/deploy.log` to follow)
set -euo pipefail

# Resolve paths relative to this script's location so the deploy works
# regardless of the caller's current working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(dirname "$SCRIPT_DIR")"
LOG_FILE="$REPO_DIR/logs/deploy.log"
BACKEND_HEALTH="http://localhost:3001/api/health"
# These are constants for the rest of the script; guard against accidental
# reassignment in later steps.
readonly SCRIPT_DIR REPO_DIR LOG_FILE BACKEND_HEALTH

# Logging helper: prints "[timestamp] message" to both stdout and the log file.
# printf (not echo) so messages that look like echo options (e.g. "-n") or
# contain backslashes are emitted verbatim.
log() {
  printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*" | tee -a "$LOG_FILE"
}

# Ensure the logs directory exists before the first log() call appends to it.
mkdir -p "$REPO_DIR/logs"
cd "$REPO_DIR"

log "=== Deploy started ==="
# ============================================================================
# STEP 1: Git Pull
# ============================================================================
# Fetches latest code from remote and merges into the current branch.
# Fails if there are merge conflicts (manual intervention required).
#
# Under `set -e` a bare `git pull` failure would abort the script with no
# entry in deploy.log — the header promises "see error message in logs",
# so log the failure explicitly before exiting.
log "Pulling latest code..."
git pull || {
  log "✗ ERROR: git pull failed (merge conflict, auth, or network issue?)"
  exit 1
}
# ============================================================================
# STEP 2: Capture Build Metadata
# ============================================================================
# The commit hash and build timestamp are baked into the Docker images as
# OCI labels (via docker-compose.yml) and later read by build-check.sh to
# confirm that the running containers match the local HEAD.
#
# Label mapping:
#   org.opencontainers.image.revision -> full 40-char commit SHA
#     (which commit this image was built from)
#   org.opencontainers.image.created  -> RFC3339 UTC timestamp
#     (when this image was built, e.g. 2026-03-03T18:21:00Z)
GIT_COMMIT="$(git rev-parse HEAD)"
BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
log "Commit: $(git rev-parse --short HEAD) | Date: $BUILD_DATE"
# ============================================================================
# STEP 3: Build Docker Images (--no-cache)
# ============================================================================
# The --no-cache flag is deliberate: Docker layer caching can silently reuse
# stale assets (CSS/JS bundles, node_modules). For example, a cached
# `npm install` layer keeps old dependencies even after package.json changes.
# Rebuilding every layer from scratch trades build time for reproducibility.
#
# The metadata from STEP 2 is exported so docker-compose.yml can consume it
# as build args (for RUN instructions and image labels).
log "Building images (--no-cache to prevent stale assets)..."
export GIT_COMMIT
export BUILD_DATE
docker compose build --no-cache
# ============================================================================
# STEP 4: Start Containers with New Images
# ============================================================================
# Flags:
#   --force-recreate  stop and remove any existing containers, then start
#                     fresh ones — guarantees old containers running old
#                     images are never reused
#   -d                detach: run containers in the background
#
# This step also sets up networking (creates or reuses the compose network).
log "Starting containers..."
docker compose up --force-recreate -d
# ============================================================================
# STEP 5: Health Check
# ============================================================================
# Polls the backend's /api/health endpoint until it answers 2xx, proving the
# service started correctly and is ready for traffic.
#
# Timeout budget (attempts * interval = 60s total):
#   - Docker startup:            ~5-10s
#   - Node.js app initialization: ~5s
#   - Database connection:       ~5-10s
#   - Buffer for system load:    ~30s
# Too short -> false negatives (healthy app fails the check).
# Too long  -> slow failure on genuinely broken deploys.
#
# Endpoint: GET http://localhost:3001/api/health, expected 200, normally <1s.
readonly HEALTH_ATTEMPTS=12
readonly HEALTH_INTERVAL=5
log "Health check: waiting for backend ($((HEALTH_ATTEMPTS * HEALTH_INTERVAL))s timeout)..."
for (( attempt = 1; attempt <= HEALTH_ATTEMPTS; attempt++ )); do
  # -s silent, -f fail on HTTP errors; --max-time caps a single hung
  # connection so one stalled request cannot blow past the 60s budget.
  if curl -sf --max-time 5 "$BACKEND_HEALTH" >/dev/null 2>&1; then
    log "✓ Backend healthy"
    break
  fi
  if (( attempt == HEALTH_ATTEMPTS )); then
    log "✗ ERROR: Health check failed after $((HEALTH_ATTEMPTS * HEALTH_INTERVAL))s"
    log " Try: docker logs gravl-backend | tail -20"
    exit 1
  fi
  log " Waiting... ($attempt/$HEALTH_ATTEMPTS attempts, ${HEALTH_INTERVAL}s intervals)"
  sleep "$HEALTH_INTERVAL"
done

log "=== Deploy complete: ${GIT_COMMIT:0:7} ==="