- Introduced `docker-compose.prod.yml` for production deployment.
- Configured the service to connect to an external PostgreSQL instance.
- Set environment variables for JWT and database connection strings.
- Defined a network and a volume for data-protection keys.
125 lines
5.8 KiB
Bash
Executable File
#!/usr/bin/env bash
# deploy.sh — Sync source and build on a Docker Swarm manager over SSH
#
# Usage:
#   SWARM_HOST=user@host ./deploy.sh
#
# All behavior is driven by environment variables (see the Configuration
# section below). Every variable has a default except SWARM_HOST, which
# is required. Run from the repository root.

# Strict mode: abort on errors (-e), on unset variables (-u), and when
# any stage of a pipeline fails (-o pipefail).
set -euo pipefail
|
|
|
|
# ── Configuration ─────────────────────────────────────────────────────────────
# Each knob is an environment variable. The ':=' expansion assigns the
# default only when the variable is unset or empty, so any of these can
# be overridden from the caller's environment.
: "${SWARM_HOST:=}"                        # required — e.g. user@10.0.0.1 or set via env
: "${IMAGE_NAME:=ots-orchestrator}"        # name of the Docker image to build
: "${IMAGE_TAG:=latest}"                   # tag applied to the built image
: "${STACK_NAME:=otsorchestrator-dev}"     # swarm stack name
: "${COMPOSE_FILE:=docker-compose.yml}"    # use docker-compose.prod.yml for prod
: "${REMOTE_DIR:=/stack/dev/orchestrator}" # sync/build directory on the manager
: "${ENV_FILE:=.env}"                      # local .env file with secrets
: "${FRESH_DEPLOY:=0}"                     # set to 1 to tear down old stack+volumes before deploying
|
|
|
|
# ── Validate ──────────────────────────────────────────────────────────────────
# SWARM_HOST has no usable default; fail fast with usage help.
# Diagnostics go to stderr so they are not captured by stdout consumers.
if [[ -z "$SWARM_HOST" ]]; then
  echo "ERROR: SWARM_HOST is not set." >&2
  echo "       Usage: SWARM_HOST=user@host ./deploy.sh" >&2
  echo "       Or set SWARM_HOST in your environment." >&2
  exit 1
fi

# The compose file is read on the remote host after the sync, but checking
# locally catches "wrong working directory" mistakes before any SSH traffic.
if [[ ! -f "$COMPOSE_FILE" ]]; then
  echo "ERROR: $COMPOSE_FILE not found. Run this script from the repository root." >&2
  exit 1
fi

FULL_IMAGE="${IMAGE_NAME}:${IMAGE_TAG}"

# Fresh deploys rebuild from scratch. A plain 'if' is used instead of
# '[[ … ]] && …' so a false condition can never trip 'set -e'.
BUILD_FLAGS=""
if [[ "$FRESH_DEPLOY" == "1" ]]; then
  BUILD_FLAGS="--no-cache"
fi
|
|
|
|
# Summarize the effective configuration up front so a misconfigured run
# is obvious before any remote work happens.
echo "==> Deploying ${FULL_IMAGE} to swarm manager: ${SWARM_HOST}"
echo "    Stack: ${STACK_NAME}"
echo "    Compose: ${COMPOSE_FILE}"
echo "    Remote dir: ${REMOTE_DIR}"
echo "    Fresh: ${FRESH_DEPLOY}"
echo ""
|
|
|
|
# ── Step 0 (fresh only): Tear down existing stack and volumes ─────────────────
# Runs only when FRESH_DEPLOY=1: removes the existing swarm stack, waits
# (with bounded timeouts) for its services and then its containers to
# disappear, and finally deletes the stack's named volumes so the next
# deploy starts from a clean slate.
#
# The whole remote script is one double-quoted string: ${STACK_NAME} is
# expanded LOCALLY before ssh runs, while backslash-escaped variables
# (\$timeout, \$vol, \$containers) are evaluated on the REMOTE host.
# NOTE(review): volume removal is best-effort — failures are downgraded
# to warnings rather than aborting the deploy.
if [[ "$FRESH_DEPLOY" == "1" ]]; then
  echo "==> [0/4] Tearing down existing stack '${STACK_NAME}'..."
  ssh "${SWARM_HOST}" "
    if docker stack ls --format '{{.Name}}' | grep -q '^${STACK_NAME}$'; then
      docker stack rm ${STACK_NAME}
      echo '    Waiting for services to stop...'
      timeout=60
      while docker service ls --filter name=${STACK_NAME} --format '{{.Name}}' 2>/dev/null | grep -q .; do
        sleep 2
        timeout=\$(( timeout - 2 ))
        [[ \$timeout -le 0 ]] && break
      done
      # Wait for containers/tasks to fully exit so volumes are released
      echo '    Waiting for containers to exit...'
      timeout=30
      while docker ps -q --filter name=${STACK_NAME} 2>/dev/null | grep -q .; do
        sleep 2
        timeout=\$(( timeout - 2 ))
        [[ \$timeout -le 0 ]] && break
      done
    else
      echo '    No existing stack found, skipping removal.'
    fi
    # Remove volumes created by the stack (stack-name prefix) or by docker compose
    # (compose project-name prefix, which defaults to the remote directory basename).
    echo '    Removing stack volumes...'
    for vol in \$(docker volume ls --format '{{.Name}}' | grep -E '^(${STACK_NAME}_|orchestrator_)'); do
      # Force-remove any containers still holding this volume before deleting it
      containers=\$(docker ps -aq --filter volume=\"\$vol\" 2>/dev/null)
      if [[ -n \"\$containers\" ]]; then
        echo \"    Force-removing containers holding volume \$vol...\"
        echo \"\$containers\" | xargs docker rm -f 2>/dev/null || true
        sleep 1
      fi
      docker volume rm \"\$vol\" && echo \"    Removed volume: \$vol\" || echo \"    WARNING: Could not remove volume: \$vol (may still be in use)\"
    done
  "
  echo "    Stack removed."
fi
|
|
|
|
# ── Step 1: Sync source to the remote host ───────────────────────────────────
# Mirror the working tree into REMOTE_DIR, removing remote files that no
# longer exist locally (--delete), while skipping build artifacts and
# local-only directories.
echo "==> [1/4] Syncing source to ${SWARM_HOST}:${REMOTE_DIR}..."
ssh "${SWARM_HOST}" "mkdir -p '${REMOTE_DIR}'"
# '.env' is excluded so that --delete cannot wipe the secrets file a
# previous deploy uploaded to the remote host when no local .env exists
# (rsync protects excluded files from deletion by default); the secrets
# are copied separately below.
rsync -az --delete \
  --exclude 'bin/' \
  --exclude 'obj/' \
  --exclude 'node_modules/' \
  --exclude '.git/' \
  --exclude 'logs/' \
  --exclude 'wwwroot/' \
  --exclude '.env' \
  ./ "${SWARM_HOST}:${REMOTE_DIR}/"

# Upload secrets separately so ENV_FILE may live outside the synced tree
# and always lands at the fixed remote name '.env'.
if [[ -f "$ENV_FILE" ]]; then
  scp "${ENV_FILE}" "${SWARM_HOST}:${REMOTE_DIR}/.env"
  echo "    Uploaded ${ENV_FILE} -> ${REMOTE_DIR}/.env"
else
  echo "    WARNING: No .env file found at '${ENV_FILE}'. Secrets must already exist on the remote host." >&2
fi
|
|
|
|
# ── Step 2: Build the image on the remote host ───────────────────────────────
# The image is built where it will run, so no registry push is needed.
echo "==> [2/4] Building Docker image on ${SWARM_HOST}..."
remote_build="cd ${REMOTE_DIR} && docker build ${BUILD_FLAGS} -t ${FULL_IMAGE} ."
ssh "${SWARM_HOST}" "$remote_build"
|
|
|
|
# ── Step 3: Deploy the stack ──────────────────────────────────────────────────
echo "==> [3/4] Deploying stack '${STACK_NAME}'..."
# docker stack deploy does NOT load .env automatically — use `docker compose config`
# to resolve all variable substitutions first, then pipe the rendered YAML to the deploy.
# -p ensures compose uses the stack name as the project name so volume names are consistent.
# The sed pass adapts compose-v2 output for `stack deploy`: it drops the
# top-level `name:` key and strips the quotes compose emits around numeric
# `published:` port values.
ssh "${SWARM_HOST}" "cd ${REMOTE_DIR} && \
  docker compose -p ${STACK_NAME} --env-file .env -f ${COMPOSE_FILE} config | \
  sed '/^name:/d; s/published: \"\([0-9]*\)\"/published: \1/g' | \
  docker stack deploy \
    --compose-file - \
    --with-registry-auth \
    --prune \
    ${STACK_NAME}"
|
|
|
|
# ── Step 4: Verify rollout ───────────────────────────────────────────────────
echo "==> [4/4] Waiting for service to converge..."
# NOTE(review): `docker service ls` only snapshots the replica counts; it
# does not block until the rollout actually converges — confirm whether a
# polling loop (or `docker service ps`) is wanted here.
ssh "${SWARM_HOST}" "docker service ls --filter name=${STACK_NAME}"

echo ""
echo "==> Deploy complete."
echo "    Run the following to stream logs:"
echo "    ssh ${SWARM_HOST} \"docker service logs -f ${STACK_NAME}_app\""