Some checks failed
Deploy — Staging / Lint, Typecheck & Test (push) Successful in 2m6s
Deploy — Staging / Build & push — admin (push) Failing after 2m7s
Deploy — Staging / Build & push — storefront (push) Failing after 1m35s
Deploy — Staging / Deploy to staging VPS (push) Has been skipped
- Added NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME to both the admin and storefront Dockerfiles so it is available during the build process.
- Updated deploy-staging.yml to pass the new Cloudinary variable as a build argument.
- Clarified comments on the handling of NEXT_PUBLIC_* variables and Gitea secret prefixes.

This change improves the build configuration for both applications, ensuring all necessary environment variables are passed during the Docker build.
194 lines
8.1 KiB
YAML
194 lines
8.1 KiB
YAML
name: Deploy — Staging
|
|
|
|
on:
|
|
push:
|
|
branches:
|
|
- staging
|
|
|
|
# Gitea Actions has no environment-level secrets (unlike GitHub Actions).
|
|
# Staging and production secrets live at repo level, distinguished by prefix.
|
|
# Production workflow uses the same names with PROD_ prefix.
|
|
# (see: troubleshooting #8 — REGISTRY must include the owner segment)
|
|
#
|
|
# Required secrets (repo → Settings → Secrets and Variables → Actions):
|
|
# STAGING_REGISTRY — host:port/owner (e.g. git.yourdomain.com:3000/myorg)
|
|
# STAGING_REGISTRY_USER — Gitea username
|
|
# STAGING_REGISTRY_TOKEN — Gitea personal access token (package:write scope)
|
|
# STAGING_SSH_HOST — use host.containers.internal, not the external IP
|
|
# (see: troubleshooting #13 — VPS firewall blocks ext IP)
|
|
# STAGING_SSH_USER — SSH user on the VPS
|
|
# STAGING_SSH_KEY — SSH private key (full PEM)
|
|
# STAGING_SSH_PORT — (optional) defaults to 22
|
|
# STAGING_NEXT_PUBLIC_CONVEX_URL — Convex deployment URL (shared)
|
|
# STAGING_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — storefront Clerk publishable key
|
|
# STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — admin Clerk publishable key
|
|
# STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME — admin Cloudinary cloud name
|
|
#
|
|
# The Dockerfiles are expected at:
|
|
# apps/storefront/Dockerfile
|
|
# apps/admin/Dockerfile
|
|
# Both receive ./out as build context (turbo prune output).
|
|
|
|
jobs:
|
|
# ── 1. CI ───────────────────────────────────────────────────────────────────
|
|
|
|
ci:
|
|
name: Lint, Typecheck & Test
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Node.js
|
|
uses: actions/setup-node@v4
|
|
with:
|
|
node-version: 20
|
|
cache: npm
|
|
|
|
- name: Install dependencies
|
|
run: npm ci
|
|
|
|
- name: Lint
|
|
run: npm run lint
|
|
|
|
- name: Typecheck
|
|
run: npm run type-check
|
|
|
|
- name: Test
|
|
run: npm run test:once
|
|
|
|
# ── 2. Build & push ─────────────────────────────────────────────────────────
|
|
# Runs storefront and admin in parallel via matrix.
|
|
# Each job prunes its own workspace so there is no out/ directory collision.
|
|
|
|
build:
|
|
name: Build & push — ${{ matrix.app }}
|
|
needs: ci
|
|
runs-on: ubuntu-latest
|
|
strategy:
|
|
matrix:
|
|
app: [storefront, admin]
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Node.js
|
|
uses: actions/setup-node@v4
|
|
with:
|
|
node-version: 20
|
|
cache: npm
|
|
|
|
- name: Install dependencies
|
|
run: npm ci
|
|
|
|
- name: Prune workspace for ${{ matrix.app }}
|
|
run: |
|
|
npx turbo prune ${{ matrix.app }} --docker
|
|
# turbo prune only traces npm workspace packages. The root convex/ directory
|
|
# is not a workspace package, so it is excluded from out/full/ — causing
|
|
# "Module not found: convex/_generated/api" at build time.
|
|
# Copy it manually so the Dockerfile has the generated types it needs.
|
|
cp -r convex out/full/convex
|
|
|
|
- name: Authenticate with registry
|
|
# docker login sends HTTPS even for HTTP-only (insecure) registries, so it
|
|
# fails before the daemon can handle it. Pre-populating config.json bypasses
|
|
# login entirely — docker push goes through the Podman daemon which correctly
|
|
# uses HTTP. (see: troubleshooting #7)
|
|
run: |
|
|
mkdir -p ~/.docker
|
|
AUTH=$(echo -n "${{ secrets.STAGING_REGISTRY_USER }}:${{ secrets.STAGING_REGISTRY_TOKEN }}" | base64 -w 0)
|
|
REGISTRY_HOST=$(echo "${{ secrets.STAGING_REGISTRY }}" | cut -d'/' -f1)
|
|
echo "{\"auths\":{\"${REGISTRY_HOST}\":{\"auth\":\"${AUTH}\"}}}" > ~/.docker/config.json
|
|
|
|
- name: Build & push ${{ matrix.app }}
|
|
# Uses --push to push directly during build, which avoids the "No output
|
|
# specified with docker-container driver" warning that appears when using
|
|
# a separate docker push step without --load. (see: troubleshooting #5)
|
|
#
|
|
# Each app has its own Clerk instance so the publishable key differs.
|
|
# NEXT_PUBLIC_* vars must be baked in at build time — Next.js prerender
|
|
# fails with "Missing publishableKey" if they are absent.
|
|
# Secrets use STAGING_/PROD_ prefix in Gitea; the prefix is stripped here
|
|
# so Dockerfiles receive the plain NEXT_PUBLIC_* names they expect.
|
|
env:
|
|
STOREFRONT_CLERK_KEY: ${{ secrets.STAGING_STOREFRONT_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
|
|
ADMIN_CLERK_KEY: ${{ secrets.STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
|
|
NEXT_PUBLIC_CONVEX_URL: ${{ secrets.STAGING_NEXT_PUBLIC_CONVEX_URL }}
|
|
NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME: ${{ secrets.STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME }}
|
|
run: |
|
|
SHORT_SHA="${GITHUB_SHA::7}"
|
|
IMAGE="${{ secrets.STAGING_REGISTRY }}/${{ matrix.app }}"
|
|
|
|
if [ "${{ matrix.app }}" = "admin" ]; then
|
|
CLERK_KEY="$ADMIN_CLERK_KEY"
|
|
docker build \
|
|
-f apps/admin/Dockerfile \
|
|
--build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
|
|
--build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
|
|
--build-arg NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME="$NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME" \
|
|
-t "${IMAGE}:staging" \
|
|
-t "${IMAGE}:sha-${SHORT_SHA}" \
|
|
--push \
|
|
./out
|
|
else
|
|
CLERK_KEY="$STOREFRONT_CLERK_KEY"
|
|
docker build \
|
|
-f apps/storefront/Dockerfile \
|
|
--build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
|
|
--build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
|
|
-t "${IMAGE}:staging" \
|
|
-t "${IMAGE}:sha-${SHORT_SHA}" \
|
|
--push \
|
|
./out
|
|
fi
|
|
|
|
# ── 3. Deploy ───────────────────────────────────────────────────────────────
|
|
|
|
deploy:
|
|
name: Deploy to staging VPS
|
|
needs: build
|
|
runs-on: ubuntu-latest
|
|
|
|
steps:
|
|
- name: Write SSH key
|
|
run: |
|
|
mkdir -p ~/.ssh
|
|
echo "${{ secrets.STAGING_SSH_KEY }}" > ~/.ssh/staging
|
|
chmod 600 ~/.ssh/staging
|
|
|
|
- name: Pull & restart containers on VPS
|
|
env:
|
|
REGISTRY: ${{ secrets.STAGING_REGISTRY }}
|
|
REGISTRY_USER: ${{ secrets.STAGING_REGISTRY_USER }}
|
|
REGISTRY_TOKEN: ${{ secrets.STAGING_REGISTRY_TOKEN }}
|
|
SSH_HOST: ${{ secrets.STAGING_SSH_HOST }}
|
|
SSH_USER: ${{ secrets.STAGING_SSH_USER }}
|
|
SSH_PORT: ${{ secrets.STAGING_SSH_PORT }}
|
|
run: |
|
|
# Auth key is the hostname only — strip the /owner path
|
|
REGISTRY_HOST=$(echo "$REGISTRY" | cut -d'/' -f1)
|
|
|
|
# StrictHostKeyChecking=accept-new trusts on first connect but rejects
|
|
# changed keys on subsequent runs — safer than no-verify
|
|
ssh -i ~/.ssh/staging \
|
|
-p "${SSH_PORT:-22}" \
|
|
-o StrictHostKeyChecking=accept-new \
|
|
"${SSH_USER}@${SSH_HOST}" bash -s << EOF
|
|
set -euo pipefail
|
|
|
|
# Registry uses HTTP — --tls-verify=false required for podman login & pull
|
|
# (see: troubleshooting #12)
|
|
echo "${REGISTRY_TOKEN}" \
|
|
| podman login "${REGISTRY_HOST}" \
|
|
-u "${REGISTRY_USER}" --password-stdin --tls-verify=false
|
|
|
|
podman pull --tls-verify=false "${REGISTRY}/storefront:staging"
|
|
podman pull --tls-verify=false "${REGISTRY}/admin:staging"
|
|
|
|
cd /opt/staging
|
|
podman compose up -d --remove-orphans
|
|
|
|
# Remove dangling images from previous deploys
|
|
podman image prune -f
|
|
EOF
|