Some checks failed
Deploy — Staging / Lint, Typecheck & Test (push) Successful in 2m7s
Deploy — Staging / Build & push — admin (push) Successful in 3m20s
Deploy — Staging / Build & push — storefront (push) Failing after 2m30s
Deploy — Staging / Deploy to staging VPS (push) Has been skipped
turbo prune cannot fully parse the npm 11 lockfile format, causing it to generate an incomplete out/package-lock.json that drops non-hoisted workspace entries (apps/storefront/node_modules/@heroui/react and related packages). Replacing it with the full root lockfile ensures npm ci in the Docker deps stage installs all packages including non-hoisted ones. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
203 lines
8.7 KiB
YAML
203 lines
8.7 KiB
YAML
name: Deploy — Staging

on:
  push:
    branches:
      - staging

# Gitea Actions has no environment-level secrets (unlike GitHub Actions).
# Staging and production secrets live at repo level, distinguished by prefix.
# Production workflow uses the same names with PROD_ prefix.
# (see: troubleshooting #8 — REGISTRY must include the owner segment)
#
# Required secrets (repo → Settings → Secrets and Variables → Actions):
#   STAGING_REGISTRY — host:port/owner (e.g. git.yourdomain.com:3000/myorg)
#   STAGING_REGISTRY_USER — Gitea username
#   STAGING_REGISTRY_TOKEN — Gitea personal access token (package:write scope)
#   STAGING_SSH_HOST — use host.containers.internal, not the external IP
#     (see: troubleshooting #13 — VPS firewall blocks ext IP)
#   STAGING_SSH_USER — SSH user on the VPS
#   STAGING_SSH_KEY — SSH private key (full PEM)
#   STAGING_SSH_PORT — (optional) defaults to 22
#   STAGING_NEXT_PUBLIC_CONVEX_URL — Convex deployment URL (shared)
#   STAGING_STOREFRONT_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — storefront Clerk publishable key
#   STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — admin Clerk publishable key
#   STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME — admin Cloudinary cloud name
#
# The Dockerfiles are expected at:
#   apps/storefront/Dockerfile
#   apps/admin/Dockerfile
# Both receive ./out as build context (turbo prune output).
jobs:
  # ── 1. CI ───────────────────────────────────────────────────────────────────
  ci:
    name: Lint, Typecheck & Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: npm

      - name: Install dependencies
        run: npm ci

      - name: Lint
        run: npm run lint

      - name: Typecheck
        run: npm run type-check

      - name: Test
        run: npm run test:once
# ── 2. Build & push ─────────────────────────────────────────────────────────
|
|
# Runs storefront and admin in parallel via matrix.
|
|
# Each job prunes its own workspace so there is no out/ directory collision.
|
|
|
|
build:
|
|
name: Build & push — ${{ matrix.app }}
|
|
needs: ci
|
|
runs-on: ubuntu-latest
|
|
strategy:
|
|
matrix:
|
|
app: [storefront, admin]
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Node.js
|
|
uses: actions/setup-node@v4
|
|
with:
|
|
node-version: 20
|
|
cache: npm
|
|
|
|
- name: Install dependencies
|
|
run: npm ci
|
|
|
|
- name: Prune workspace for ${{ matrix.app }}
|
|
run: |
|
|
npx turbo prune ${{ matrix.app }} --docker
|
|
# turbo prune only traces npm workspace packages. The root convex/ directory
|
|
# is not a workspace package, so it is excluded from out/full/ — causing
|
|
# "Module not found: convex/_generated/api" at build time.
|
|
# Copy it manually so the Dockerfile has the generated types it needs.
|
|
cp -r convex out/full/convex
|
|
# turbo prune cannot fully parse the npm 11 lockfile format, so it generates
|
|
# an incomplete out/package-lock.json that omits non-hoisted workspace entries
|
|
# (e.g. apps/storefront/node_modules/@heroui/react). Replace it with the full
|
|
# root lockfile so that npm ci in Docker installs every package correctly.
|
|
cp package-lock.json out/package-lock.json
|
|
|
|
- name: Authenticate with registry
|
|
# docker login sends HTTPS even for HTTP-only (insecure) registries, so it
|
|
# fails before the daemon can handle it. Pre-populating config.json bypasses
|
|
# login entirely — docker push goes through the Podman daemon which correctly
|
|
# uses HTTP. (see: troubleshooting #7)
|
|
run: |
|
|
mkdir -p ~/.docker
|
|
AUTH=$(echo -n "${{ secrets.STAGING_REGISTRY_USER }}:${{ secrets.STAGING_REGISTRY_TOKEN }}" | base64 -w 0)
|
|
REGISTRY_HOST=$(echo "${{ secrets.STAGING_REGISTRY }}" | cut -d'/' -f1)
|
|
echo "{\"auths\":{\"${REGISTRY_HOST}\":{\"auth\":\"${AUTH}\"}}}" > ~/.docker/config.json
|
|
|
|
- name: Build & push ${{ matrix.app }}
|
|
# --push bypasses the Podman daemon and uses buildkit's internal HTTPS push,
|
|
# which fails for HTTP-only registries. Instead:
|
|
# 1. --load loads the image into the Podman daemon after build
|
|
# 2. docker push goes through the daemon, which has insecure=true in
|
|
# registries.conf and correctly uses HTTP. (see: troubleshooting #7, #12)
|
|
#
|
|
# Each app has its own Clerk instance so the publishable key differs.
|
|
# NEXT_PUBLIC_* vars must be baked in at build time — Next.js prerender
|
|
# fails with "Missing publishableKey" if they are absent.
|
|
# Secrets use STAGING_/PROD_ prefix in Gitea; the prefix is stripped here
|
|
# so Dockerfiles receive the plain NEXT_PUBLIC_* names they expect.
|
|
env:
|
|
STOREFRONT_CLERK_KEY: ${{ secrets.STAGING_STOREFRONT_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
|
|
ADMIN_CLERK_KEY: ${{ secrets.STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
|
|
NEXT_PUBLIC_CONVEX_URL: ${{ secrets.STAGING_NEXT_PUBLIC_CONVEX_URL }}
|
|
NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME: ${{ secrets.STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME }}
|
|
run: |
|
|
SHORT_SHA="${GITHUB_SHA::7}"
|
|
IMAGE="${{ secrets.STAGING_REGISTRY }}/${{ matrix.app }}"
|
|
|
|
if [ "${{ matrix.app }}" = "admin" ]; then
|
|
CLERK_KEY="$ADMIN_CLERK_KEY"
|
|
docker build \
|
|
-f apps/admin/Dockerfile \
|
|
--build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
|
|
--build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
|
|
--build-arg NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME="$NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME" \
|
|
--load \
|
|
-t "${IMAGE}:staging" \
|
|
./out
|
|
else
|
|
CLERK_KEY="$STOREFRONT_CLERK_KEY"
|
|
docker build \
|
|
-f apps/storefront/Dockerfile \
|
|
--build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
|
|
--build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
|
|
--load \
|
|
-t "${IMAGE}:staging" \
|
|
./out
|
|
fi
|
|
|
|
docker tag "${IMAGE}:staging" "${IMAGE}:sha-${SHORT_SHA}"
|
|
docker push "${IMAGE}:staging"
|
|
docker push "${IMAGE}:sha-${SHORT_SHA}"
|
|
|
|
# ── 3. Deploy ───────────────────────────────────────────────────────────────
|
|
|
|
deploy:
|
|
name: Deploy to staging VPS
|
|
needs: build
|
|
runs-on: ubuntu-latest
|
|
|
|
steps:
|
|
- name: Write SSH key
|
|
run: |
|
|
mkdir -p ~/.ssh
|
|
echo "${{ secrets.STAGING_SSH_KEY }}" > ~/.ssh/staging
|
|
chmod 600 ~/.ssh/staging
|
|
|
|
- name: Pull & restart containers on VPS
|
|
env:
|
|
REGISTRY: ${{ secrets.STAGING_REGISTRY }}
|
|
REGISTRY_USER: ${{ secrets.STAGING_REGISTRY_USER }}
|
|
REGISTRY_TOKEN: ${{ secrets.STAGING_REGISTRY_TOKEN }}
|
|
SSH_HOST: ${{ secrets.STAGING_SSH_HOST }}
|
|
SSH_USER: ${{ secrets.STAGING_SSH_USER }}
|
|
SSH_PORT: ${{ secrets.STAGING_SSH_PORT }}
|
|
run: |
|
|
# Auth key is the hostname only — strip the /owner path
|
|
REGISTRY_HOST=$(echo "$REGISTRY" | cut -d'/' -f1)
|
|
|
|
# StrictHostKeyChecking=accept-new trusts on first connect but rejects
|
|
# changed keys on subsequent runs — safer than no-verify
|
|
ssh -i ~/.ssh/staging \
|
|
-p "${SSH_PORT:-22}" \
|
|
-o StrictHostKeyChecking=accept-new \
|
|
"${SSH_USER}@${SSH_HOST}" bash -s << EOF
|
|
set -euo pipefail
|
|
|
|
# Registry uses HTTP — --tls-verify=false required for podman login & pull
|
|
# (see: troubleshooting #12)
|
|
echo "${REGISTRY_TOKEN}" \
|
|
| podman login "${REGISTRY_HOST}" \
|
|
-u "${REGISTRY_USER}" --password-stdin --tls-verify=false
|
|
|
|
podman pull --tls-verify=false "${REGISTRY}/storefront:staging"
|
|
podman pull --tls-verify=false "${REGISTRY}/admin:staging"
|
|
|
|
cd /opt/staging
|
|
podman compose up -d --remove-orphans
|
|
|
|
# Remove dangling images from previous deploys
|
|
podman image prune -f
|
|
EOF
|