Compare commits
9 Commits
main
...
5b0a727bce
| Author | SHA1 | Date | |
|---|---|---|---|
| 5b0a727bce | |||
| 5391b3b428 | |||
| 829fec9ac1 | |||
| 6b63cbb6cd | |||
| bc7306fea4 | |||
| 7a6da4f18f | |||
| fc5f98541b | |||
| 70b728a474 | |||
| 79640074cd |
@@ -3,7 +3,7 @@ name: CI
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "**"
|
||||
- feat #"**" # TODO: change to "**" after testing
|
||||
|
||||
jobs:
|
||||
ci:
|
||||
|
||||
202
.gitea/workflows/deploy-staging.yml
Normal file
202
.gitea/workflows/deploy-staging.yml
Normal file
@@ -0,0 +1,202 @@
|
||||
name: Deploy — Staging

on:
  push:
    branches:
      - staging

# Gitea Actions has no environment-level secrets (unlike GitHub Actions).
# Staging and production secrets live at repo level, distinguished by prefix.
# Production workflow uses the same names with PROD_ prefix.
# (see: troubleshooting #8 — REGISTRY must include the owner segment)
#
# Required secrets (repo → Settings → Secrets and Variables → Actions):
#   STAGING_REGISTRY        — host:port/owner (e.g. git.yourdomain.com:3000/myorg)
#   STAGING_REGISTRY_USER   — Gitea username
#   STAGING_REGISTRY_TOKEN  — Gitea personal access token (package:write scope)
#   STAGING_SSH_HOST        — use host.containers.internal, not the external IP
#                             (see: troubleshooting #13 — VPS firewall blocks ext IP)
#   STAGING_SSH_USER        — SSH user on the VPS
#   STAGING_SSH_KEY         — SSH private key (full PEM)
#   STAGING_SSH_PORT        — (optional) defaults to 22
#   STAGING_NEXT_PUBLIC_CONVEX_URL — Convex deployment URL (shared)
#   STAGING_STOREFRONT_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — storefront Clerk publishable key
#   STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY — admin Clerk publishable key
#   STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME — admin Cloudinary cloud name
#
# The Dockerfiles are expected at:
#   apps/storefront/Dockerfile
#   apps/admin/Dockerfile
# Both receive ./out as build context (turbo prune output).

jobs:
  # ── 1. CI ───────────────────────────────────────────────────────────────────

  ci:
    name: Lint, Typecheck & Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          # Quoted so YAML keeps it a string (bare 20 is an int; version
          # fields are safest as strings).
          node-version: "20"
          cache: npm

      - name: Install dependencies
        run: npm ci

      - name: Lint
        run: npm run lint

      - name: Typecheck
        run: npm run type-check

      - name: Test
        run: npm run test:once

  # ── 2. Build & push ─────────────────────────────────────────────────────────
  # Runs storefront and admin in parallel via matrix.
  # Each job prunes its own workspace so there is no out/ directory collision.

  build:
    name: Build & push — ${{ matrix.app }}
    needs: ci
    runs-on: ubuntu-latest
    strategy:
      matrix:
        app: [storefront, admin]

    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: npm

      - name: Install dependencies
        run: npm ci

      - name: Prune workspace for ${{ matrix.app }}
        run: |
          npx turbo prune ${{ matrix.app }} --docker
          # turbo prune only traces npm workspace packages. The root convex/ directory
          # is not a workspace package, so it is excluded from out/full/ — causing
          # "Module not found: convex/_generated/api" at build time.
          # Copy it manually so the Dockerfile has the generated types it needs.
          cp -r convex out/full/convex
          # turbo prune cannot fully parse the npm 11 lockfile format, so it generates
          # an incomplete out/package-lock.json that omits non-hoisted workspace entries
          # (e.g. apps/storefront/node_modules/@heroui/react). Replace it with the full
          # root lockfile so that npm ci in Docker installs every package correctly.
          cp package-lock.json out/package-lock.json

      - name: Authenticate with registry
        # docker login sends HTTPS even for HTTP-only (insecure) registries, so it
        # fails before the daemon can handle it. Pre-populating config.json bypasses
        # login entirely — docker push goes through the Podman daemon which correctly
        # uses HTTP. (see: troubleshooting #7)
        run: |
          mkdir -p ~/.docker
          AUTH=$(echo -n "${{ secrets.STAGING_REGISTRY_USER }}:${{ secrets.STAGING_REGISTRY_TOKEN }}" | base64 -w 0)
          # Auth key must be the bare hostname — strip the /owner path segment.
          REGISTRY_HOST=$(echo "${{ secrets.STAGING_REGISTRY }}" | cut -d'/' -f1)
          echo "{\"auths\":{\"${REGISTRY_HOST}\":{\"auth\":\"${AUTH}\"}}}" > ~/.docker/config.json

      - name: Build & push ${{ matrix.app }}
        # --push bypasses the Podman daemon and uses buildkit's internal HTTPS push,
        # which fails for HTTP-only registries. Instead:
        #   1. --load loads the image into the Podman daemon after build
        #   2. docker push goes through the daemon, which has insecure=true in
        #      registries.conf and correctly uses HTTP. (see: troubleshooting #7, #12)
        #
        # Each app has its own Clerk instance so the publishable key differs.
        # NEXT_PUBLIC_* vars must be baked in at build time — Next.js prerender
        # fails with "Missing publishableKey" if they are absent.
        # Secrets use STAGING_/PROD_ prefix in Gitea; the prefix is stripped here
        # so Dockerfiles receive the plain NEXT_PUBLIC_* names they expect.
        env:
          STOREFRONT_CLERK_KEY: ${{ secrets.STAGING_STOREFRONT_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
          ADMIN_CLERK_KEY: ${{ secrets.STAGING_ADMIN_NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY }}
          NEXT_PUBLIC_CONVEX_URL: ${{ secrets.STAGING_NEXT_PUBLIC_CONVEX_URL }}
          NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME: ${{ secrets.STAGING_NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME }}
        run: |
          SHORT_SHA="${GITHUB_SHA::7}"
          IMAGE="${{ secrets.STAGING_REGISTRY }}/${{ matrix.app }}"

          if [ "${{ matrix.app }}" = "admin" ]; then
            CLERK_KEY="$ADMIN_CLERK_KEY"
            docker build \
              -f apps/admin/Dockerfile \
              --build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
              --build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
              --build-arg NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME="$NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME" \
              --load \
              -t "${IMAGE}:staging" \
              ./out
          else
            CLERK_KEY="$STOREFRONT_CLERK_KEY"
            docker build \
              -f apps/storefront/Dockerfile \
              --build-arg NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY="$CLERK_KEY" \
              --build-arg NEXT_PUBLIC_CONVEX_URL="$NEXT_PUBLIC_CONVEX_URL" \
              --load \
              -t "${IMAGE}:staging" \
              ./out
          fi

          docker tag "${IMAGE}:staging" "${IMAGE}:sha-${SHORT_SHA}"
          docker push "${IMAGE}:staging"
          docker push "${IMAGE}:sha-${SHORT_SHA}"

  # ── 3. Deploy ───────────────────────────────────────────────────────────────

  deploy:
    name: Deploy to staging VPS
    needs: build
    runs-on: ubuntu-latest

    steps:
      - name: Write SSH key
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.STAGING_SSH_KEY }}" > ~/.ssh/staging
          chmod 600 ~/.ssh/staging

      - name: Pull & restart containers on VPS
        env:
          REGISTRY: ${{ secrets.STAGING_REGISTRY }}
          REGISTRY_USER: ${{ secrets.STAGING_REGISTRY_USER }}
          REGISTRY_TOKEN: ${{ secrets.STAGING_REGISTRY_TOKEN }}
          SSH_HOST: ${{ secrets.STAGING_SSH_HOST }}
          SSH_USER: ${{ secrets.STAGING_SSH_USER }}
          SSH_PORT: ${{ secrets.STAGING_SSH_PORT }}
        run: |
          # Auth key is the hostname only — strip the /owner path
          REGISTRY_HOST=$(echo "$REGISTRY" | cut -d'/' -f1)

          # The heredoc delimiter is intentionally unquoted: ${REGISTRY_*}
          # variables expand on the runner before the script is sent, so the
          # remote shell receives literal values and needs no env of its own.
          #
          # StrictHostKeyChecking=accept-new trusts on first connect but rejects
          # changed keys on subsequent runs — safer than no-verify
          ssh -i ~/.ssh/staging \
            -p "${SSH_PORT:-22}" \
            -o StrictHostKeyChecking=accept-new \
            "${SSH_USER}@${SSH_HOST}" bash -s << EOF
          set -euo pipefail

          # Registry uses HTTP — --tls-verify=false required for podman login & pull
          # (see: troubleshooting #12)
          echo "${REGISTRY_TOKEN}" \
            | podman login "${REGISTRY_HOST}" \
              -u "${REGISTRY_USER}" --password-stdin --tls-verify=false

          podman pull --tls-verify=false "${REGISTRY}/storefront:staging"
          podman pull --tls-verify=false "${REGISTRY}/admin:staging"

          cd /opt/staging
          podman compose up -d --remove-orphans

          # Remove dangling images from previous deploys
          podman image prune -f
          EOF
|
||||
64
apps/admin/Dockerfile
Normal file
64
apps/admin/Dockerfile
Normal file
@@ -0,0 +1,64 @@
|
||||
# Build context: ./out (turbo prune admin --docker)
#   out/json/ — package.json files only → used by deps stage for layer caching
#   out/full/ — full pruned monorepo   → used by builder stage for source
#   out/package-lock.json

# ── Stage 1: deps ────────────────────────────────────────────────────────────
# Installs all dependencies from the package.json tree only, so this layer is
# cached until a package.json or the lockfile changes.
FROM node:20-alpine AS deps

RUN apk add --no-cache libc6-compat
WORKDIR /app

# Upgrade npm to match the project's packageManager (npm@11). The package-lock.json
# was generated with npm 11 — npm 10 (bundled with node:20) can't fully parse it,
# causing turbo prune to generate an incomplete pruned lockfile and npm ci to miss
# packages.
RUN npm install -g npm@11 --quiet

COPY json/ .
COPY package-lock.json .
RUN npm ci

# ── Stage 2: builder ─────────────────────────────────────────────────────────
FROM node:20-alpine AS builder

WORKDIR /app

# NOTE(review): only the ROOT node_modules is copied here, whereas the
# storefront Dockerfile copies all of /app/ because npm installs some packages
# non-hoisted under apps/<app>/node_modules. If admin ever gains a non-hoisted
# dependency, mirror the storefront approach — confirm.
COPY --from=deps /app/node_modules ./node_modules
COPY full/ .

# NEXT_PUBLIC_* vars are baked into the client bundle at build time by Next.js.
# They must be present here (not just at runtime) or SSG/prerender fails.
# Passed via --build-arg in CI. Note: Gitea secrets use a STAGING_/PROD_ prefix
# which is stripped by the workflow before being forwarded here as build args.
ARG NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY
ARG NEXT_PUBLIC_CONVEX_URL
ARG NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME
ENV NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=$NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY \
    NEXT_PUBLIC_CONVEX_URL=$NEXT_PUBLIC_CONVEX_URL \
    NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME=$NEXT_PUBLIC_CLOUDINARY_CLOUD_NAME \
    NEXT_TELEMETRY_DISABLED=1

RUN npx turbo build --filter=admin

# ── Stage 3: runner ──────────────────────────────────────────────────────────
# Minimal runtime image: standalone bundle + static assets + public dir only.
FROM node:20-alpine AS runner

WORKDIR /app

ENV NODE_ENV=production \
    NEXT_TELEMETRY_DISABLED=1 \
    HOSTNAME=0.0.0.0 \
    PORT=3001

# Non-root user for security
RUN addgroup -g 1001 -S nodejs && adduser -S nextjs -u 1001

COPY --from=builder --chown=nextjs:nodejs /app/apps/admin/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/apps/admin/.next/static ./apps/admin/.next/static
COPY --from=builder --chown=nextjs:nodejs /app/apps/admin/public ./apps/admin/public

USER nextjs

EXPOSE 3001

# NOTE(review): static/public assets are placed under ./apps/admin/ but the
# entrypoint assumes server.js lands at /app/server.js. In many monorepo
# standalone builds it lands at apps/admin/server.js instead — verify the
# rendered standalone layout before relying on this path.
CMD ["node", "server.js"]
|
||||
@@ -3,6 +3,8 @@ const path = require("path");
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
const nextConfig = {
|
||||
output: "standalone",
|
||||
outputFileTracingRoot: path.join(__dirname, "../.."),
|
||||
transpilePackages: ["@repo/convex", "@repo/types", "@repo/utils"],
|
||||
turbopack: {
|
||||
root: path.join(__dirname, "..", ".."),
|
||||
|
||||
78
apps/storefront/Dockerfile
Normal file
78
apps/storefront/Dockerfile
Normal file
@@ -0,0 +1,78 @@
|
||||
# Build context: ./out (turbo prune storefront --docker)
#   out/json/ — package.json files only → used by deps stage for layer caching
#   out/full/ — full pruned monorepo   → used by builder stage for source
#   out/package-lock.json

# ── Stage 1: deps ────────────────────────────────────────────────────────────
# Install ALL dependencies (dev + prod) using only the package.json tree.
# This layer is shared with the builder stage and only rebuilds when
# a package.json or the lock file changes — not when source code changes.
FROM node:20-alpine AS deps

RUN apk add --no-cache libc6-compat
WORKDIR /app

# Upgrade npm to match the project's packageManager (npm@11). The package-lock.json
# was generated with npm 11 — npm 10 (bundled with node:20) can't fully parse it,
# causing turbo prune to generate an incomplete pruned lockfile and npm ci to miss
# packages like @heroui/react.
RUN npm install -g npm@11 --quiet

COPY json/ .
COPY package-lock.json .
RUN npm ci

# ── Stage 2: builder ─────────────────────────────────────────────────────────
# Full monorepo source + build artifact.
# next build produces .next/standalone/ because output: "standalone" is set
# in next.config.js — that's what makes the runner stage small.
FROM node:20-alpine AS builder

WORKDIR /app

# Copy everything from the deps stage — not just /app/node_modules.
# @heroui/react cannot be hoisted to the root by npm and is installed at
# apps/storefront/node_modules/ instead. Copying only the root node_modules
# would leave it missing. Copying all of /app/ brings both root and
# workspace-level node_modules, then full/ layers the source on top.
COPY --from=deps /app/ ./
COPY full/ .

# NEXT_PUBLIC_* vars are baked into the client bundle at build time by Next.js.
# They must be present here (not just at runtime) or SSG/prerender fails.
# Passed via --build-arg in CI. Note: Gitea secrets use a STAGING_/PROD_ prefix
# which is stripped by the workflow before being forwarded here as build args.
ARG NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY
ARG NEXT_PUBLIC_CONVEX_URL
ENV NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=$NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY \
    NEXT_PUBLIC_CONVEX_URL=$NEXT_PUBLIC_CONVEX_URL \
    NEXT_TELEMETRY_DISABLED=1

RUN npx turbo build --filter=storefront

# ── Stage 3: runner ──────────────────────────────────────────────────────────
# Minimal runtime image — only the standalone bundle, static assets, and public dir.
# No source code, no dev dependencies, no build tools.
FROM node:20-alpine AS runner

WORKDIR /app

ENV NODE_ENV=production \
    NEXT_TELEMETRY_DISABLED=1 \
    HOSTNAME=0.0.0.0 \
    PORT=3000

# Non-root user for security
RUN addgroup -g 1001 -S nodejs && adduser -S nextjs -u 1001

# standalone output mirrors the monorepo tree, so server.js lands at /app/server.js
# Static files and public/ must be copied separately — they are not in standalone/
# NOTE(review): if server.js actually lands at apps/storefront/server.js in the
# rendered standalone tree (common in monorepos), CMD below needs that path —
# verify against a local build.
COPY --from=builder --chown=nextjs:nodejs /app/apps/storefront/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/apps/storefront/.next/static ./apps/storefront/.next/static
COPY --from=builder --chown=nextjs:nodejs /app/apps/storefront/public ./apps/storefront/public

USER nextjs

EXPOSE 3000

CMD ["node", "server.js"]
|
||||
@@ -3,6 +3,10 @@ const path = require("path");
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
const nextConfig = {
|
||||
output: "standalone",
|
||||
// Required in a monorepo: tells Next.js to trace files from the repo root
|
||||
// so the standalone bundle includes files from packages/
|
||||
outputFileTracingRoot: path.join(__dirname, "../.."),
|
||||
transpilePackages: ["@repo/convex", "@repo/types", "@repo/utils"],
|
||||
turbopack: {
|
||||
root: path.join(__dirname, "..", ".."),
|
||||
|
||||
Reference in New Issue
Block a user