diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index b61d19b..f550dc1 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -11,7 +11,9 @@ on: push jobs: build: runs-on: ubuntu-latest - + environment: CI + outputs: + image_sha: sha-${{ github.sha }} steps: - name: Checkout uses: actions/checkout@v4 @@ -35,32 +37,62 @@ jobs: tags: | type=ref,event=pr type=ref,event=branch - type=sha + type=sha,format=long type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} - name: Build and push Docker images uses: docker/build-push-action@v6 with: - push: ${{github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/feature/actions'}} + push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max deployment: needs: build - if: github.ref == 'refs/heads/main' runs-on: ubuntu-latest + environment: CI + outputs: + pr_number: ${{ steps.get-pr.outputs.result }} + preview_url: ${{ steps.set-outputs.outputs.preview_url }} + is_production: ${{ steps.set-outputs.outputs.is_production }} steps: - - name: Checkout to branch + - name: Checkout uses: actions/checkout@v4 + - name: Get PR number + id: get-pr + uses: actions/github-script@v7 + with: + script: | + const branch = context.ref.replace('refs/heads/', ''); + const prs = await github.rest.pulls.list({ + owner: context.repo.owner, + repo: context.repo.repo, + state: 'open', + head: `${context.repo.owner}:${branch}` + }); + if (prs.data.length > 0 && !prs.data[0].draft) { + const pr = prs.data[0]; + const hasNoPreview = pr.labels.some(l => l.name === 'no-preview'); + if (!hasNoPreview) { + console.log(`Found PR #${pr.number} for branch ${branch}`); + return pr.number; + } + } + console.log(`No eligible PR for branch ${branch}`); + return ''; + result-encoding: string + - name: Tag Build uses: docker/metadata-action@v5 id: 
meta with: images: ghcr.io/${{ github.repository }} tags: | - type=sha + type=sha,format=long - name: Create build context for k8s deployment # There should only be 1 tag, so 'join' will just produce a simple string @@ -72,14 +104,68 @@ jobs: - name: Set up kubectl uses: matootie/dokube@v1.4.1 with: - personalAccessToken: ${{ secrets.DIGITALOCEAN_TOKEN }} + personalAccessToken: ${{ secrets.DIGITAL_OCEAN_K8S }} clusterName: k8s-rf - - name: Deploy app + # --- Production deployment (main branch only) --- + - name: Create production secret manifest + if: github.ref == 'refs/heads/main' run: | - kubectl diff -k . || echo \n - kubectl delete secret modbot-env || echo \n - kubectl create secret generic modbot-env \ + cat <<EOF > secret-values.yaml + apiVersion: v1 + kind: Secret + metadata: + name: modbot-env + namespace: default + type: Opaque + stringData: + SESSION_SECRET: "${{ secrets.SESSION_SECRET }}" + DISCORD_PUBLIC_KEY: "${{ secrets.DISCORD_PUBLIC_KEY }}" + DISCORD_APP_ID: "${{ secrets.DISCORD_APP_ID }}" + DISCORD_SECRET: "${{ secrets.DISCORD_SECRET }}" + DISCORD_HASH: "${{ secrets.DISCORD_HASH }}" + DISCORD_TEST_GUILD: "${{ secrets.DISCORD_TEST_GUILD }}" + SENTRY_INGEST: "${{ secrets.SENTRY_INGEST }}" + SENTRY_RELEASES: "${{ secrets.SENTRY_RELEASES }}" + STRIPE_SECRET_KEY: "${{ secrets.STRIPE_SECRET_KEY }}" + STRIPE_PUBLISHABLE_KEY: "${{ secrets.STRIPE_PUBLISHABLE_KEY }}" + STRIPE_WEBHOOK_SECRET: "${{ secrets.STRIPE_WEBHOOK_SECRET }}" + VITE_PUBLIC_POSTHOG_KEY: "${{ secrets.VITE_PUBLIC_POSTHOG_KEY }}" + VITE_PUBLIC_POSTHOG_HOST: "${{ secrets.VITE_PUBLIC_POSTHOG_HOST }}" + DATABASE_URL: "${{ secrets.DATABASE_URL }}" + EOF + + - name: Deploy to production + if: github.ref == 'refs/heads/main' + run: | + kubectl diff -k . || true + kubectl apply -f secret-values.yaml + kubectl apply -k . + if ! kubectl rollout status statefulset/mod-bot-set --timeout=5m; then + echo "Deployment failed, rolling back..." 
+ kubectl rollout undo statefulset/mod-bot-set + exit 1 + fi + + - name: Set Sentry release + if: github.ref == 'refs/heads/main' + run: | + curl ${{secrets.SENTRY_RELEASES}} \ + -X POST \ + -H 'Content-Type: application/json' \ + -d '{"version": "${{github.sha}}"}' + + # --- Preview deployment (PR branches only) --- + - name: Deploy preview + if: github.ref != 'refs/heads/main' && steps.get-pr.outputs.result != '' + run: | + PR_NUMBER=${{ steps.get-pr.outputs.result }} + echo "Deploying preview for PR #${PR_NUMBER}" + + kubectl config set-context --current --namespace=staging + + # Ensure staging secret exists + kubectl create secret generic modbot-staging-env \ --from-literal=SESSION_SECRET=${{ secrets.SESSION_SECRET }} \ --from-literal=DISCORD_PUBLIC_KEY=${{ secrets.DISCORD_PUBLIC_KEY }} \ --from-literal=DISCORD_APP_ID=${{ secrets.DISCORD_APP_ID }} \ @@ -93,12 +179,222 @@ jobs: --from-literal=STRIPE_WEBHOOK_SECRET=${{ secrets.STRIPE_WEBHOOK_SECRET }} \ --from-literal=VITE_PUBLIC_POSTHOG_KEY=${{ secrets.VITE_PUBLIC_POSTHOG_KEY }} \ --from-literal=VITE_PUBLIC_POSTHOG_HOST=${{ secrets.VITE_PUBLIC_POSTHOG_HOST }} \ - --from-literal=DATABASE_URL=${{ secrets.DATABASE_URL }} - kubectl apply -k . 
+ --from-literal=DATABASE_URL=/data/mod-bot.sqlite3 \ + --dry-run=client -o yaml | kubectl apply -f - - - name: Set Sentry release + # Deploy preview environment + export PR_NUMBER + export COMMIT_SHA=${{ github.sha }} + + # Delete database to start fresh (ignore errors if pod doesn't exist yet) + kubectl exec statefulset/mod-bot-pr-${PR_NUMBER} -- rm -f /data/mod-bot.sqlite3 || true + + envsubst < cluster/preview/deployment.yaml | kubectl apply -f - + + kubectl rollout restart statefulset/mod-bot-pr-${PR_NUMBER} + + echo "Preview deployed at https://${PR_NUMBER}.euno-staging.reactiflux.com" + + - name: Set deployment outputs + id: set-outputs run: | - curl ${{secrets.SENTRY_RELEASES}} \ - -X POST \ - -H 'Content-Type: application/json' \ - -d '{"version": "${{github.sha}}"}' + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + echo "is_production=true" >> $GITHUB_OUTPUT + echo "preview_url=" >> $GITHUB_OUTPUT + elif [[ -n "${{ steps.get-pr.outputs.result }}" ]]; then + echo "is_production=false" >> $GITHUB_OUTPUT + echo "preview_url=https://${{ steps.get-pr.outputs.result }}.euno-staging.reactiflux.com" >> $GITHUB_OUTPUT + else + echo "is_production=false" >> $GITHUB_OUTPUT + echo "preview_url=" >> $GITHUB_OUTPUT + fi + + - name: Comment preview URL on PR + if: github.ref != 'refs/heads/main' && steps.get-pr.outputs.result != '' + uses: actions/github-script@v7 + with: + script: | + const prNumber = parseInt('${{ steps.get-pr.outputs.result }}'); + const previewUrl = `https://${prNumber}.euno-staging.reactiflux.com`; + const commitSha = '${{ github.sha }}'; + + const comments = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber + }); + + const botComment = comments.data.find(c => + c.user.type === 'Bot' && c.body.includes('Preview deployed') + ); + + const body = `### Preview deployed + + It may take a few minutes before the service becomes available. 
+ + | Environment | URL | + |-------------|-----| + | Preview | ${previewUrl} | + + Deployed commit: \`${commitSha.substring(0, 7)}\` + + This preview will be updated on each push and deleted when the PR is closed.`; + + if (botComment) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + body + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body + }); + } + + # --- E2E Tests after deployment --- + e2e: + needs: deployment + if: needs.deployment.outputs.preview_url != '' || needs.deployment.outputs.is_production == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + env: + TARGET_URL: ${{ needs.deployment.outputs.preview_url || 'https://euno.reactiflux.com' }} + PR_NUMBER: ${{ needs.deployment.outputs.pr_number }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Setup node + uses: actions/setup-node@v4 + with: + node-version: 24 + + - run: npm ci + + - name: Cache Playwright browsers + uses: actions/cache@v4 + with: + path: ~/.cache/ms-playwright + key: playwright-${{ runner.os }}-${{ hashFiles('package-lock.json') }} + + - name: Install Playwright browsers + run: npx playwright install chromium + + - name: Wait for service to be ready + run: | + for i in {1..30}; do + if curl -sf "$TARGET_URL" > /dev/null; then + echo "Service is ready" + exit 0 + fi + echo "Waiting for service... 
($i/30)" + sleep 10 + done + echo "Service did not become ready in time" + exit 1 + + - name: Run Playwright tests + run: npm run test:e2e + env: + E2E_PREVIEW_URL: ${{ env.TARGET_URL }} + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: playwright-report-${{ github.run_id }} + path: | + playwright-report/ + test-results/ + retention-days: 30 + + - name: Deploy test report to GitHub Pages + if: always() + uses: peaceiris/actions-gh-pages@v4 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./playwright-report + destination_dir: reports/${{ github.run_number }} + keep_files: true + + - name: Comment PR with test results + if: ${{ always() && env.PR_NUMBER != '' }} + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const prNumber = parseInt('${{ env.PR_NUMBER }}'); + const targetUrl = '${{ env.TARGET_URL }}'; + const reportUrl = `https://reactiflux.github.io/mod-bot/reports/${{ github.run_number }}`; + const runUrl = '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'; + + // Parse test results + let stats = { passed: 0, failed: 0, flaky: 0, skipped: 0 }; + try { + const results = JSON.parse(fs.readFileSync('test-results/results.json', 'utf8')); + const countTests = (suites) => { + for (const suite of suites) { + for (const spec of suite.specs || []) { + for (const test of spec.tests || []) { + if (test.status === 'expected') stats.passed++; + else if (test.status === 'unexpected') stats.failed++; + else if (test.status === 'flaky') stats.flaky++; + else if (test.status === 'skipped') stats.skipped++; + } + } + if (suite.suites) countTests(suite.suites); + } + }; + countTests(results.suites || []); + } catch (e) { + console.log('Could not parse test results:', e.message); + } + + const emoji = stats.failed > 0 ? '❌' : stats.flaky > 0 ? '⚠️' : '✅'; + const status = stats.failed > 0 ? 'Failed' : stats.flaky > 0 ? 
'Flaky' : 'Passed'; + const statsParts = [ + stats.passed > 0 && `**${stats.passed}** passed`, + stats.flaky > 0 && `**${stats.flaky}** flaky`, + stats.failed > 0 && `**${stats.failed}** failed`, + stats.skipped > 0 && `**${stats.skipped}** skipped`, + ].filter(Boolean).join(' · '); + + const body = `## ${emoji} E2E Tests ${status} + + ${statsParts} + + [View Report](${reportUrl}) · [View Run](${runUrl}) + + Tested against: ${targetUrl}`; + + // Find existing E2E comment to update + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber + }); + + const existingComment = comments.find(c => + c.user.type === 'Bot' && c.body.includes('E2E Tests') + ); + + if (existingComment) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body + }); + } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 936d44b..b4e2bb1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,5 @@ name: CI + concurrency: group: ci-${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -62,194 +63,3 @@ jobs: - name: Run vitest run: npm run test - - e2e: - name: Playwright E2E - timeout-minutes: 5 - runs-on: ubuntu-latest - environment: CI - steps: - - name: Checkout repo - uses: actions/checkout@v4 - - - name: Setup node - uses: actions/setup-node@v4 - with: - node-version: 24 - - - run: npm ci - - - name: Cache Playwright browsers - uses: actions/cache@v4 - with: - path: ~/.cache/ms-playwright - key: playwright-${{ runner.os }}-${{ hashFiles('package-lock.json') }} - - - name: Install Playwright browsers - run: npx playwright install --with-deps chromium - - - name: Run Playwright tests - run: npm run test:e2e - env: - 
NODE_ENV: - CI: true - SESSION_SECRET: ${{ secrets.SESSION_SECRET }} - DISCORD_PUBLIC_KEY: ${{ secrets.DISCORD_PUBLIC_KEY }} - DISCORD_APP_ID: ${{ secrets.DISCORD_APP_ID }} - DISCORD_SECRET: ${{ secrets.DISCORD_SECRET }} - DISCORD_HASH: ${{ secrets.DISCORD_HASH }} - DISCORD_TEST_GUILD: ${{ secrets.DISCORD_TEST_GUILD }} - SENTRY_INGEST: ${{ secrets.SENTRY_INGEST }} - SENTRY_RELEASES: ${{ secrets.SENTRY_RELEASES }} - STRIPE_SECRET_KEY: ${{ secrets.STRIPE_SECRET_KEY }} - STRIPE_PUBLISHABLE_KEY: ${{ secrets.STRIPE_PUBLISHABLE_KEY }} - STRIPE_WEBHOOK_SECRET: ${{ secrets.STRIPE_WEBHOOK_SECRET }} - VITE_PUBLIC_POSTHOG_KEY: ${{ secrets.VITE_PUBLIC_POSTHOG_KEY }} - VITE_PUBLIC_POSTHOG_HOST: ${{ secrets.VITE_PUBLIC_POSTHOG_HOST }} - DATABASE_URL: ${{ secrets.DATABASE_URL }} - - - name: Generate test summary - run: | - echo "## 🎭 Playwright E2E Test Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ -f playwright-report/index.html ]; then - # Extract test counts from the HTML report - TOTAL=$(grep -o 'passed.*failed.*skipped' playwright-report/index.html | head -1 || echo "Results available in artifacts") - echo "**Status**: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY - echo "**Test Suite**: Payment Flow E2E" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "📊 Test artifacts available below" >> $GITHUB_STEP_SUMMARY - else - echo "⚠️ Test report not generated" >> $GITHUB_STEP_SUMMARY - fi - - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Artifacts" >> $GITHUB_STEP_SUMMARY - echo "- HTML Report (playwright-report/)" >> $GITHUB_STEP_SUMMARY - echo "- Screenshots (on failure)" >> $GITHUB_STEP_SUMMARY - echo "- Video recordings" >> $GITHUB_STEP_SUMMARY - - - name: Upload test artifacts - uses: actions/upload-artifact@v4 - with: - name: playwright-report-${{ github.run_id }} - path: | - playwright-report/ - test-results/ - retention-days: 30 - - - name: Deploy test report to GitHub Pages - uses: peaceiris/actions-gh-pages@v4 - with: - 
github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./playwright-report - destination_dir: reports/${{ github.run_number }} - keep_files: true - - - name: Comment PR with test results - uses: actions/github-script@v7 - with: - script: | - const fs = require('fs'); - const runId = '${{ github.run_id }}'; - const runNumber = '${{ github.run_number }}'; - const reportUrl = `https://reactiflux.github.io/mod-bot/reports/${runNumber}`; - - // Find PR associated with this push - const branch = context.ref.replace('refs/heads/', ''); - const { data: prs } = await github.rest.pulls.list({ - owner: context.repo.owner, - repo: context.repo.repo, - head: `${context.repo.owner}:${branch}`, - state: 'open' - }); - - if (prs.length === 0) { - console.log(`No open PR found for branch ${branch}, skipping comment`); - return; - } - - // Parse test results - let testRows = []; - let stats = { passed: 0, failed: 0, flaky: 0, skipped: 0 }; - - try { - const results = JSON.parse(fs.readFileSync('test-results/results.json', 'utf8')); - - const processSpecs = (suites) => { - for (const suite of suites) { - for (const spec of suite.specs || []) { - for (const test of spec.tests || []) { - const status = test.status; - if (status === 'expected') stats.passed++; - else if (status === 'unexpected') stats.failed++; - else if (status === 'flaky') stats.flaky++; - else if (status === 'skipped') stats.skipped++; - - // Get duration from the last result - const lastResult = test.results?.[test.results.length - 1]; - const duration = lastResult?.duration || 0; - const durationStr = duration >= 1000 - ? `${(duration / 1000).toFixed(1)}s` - : `${duration}ms`; - - // Check for retries - const retryCount = test.results?.length - 1 || 0; - const retryInfo = retryCount > 0 ? ` (${retryCount} ${retryCount === 1 ? 
'retry' : 'retries'})` : ''; - - let emoji; - if (status === 'expected') emoji = '✅'; - else if (status === 'flaky') emoji = '⚠️'; - else if (status === 'unexpected') emoji = '❌'; - else emoji = '⏭️'; - - const testLink = spec.id - ? `[${spec.title}](${reportUrl}#?testId=${spec.id})` - : spec.title; - - testRows.push(`| ${emoji} | ${testLink}${retryInfo} | ${durationStr} |`); - } - } - - // Recurse into child suites - if (suite.suites) { - processSpecs(suite.suites); - } - } - }; - - processSpecs(results.suites || []); - } catch (e) { - console.log('Could not parse test results:', e.message); - } - - const total = stats.passed + stats.failed + stats.flaky + stats.skipped; - const statusEmoji = stats.failed > 0 ? '❌' : '✅'; - const statusText = stats.failed > 0 ? 'Failed' : stats.flaky > 0 ? 'Flaky' : 'Passed'; - - // Build stats line - let statsParts = []; - if (stats.passed > 0) statsParts.push(`**${stats.passed}** passed`); - if (stats.flaky > 0) statsParts.push(`**${stats.flaky}** flaky`); - if (stats.failed > 0) statsParts.push(`**${stats.failed}** failed`); - if (stats.skipped > 0) statsParts.push(`**${stats.skipped}** skipped`); - - let comment = [ - `## ${statusEmoji} E2E Tests ${statusText}`, - '', - statsParts.join(' · '), - '', - `[View Report](${reportUrl}) · [View Run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${runId})`, - '', - '| | Test | Duration |', - '|--------|------|----------|', - ...testRows - ].join('\n'); - - await github.rest.issues.createComment({ - issue_number: prs[0].number, - owner: context.repo.owner, - repo: context.repo.repo, - body: comment - }); diff --git a/.github/workflows/preview.yml b/.github/workflows/preview.yml new file mode 100644 index 0000000..16c4ef6 --- /dev/null +++ b/.github/workflows/preview.yml @@ -0,0 +1,65 @@ +name: Preview Cleanup + +on: + pull_request: + types: [closed, labeled] + +env: + HUSKY: 0 + +jobs: + cleanup-preview: + name: Cleanup Preview + if: | + github.event.action == 'closed' 
|| + (github.event.action == 'labeled' && github.event.label.name == 'no-preview') + runs-on: ubuntu-latest + environment: CI + + steps: + - name: Set up kubectl + uses: matootie/dokube@v1.4.1 + with: + personalAccessToken: ${{ secrets.DIGITAL_OCEAN_K8S }} + clusterName: k8s-rf + + - name: Set default namespace + run: kubectl config set-context --current --namespace=staging + + - name: Delete preview resources + run: | + PR_NUMBER=${{ github.event.pull_request.number }} + echo "Cleaning up preview environment for PR #${PR_NUMBER}" + + kubectl delete statefulset mod-bot-pr-${PR_NUMBER} --ignore-not-found + kubectl delete service mod-bot-pr-${PR_NUMBER} --ignore-not-found + kubectl delete ingress mod-bot-pr-${PR_NUMBER} --ignore-not-found + kubectl delete pvc -l preview=pr-${PR_NUMBER} --ignore-not-found + + echo "Preview environment cleaned up" + + - name: Comment cleanup notice + if: github.event.action == 'closed' + uses: actions/github-script@v7 + with: + script: | + const prNumber = context.payload.pull_request.number; + + const comments = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber + }); + + const botComment = comments.data.find(c => + c.user.type === 'Bot' && c.body.includes('Preview deployed') + ); + + if (botComment) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + body: '### Preview environment removed\n\nThe preview for this PR has been cleaned up.' + }); + } diff --git a/.nvmrc b/.nvmrc index 19f23bc..54c6511 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v24.4 +v24 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2ee5d65..0b64722 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,18 +32,32 @@ 1. Look for the following message in the logs, and open the URL in a browser where you're logged into Discord. - `Bot started. 
If necessary, add it to your test server:` +## PR Preview Environments + +When you open a pull request, a preview environment is automatically deployed at `https://<pr-number>.euno-staging.reactiflux.com`. The bot will comment on your PR with the preview URL. + +**What happens on each push:** + +1. Docker image is built and pushed +2. Preview is deployed to the staging namespace +3. Database is reset (starts fresh each deploy) +4. E2E tests run against the preview URL +5. Test results are posted as a PR comment + +**To skip preview deployment:** Add the `no-preview` label to your PR, or mark it as a draft. + # Implementation notes There are subtle issues when making some chaings. These are notes for steps to take to make sure it's done correctly when needed. ## Environment variables -Adding a new environment variable needs to be done in several places to work corectly and be predictable for new developers: +Adding a new environment variable needs to be done in several places to work correctly and be predictable for new developers: - Add a suitable example to `.env.example` - Add to your own `.env` (and restart the dev server) -- Add to the action in `.github/workflows/node.js.yml` -- Add to the Kubernetes config under `cluster/deployment.yml +- Add to `.github/workflows/ci.yml` (for E2E tests) +- Add to `.github/workflows/cd.yml` (in the secret manifest step) # Useful DevOps commands @@ -53,9 +67,18 @@ This bot runs on a managed Kubernetes cluster on DigitalOcean. 
It's possible (th # Tail the logs of the production instance kubectl logs -f mod-bot-set-0 -# Force a restart without merging a PR (as of 2025-11 only 1 replica is in use) -kubectl scale statefulset mod-bot-set --replicas 0 -kubectl scale statefulset mod-bot-set --replicas 1 +# Check pod health and readiness +kubectl get pods -l app=mod-bot +kubectl describe pod mod-bot-set-0 + +# Check rollout status (CD does this automatically) +kubectl rollout status statefulset/mod-bot-set + +# Rollback to previous version +kubectl rollout undo statefulset/mod-bot-set + +# Force a restart without merging a PR (single replica in use) +kubectl rollout restart statefulset/mod-bot-set # Copy out the production database (for backups!) kubectl cp mod-bot-set-0:data/mod-bot.sqlite3 ./mod-bot-prod.sqlite3 @@ -66,4 +89,7 @@ kubectl exec mod-bot-set-0 -- npm run start:migrate # Extract production secrets (in base64) kubectl get secret modbot-env -o json + +# Check resource usage (requires metrics-server) +kubectl top pod mod-bot-set-0 ``` diff --git a/Dockerfile b/Dockerfile index b9d10be..347643f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM node:24-alpine as build +FROM node:24-alpine AS build WORKDIR /app COPY package.json package-lock.json ./ @@ -22,6 +22,7 @@ RUN npm prune --production COPY --from=build /app/build ./build ADD index.prod.js ./ +COPY scripts ./scripts COPY kysely.config.ts ./ COPY migrations ./migrations diff --git a/app/commands/escalate/handlers.ts b/app/commands/escalate/handlers.ts index d626c8b..921af2c 100644 --- a/app/commands/escalate/handlers.ts +++ b/app/commands/escalate/handlers.ts @@ -61,7 +61,7 @@ export const EscalationHandlers = { ); } catch (error) { log("error", "EscalationHandlers", "Error deleting reported messages", { - error: error instanceof Error ? 
error.message : String(error), + error, }); await interaction.editReply({ content: "Failed to delete messages", @@ -95,9 +95,7 @@ export const EscalationHandlers = { ), ]); } catch (error) { - log("error", "EscalationHandlers", "Error kicking user", { - error: error instanceof Error ? error.message : String(error), - }); + log("error", "EscalationHandlers", "Error kicking user", { error }); await interaction.reply({ content: "Failed to kick user", flags: [MessageFlags.Ephemeral], @@ -131,9 +129,7 @@ export const EscalationHandlers = { ), ]); } catch (error) { - log("error", "EscalationHandlers", "Error banning user", { - error: error instanceof Error ? error.message : String(error), - }); + log("error", "EscalationHandlers", "Error banning user", { error }); await interaction.reply({ content: "Failed to ban user", flags: [MessageFlags.Ephemeral], @@ -167,9 +163,7 @@ export const EscalationHandlers = { ), ]); } catch (error) { - log("error", "EscalationHandlers", "Error restricting user", { - error: error instanceof Error ? error.message : String(error), - }); + log("error", "EscalationHandlers", "Error restricting user", { error }); await interaction.reply({ content: "Failed to restrict user", flags: [MessageFlags.Ephemeral], @@ -203,9 +197,7 @@ export const EscalationHandlers = { ), ]); } catch (error) { - log("error", "EscalationHandlers", "Error timing out user", { - error: error instanceof Error ? 
error.message : String(error), - }); + log("error", "EscalationHandlers", "Error timing out user", { error }); await interaction.reply({ content: "Failed to timeout user", flags: [MessageFlags.Ephemeral], @@ -276,7 +268,7 @@ ${buildVotesListContent(tally)}`, components: [], // Remove buttons }); } catch (error) { - log("error", "Expedite failed", JSON.stringify({ error })); + log("error", "EscalationHandlers", "Expedite failed", { error }); await interaction.editReply( "Something went wrong while executing the resolution", ); @@ -481,7 +473,7 @@ ${buildVotesListContent(tally)}`, await interaction.editReply("Escalation started"); } catch (error) { log("error", "EscalationHandlers", "Error creating escalation vote", { - error: error instanceof Error ? error.message : String(error), + error, }); await interaction.editReply({ content: "Failed to create escalation vote", diff --git a/app/discord/client.server.ts b/app/discord/client.server.ts index 9c05eba..5f19ceb 100644 --- a/app/discord/client.server.ts +++ b/app/discord/client.server.ts @@ -42,9 +42,7 @@ export const login = () => { guildNames: guildNames.join(", "), }); } catch (error) { - log("error", "Client", "Failed to fetch guilds", { - error: error instanceof Error ? error.message : String(error), - }); + log("error", "Client", "Failed to fetch guilds", { error }); } if (client.application) { diff --git a/app/discord/escalationResolver.ts b/app/discord/escalationResolver.ts index 7b16ad9..da819f6 100644 --- a/app/discord/escalationResolver.ts +++ b/app/discord/escalationResolver.ts @@ -85,7 +85,7 @@ export async function executeResolution( } catch (error) { log("error", "EscalationControls", "Failed to execute resolution", { ...logBag, - error: error instanceof Error ? 
error.message : String(error), + error, }); throw error; } @@ -139,7 +139,7 @@ async function executeScheduledResolution( } catch (error) { log("warn", "EscalationResolver", "Could not update vote message", { ...logBag, - error: error instanceof Error ? error.message : String(error), + error, }); } @@ -152,7 +152,7 @@ async function executeScheduledResolution( } catch (error) { log("error", "EscalationResolver", "Failed to auto-resolve escalation", { ...logBag, - error: error instanceof Error ? error.message : String(error), + error, }); } } @@ -215,7 +215,7 @@ async function checkPendingEscalations(client: Client): Promise { } catch (error) { log("error", "EscalationResolver", "Error processing escalation", { escalationId: escalation.id, - error: error instanceof Error ? error.message : String(error), + error, }); } } diff --git a/app/discord/gateway.ts b/app/discord/gateway.ts index 4932fde..1cb1eb8 100644 --- a/app/discord/gateway.ts +++ b/app/discord/gateway.ts @@ -88,7 +88,7 @@ export default function init() { log("error", "Gateway", "Failed to join thread", { threadId: thread.id, guildId: thread.guild.id, - error: error instanceof Error ? error.message : String(error), + error, }); }); }); diff --git a/app/discord/reactjiChanneler.ts b/app/discord/reactjiChanneler.ts index 5305e6d..08b5af1 100644 --- a/app/discord/reactjiChanneler.ts +++ b/app/discord/reactjiChanneler.ts @@ -108,7 +108,7 @@ export async function startReactjiChanneler(client: Client) { }); } catch (error) { log("error", "ReactjiChanneler", "Error handling reaction", { - error: error instanceof Error ? 
error.message : String(error), + error, messageId: partialReaction.message.id, }); } diff --git a/app/models/reportedMessages.server.ts b/app/models/reportedMessages.server.ts index cbbff2a..f9de381 100644 --- a/app/models/reportedMessages.server.ts +++ b/app/models/reportedMessages.server.ts @@ -229,7 +229,7 @@ async function deleteSingleMessage( log("warn", "ReportedMessage", "Failed to delete message", { messageId, - error: error instanceof Error ? error.message : String(error), + error, }); return { success: false, messageId, error }; } diff --git a/app/models/session.server.ts b/app/models/session.server.ts index a8bdc58..15f67a7 100644 --- a/app/models/session.server.ts +++ b/app/models/session.server.ts @@ -168,7 +168,11 @@ export async function initOauthLogin({ flow?: "user" | "signup" | "add-bot"; guildId?: string; }) { - const { origin } = new URL(request.url); + const url = new URL(request.url); + const proto = + request.headers.get("X-Forwarded-Proto") ?? url.protocol.replace(":", ""); + const host = request.headers.get("X-Forwarded-Host") ?? url.host; + const origin = `${proto}://${host}`; const cookieSession = await getCookieSession(request.headers.get("Cookie")); const state = JSON.stringify({ @@ -223,7 +227,10 @@ export async function completeOauthLogin(request: Request) { throw redirect("/login", 500); } - const origin: string = url.origin; + const proto = + request.headers.get("X-Forwarded-Proto") ?? url.protocol.replace(":", ""); + const host = request.headers.get("X-Forwarded-Host") ?? url.host; + const origin = `${proto}://${host}`; const reqCookie: string = cookie; const state: string | undefined = url.searchParams.get("state") ?? 
undefined; diff --git a/app/models/stripe.server.ts b/app/models/stripe.server.ts index dba767e..a1db74b 100644 --- a/app/models/stripe.server.ts +++ b/app/models/stripe.server.ts @@ -58,7 +58,10 @@ export const StripeService = { client_reference_id: guildId, customer_email: customerEmail, metadata: { guild_id: guildId }, - subscription_data: { metadata: { guild_id: guildId } }, + subscription_data: { + metadata: { guild_id: guildId }, + trial_period_days: 90, + }, }); log("info", "Stripe", "Checkout session created successfully", { @@ -70,7 +73,7 @@ export const StripeService = { } catch (error) { log("error", "Stripe", "Failed to create checkout session", { guildId, - error: error instanceof Error ? error.message : String(error), + error, }); Sentry.captureException(error); throw error; @@ -115,7 +118,7 @@ export const StripeService = { } catch (error) { log("error", "Stripe", "Failed to verify checkout session", { sessionId, - error: error instanceof Error ? error.message : String(error), + error, }); Sentry.captureException(error); return null; @@ -141,10 +144,7 @@ export const StripeService = { try { const customer = await stripe.customers.create({ email, - metadata: { - guild_id: guildId, - guild_name: guildName ?? "", - }, + metadata: { guild_id: guildId, guild_name: guildName ?? "" }, }); log("info", "Stripe", "Customer created successfully", { @@ -156,7 +156,7 @@ export const StripeService = { } catch (error) { log("error", "Stripe", "Failed to create customer", { guildId, - error: error instanceof Error ? error.message : String(error), + error, }); Sentry.captureException(error); throw error; @@ -196,7 +196,7 @@ export const StripeService = { } catch (error) { log("error", "Stripe", "Failed to search for customer", { guildId, - error: error instanceof Error ? 
error.message : String(error), + error, }); Sentry.captureException(error); return null; @@ -226,7 +226,7 @@ export const StripeService = { } catch (error) { log("error", "Stripe", "Failed to cancel subscription", { subscriptionId, - error: error instanceof Error ? error.message : String(error), + error, }); Sentry.captureException(error); return false; diff --git a/app/routes/healthcheck.tsx b/app/routes/healthcheck.tsx index 46cb088..5fd8d9b 100644 --- a/app/routes/healthcheck.tsx +++ b/app/routes/healthcheck.tsx @@ -12,8 +12,13 @@ export async function loader({ request }: Route.LoaderArgs) { // if we can connect to the database and make a simple query // and make a HEAD request to ourselves, then we're good. await Promise.all([ - // @ts-expect-error because kysely doesn't generate types for these - db.selectFrom("sqlite_master").where("type", "=", "table").execute(), + db + // @ts-expect-error because kysely doesn't generate types for sqlite_master + .selectFrom("sqlite_master") + .select("name") + // @ts-expect-error because kysely doesn't generate types for sqlite_master + .where("type", "=", "table") + .execute(), fetch(url.toString(), { method: "HEAD" }).then((r) => { if (!r.ok) { return Promise.reject( diff --git a/app/routes/webhooks.stripe.tsx b/app/routes/webhooks.stripe.tsx index 75b182e..0764100 100644 --- a/app/routes/webhooks.stripe.tsx +++ b/app/routes/webhooks.stripe.tsx @@ -80,9 +80,7 @@ export async function action({ request }: Route.ActionArgs) { headers: { "Content-Type": "application/json" }, }); } catch (error) { - log("error", "Webhook", "Failed to process webhook", { - error: error instanceof Error ? 
error.message : String(error), - }); + log("error", "Webhook", "Failed to process webhook", { error }); return new Response( JSON.stringify({ error: "Webhook processing failed" }), { diff --git a/cluster/cluster-issuer.yaml b/cluster/cluster-issuer.yaml index 7d7bc8c..a8e2d26 100644 --- a/cluster/cluster-issuer.yaml +++ b/cluster/cluster-issuer.yaml @@ -1,6 +1,6 @@ -# I believe this only needed to be run once on the cluster as a whole, to make -# a LetsEncrypt service available. That was done with: -# kubectl apply -f cluster/cluster-issuer.yaml +# Apply with: kubectl apply -f cluster/cluster-issuer.yaml +# This configures cert-manager to issue TLS certificates via Let's Encrypt. +# HTTP-01 is used for regular certs, DNS-01 for wildcard certs. apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: @@ -12,6 +12,16 @@ spec: privateKeySecretRef: name: letsencrypt-prod-key solvers: + # Default: HTTP-01 for regular certificates - http01: ingress: class: nginx + # DNS-01 for wildcard certificates (staging previews) + - dns01: + digitalocean: + tokenSecretRef: + name: digitalocean-dns-token + key: access-token + selector: + dnsNames: + - "*.euno-staging.reactiflux.com" diff --git a/cluster/deployment.yaml b/cluster/deployment.yaml index 8474fde..37db2b9 100644 --- a/cluster/deployment.yaml +++ b/cluster/deployment.yaml @@ -18,96 +18,47 @@ spec: containers: - name: mod-bot image: $(IMAGE) + ports: + - containerPort: 3000 volumeMounts: - mountPath: "/data" name: mod-bot-pvc + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + startupProbe: + httpGet: + path: /healthcheck + port: 3000 + failureThreshold: 30 + periodSeconds: 2 + livenessProbe: + httpGet: + path: /healthcheck + port: 3000 + initialDelaySeconds: 0 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: 3000 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 2 + 
envFrom: + - secretRef: + name: modbot-env env: - name: ENVIRONMENT value: production - - name: DISCORD_HASH - valueFrom: - secretKeyRef: - name: modbot-env - key: DISCORD_HASH - - - name: SESSION_SECRET - valueFrom: - secretKeyRef: - name: modbot-env - key: SESSION_SECRET - - - name: DISCORD_PUBLIC_KEY - valueFrom: - secretKeyRef: - name: modbot-env - key: DISCORD_PUBLIC_KEY - - - name: DISCORD_APP_ID - valueFrom: - secretKeyRef: - name: modbot-env - key: DISCORD_APP_ID - - - name: DISCORD_SECRET - valueFrom: - secretKeyRef: - name: modbot-env - key: DISCORD_SECRET - - name: STRIPE_SECRET_KEY - valueFrom: - secretKeyRef: - name: modbot-env - key: STRIPE_SECRET_KEY - - - name: STRIPE_PUBLISHABLE_KEY - valueFrom: - secretKeyRef: - name: modbot-env - key: STRIPE_PUBLISHABLE_KEY - - - name: STRIPE_WEBHOOK_SECRET - valueFrom: - secretKeyRef: - name: modbot-env - key: STRIPE_WEBHOOK_SECRET - - - name: DISCORD_TEST_GUILD - valueFrom: - secretKeyRef: - name: modbot-env - key: DISCORD_TEST_GUILD - - - name: SENTRY_INGEST - valueFrom: - secretKeyRef: - name: modbot-env - key: SENTRY_INGEST - - - name: SENTRY_RELEASES - valueFrom: - secretKeyRef: - name: modbot-env - key: SENTRY_RELEASES - - - name: VITE_PUBLIC_POSTHOG_KEY - valueFrom: - secretKeyRef: - name: modbot-env - key: VITE_PUBLIC_POSTHOG_KEY - - - name: VITE_PUBLIC_POSTHOG_HOST - valueFrom: - secretKeyRef: - name: modbot-env - key: VITE_PUBLIC_POSTHOG_HOST - - - name: DATABASE_URL - valueFrom: - secretKeyRef: - name: modbot-env - key: DATABASE_URL - volumeClaimTemplates: - metadata: name: mod-bot-pvc diff --git a/cluster/pdb.yaml b/cluster/pdb.yaml new file mode 100644 index 0000000..4fa4743 --- /dev/null +++ b/cluster/pdb.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: mod-bot-pdb +spec: + maxUnavailable: 0 + selector: + matchLabels: + app: mod-bot diff --git a/cluster/preview/deployment.yaml b/cluster/preview/deployment.yaml new file mode 100644 index 0000000..bada152 --- 
/dev/null +++ b/cluster/preview/deployment.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mod-bot-pr-${PR_NUMBER} + namespace: staging + labels: + app: mod-bot-preview + preview: pr-${PR_NUMBER} +spec: + serviceName: mod-bot-pr-${PR_NUMBER} + replicas: 1 + selector: + matchLabels: + preview: pr-${PR_NUMBER} + template: + metadata: + labels: + app: mod-bot-preview + preview: pr-${PR_NUMBER} + spec: + containers: + - name: mod-bot + image: ghcr.io/reactiflux/mod-bot:sha-${COMMIT_SHA} + imagePullPolicy: Always + command: ["npm", "run", "start:staging"] + ports: + - containerPort: 3000 + volumeMounts: + - mountPath: "/data" + name: mod-bot-data + envFrom: + - secretRef: + name: modbot-staging-env + env: + - name: ENVIRONMENT + value: staging + resources: + requests: + memory: "128Mi" + cpu: "50m" + limits: + memory: "256Mi" + cpu: "500m" + startupProbe: + httpGet: + path: /healthcheck + port: 3000 + failureThreshold: 60 + periodSeconds: 5 + timeoutSeconds: 5 + livenessProbe: + httpGet: + path: /healthcheck + port: 3000 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: 3000 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 2 + volumeClaimTemplates: + - metadata: + name: mod-bot-data + labels: + preview: pr-${PR_NUMBER} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 256Mi + storageClassName: do-block-storage +--- +apiVersion: v1 +kind: Service +metadata: + name: mod-bot-pr-${PR_NUMBER} + namespace: staging + labels: + app: mod-bot-preview + preview: pr-${PR_NUMBER} +spec: + selector: + preview: pr-${PR_NUMBER} + ports: + - port: 80 + targetPort: 3000 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: mod-bot-pr-${PR_NUMBER} + namespace: staging + labels: + app: mod-bot-preview + preview: pr-${PR_NUMBER} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + 
nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: letsencrypt-prod +spec: + ingressClassName: nginx + rules: + - host: ${PR_NUMBER}.euno-staging.reactiflux.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: mod-bot-pr-${PR_NUMBER} + port: + number: 80 + tls: + - hosts: + - ${PR_NUMBER}.euno-staging.reactiflux.com + secretName: mod-bot-pr-${PR_NUMBER}-tls diff --git a/kustomization.yaml b/kustomization.yaml index 4ef25b5..b26bba6 100644 --- a/kustomization.yaml +++ b/kustomization.yaml @@ -7,6 +7,7 @@ resources: - cluster/service.yaml - cluster/deployment.yaml - cluster/ingress.yaml + - cluster/pdb.yaml configMapGenerator: - name: k8s-context # this is an internal name diff --git a/notes/2025-12-14_1_ci-cd-architecture-review.md b/notes/2025-12-14_1_ci-cd-architecture-review.md new file mode 100644 index 0000000..dd43e3c --- /dev/null +++ b/notes/2025-12-14_1_ci-cd-architecture-review.md @@ -0,0 +1,292 @@ +# CI/CD Strategic Architecture Review + +## A new release management scheme + +The proposal treats main as a staging/RC environment with a dedicated Discord bot, and introduces a release branch for production. This creates +a natural promotion path where code is validated on staging before reaching production, avoiding the SHA mismatch problem entirely. 
+ +## Comparison of release management schemes + +| Scheme | Description | Pros | Cons | Best For | +| ----------------------- | ---------------------------------------------------- | ---------------------------------------------------------------- | ------------------------------------------------------------- | ----------------------------------------------------------- | +| Trunk-Based Development | All commits to main, continuous deployment | Fast iteration, simple branching, encourages small changes | Requires feature flags, high test confidence, no staging gate | High-velocity teams with mature testing | +| GitHub Flow | Feature branches → main, deploy from main | Simple, low overhead, clear PR workflow | No staging environment, main = production risk | Small teams, low-risk applications | +| GitLab Flow | Feature → main → environment branches | Clear promotion path, environment parity | Multiple long-lived branches, merge complexity | Teams needing explicit environment gates | +| Git Flow | develop → release → main, hotfix branches | Formal release process, version control | Complex branching, slow releases, merge hell | Packaged software, versioned releases | +| Release Trains | Scheduled releases from main at intervals | Predictable cadence, batched testing | Delays features, doesn't fit continuous delivery | Enterprise, compliance-heavy environments | +| Environment Branches | Separate branches per environment (dev/staging/prod) | Clear mapping, easy rollback | Drift between branches, complex merges | Legacy systems, regulated industries | +| Proposed | main (staging) → release (prod), | Promotion via merge, environment isolation, validates in staging | Two long-lived branches, requires staging infra | Multi-tenant SaaS, Discord bots, apps needing RC validation | + +Recommendation: K8s-Based Git Flow model + +``` +PR → main (staging) → release (production) + ↓ ↓ + staging k8s production k8s + (RC Discord bot) (prod Discord bot) +``` + +1. 
Discord bots benefit from RC validation (real users, isolated bot) +2. Squash-and-merge is preserved (no workflow changes for developers) +3. Multi-tenant k8s leverages existing infrastructure +4. Promotion is explicit (merge to release) rather than implicit (push to main) + +--- + +## Current Reality vs. Proposed Model + +### What Exists Today + +| Component | Current State | +| ------------- | ------------------------------------------------- | +| Branching | Feature branches → main (squash merge) | +| CI | Runs on PR branches only (branches-ignore: main) | +| CD | Runs on all pushes; deploys on main | +| Gating | Branch protection requires CI status before merge | +| Environments | PR previews (ephemeral), production (main) | +| Staging | None (PR previews serve this role, imperfectly) | +| Artifact flow | New image built on main (untested SHA) | + +### What the Proposed Model Changes + +| Component | Proposed State | +| ------------- | ---------------------------------------------------------------------- | +| Branching | Feature → main (staging) → release (production) | +| CI | Runs on PR branches (unchanged) | +| CD | Deploy main → staging namespace; deploy release → production namespace | +| Gating | Branch protection on both main AND release | +| Environments | PR previews, staging (main), production (release) | +| Staging | Persistent, with dedicated Discord bot, real-ish data | +| Artifact flow | Image built on main and validated in staging, promoted to release | + +--- + +## Implementation Plan + +### Phase 1: Create Release Branch Infrastructure + +Goal: Establish the release branch and production deployment path. + +Steps: + +1. Create release branch from current main +2. Update cd.yml to deploy based on branch +3. Create staging namespace and resources + +- New cluster/staging/ directory with persistent staging manifests +- Staging uses different Discord bot credentials (new Discord app) +- Staging ingress: staging.euno.reactiflux.com + +4. 
Configure branch protection on release + +- Require PR reviews before merge +- Optionally require specific approvers for production releases + +Files to modify: + +- .github/workflows/cd.yml +- cluster/staging/deployment.yaml (new) +- cluster/staging/kustomization.yaml (new) + +--- + +### Phase 2: Staging Environment Data Strategy + +Goal: Staging should have realistic data without copying production. + +The seed data problem: + +- Hand-written seeds don't scale +- Production copies have privacy/security risks +- Empty databases don't catch real-world bugs + +Recommended approach: Synthetic Data Generation + +Why this over production snapshots: + +1. No PII concerns +2. Reproducible (seeded random) +3. Can generate edge cases intentionally +4. Scales independently of production size + +Migration path: + +1. Start with expanded seed-e2e.ts (more guilds, varied configs) +2. Evolve to faker-based generation as coverage needs grow +3. Add distribution configs for realistic scenarios + +Files to create/modify: + +- scripts/generate-staging-data.ts (new) +- scripts/seed-e2e.ts (expand for now) + +--- + +### Phase 3: Staging Discord Bot + +Goal: Staging environment has its own Discord bot for RC validation. + +Steps: + +1. Create new Discord application (Discord Developer Portal) + +- Rename current app "Euno Staging" or "Euno RC" +- Invite to a test guild (could be public "beta testers" guild) + +2. Create staging secrets in GitHub +3. Update staging deployment to use staging secrets +4. Configure staging Stripe (test mode keys, separate from production) + +Benefit: Real users can opt-in to test RC releases before they hit production. Dogfooding path for the team. + +--- + +### Phase 4: CI Workflow Adjustments + +Goal: CI continues to gate merges; add staging validation. + +Current CI runs on: All branches except main (branches-ignore: main) + +Proposed changes: + +1. Keep CI as-is for PR branches (lint, typecheck, vitest, e2e) +2. 
Add staging E2E job that runs after staging deploy + +- Trigger: Push to main (after staging deploys) +- Runs E2E against staging.euno.reactiflux.com +- Reports results but doesn't block (staging is already deployed) +- Alerts if staging E2E fails + +3. Optional: Add release validation job + +- Trigger: PR from main to release +- Runs smoke tests against staging +- Gates the release PR merge + +Files to modify: + +- .github/workflows/ci.yml (add staging E2E trigger) +- .github/workflows/cd.yml (add staging E2E after deploy) + +--- + +### Phase 5: Release Process Documentation + +Goal: Clear process for promoting staging to production. + +Release workflow (normal): + +1. Code lands in main via squash-merge PR +2. Staging auto-deploys and runs E2E +3. Validation period (hours/days depending on change risk) +4. Create release PR from main → release + +- PR description summarizes changes since last release +- Automated checks verify staging health + +5. Merge with merge commit (preserves release history) +6. Production auto-deploys from release branch +7. Monitor production health + +Merge strategy: Use merge commits for main → release. This: + +- Preserves clear release history on the release branch +- Makes it easy to see what was included in each release +- Allows release branch to have its own commits (hotfixes) + +Hotfix workflow (critical issues): + +1. Create hotfix branch from release (not main) +2. Fix the issue, PR to release +3. Merge and deploy (production gets fix immediately) +4. 
Cherry-pick to main so staging also gets the fix + +When to use hotfix path: + +- Production is broken (P0/P1 incidents) +- Security vulnerabilities +- Data corruption risks + +When NOT to use hotfix path: + +- Features that "should have been in the release" +- Non-critical bugs (wait for next release) +- Anything that needs validation time + +Automation opportunities: + +- Automated release PR creation (weekly, or on-demand) +- Changelog generation from commit messages +- Slack notification on release merge +- Reminder to cherry-pick hotfixes back to main + +--- + +## Seed Data Strategy Deep Dive + +Options Evaluated + +| Strategy | Scalability | Realism | Privacy | Maintenance | Complexity | +| -------------------- | ----------- | ----------- | ------- | ----------- | ---------- | +| Hand-written seeds | Poor | Low | Safe | High | Low | +| Production snapshots | Good | High | Risky | Low | Medium | +| Anonymized snapshots | Good | High | Safe | Medium | High | +| Synthetic generation | Good | Medium-High | Safe | Medium | Medium | +| Fixture factories | Good | Medium | Safe | Medium | Low | + +Recommended: Hybrid Approach + +1. Immediate: Expand seed-e2e.ts with more scenarios + +- Real guilds only. Can fabricate other details + +2. 
Short-term: Add faker-based generation + +- Configurable distributions +- Deterministic seeds for reproducibility +- Run on staging deployment, not preview + +--- + +## Tactical Cleanup Checklist + +Address these regardless of strategic direction: + +- Add data seed step to database migration commands +- preview.yml:44-50 - Remove debug token logging +- cd.yml:44 - Remove hardcoded feature/actions branch +- ci.yml:94 - Fix empty NODE_ENV: assignment +- payment-flow.spec.ts:159 - Replace waitForTimeout(2000) with proper wait +- Standardize secret naming (DIGITALOCEAN_TOKEN vs DIGITAL_OCEAN_K8S) + +--- + +## Key Files Reference + +| Purpose | File | +| ----------------- | ---------------------------------------------------- | +| CI workflow | .github/workflows/ci.yml | +| CD workflow | .github/workflows/cd.yml | +| Preview workflow | .github/workflows/preview.yml | +| E2E tests | tests/e2e/payment-flow.spec.ts | +| E2E fixtures | tests/e2e/fixtures/auth.ts, tests/e2e/fixtures/db.ts | +| E2E seed script | scripts/seed-e2e.ts | +| Playwright config | playwright.config.ts | +| Production K8s | cluster/deployment.yaml, cluster/kustomization.yaml | +| Preview K8s | cluster/preview/deployment.yaml | +| Staging K8s (new) | cluster/staging/ (to be created) | + +--- + +## Summary: What Changes + +| Change | Effort | Impact | +| --------------------------------- | ------ | -------------------------------- | +| Create release branch | Low | Enables promotion model | +| Update CD for branch-based deploy | Medium | Staging + production separation | +| Create staging k8s manifests | Medium | Persistent staging environment | +| Create staging Discord app | Low | RC validation with real users | +| Expand seed data | Low | Better test coverage | +| Add synthetic data generation | Medium | Scalable, realistic staging data | +| Unify test constants | Low | Reduce duplication bugs | +| Tactical cleanup | Low | Reduce footguns | diff --git a/notes/2025-12-15_2_infrastructure-state.md 
b/notes/2025-12-15_2_infrastructure-state.md new file mode 100644 index 0000000..6db8afe --- /dev/null +++ b/notes/2025-12-15_2_infrastructure-state.md @@ -0,0 +1,108 @@ +# Infrastructure State + +Consolidated view of CI/CD, preview environments, and fixture systems. + +## Current Architecture + +``` +PR → main (production) + ↓ + - CI runs lint, typecheck, vitest on PR branches + - CD builds image on all pushes, deploys production on main + - Preview deploys on non-main branches with open PRs +``` + +**Preview Environments**: Per-PR at `https://<pr-number>.euno-staging.reactiflux.com` + +## Workflows + +| Workflow | Trigger | Purpose | +| ----------- | ------------------------------ | -------------------------------------------------------------- | +| ci.yml | push (non-main), workflow_call | lint, typecheck, vitest; e2e when called with preview_url | +| cd.yml | push (all) | Build image; deploy production (main) or preview (PR branches) | +| preview.yml | PR closed/labeled | Cleanup preview resources only | +
+Preview deploy and E2E orchestration consolidated into cd.yml. + +## Fixture Generation System + +``` +scripts/fixtures/ +├── constants.ts # Single source of truth for test IDs +├── integrity-checks.ts # Data consistency validation +├── seed-fixtures.ts # Known Discord fixture data +├── generate-historical.ts # Historical record generation (seeded random) +├── db.ts # Database operations +├── index.ts # Re-exports +└── run.ts # Orchestrator +``` + +- `npm run seed:fixtures` - runs fixture setup +- `npm run start:staging` - migrations + fixtures + bot +- Uses `node --experimental-strip-types` (not tsx) +- Uses `onConflict(doNothing)` for idempotency + +## Known Issues + +### Stripe Webhooks Don't Work in Previews + +Each preview has unique URL but Stripe only has one registered webhook endpoint. Payment flow E2E tests in preview environments won't receive webhooks. + +**Status**: Architectural limitation. 
Options: + +- Mock Stripe in previews +- Document payment testing as local-only +- Shared staging webhook with routing + +### waitForTimeout Anti-Pattern + +`tests/e2e/payment-flow.spec.ts:159` uses hardcoded `waitForTimeout(2000)`. + +**Fix**: Replace with condition-based wait. + +### GH Pages Permissions May Be Missing + +`ci.yml:118-125` uses peaceiris/actions-gh-pages but doesn't declare `contents: write` permission. + +**Impact**: Report deploy may fail with permission errors. + +## Preview Environment Setup (Manual Steps) + +Required before workflow functions: + +1. **DNS**: Wildcard A record `*.euno-staging.reactiflux.com → <ingress load balancer IP>` +2. **Namespace**: `kubectl create namespace staging` +3. **Discord App**: Create staging app with OAuth redirect +4. **Secret**: modbot-staging-env created automatically by cd.yml on first preview deploy + +TLS handled via cert-manager HTTP-01 per-preview. + +## Proposed Future Architecture (Not Implemented) + +Main as staging with promotion to release branch: + +``` +PR → main (staging) → release (production) +``` + +Benefits: RC validation with real users, explicit promotion, staging Discord bot. + +See `2025-12-14_1_ci-cd-architecture-review.md` for full proposal. + +## Key Files + +| Purpose | File | +| ------------------- | ------------------------------- | +| CI | .github/workflows/ci.yml | +| CD + Preview deploy | .github/workflows/cd.yml | +| Preview cleanup | .github/workflows/preview.yml | +| Production K8s | cluster/deployment.yaml | +| Preview K8s | cluster/preview/deployment.yaml | +| Fixtures | scripts/fixtures/ | +| E2E tests | tests/e2e/payment-flow.spec.ts | + +## Open Questions + +1. Is payment flow testing in preview environments required, or local-only? +2. Should preview environments skip Discord integration entirely? +3. What's the intended behavior if E2E tests fail - should it block the PR? 
diff --git a/package.json b/package.json index b290540..f4adefe 100644 --- a/package.json +++ b/package.json @@ -23,6 +23,9 @@ "": "", "start:migrate": "kysely --no-outdated-check migrate:list; kysely --no-outdated-check migrate:latest", "start:bot": "node ./index.prod.js", + "start:staging": "npm run start:migrate; npm run seed:fixtures; npm run start:bot", + "seed:e2e": "node --experimental-strip-types scripts/seed-e2e.ts", + "seed:fixtures": "node --experimental-strip-types scripts/fixtures/run.ts", "prepare": "husky || true", "typecheck": "react-router typegen && tsc -b", "build:css": "npm run generate:css -- --minify", @@ -102,7 +105,6 @@ "prettier-plugin-tailwindcss": "^0.6.9", "tailwindcss": "^3.0.23", "tsconfig-paths": "^3.14.1", - "tsx": "^4.19.2", "typescript": "5.6.3", "typescript-eslint": "^8.18.2", "vite": "^5.4.11", diff --git a/playwright.config.ts b/playwright.config.ts index 81db39b..26efbf5 100644 --- a/playwright.config.ts +++ b/playwright.config.ts @@ -1,5 +1,9 @@ import { defineConfig, devices } from "@playwright/test"; +// Check if running against a remote preview +const isRemote = !!process.env.E2E_PREVIEW_URL; +const baseURL = process.env.E2E_PREVIEW_URL ?? "http://localhost:3000"; + /** * See https://playwright.dev/docs/test-configuration. */ @@ -17,7 +21,7 @@ export default defineConfig({ ["json", { outputFile: "test-results/results.json" }], ], use: { - baseURL: "http://localhost:3000", + baseURL, screenshot: "only-on-failure", video: "on", }, @@ -29,10 +33,13 @@ export default defineConfig({ }, ], - webServer: { - command: "npm run build; npm start", - url: "http://localhost:3000", - reuseExistingServer: !process.env.CI, - timeout: 120000, - }, + // Only start local server if not running against remote preview + webServer: isRemote + ? 
undefined + : { + command: "npm run build; npm start", + url: "http://localhost:3000", + reuseExistingServer: !process.env.CI, + timeout: 120000, + }, }); diff --git a/scripts/dump-db-to-json.ts b/scripts/dump-db-to-json.ts new file mode 100644 index 0000000..499f15a --- /dev/null +++ b/scripts/dump-db-to-json.ts @@ -0,0 +1,160 @@ +/** + * Dumps the entire SQLite database to a JSON file. + * Usage: npx tsx scripts/dump-db-to-json.ts [output-file] [--sample] + * + * Options: + * --sample Sample large tables (message_stats limited to 1000 rows) + * --exclude=table1,table2 Exclude specific tables + * + * Default output: ./db-dump.json + */ + +import { writeFileSync } from "fs"; +import SQLite from "better-sqlite3"; + +const DATABASE_URL = process.env.DATABASE_URL ?? "./mod-bot.sqlite3"; + +// Parse args +const args = process.argv.slice(2); +const sampleMode = args.includes("--sample"); +const excludeArg = args.find((a) => a.startsWith("--exclude=")); +const excludeTables = excludeArg + ? excludeArg.replace("--exclude=", "").split(",") + : []; +const outputFile = args.find((a) => !a.startsWith("--")) ?? 
"./db-dump.json"; + +// Tables with row limits in sample mode +const SAMPLE_LIMITS: Record = { + message_stats: 1000, + sessions: 100, + users: 100, + guilds: 100, + guild_subscriptions: 100, +}; + +interface TableDump { + name: string; + count: number; + totalCount?: number; // Only present if sampled + rows: Record[]; +} + +interface DatabaseDump { + exportedAt: string; + databasePath: string; + sampled: boolean; + excludedTables: string[]; + tables: TableDump[]; +} + +function dumpDatabase(): DatabaseDump { + const db = new SQLite(DATABASE_URL, { readonly: true }); + + // Get all table names (excluding sqlite internals and kysely migration tables) + const tables = db + .prepare( + ` + SELECT name FROM sqlite_master + WHERE type='table' + AND name NOT LIKE 'sqlite_%' + AND name NOT LIKE 'kysely_%' + ORDER BY name + `, + ) + .all() as { name: string }[]; + + const tableDumps: TableDump[] = []; + + for (const { name } of tables) { + if (excludeTables.includes(name)) { + console.log(` Skipping ${name} (excluded)`); + continue; + } + + // Get total count + const countResult = db + .prepare(`SELECT COUNT(*) as count FROM "${name}"`) + .get() as { count: number }; + const totalCount = countResult.count; + + // Determine limit + const limit = sampleMode ? SAMPLE_LIMITS[name] : undefined; + const query = limit + ? 
`SELECT * FROM "${name}" ORDER BY ROWID DESC LIMIT ${limit}` + : `SELECT * FROM "${name}"`; + + const rows = db.prepare(query).all() as Record[]; + + // Parse JSON columns where applicable + const parsedRows = rows.map((row) => { + const parsed: Record = {}; + for (const [key, value] of Object.entries(row)) { + if (typeof value === "string") { + // Try to parse as JSON if it looks like JSON + if ( + (value.startsWith("{") && value.endsWith("}")) || + (value.startsWith("[") && value.endsWith("]")) + ) { + try { + parsed[key] = JSON.parse(value); + } catch { + parsed[key] = value; + } + } else { + parsed[key] = value; + } + } else { + parsed[key] = value; + } + } + return parsed; + }); + + const tableDump: TableDump = { + name, + count: parsedRows.length, + rows: parsedRows, + }; + + // Add totalCount if we sampled + if (limit && totalCount > limit) { + tableDump.totalCount = totalCount; + } + + tableDumps.push(tableDump); + } + + db.close(); + + return { + exportedAt: new Date().toISOString(), + databasePath: DATABASE_URL, + sampled: sampleMode, + excludedTables: excludeTables, + tables: tableDumps, + }; +} + +function main() { + console.log(`Dumping database: ${DATABASE_URL}`); + if (sampleMode) console.log("Sample mode: ON (limiting large tables)"); + if (excludeTables.length) + console.log(`Excluding: ${excludeTables.join(", ")}`); + + const dump = dumpDatabase(); + + // Summary + console.log("\nTable summary:"); + for (const table of dump.tables) { + const sampledNote = table.totalCount + ? 
` (sampled from ${table.totalCount})` + : ""; + console.log(` ${table.name}: ${table.count} rows${sampledNote}`); + } + + // Write to file + writeFileSync(outputFile, JSON.stringify(dump, null, 2)); + console.log(`\nWritten to: ${outputFile}`); +} + +main(); diff --git a/scripts/fixtures/constants.ts b/scripts/fixtures/constants.ts new file mode 100644 index 0000000..38cc4b9 --- /dev/null +++ b/scripts/fixtures/constants.ts @@ -0,0 +1,53 @@ +/** + * Shared fixture constants for e2e tests and staging environments. + * Single source of truth for test data IDs across the codebase. + */ + +// Discord-format snowflake IDs for realism +// These are fake but follow the snowflake format +export const FIXTURE_IDS = { + // Users + users: { + testUser: { + id: "3486c000-6af3-45db-81f4-98bcff8806c9", + externalId: "103525876892708864", + email: "vcarl@example.com", + }, + }, + + // Guilds + guilds: { + free: { + id: "614601782152265748", + name: "Test Server", + }, + paid: { + id: "1442358269497577665", + name: "Euno", + }, + }, + + // Channels (for historical data) + channels: { + testing: "1442382154511155401", + }, + + // Sessions + sessions: { + testSession: "test-session-e2e", + }, + + // Stripe (test mode) + stripe: { + customerId: "cus_test_e2e", + subscriptionId: "sub_test_e2e", + }, +} as const; + +// Legacy aliases for backwards compatibility with existing tests +export const TEST_USER_ID = FIXTURE_IDS.users.testUser.id; +export const TEST_USER_EXTERNAL_ID = FIXTURE_IDS.users.testUser.externalId; +export const TEST_USER_EMAIL = FIXTURE_IDS.users.testUser.email; +export const TEST_SESSION_ID = FIXTURE_IDS.sessions.testSession; +export const TEST_GUILD_FREE_ID = FIXTURE_IDS.guilds.free.id; +export const TEST_GUILD_PAID_ID = FIXTURE_IDS.guilds.paid.id; diff --git a/scripts/fixtures/db.ts b/scripts/fixtures/db.ts new file mode 100644 index 0000000..33521b3 --- /dev/null +++ b/scripts/fixtures/db.ts @@ -0,0 +1,168 @@ +/** + * Standalone database connection for fixture 
scripts. + * This module duplicates the app's db.server.ts to avoid dependency on the app's module system. + * This allows scripts to run with `node --experimental-strip-types`. + */ + +import SQLite from "better-sqlite3"; +import { + Kysely, + ParseJSONResultsPlugin, + SqliteDialect, + type ColumnType, +} from "kysely"; + +// Database URL from environment +const databaseUrl = process.env.DATABASE_URL ?? "./mod-bot.sqlite3"; + +// Type definitions (copied from app/db.d.ts) +type Generated = + T extends ColumnType + ? ColumnType + : ColumnType; + +interface ChannelInfo { + category: string | null; + id: string | null; + name: string | null; +} + +interface EscalationRecords { + escalation_id: string; + id: string; + vote: string; + voted_at: Generated; + voter_id: string; +} + +interface Escalations { + created_at: Generated; + flags: string; + guild_id: string; + id: string; + initiator_id: string; + reported_user_id: string; + resolution: string | null; + resolved_at: string | null; + thread_id: string; + vote_message_id: string; +} + +interface Guilds { + id: string | null; + settings: string | null; +} + +interface GuildSubscriptions { + created_at: Generated; + current_period_end: string | null; + guild_id: string | null; + product_tier: Generated; + status: Generated; + stripe_customer_id: string | null; + stripe_subscription_id: string | null; + updated_at: Generated; +} + +interface HoneypotConfig { + channel_id: string; + guild_id: string; +} + +interface MessageStats { + author_id: string; + channel_category: string | null; + channel_id: string; + char_count: number; + code_stats: Generated; + guild_id: string; + link_stats: Generated; + message_id: string | null; + react_count: Generated; + recipient_id: string | null; + sent_at: number; + word_count: number; +} + +interface ReactjiChannelerConfig { + channel_id: string; + configured_by_id: string; + created_at: Generated; + emoji: string; + guild_id: string; + id: string; + threshold: Generated; +} + 
+interface ReportedMessages { + created_at: Generated; + deleted_at: string | null; + extra: string | null; + guild_id: string; + id: string; + log_channel_id: string; + log_message_id: string; + reason: string; + reported_channel_id: string; + reported_message_id: string; + reported_user_id: string; + staff_id: string | null; + staff_username: string | null; +} + +interface Sessions { + data: string | null; + expires: string | null; + id: string | null; +} + +interface TicketsConfig { + channel_id: string | null; + message_id: string; + role_id: string; +} + +interface Users { + authProvider: Generated; + email: string | null; + externalId: string; + id: string; +} + +interface UserThreads { + created_at: Generated; + guild_id: string; + thread_id: string; + user_id: string; +} + +interface DB { + channel_info: ChannelInfo; + escalation_records: EscalationRecords; + escalations: Escalations; + guild_subscriptions: GuildSubscriptions; + guilds: Guilds; + honeypot_config: HoneypotConfig; + message_stats: MessageStats; + reactji_channeler_config: ReactjiChannelerConfig; + reported_messages: ReportedMessages; + sessions: Sessions; + tickets_config: TicketsConfig; + user_threads: UserThreads; + users: Users; +} + +// Create database connection +console.log(`[fixtures] Connecting to database at ${databaseUrl}`); + +const dialect = new SqliteDialect({ + database: new SQLite(databaseUrl), +}); + +const db = new Kysely({ + dialect, + plugins: [new ParseJSONResultsPlugin()], +}); + +export default db; +export type { DB }; diff --git a/scripts/fixtures/generate-historical.ts b/scripts/fixtures/generate-historical.ts new file mode 100644 index 0000000..a392d3e --- /dev/null +++ b/scripts/fixtures/generate-historical.ts @@ -0,0 +1,168 @@ +/** + * Generates historical records for non-production environments. + * Creates realistic-looking message_stats, reported_messages, and escalations. 
+ */ + +import { randomUUID } from "crypto"; + +import { FIXTURE_IDS } from "./constants.ts"; +import db from "./db.ts"; + +const DAYS_OF_DATA = 7; +const MESSAGES_PER_DAY = 50; +const REPORTS_TOTAL = 5; +const ESCALATIONS_TOTAL = 2; + +// Simple seeded random for reproducibility +function seededRandom(seed: number): () => number { + return () => { + seed = (seed * 1103515245 + 12345) & 0x7fffffff; + return seed / 0x7fffffff; + }; +} + +export async function generateHistoricalData(): Promise { + const random = seededRandom(42); // Fixed seed for reproducibility + const now = Date.now(); + const guildId = FIXTURE_IDS.guilds.free.id; + const channels = Object.values(FIXTURE_IDS.channels); + + // Generate fake user IDs for variety + const fakeUserIds = Array.from( + { length: 20 }, + (_, i) => `${200000000000000000 + i}`, + ); + + // 1. Generate message_stats (7 days, ~50/day) + console.log(" Generating message_stats..."); + const messageStats: { + message_id: string; + author_id: string; + guild_id: string; + channel_id: string; + channel_category: string; + recipient_id: string | null; + char_count: number; + word_count: number; + react_count: number; + sent_at: number; + code_stats: string; + link_stats: string; + }[] = []; + + for (let day = 0; day < DAYS_OF_DATA; day++) { + const dayStart = now - (day + 1) * 24 * 60 * 60 * 1000; + + for (let i = 0; i < MESSAGES_PER_DAY; i++) { + const sentAt = dayStart + Math.floor(random() * 24 * 60 * 60 * 1000); + const authorId = fakeUserIds[Math.floor(random() * fakeUserIds.length)]; + const channelId = channels[Math.floor(random() * channels.length)]; + const wordCount = Math.floor(random() * 100) + 1; + + messageStats.push({ + message_id: `${1000000000000000000 + day * 1000 + i}`, + author_id: authorId, + guild_id: guildId, + channel_id: channelId, + channel_category: channelId.includes("0002") ? 
"Help" : "General", + recipient_id: null, + char_count: wordCount * 5, + word_count: wordCount, + react_count: Math.floor(random() * 5), + sent_at: sentAt, + code_stats: "[]", + link_stats: "[]", + }); + } + } + + // Batch insert + for (let i = 0; i < messageStats.length; i += 100) { + await db + .insertInto("message_stats") + .values(messageStats.slice(i, i + 100)) + .onConflict((oc) => oc.doNothing()) + .execute(); + } + console.log(` ${messageStats.length} message records`); + + // 2. Generate reported_messages + console.log(" Generating reported_messages..."); + const reasons = ["anonReport", "track", "spam"] as const; + + for (let i = 0; i < REPORTS_TOTAL; i++) { + const reportedUserId = + fakeUserIds[Math.floor(random() * fakeUserIds.length)]; + const daysAgo = Math.floor(random() * DAYS_OF_DATA); + + await db + .insertInto("reported_messages") + .values({ + id: randomUUID(), + reported_message_id: `${1100000000000000000 + i}`, + reported_channel_id: channels[Math.floor(random() * channels.length)], + reported_user_id: reportedUserId, + guild_id: guildId, + log_message_id: `${1200000000000000000 + i}`, + log_channel_id: FIXTURE_IDS.channels.testing, + reason: reasons[Math.floor(random() * reasons.length)], + staff_id: null, + staff_username: null, + extra: null, + created_at: new Date(now - daysAgo * 24 * 60 * 60 * 1000).toISOString(), + deleted_at: null, + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + } + console.log(` ${REPORTS_TOTAL} report records`); + + // 3. 
Generate escalations with votes + console.log(" Generating escalations..."); + + for (let i = 0; i < ESCALATIONS_TOTAL; i++) { + const escalationId = randomUUID(); + const reportedUserId = + fakeUserIds[Math.floor(random() * fakeUserIds.length)]; + const initiatorId = fakeUserIds[Math.floor(random() * fakeUserIds.length)]; + const daysAgo = Math.floor(random() * DAYS_OF_DATA); + const isResolved = random() > 0.5; + + await db + .insertInto("escalations") + .values({ + id: escalationId, + guild_id: guildId, + thread_id: `${1300000000000000000 + i}`, + vote_message_id: `${1400000000000000000 + i}`, + reported_user_id: reportedUserId, + initiator_id: initiatorId, + flags: JSON.stringify({ quorum: 3 }), + created_at: new Date(now - daysAgo * 24 * 60 * 60 * 1000).toISOString(), + resolved_at: isResolved + ? new Date(now - (daysAgo - 1) * 24 * 60 * 60 * 1000).toISOString() + : null, + resolution: isResolved ? "ban" : null, + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + // Add 2-4 votes per escalation + const voteCount = 2 + Math.floor(random() * 3); + for (let v = 0; v < voteCount; v++) { + const voterId = fakeUserIds[Math.floor(random() * fakeUserIds.length)]; + await db + .insertInto("escalation_records") + .values({ + id: randomUUID(), + escalation_id: escalationId, + voter_id: voterId, + vote: ["ban", "kick", "warn"][Math.floor(random() * 3)], + voted_at: new Date(now - daysAgo * 24 * 60 * 60 * 1000).toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + } + } + console.log(` ${ESCALATIONS_TOTAL} escalations with votes`); +} diff --git a/scripts/fixtures/index.ts b/scripts/fixtures/index.ts new file mode 100644 index 0000000..4e74fce --- /dev/null +++ b/scripts/fixtures/index.ts @@ -0,0 +1,8 @@ +/** + * Re-exports for fixture module. 
+ */ + +export * from "./constants.ts"; +export { runIntegrityChecks } from "./integrity-checks.ts"; +export { seedFixtures } from "./seed-fixtures.ts"; +export { generateHistoricalData } from "./generate-historical.ts"; diff --git a/scripts/fixtures/integrity-checks.ts b/scripts/fixtures/integrity-checks.ts new file mode 100644 index 0000000..d027eb3 --- /dev/null +++ b/scripts/fixtures/integrity-checks.ts @@ -0,0 +1,134 @@ +/** + * Data consistency validation for non-production environments. + * Identifies orphaned records, invalid values, and other data issues. + */ + +import db from "./db.ts"; + +interface IntegrityIssue { + table: string; + issue: string; + count: number; + details?: string; +} + +export async function runIntegrityChecks(): Promise { + const issues: IntegrityIssue[] = []; + + // 1. Check for orphaned guild_subscriptions (no parent guild) + const orphanedSubscriptions = await db + .selectFrom("guild_subscriptions") + .leftJoin("guilds", "guilds.id", "guild_subscriptions.guild_id") + .where("guilds.id", "is", null) + .select("guild_subscriptions.guild_id") + .execute(); + + if (orphanedSubscriptions.length > 0) { + issues.push({ + table: "guild_subscriptions", + issue: "Orphaned subscriptions (no parent guild)", + count: orphanedSubscriptions.length, + details: orphanedSubscriptions.map((s) => s.guild_id).join(", "), + }); + } + + // 2. Check for invalid product_tier values + const invalidTiers = await db + .selectFrom("guild_subscriptions") + .where("product_tier", "not in", ["free", "paid"]) + .select(["guild_id", "product_tier"]) + .execute(); + + if (invalidTiers.length > 0) { + issues.push({ + table: "guild_subscriptions", + issue: "Invalid product_tier values", + count: invalidTiers.length, + details: invalidTiers + .map((t) => `${t.guild_id}: ${t.product_tier}`) + .join(", "), + }); + } + + // 3. 
Check for orphaned escalation_records (no parent escalation) + const orphanedVotes = await db + .selectFrom("escalation_records") + .leftJoin( + "escalations", + "escalations.id", + "escalation_records.escalation_id", + ) + .where("escalations.id", "is", null) + .select("escalation_records.escalation_id") + .execute(); + + if (orphanedVotes.length > 0) { + issues.push({ + table: "escalation_records", + issue: "Orphaned votes (no parent escalation)", + count: orphanedVotes.length, + }); + } + + // 4. Check for expired sessions that should be cleaned + const expiredSessions = await db + .selectFrom("sessions") + .where("expires", "<", new Date().toISOString()) + .select(db.fn.count("id").as("count")) + .executeTakeFirst(); + + if (expiredSessions && Number(expiredSessions.count) > 0) { + issues.push({ + table: "sessions", + issue: "Expired sessions (should be cleaned)", + count: Number(expiredSessions.count), + }); + } + + // 5. Check for invalid reported_messages reason values + const validReasons = ["anonReport", "track", "modResolution", "spam"]; + const invalidReasons = await db + .selectFrom("reported_messages") + .where("reason", "not in", validReasons) + .select(["id", "reason"]) + .execute(); + + if (invalidReasons.length > 0) { + issues.push({ + table: "reported_messages", + issue: "Invalid reason values", + count: invalidReasons.length, + details: [...new Set(invalidReasons.map((r) => r.reason))].join(", "), + }); + } + + // 6. 
Check for message_stats with future timestamps + const futureMessages = await db + .selectFrom("message_stats") + .where("sent_at", ">", Date.now()) + .select(db.fn.count("message_id").as("count")) + .executeTakeFirst(); + + if (futureMessages && Number(futureMessages.count) > 0) { + issues.push({ + table: "message_stats", + issue: "Messages with future timestamps", + count: Number(futureMessages.count), + }); + } + + // Log results + if (issues.length === 0) { + console.log(" Integrity checks passed - no issues found"); + } else { + console.warn(` Found ${issues.length} integrity issues:`); + for (const issue of issues) { + console.warn( + ` - ${issue.table}: ${issue.issue} (${issue.count} records)`, + ); + if (issue.details) { + console.warn(` Details: ${issue.details}`); + } + } + } +} diff --git a/scripts/fixtures/run.ts b/scripts/fixtures/run.ts new file mode 100644 index 0000000..77a2135 --- /dev/null +++ b/scripts/fixtures/run.ts @@ -0,0 +1,41 @@ +/** + * Fixture runner for non-production environments. + * + * Runs: + * 1. Data integrity checks + * 2. Known fixture seeding + * 3. Historical data generation + * + * Usage: npx tsx scripts/fixtures/run.ts + * + * This script is designed to run _after_ migrations. In staging: + * npm run start:migrate && npm run seed:fixtures + */ + +import "dotenv/config"; + +import { generateHistoricalData } from "./generate-historical.ts"; +import { runIntegrityChecks } from "./integrity-checks.ts"; +import { seedFixtures } from "./seed-fixtures.ts"; + +async function run() { + console.log("Running fixture setup...\n"); + + console.log("1. Running integrity checks..."); + await runIntegrityChecks(); + + console.log("\n2. Seeding fixture data..."); + await seedFixtures(); + + console.log("\n3. 
Generating historical data..."); + await generateHistoricalData(); + + console.log("\nFixture setup complete"); +} + +run() + .then(() => process.exit(0)) + .catch((error) => { + console.error("Fixture setup failed:", error); + process.exit(1); + }); diff --git a/scripts/fixtures/seed-fixtures.ts b/scripts/fixtures/seed-fixtures.ts new file mode 100644 index 0000000..564b457 --- /dev/null +++ b/scripts/fixtures/seed-fixtures.ts @@ -0,0 +1,93 @@ +/** + * Seeds known fixture data for non-production environments. + * Creates deterministic test users, guilds, sessions, and channel info. + */ + +import { FIXTURE_IDS } from "./constants.ts"; +import db from "./db.ts"; + +export async function seedFixtures(): Promise { + // 1. Seed test users + for (const [key, user] of Object.entries(FIXTURE_IDS.users)) { + await db + .insertInto("users") + .values({ + id: user.id, + externalId: user.externalId, + email: user.email, + authProvider: "discord", + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + console.log(` User: ${key}`); + } + + // 2. Seed test session + await db + .insertInto("sessions") + .values({ + id: FIXTURE_IDS.sessions.testSession, + data: JSON.stringify({ + userId: FIXTURE_IDS.users.testUser.id, + discordToken: { + access_token: "fixture_access_token", + token_type: "Bearer", + expires_at: new Date( + Date.now() + 7 * 24 * 60 * 60 * 1000, + ).toISOString(), + scope: "identify email guilds guilds.members.read", + }, + }), + expires: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + console.log(" Session: testSession"); + + // 3. 
Seed guilds with subscriptions + for (const [key, guild] of Object.entries(FIXTURE_IDS.guilds)) { + await db + .insertInto("guilds") + .values({ + id: guild.id, + settings: null, + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + const isPaid = key === "paid"; + await db + .insertInto("guild_subscriptions") + .values({ + guild_id: guild.id, + product_tier: isPaid ? "paid" : "free", + status: "active", + stripe_customer_id: isPaid ? FIXTURE_IDS.stripe.customerId : null, + stripe_subscription_id: isPaid + ? FIXTURE_IDS.stripe.subscriptionId + : null, + current_period_end: isPaid + ? new Date(Date.now() + 30 * 24 * 60 * 60 * 1000).toISOString() + : null, + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + console.log(` Guild: ${key} (${isPaid ? "paid" : "free"})`); + } + + // 4. Seed channel info for reference + for (const [name, id] of Object.entries(FIXTURE_IDS.channels)) { + await db + .insertInto("channel_info") + .values({ + id, + name: `#${name.replace(/([A-Z])/g, "-$1").toLowerCase()}`, + category: name.startsWith("help") ? "Help" : "General", + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + } + console.log(" Channels: seeded"); +} diff --git a/scripts/seed-e2e.ts b/scripts/seed-e2e.ts new file mode 100644 index 0000000..5c45399 --- /dev/null +++ b/scripts/seed-e2e.ts @@ -0,0 +1,122 @@ +/** + * Seed script for e2e tests in staging environments. + * Creates deterministic test data that e2e tests can reference. + * + * Run via: npm run seed:e2e + * Called automatically in staging via: npm run start:staging + * + * @deprecated Use scripts/fixtures/run.ts for full fixture setup. + * This script is kept for backwards compatibility. 
+ */ + +import "dotenv/config"; + +import { + TEST_GUILD_FREE_ID, + TEST_GUILD_PAID_ID, + TEST_SESSION_ID, + TEST_USER_EXTERNAL_ID, + TEST_USER_ID, +} from "./fixtures/constants.ts"; +import db from "./fixtures/db.ts"; + +// Re-export for backwards compatibility +export { + TEST_GUILD_FREE_ID, + TEST_GUILD_PAID_ID, + TEST_SESSION_ID, + TEST_USER_EXTERNAL_ID, + TEST_USER_ID, +}; + +async function seed() { + console.log("Seeding e2e test data..."); + + // Create test user + await db + .insertInto("users") + .values({ + id: TEST_USER_ID, + externalId: TEST_USER_EXTERNAL_ID, + email: "e2e-test@example.com", + authProvider: "discord", + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + // Create session for test user + await db + .insertInto("sessions") + .values({ + id: TEST_SESSION_ID, + data: JSON.stringify({ + userId: TEST_USER_ID, + discordToken: { + access_token: "test_access_token", + token_type: "Bearer", + expires_at: new Date( + Date.now() + 7 * 24 * 60 * 60 * 1000, + ).toISOString(), + scope: "identify email guilds guilds.members.read", + }, + }), + expires: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + // Create free guild with subscription + await db + .insertInto("guilds") + .values({ id: TEST_GUILD_FREE_ID, settings: null }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + await db + .insertInto("guild_subscriptions") + .values({ + guild_id: TEST_GUILD_FREE_ID, + product_tier: "free", + status: "active", + stripe_customer_id: null, + stripe_subscription_id: null, + current_period_end: null, + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + // Create paid guild with subscription + await db + .insertInto("guilds") + .values({ id: TEST_GUILD_PAID_ID, settings: null }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + await db + .insertInto("guild_subscriptions") + 
.values({ + guild_id: TEST_GUILD_PAID_ID, + product_tier: "paid", + status: "active", + stripe_customer_id: "cus_test_e2e", + stripe_subscription_id: "sub_test_e2e", + current_period_end: new Date( + Date.now() + 30 * 24 * 60 * 60 * 1000, + ).toISOString(), + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + }) + .onConflict((oc) => oc.doNothing()) + .execute(); + + console.log("E2E test data seeded successfully."); +} + +seed() + .then(() => process.exit(0)) + .catch((error) => { + console.error("Failed to seed e2e data:", error); + process.exit(1); + }); diff --git a/tests/e2e/fixtures/auth.ts b/tests/e2e/fixtures/auth.ts index 00b092c..e372695 100644 --- a/tests/e2e/fixtures/auth.ts +++ b/tests/e2e/fixtures/auth.ts @@ -2,7 +2,17 @@ import { test as base, type Cookie, type Page } from "@playwright/test"; -import { DbFixture, type TestUser } from "./db"; +import { DbFixture, isRemote, type TestUser } from "./db"; + +// Get domain from preview URL or default to localhost +function getCookieDomain(): string { + const previewUrl = process.env.E2E_PREVIEW_URL; + if (previewUrl) { + const url = new URL(previewUrl); + return url.hostname; + } + return "localhost"; +} /** * Extended test fixture with authentication support @@ -36,16 +46,18 @@ export const test = base.extend<{ "base64", ); + const domain = getCookieDomain(); + // Create cookies that match what the app expects const cookies: Cookie[] = [ { name: "__session", value: encodedSessionId, - domain: "localhost", + domain, path: "/", expires: Math.floor(Date.now() / 1000) + 7 * 24 * 60 * 60, // 7 days from now httpOnly: true, - secure: false, + secure: isRemote, // Secure cookies for remote (HTTPS) sameSite: "Lax", }, ]; diff --git a/tests/e2e/fixtures/db.ts b/tests/e2e/fixtures/db.ts index 26bd3d2..669ac51 100644 --- a/tests/e2e/fixtures/db.ts +++ b/tests/e2e/fixtures/db.ts @@ -4,16 +4,25 @@ import { Kysely, SqliteDialect } from "kysely"; import type { DB } from "#~/db"; +import { 
FIXTURE_IDS } from "../../../scripts/fixtures/constants"; + +// Check if we're running against a remote preview +export const isRemote = !!process.env.E2E_PREVIEW_URL; + const DATABASE_URL = process.env.DATABASE_URL ?? "./mod-bot.sqlite3"; -// Create a separate db instance for tests -const testDialect = new SqliteDialect({ - database: new SQLite(DATABASE_URL), -}); +// Only create local db connection if not in remote mode +const testDialect = isRemote + ? null + : new SqliteDialect({ + database: new SQLite(DATABASE_URL), + }); -const testDb = new Kysely({ - dialect: testDialect, -}); +const testDb = isRemote + ? null + : new Kysely({ + dialect: testDialect!, + }); export interface TestGuild { id: string; @@ -40,6 +49,7 @@ export interface TestUser { export class DbFixture { /** * Create a test guild with optional subscription + * In remote mode, returns pre-seeded guild data */ async createGuild(options?: { id?: string; @@ -49,10 +59,29 @@ export class DbFixture { stripeSubscriptionId?: string; currentPeriodEnd?: string; }): Promise { + // In remote mode, return pre-seeded guild + if (isRemote) { + const isPaid = options?.productTier === "paid"; + const guild = isPaid ? FIXTURE_IDS.guilds.paid : FIXTURE_IDS.guilds.free; + return { + id: guild.id, + subscription: { + product_tier: isPaid ? "paid" : "free", + status: "active", + stripe_customer_id: isPaid + ? FIXTURE_IDS.stripe.customerId + : undefined, + stripe_subscription_id: isPaid + ? FIXTURE_IDS.stripe.subscriptionId + : undefined, + }, + }; + } + const guildId = options?.id ?? randomUUID(); // Create guild record - await testDb + await testDb! .insertInto("guilds") .values({ id: guildId, @@ -62,7 +91,7 @@ export class DbFixture { // Create subscription if tier is provided if (options?.productTier) { - await testDb + await testDb! 
.insertInto("guild_subscriptions") .values({ guild_id: guildId, @@ -93,17 +122,27 @@ export class DbFixture { /** * Create a test user + * In remote mode, returns pre-seeded user data */ async createUser(options?: { id?: string; externalId?: string; email?: string; }): Promise { + // In remote mode, return pre-seeded user + if (isRemote) { + return { + id: FIXTURE_IDS.users.testUser.id, + externalId: FIXTURE_IDS.users.testUser.externalId, + email: FIXTURE_IDS.users.testUser.email, + }; + } + const userId = options?.id ?? randomUUID(); const externalId = options?.externalId ?? `discord_${randomUUID()}`; const email = options?.email ?? `test_${randomUUID()}@example.com`; - await testDb + await testDb! .insertInto("users") .values({ id: userId, @@ -118,11 +157,17 @@ export class DbFixture { /** * Create a test session for a user + * In remote mode, returns pre-seeded session ID */ async createSession( userId: string, discordToken?: Record, ): Promise { + // In remote mode, return pre-seeded session + if (isRemote) { + return FIXTURE_IDS.sessions.testSession; + } + const sessionId = randomUUID(); const sessionData = { @@ -137,7 +182,7 @@ export class DbFixture { }, }; - await testDb + await testDb! .insertInto("sessions") .values({ id: sessionId, @@ -151,9 +196,31 @@ export class DbFixture { /** * Get subscription for a guild + * In remote mode, returns seeded subscription data */ async getGuildSubscription(guildId: string) { - return await testDb + if (isRemote) { + // Return seeded data based on guild ID + if (guildId === FIXTURE_IDS.guilds.free.id) { + return { + guild_id: guildId, + product_tier: "free" as const, + status: "active", + }; + } + if (guildId === FIXTURE_IDS.guilds.paid.id) { + return { + guild_id: guildId, + product_tier: "paid" as const, + status: "active", + stripe_customer_id: FIXTURE_IDS.stripe.customerId, + stripe_subscription_id: FIXTURE_IDS.stripe.subscriptionId, + }; + } + return undefined; + } + + return await testDb! 
.selectFrom("guild_subscriptions") .selectAll() .where("guild_id", "=", guildId) @@ -203,6 +270,7 @@ export class DbFixture { /** * Get database instance for custom queries + * Returns null in remote mode */ getDb() { return testDb; diff --git a/tests/e2e/payment-flow.spec.ts b/tests/e2e/payment-flow.spec.ts index 13c7cdc..c439217 100644 --- a/tests/e2e/payment-flow.spec.ts +++ b/tests/e2e/payment-flow.spec.ts @@ -153,7 +153,7 @@ test.describe("Payment Flow", () => { // Submit the payment await authenticatedPage - .getByRole("button", { name: /subscribe|pay/i }) + .getByTestId("hosted-payment-submit-button") .click(); await authenticatedPage.waitForTimeout(2000); diff --git a/tsconfig.json b/tsconfig.json index e7a83ac..14c8a75 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -26,6 +26,7 @@ }, "skipLibCheck": true, "noEmit": true, + "allowImportingTsExtensions": true, "allowJs": true, "forceConsistentCasingInFileNames": true, "verbatimModuleSyntax": true