diff --git a/Dockerfile b/Dockerfile index 2e620e8..ce150a1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,6 +37,7 @@ COPY --from=builder /app/tsconfig.json ./tsconfig.json COPY --from=builder /app/package.json ./package.json COPY --from=builder /app/node_modules ./node_modules COPY --from=builder /app/sources ./sources +COPY --from=builder /app/prisma ./prisma # Expose the port the app will run on EXPOSE 3000 diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000..b78252a --- /dev/null +++ b/deploy.sh @@ -0,0 +1,364 @@ +#!/bin/bash +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Helpers +info() { echo -e "${BLUE}$1${NC}"; } +success() { echo -e "${GREEN}✓ $1${NC}"; } +warn() { echo -e "${YELLOW}⚠ $1${NC}"; } +error() { echo -e "${RED}✗ $1${NC}"; } +prompt() { echo -en "${YELLOW}$1${NC}"; } + +# Get script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +echo "" +echo -e "${BLUE}═══════════════════════════════════════════${NC}" +echo -e "${BLUE} Happy Server Deployment Script ${NC}" +echo -e "${BLUE}═══════════════════════════════════════════${NC}" +echo "" + +# ───────────────────────────────────────────────────────────── +# Environment Detection +# ───────────────────────────────────────────────────────────── + +info "Detecting environment..." + +# Check Docker +if ! 
command -v docker &> /dev/null; then + error "Docker is not installed" + exit 1 +fi +success "Docker installed" + +# Check docker compose +if docker compose version &> /dev/null; then + COMPOSE_CMD="docker compose" + success "Docker Compose installed" +elif command -v docker-compose &> /dev/null; then + COMPOSE_CMD="docker-compose" + success "Docker Compose (standalone) installed" +else + error "Docker Compose is not installed" + exit 1 +fi + +# Check system Caddy +SYSTEM_CADDY=false +if systemctl is-active --quiet caddy 2>/dev/null; then + SYSTEM_CADDY=true + success "System Caddy detected (active)" +elif command -v caddy &> /dev/null; then + SYSTEM_CADDY=true + success "System Caddy detected (installed)" +else + warn "No system Caddy - will use Docker Caddy" +fi + +# Check existing PostgreSQL data +EXISTING_DATA=false +if docker volume inspect happy-server_postgres_data &> /dev/null; then + EXISTING_DATA=true + warn "Existing PostgreSQL data found" +else + info "No existing database data" +fi + +# Check if app is running +APP_RUNNING=false +if $COMPOSE_CMD ps 2>/dev/null | grep -q "app.*Up"; then + APP_RUNNING=true + info "App is currently running" +fi + +echo "" + +# ───────────────────────────────────────────────────────────── +# Interactive Prompts +# ───────────────────────────────────────────────────────────── + +# Domain +DEFAULT_DOMAIN="" +if [ -f /etc/caddy/Caddyfile ]; then + DEFAULT_DOMAIN=$(grep -oP '^\S+(?=\s*\{)' /etc/caddy/Caddyfile 2>/dev/null | head -1 || true) +fi +if [ -z "$DEFAULT_DOMAIN" ]; then + DEFAULT_DOMAIN=$(hostname -f 2>/dev/null || echo "localhost") +fi + +prompt "Enter domain name [$DEFAULT_DOMAIN]: " +read -r DOMAIN +DOMAIN=${DOMAIN:-$DEFAULT_DOMAIN} +echo "" + +# Existing data handling +RESET_DATA=false +if [ "$EXISTING_DATA" = true ]; then + echo "Existing database found. What would you like to do?" 
+ echo " 1) Keep existing data (default)" + echo " 2) Reset everything (WARNING: destroys all data)" + prompt "Choice [1]: " + read -r DATA_CHOICE + if [ "$DATA_CHOICE" = "2" ]; then + prompt "Are you sure? Type 'yes' to confirm: " + read -r CONFIRM + if [ "$CONFIRM" = "yes" ]; then + RESET_DATA=true + warn "Will reset all data" + else + info "Keeping existing data" + fi + fi + echo "" +fi + +# PostgreSQL password +echo "PostgreSQL password:" +echo " 1) Use default (postgres) - for development" +echo " 2) Enter custom password" +echo " 3) Generate random password" +prompt "Choice [1]: " +read -r PW_CHOICE + +case "$PW_CHOICE" in + 2) + prompt "Enter password: " + read -rs POSTGRES_PASSWORD + echo "" + ;; + 3) + POSTGRES_PASSWORD=$(openssl rand -base64 24 | tr -d '/+=' | head -c 24) + info "Generated password: $POSTGRES_PASSWORD" + ;; + *) + POSTGRES_PASSWORD="postgres" + ;; +esac +echo "" + +# ───────────────────────────────────────────────────────────── +# Create/Update .env file +# ───────────────────────────────────────────────────────────── + +info "Creating .env file..." + +cat > .env <> .gitignore + success "Added .env to .gitignore" + fi +else + echo ".env" > .gitignore + success "Created .gitignore with .env" +fi + +# ───────────────────────────────────────────────────────────── +# Docker Deployment +# ───────────────────────────────────────────────────────────── + +echo "" +info "Starting Docker deployment..." + +# Reset if requested +if [ "$RESET_DATA" = true ]; then + warn "Stopping and removing all containers and volumes..." + $COMPOSE_CMD down -v --remove-orphans 2>/dev/null || true +fi + +# Build app +info "Building app image..." +$COMPOSE_CMD build app + +# Start infrastructure +info "Starting infrastructure (postgres, redis, minio)..." +$COMPOSE_CMD up -d postgres redis minio + +# Wait for postgres to be healthy +info "Waiting for PostgreSQL to be ready..." 
+for i in {1..60}; do + if $COMPOSE_CMD exec -T postgres pg_isready -U postgres &> /dev/null; then + success "PostgreSQL is ready" + break + fi + if [ $i -eq 60 ]; then + error "PostgreSQL failed to start within 60 seconds" + exit 1 + fi + sleep 1 +done + +# Fix PostgreSQL password +info "Configuring PostgreSQL password..." +$COMPOSE_CMD exec -T postgres psql -U postgres -c "ALTER USER postgres WITH PASSWORD '$POSTGRES_PASSWORD';" > /dev/null +success "PostgreSQL password configured" + +# Wait for redis +info "Waiting for Redis..." +for i in {1..30}; do + if $COMPOSE_CMD exec -T redis redis-cli ping 2>/dev/null | grep -q PONG; then + success "Redis is ready" + break + fi + if [ $i -eq 30 ]; then + error "Redis failed to start" + exit 1 + fi + sleep 1 +done + +# Start minio-init and app +info "Starting application..." +$COMPOSE_CMD up -d + +# Wait for app to be healthy +info "Waiting for app to be ready..." +for i in {1..30}; do + if curl -sf http://127.0.0.1:3005/health 2>/dev/null | grep -q '"status":"ok"'; then + success "App is healthy" + break + fi + if [ $i -eq 30 ]; then + error "App failed to start within 30 seconds" + echo "" + warn "Checking logs..." + $COMPOSE_CMD logs app --tail 20 + exit 1 + fi + sleep 1 +done + +# ───────────────────────────────────────────────────────────── +# Caddy Configuration +# ───────────────────────────────────────────────────────────── + +echo "" +info "Configuring Caddy for HTTPS..." + +if [ "$SYSTEM_CADDY" = true ]; then + # System Caddy + CADDYFILE="/etc/caddy/Caddyfile" + + info "Updating $CADDYFILE..." 
+        sudo tee "$CADDYFILE" > /dev/null < Caddyfile </dev/null | grep -q '"status":"ok"'; then
+    success "Health endpoint: OK"
+else
+    error "Health endpoint: FAILED"
+    ERRORS=$((ERRORS + 1))  # not ((ERRORS++)): that returns status 1 when ERRORS is 0 and aborts under `set -e`
+fi
+
+# Root endpoint
+if curl -sf http://127.0.0.1:3005/ &>/dev/null; then
+    success "Root endpoint: OK"
+else
+    error "Root endpoint: FAILED"
+    ERRORS=$((ERRORS + 1))  # safe increment under `set -e` (see note above)
+fi
+
+# Socket.io
+if curl -sf "http://127.0.0.1:3005/v1/updates/?EIO=4&transport=polling" 2>/dev/null | grep -q '"sid"'; then
+    success "Socket.io: OK"
+else
+    error "Socket.io: FAILED"
+    ERRORS=$((ERRORS + 1))  # safe increment under `set -e` (see note above)
+fi
+
+# HTTPS (give Caddy time to get certificate)
+sleep 2
+if curl -sf "https://$DOMAIN/health" 2>/dev/null | grep -q '"status":"ok"'; then
+    success "HTTPS ($DOMAIN): OK"
+else
+    warn "HTTPS ($DOMAIN): Not ready yet (certificate may still be provisioning)"
+fi
+
+echo ""
+
+# ─────────────────────────────────────────────────────────────
+# Summary
+# ─────────────────────────────────────────────────────────────
+
+if [ $ERRORS -eq 0 ]; then
+    echo -e "${GREEN}═══════════════════════════════════════════${NC}"
+    echo -e "${GREEN}   Deployment Successful!                   ${NC}"
+    echo -e "${GREEN}═══════════════════════════════════════════${NC}"
+else
+    echo -e "${YELLOW}═══════════════════════════════════════════${NC}"
+    echo -e "${YELLOW}   Deployment completed with warnings       ${NC}"
+    echo -e "${YELLOW}═══════════════════════════════════════════${NC}"
+fi
+
+echo ""
+echo "Access URLs:"
+echo "  API:     https://$DOMAIN"
+echo "  Health:  https://$DOMAIN/health"
+echo "  Direct:  http://$(hostname -I | awk '{print $1}'):3005"
+echo ""
+
+if [ "$POSTGRES_PASSWORD" != "postgres" ]; then
+    echo "Credentials saved in .env file"
+    echo ""
+fi
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..1f99174
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,112 @@
+services:
+  app:
+    build: .
+    image: happy-server:local
+    ports:
+      - "3005:3005"
+      - "9091:9090"
+    environment:
+      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/handy # keep in sync with the password deploy.sh sets via ALTER USER
+      - REDIS_URL=redis://redis:6379
+      - HANDY_MASTER_SECRET=${HANDY_MASTER_SECRET:-your-super-secret-key-change-in-production} # override via .env in production
+      - PORT=3005
+      - NODE_ENV=production
+      - METRICS_ENABLED=true
+      - METRICS_PORT=9090
+      - S3_HOST=minio
+      - S3_PORT=9000
+      - S3_USE_SSL=false
+      - S3_ACCESS_KEY=minioadmin
+      - S3_SECRET_KEY=minioadmin
+      - S3_BUCKET=happy
+      - S3_PUBLIC_URL=http://localhost:9000/happy
+      - DANGEROUSLY_LOG_TO_SERVER_FOR_AI_AUTO_DEBUGGING=true
+    volumes:
+      - ./logs:/app/.logs
+    depends_on:
+      postgres-init:
+        condition: service_completed_successfully
+      redis:
+        condition: service_healthy
+      minio-init:
+        condition: service_completed_successfully
+    restart: unless-stopped
+
+  postgres:
+    image: postgres:16-alpine
+    environment:
+      - POSTGRES_USER=postgres
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres} # read from .env (written by deploy.sh); dev default otherwise
+      - POSTGRES_DB=handy
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+    ports:
+      - "127.0.0.1:5432:5432"
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U postgres"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+    restart: unless-stopped
+
+  postgres-init:
+    image: postgres:16-alpine
+    depends_on:
+      postgres:
+        condition: service_healthy
+    environment:
+      - PGPASSWORD=${POSTGRES_PASSWORD:-postgres} # must match POSTGRES_PASSWORD above
+    entrypoint: >
+      /bin/sh -c "
+      psql -h postgres -U postgres -c \"SELECT 1\" && echo 'PostgreSQL connection verified' || echo 'Password may need reset';
+      "
+
+  redis:
+    image: redis:7-alpine
+    command: redis-server --appendonly yes
+    volumes:
+      - redis_data:/data
+    ports:
+      - "127.0.0.1:6379:6379"
+    healthcheck:
+      test: ["CMD", "redis-cli", "ping"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+    restart: unless-stopped
+
+  minio:
+    image: minio/minio
+    command: server /data --console-address ":9001"
+    environment:
+      - MINIO_ROOT_USER=minioadmin
+      - MINIO_ROOT_PASSWORD=minioadmin
+    volumes:
+      - minio_data:/data
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+    healthcheck:
+      test: ["CMD", "mc",
"ready", "local"] + interval: 5s + timeout: 5s + retries: 5 + restart: unless-stopped + + minio-init: + image: minio/mc + depends_on: + minio: + condition: service_healthy + entrypoint: > + /bin/sh -c " + mc alias set myminio http://minio:9000 minioadmin minioadmin; + mc mb -p myminio/happy || true; + mc anonymous set download myminio/happy; + echo 'Bucket created successfully'; + " + +volumes: + postgres_data: + redis_data: + minio_data: diff --git a/docs/plans/2025-01-01-incremental-message-sync-implementation.md b/docs/plans/2025-01-01-incremental-message-sync-implementation.md new file mode 100644 index 0000000..0266a22 --- /dev/null +++ b/docs/plans/2025-01-01-incremental-message-sync-implementation.md @@ -0,0 +1,215 @@ +# Incremental Message Sync Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Add `updatedAfter` and `before` query parameters to the messages endpoint for incremental sync. + +**Architecture:** Extend existing GET `/v1/sessions/:sessionId/messages` endpoint with optional query params. Add database index on `(sessionId, updatedAt)` for efficient queries. 
+ +**Tech Stack:** Fastify, Zod validation, Prisma ORM, Vitest + +--- + +## Task 1: Add Query Parameter Schema + +**Files:** +- Modify: `sources/app/api/routes/sessionRoutes.ts:308-355` + +**Step 1: Update the schema to include querystring validation** + +Find the existing route at line 308 and update the schema: + +```typescript +app.get('/v1/sessions/:sessionId/messages', { + schema: { + params: z.object({ + sessionId: z.string() + }), + querystring: z.object({ + updatedAfter: z.coerce.number().int().min(0).optional(), + before: z.coerce.number().int().min(0).optional(), + limit: z.coerce.number().int().min(1).max(150).default(150) + }) + }, + preHandler: app.authenticate +}, async (request, reply) => { +``` + +**Step 2: Update the handler to use query params** + +Replace the handler body (lines 315-354) with: + +```typescript +}, async (request, reply) => { + const userId = request.userId; + const { sessionId } = request.params; + const { updatedAfter, before, limit } = request.query; + + // Verify session belongs to user + const session = await db.session.findFirst({ + where: { + id: sessionId, + accountId: userId + } + }); + + if (!session) { + return reply.code(404).send({ error: 'Session not found' }); + } + + const messages = await db.sessionMessage.findMany({ + where: { + sessionId, + ...(updatedAfter !== undefined ? { updatedAt: { gt: new Date(updatedAfter) } } : {}), + ...(before !== undefined ? { createdAt: { lt: new Date(before) } } : {}) + }, + orderBy: updatedAfter !== undefined ? { updatedAt: 'asc' } : { createdAt: 'desc' }, + take: limit, + select: { + id: true, + seq: true, + localId: true, + content: true, + createdAt: true, + updatedAt: true + } + }); + + return reply.send({ + messages: messages.map((v) => ({ + id: v.id, + seq: v.seq, + content: v.content, + localId: v.localId, + createdAt: v.createdAt.getTime(), + updatedAt: v.updatedAt.getTime() + })), + hasMore: messages.length === limit, + ...(messages.length > 0 ? 
{ + oldestTimestamp: Math.min(...messages.map(m => m.createdAt.getTime())), + newestTimestamp: Math.max(...messages.map(m => m.updatedAt.getTime())) + } : {}) + }); +}); +``` + +**Step 3: Run TypeScript check** + +Run: `yarn build` +Expected: No errors + +**Step 4: Commit** + +```bash +git add sources/app/api/routes/sessionRoutes.ts +git commit -m "feat: add updatedAfter and before params to messages endpoint + +Enables incremental sync by allowing clients to request only +messages updated after a specific timestamp. + +Generated with [Claude Code](https://claude.ai/code) +via [Happy](https://happy.engineering) + +Co-Authored-By: Claude +Co-Authored-By: Happy " +``` + +--- + +## Task 2: Add Database Index + +**Files:** +- Modify: `prisma/schema.prisma:116-129` + +**Step 1: Add the index to SessionMessage model** + +Find the `SessionMessage` model (around line 116) and add the index: + +```prisma +model SessionMessage { + id String @id @default(cuid()) + sessionId String + session Session @relation(fields: [sessionId], references: [id]) + localId String? + seq Int + /// [SessionMessageContent] + content Json + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@unique([sessionId, localId]) + @@index([sessionId, seq]) + @@index([sessionId, updatedAt]) +} +``` + +**Step 2: Generate Prisma client** + +Run: `yarn generate` +Expected: Success message about Prisma client generation + +**Step 3: Commit** + +```bash +git add prisma/schema.prisma +git commit -m "chore: add index for incremental message sync + +Index on (sessionId, updatedAt) optimizes queries with updatedAfter parameter. + +Generated with [Claude Code](https://claude.ai/code) +via [Happy](https://happy.engineering) + +Co-Authored-By: Claude +Co-Authored-By: Happy " +``` + +**Note:** Migration must be created by a human. The index will be applied when they run `yarn migrate`. 
+
+---
+
+## Task 3: Manual Testing
+
+**Step 1: Start the server**
+
+Run: `yarn start` (or however the dev server runs)
+
+**Step 2: Test backwards compatibility (no params)**
+
+```bash
+curl -H "Authorization: Bearer <token>" \
+  "http://localhost:3000/v1/sessions/<session-id>/messages"
+```
+
+Expected: Returns up to 150 messages with `hasMore`, `oldestTimestamp`, `newestTimestamp` fields
+
+**Step 3: Test with updatedAfter**
+
+```bash
+curl -H "Authorization: Bearer <token>" \
+  "http://localhost:3000/v1/sessions/<session-id>/messages?updatedAfter=1735600000000"
+```
+
+Expected: Returns only messages updated after the timestamp
+
+**Step 4: Test with limit**
+
+```bash
+curl -H "Authorization: Bearer <token>" \
+  "http://localhost:3000/v1/sessions/<session-id>/messages?limit=10"
+```
+
+Expected: Returns at most 10 messages
+
+---
+
+## Summary
+
+| Task | Description | Files |
+|------|-------------|-------|
+| 1 | Add query params to endpoint | `sessionRoutes.ts` |
+| 2 | Add database index | `schema.prisma` |
+| 3 | Manual testing | - |
+
+**Total commits:** 2 (code change + index)
+
+**Migration note:** After Task 2, a human must create and apply the migration.
diff --git a/docs/plans/2025-01-01-message-sync-latency-design.md b/docs/plans/2025-01-01-message-sync-latency-design.md
new file mode 100644
index 0000000..506c5a5
--- /dev/null
+++ b/docs/plans/2025-01-01-message-sync-latency-design.md
@@ -0,0 +1,282 @@
+# Message Sync Latency Reduction Design
+
+## Problem
+
+When opening the iOS app, users wait 2-7 seconds before session content (messages) appears. The delay comes from:
+
+1. Socket reconnection (1-5s on mobile)
+2. Full HTTP fetch of 150 messages every time
+3. Decrypting all 150 messages even when most are cached
+4. No prefetching - messages only fetched when session opened
+
+## Solution: Two-Part Approach
+
+### Part A: Incremental Sync
+
+Add `updatedAfter` parameter to message fetching so iOS only requests new/edited messages.
+ +--- + +#### Server API Change + +**Endpoint:** +``` +GET /v1/sessions/:sessionId/messages?updatedAfter=&before=&limit=150 +``` + +**Parameters:** +| Param | Type | Description | +|-------|------|-------------| +| `updatedAfter` | timestamp (ms) | Messages updated after this time | +| `before` | timestamp (ms) | Messages created before this time (for history) | +| `limit` | int (1-150) | Max messages to return, default 150 | + +**Behavior:** +- `updatedAfter` only: Fetch new/edited messages (incremental sync) +- `before` only: Fetch older history (scroll up) +- Both: Fetch range +- Neither: Last 150 messages (backwards compatible) + +**Response:** +```json +{ + "messages": [...], + "hasMore": true, + "oldestTimestamp": 1735600000000, + "newestTimestamp": 1735689999000 +} +``` + +**Server Implementation:** +```typescript +// In sessionRoutes.ts +app.get('/v1/sessions/:sessionId/messages', { + schema: { + params: z.object({ sessionId: z.string() }), + querystring: z.object({ + updatedAfter: z.coerce.number().int().min(0).optional(), + before: z.coerce.number().int().min(0).optional(), + limit: z.coerce.number().int().min(1).max(150).default(150) + }).optional() + }, + preHandler: app.authenticate +}, async (request, reply) => { + const { sessionId } = request.params; + const { updatedAfter, before, limit } = request.query || {}; + + const messages = await db.sessionMessage.findMany({ + where: { + sessionId, + ...(updatedAfter !== undefined ? { updatedAt: { gt: new Date(updatedAfter) } } : {}), + ...(before !== undefined ? { createdAt: { lt: new Date(before) } } : {}) + }, + orderBy: updatedAfter !== undefined ? 
{ updatedAt: 'asc' } : { createdAt: 'desc' },
+    take: limit || 150,
+    select: { id: true, seq: true, localId: true, content: true, createdAt: true, updatedAt: true }
+  });
+
+  return reply.send({
+    messages: messages.map(v => ({
+      id: v.id,
+      seq: v.seq,
+      content: v.content,
+      localId: v.localId,
+      createdAt: v.createdAt.getTime(),
+      updatedAt: v.updatedAt.getTime()
+    })),
+    hasMore: messages.length === (limit || 150),
+    ...(messages.length > 0 ? {
+      oldestTimestamp: Math.min(...messages.map(m => m.createdAt.getTime())),
+      newestTimestamp: Math.max(...messages.map(m => m.updatedAt.getTime()))
+    } : {})
+  });
+});
+```
+
+**Index Required:**
+```prisma
+@@index([sessionId, updatedAt])
+```
+
+---
+
+#### iOS Changes for Incremental Sync
+
+**Track last sync timestamp:**
+```typescript
+// In Sync class
+private sessionLastSync = new Map<string, number>();
+
+// Persist to AsyncStorage for cold starts
+private async loadLastSyncTimes() {
+  const stored = await AsyncStorage.getItem('sessionLastSync');
+  if (stored) {
+    const parsed = JSON.parse(stored);
+    Object.entries(parsed).forEach(([id, ts]) =>
+      this.sessionLastSync.set(id, ts as number)
+    );
+  }
+}
+
+private async persistLastSyncTimes() {
+  const obj = Object.fromEntries(this.sessionLastSync);
+  await AsyncStorage.setItem('sessionLastSync', JSON.stringify(obj));
+}
+```
+
+**Modified fetchMessages:**
+```typescript
+private fetchMessages = async (sessionId: string) => {
+  const encryption = this.encryption.getSessionEncryption(sessionId);
+  if (!encryption) {
+    throw new Error(`Session encryption not ready for ${sessionId}`);
+  }
+
+  const lastSync = this.sessionLastSync.get(sessionId);
+  const url = lastSync
+    ?
`/v1/sessions/${sessionId}/messages?updatedAfter=${lastSync}` + : `/v1/sessions/${sessionId}/messages`; + + const response = await apiSocket.request(url); + const data = await response.json(); + + // Only decrypt NEW/UPDATED messages (typically 0-5) + const decrypted = await encryption.decryptMessages(data.messages); + + // Update last sync time using SERVER timestamp (avoids clock skew issues) + if (data.newestTimestamp) { + this.sessionLastSync.set(sessionId, data.newestTimestamp); + this.persistLastSyncTimes(); + } + + this.applyMessages(sessionId, decrypted); +}; +``` + +--- + +### Part B: Prefetch on App Active + +When app becomes active, immediately prefetch messages for recently-active sessions with timeout protection. + +**Implementation:** +```typescript +// In constructor, modify AppState handler +AppState.addEventListener('change', (nextAppState) => { + if (nextAppState === 'active') { + log.log('App became active'); + + // Existing syncs... + this.sessionsSync.invalidate(); + this.machinesSync.invalidate(); + // ... etc + + // NEW: Prefetch messages for active sessions + this.prefetchActiveSessionMessages(); + } +}); + +private prefetchActiveSessionMessages = async () => { + // Wait for sessions list (with timeout) + const sessionsReady = Promise.race([ + this.sessionsSync.awaitQueue(), + delay(3000).then(() => 'timeout') + ]); + + if (await sessionsReady === 'timeout') { + log.log('Sessions sync timeout - skipping prefetch'); + return; + } + + const activeSessions = storage.getState() + .sessionsData + ?.filter((s): s is Session => typeof s !== 'string' && s.active) + .slice(0, 5) ?? 
[]; + + if (activeSessions.length === 0) return; + + log.log(`Prefetching messages for ${activeSessions.length} active sessions`); + + // Prefetch with 5s total timeout + const prefetchWithTimeout = Promise.race([ + Promise.allSettled( + activeSessions.map(session => { + let sync = this.messagesSync.get(session.id); + if (!sync) { + sync = new InvalidateSync(() => this.fetchMessages(session.id)); + this.messagesSync.set(session.id, sync); + } + return sync.invalidateAndAwait(); + }) + ), + delay(5000).then(() => 'timeout') + ]); + + const result = await prefetchWithTimeout; + if (result === 'timeout') { + log.log('Prefetch timeout - continuing in background'); + } else { + log.log('Prefetch complete'); + } +}; +``` + +**Timeouts:** +- 3s for sessions list to load +- 5s for all message prefetches combined +- If timeout, prefetches continue in background (don't cancel) + +--- + +## Known Limitations + +### Deleted Messages +This design does not handle message deletions. If a message is deleted after sync, the client won't know to remove it. Options for future: +- Soft-delete flag with `deletedAt` timestamp +- Periodic full refresh (e.g., every 24h) +- Separate tombstone endpoint + +### Clock Skew Protection +Using server's `newestTimestamp` (not client's `Date.now()`) mitigates most clock skew issues. For additional protection, could subtract a small buffer (5-10s) from `updatedAfter`, but this may cause minor duplicate fetches. + +--- + +## Expected Impact + +| Metric | Before | After | +|--------|--------|-------| +| Messages fetched | 150 always | 0-5 typical | +| Decrypt time | 100-500ms | ~10ms | +| HTTP payload | ~50-200KB | ~1-5KB | +| Time to content | 2-7s | <500ms | + +--- + +## Implementation Branches + +### Branch: `feat/incremental-message-sync` +**Repos:** `happy-server` + `happy` + +1. Server: Add `updatedAfter` and `before` query params +2. Server: Add `@@index([sessionId, updatedAt])` to schema +3. iOS: Add `sessionLastSync` Map + persistence +4. 
iOS: Modify `fetchMessages` to use `updatedAfter` + +### Branch: `feat/prefetch-on-active` +**Repo:** `happy` + +1. Add `prefetchActiveSessionMessages` method +2. Call from AppState 'active' handler +3. Add timeout protection (3s sessions, 5s prefetch) + +--- + +## Files to Modify + +**Server (`happy-server`):** +- `sources/app/api/routes/sessionRoutes.ts` - add query params +- `prisma/schema.prisma` - add index (if needed) + +**iOS (`happy`):** +- `sources/sync/sync.ts` - incremental fetch + prefetch +- `sources/sync/storage.ts` - persist lastSync times (optional) diff --git a/docs/plans/2026-01-01-deploy-script-design.md b/docs/plans/2026-01-01-deploy-script-design.md new file mode 100644 index 0000000..cc16f09 --- /dev/null +++ b/docs/plans/2026-01-01-deploy-script-design.md @@ -0,0 +1,126 @@ +# Deploy Script Design + +## Overview + +A single `deploy.sh` script that handles complete server deployment, working on both fresh VPS installations and existing setups like mycaller.xyz. + +## Requirements + +- Idempotent - safe to run multiple times +- Auto-detects environment (system Caddy vs Docker Caddy) +- Interactive prompts with sensible defaults +- Full health verification after deployment + +## Script Flow + +``` +deploy.sh +├── Detect environment (fresh vs existing) +├── Interactive prompts for configuration +├── Docker setup (build & start containers) +├── PostgreSQL password configuration +├── Caddy configuration (system or Docker) +├── Full health verification +└── Summary with access URLs +``` + +## Environment Detection + +Runs first without user input: +- Check Docker and docker-compose installed +- Check if system Caddy is installed (systemd service) +- Check if PostgreSQL volume exists (existing data) +- Check if app is currently running + +## Interactive Prompts + +### Domain Configuration +``` +Enter domain name [mycaller.xyz]: _ +``` +Default: hostname or previous value from Caddyfile + +### Existing Data Handling +``` +Existing database found. 
What would you like to do? + 1) Keep existing data (default) + 2) Reset everything (WARNING: destroys data) +Choice [1]: _ +``` + +### PostgreSQL Password +``` +PostgreSQL password: + 1) Use default (postgres) - for development + 2) Enter custom password + 3) Generate random password +Choice [1]: _ +``` + +Credentials stored in `.env` file (gitignored). + +## Docker Deployment Sequence + +1. Build the app image: `docker compose build app` +2. Start infrastructure: `docker compose up -d postgres redis minio` +3. Wait for health checks (60s timeout) +4. Fix PostgreSQL password: `ALTER USER postgres WITH PASSWORD '...'` +5. Start app: `docker compose up -d app` +6. Wait for app health (30s timeout) + +If reset mode chosen, runs `docker compose down -v` first. + +## Caddy Configuration + +### System Caddy (when detected) +Updates `/etc/caddy/Caddyfile`: +``` +${DOMAIN} { + encode gzip + reverse_proxy 127.0.0.1:3005 +} + +www.${DOMAIN} { + redir https://${DOMAIN}{uri} permanent +} +``` +Then: `sudo systemctl reload caddy` + +### Docker Caddy (fallback) +- Adds Caddy service to docker-compose.yml +- Creates Caddyfile in project directory +- Handles SSL via Let's Encrypt + +### Port Conflicts +If ports 80/443 in use by unknown process, warn and ask user to resolve manually. + +## Health Verification + +Tests after deployment: +1. `GET /health` - expect `{"status":"ok"}` +2. `GET /` - expect 200 +3. `GET /v1/updates/?EIO=4` - expect valid Socket.io SID +4. `GET https://${DOMAIN}/health` - expect `{"status":"ok"}` + +Output: +``` +Verifying deployment... + + OK Health endpoint + OK Root endpoint + OK Socket.io + OK HTTPS (mycaller.xyz) + +Deployment successful! + +Access URLs: + API: https://mycaller.xyz + Health: https://mycaller.xyz/health +``` + +## File Changes + +- Creates `deploy.sh` in project root +- Creates/updates `.env` with credentials +- Updates `/etc/caddy/Caddyfile` (if system Caddy) +- `.env` added to `.gitignore`