# CodeGraph Configuration Example
# Copy this file to .env and update the values for your environment

# ============================================================================
# CodeGraph Core Configuration (Simplified Setup)
# ============================================================================

# Minimal setup - auto-detect the embedding provider (ONNX, Ollama, LM Studio, Jina, or OpenAI)
CODEGRAPH_EMBEDDING_PROVIDER=auto

# That's it for basic usage! CodeGraph will auto-detect everything else.
# Uncomment and customize the settings below if you need more control.

# Embedding Provider Configuration
# ----------------------------------
# Provider options: "auto", "onnx", "ollama", "openai", "jina", or "lmstudio"
# CODEGRAPH_EMBEDDING_PROVIDER=auto

# ONNX: Specify model path (or leave empty for auto-detection from the HuggingFace cache)
# CODEGRAPH_LOCAL_MODEL=/path/to/your/onnx/model
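
# By default HuggingFace caches models under ~/.cache/huggingface/hub (override with
# HF_HOME); to see which models are already cached locally:
#   ls ~/.cache/huggingface/hub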

# Ollama: Specify embedding model name
# CODEGRAPH_EMBEDDING_MODEL=all-minilm:latest
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
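
# If you use Ollama, pull the embedding model and confirm the server is reachable
# before indexing (standard Ollama commands; adjust the model name to match above):
#   ollama pull all-minilm:latest
#   curl http://localhost:11434/api/tags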

# Jina: Cloud-based embeddings with automatic reranking
# Recommended for production - fast, accurate, and includes code-specific models
# CODEGRAPH_EMBEDDING_PROVIDER=jina
# JINA_API_KEY=your-jina-api-key-here
# CODEGRAPH_EMBEDDING_MODEL=jina-embeddings-v4
# CODEGRAPH_EMBEDDING_DIMENSION=1024
# JINA_API_BASE=https://api.jina.ai/v1
# JINA_ENABLE_RERANKING=true
# JINA_RERANKING_MODEL=jina-reranker-v3
# JINA_RERANKING_TOP_N=10
# JINA_LATE_CHUNKING=true
# JINA_TASK=code.query
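
# To sanity-check a Jina API key before a long indexing run, a minimal request to the
# embeddings endpoint looks roughly like this (model should match the setting above):
#   curl https://api.jina.ai/v1/embeddings \
#     -H "Authorization: Bearer $JINA_API_KEY" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "jina-embeddings-v4", "input": ["fn main() {}"]}'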

# LM Studio: Best for MLX + Flash Attention 2 (recommended on macOS)
# Default: jina-code-embeddings-1.5b (1536 dimensions)
# CODEGRAPH_EMBEDDING_PROVIDER=lmstudio
# CODEGRAPH_EMBEDDING_MODEL=jinaai/jina-code-embeddings-1.5b
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
# CODEGRAPH_EMBEDDING_DIMENSION=1536
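
# LM Studio exposes an OpenAI-compatible API; with the local server running, list the
# loaded models to confirm the URL and the model identifier:
#   curl http://localhost:1234/v1/models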

# OpenAI: Model name (API key configured below in Security section)
# CODEGRAPH_EMBEDDING_MODEL=text-embedding-3-small
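
# A quick way to confirm an OpenAI key is valid (standard models endpoint):
#   curl https://api.openai.com/v1/models -H "Authorization: Bearer $OPENAI_API_KEY"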

# LLM Configuration (for local insights generation)
# --------------------------------------------------
# Leave unset for context-only mode (fastest; recommended when a capable agent such as Claude or GPT-4 consumes the output)
# Set a provider and model below to enable local LLM insight generation

# LM Studio with DeepSeek Coder v2 Lite Instruct (recommended)
# Superior MLX support and Flash Attention 2 on macOS
# CODEGRAPH_LLM_PROVIDER=lmstudio
# CODEGRAPH_MODEL=lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
# CODEGRAPH_CONTEXT_WINDOW=32000
# CODEGRAPH_TEMPERATURE=0.1

# Ollama (alternative)
# LLM model (e.g., "qwen2.5-coder:14b", "codellama:13b")
# CODEGRAPH_MODEL=qwen2.5-coder:14b
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
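
# Pull the chosen model ahead of time so insight generation does not stall on a
# first-time download (example for the model above):
#   ollama pull qwen2.5-coder:14b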

# LLM context window size (tokens)
# CODEGRAPH_CONTEXT_WINDOW=32000

# LLM temperature (0.0 = deterministic, 1.0 = creative)
# CODEGRAPH_TEMPERATURE=0.1

# Logging
# -------
# Log level: trace, debug, info, warn, error
# Use "warn" during indexing for clean TUI output (recommended)
# Use "info" for development/debugging
RUST_LOG=warn
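
# RUST_LOG accepts standard Rust log-filter syntax, so you can keep the global level
# quiet while raising verbosity for a single crate. The crate name below is only an
# illustration - substitute the actual CodeGraph crate you want to inspect:
# RUST_LOG=warn,codegraph=debug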

# ============================================================================
# Security Configuration (for production deployments)
# ============================================================================

# JWT Authentication
JWT_SECRET=replace_with_secure_random_secret_minimum_32_characters_long
JWT_EXPIRY_HOURS=24
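
# Generate a strong random secret instead of hand-typing one, e.g.:
#   openssl rand -hex 32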

# API Key Configuration
API_KEY_PREFIX=cgk

# Server Configuration
HOST=127.0.0.1
PORT=8080
ENVIRONMENT=development

# TLS/HTTPS Configuration (for production)
# TLS_CERT_PATH=/path/to/certificate.pem
# TLS_KEY_PATH=/path/to/private-key.pem
# REQUIRE_TLS=true
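
# For local testing only, a self-signed certificate can be generated with openssl
# (never use self-signed certificates in production):
#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
#     -keyout private-key.pem -out certificate.pem -subj "/CN=localhost"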

# Database Configuration
# DATABASE_URL=postgresql://user:password@localhost/codegraph
# REDIS_URL=redis://localhost:6379
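
# Quick connectivity checks for the URLs above (standard client tools):
#   psql "postgresql://user:password@localhost/codegraph" -c "SELECT 1"
#   redis-cli -u redis://localhost:6379 ping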

# SurrealDB Configuration (for graph storage)
# CODEGRAPH_SURREALDB_URL=ws://localhost:8000
# CODEGRAPH_SURREALDB_NAMESPACE=codegraph
# CODEGRAPH_SURREALDB_DATABASE=main
# CODEGRAPH_SURREALDB_USERNAME=root
# CODEGRAPH_SURREALDB_PASSWORD=root
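
# For local development you can run an in-memory SurrealDB instance matching the
# defaults above (SurrealDB CLI; exact flags may differ by version, data is not persisted):
#   surreal start --user root --pass root memory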

# Rate Limiting
RATE_LIMIT_ANONYMOUS=60
RATE_LIMIT_USER=1000
RATE_LIMIT_PREMIUM=5000
RATE_LIMIT_ADMIN=10000

# Security Settings
# Maximum request body size in bytes (10485760 = 10 * 1024 * 1024, i.e. 10 MiB)
MAX_REQUEST_SIZE=10485760
SESSION_TIMEOUT_HOURS=24
PASSWORD_MIN_LENGTH=12

# Logging (see RUST_LOG above for CodeGraph core logging)
# LOG_LEVEL=info # For application-level logging
SECURITY_LOG_LEVEL=warn
LOG_FORMAT=json

# Monitoring
METRICS_ENABLED=true
PROMETHEUS_PORT=9090
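
# Prometheus exporters conventionally serve metrics at /metrics; assuming that
# convention holds for this port, a quick check is:
#   curl http://localhost:9090/metrics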

# Cloud Provider API Keys
# -----------------------
# Jina AI (for embeddings and reranking)
# JINA_API_KEY=your_jina_api_key_here

# OpenAI API (if using external AI services)
# OPENAI_API_KEY=your_openai_api_key_here
# OPENAI_ORG_ID=your_org_id_here

# Anthropic API (for Claude)
# ANTHROPIC_API_KEY=your_anthropic_api_key_here

# External Services
# SENTRY_DSN=https://your-sentry-dsn
# ANALYTICS_KEY=your_analytics_key

# Development/Testing Only
DEV_MODE=true
# Never set DISABLE_AUTH=true in production!
DISABLE_AUTH=false
ENABLE_DEBUG_ENDPOINTS=false