# CodeGraph Configuration File
# Copy this to .codegraph.toml or ~/.codegraph/config.toml and customize

# ============================================================================
# Embedding Configuration
# ============================================================================
[embedding]
# Provider: "auto", "onnx", "ollama", "openai", or "lmstudio"
# "auto" will detect available models automatically
# "lmstudio" recommended for MLX + Flash Attention 2 (macOS)
provider = "lmstudio"

# Model path or identifier
# For ONNX: Absolute path to model directory (auto-detected from HuggingFace cache)
# For Ollama: Model name (e.g., "all-minilm:latest")
# For LM Studio: Model name (e.g., "jinaai/jina-embeddings-v3")
# For OpenAI: Model name (e.g., "text-embedding-3-small")
# Recommended: jinaai/jina-embeddings-v3 (1536-dim, optimized for code)
model = "jinaai/jina-embeddings-v3"

# LM Studio URL (default port 1234)
lmstudio_url = "http://localhost:1234"

# Ollama URL (only used if provider is "ollama")
ollama_url = "http://localhost:11434"

# OpenAI API key (only used if provider is "openai")
# Can also be set via the OPENAI_API_KEY environment variable
# openai_api_key = "sk-..."
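# Tip (sketch): instead of storing the key here, you can export it in your
# shell before running the indexer, as noted above:
#   export OPENAI_API_KEY="sk-..."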

# Embedding dimension; must match the model above
# (1536 for the recommended Jina model, 384 for all-MiniLM)
dimension = 1536

# Batch size for embedding generation (GPU optimization)
batch_size = 64
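
# Example: a minimal Ollama-based embedding setup (sketch only; the model name
# and 384-dim value follow the all-MiniLM suggestion above — adjust them to
# whatever model you actually pull):
# provider = "ollama"
# model = "all-minilm:latest"
# ollama_url = "http://localhost:11434"
# dimension = 384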

# ============================================================================
# LLM Configuration (for insights generation)
# ============================================================================
[llm]
# Enable LLM insights (false = context-only mode for agents like Claude/GPT-4)
# Set to false for maximum speed if using an external agent
enabled = false

# LLM provider: "ollama" or "lmstudio"
# "lmstudio" recommended for MLX + Flash Attention 2 (macOS)
provider = "lmstudio"

# LLM model identifier
# For LM Studio: lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf
# For Ollama: Model name (e.g., "qwen2.5-coder:14b", "codellama:13b")
# Recommended: DeepSeek Coder v2 Lite Instruct Q4_K_M (superior performance)
model = "lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF"

# LM Studio URL (default port 1234)
lmstudio_url = "http://localhost:1234"

# Ollama URL
ollama_url = "http://localhost:11434"

# Context window size (tokens)
# DeepSeek Coder v2 Lite: 32768 tokens
context_window = 32000

# Temperature for generation (0.0 = deterministic, 1.0 = creative)
temperature = 0.1

# Insights mode: "context-only", "balanced", or "deep"
# - context-only: Return context only (fastest, for agents)
# - balanced: Process top 10 files with LLM (good speed/quality)
# - deep: Process all reranked files (comprehensive)
insights_mode = "context-only"
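
# Example: an Ollama-based setup with insights enabled (sketch only; the model
# name comes from the suggestions above — tune context_window to your model):
# enabled = true
# provider = "ollama"
# model = "qwen2.5-coder:14b"
# ollama_url = "http://localhost:11434"
# insights_mode = "balanced"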

# ============================================================================
# Performance Configuration
# ============================================================================
[performance]
# Number of worker threads (0 = auto-detect, defaults to CPU count)
num_threads = 0

# Cache size in MB
cache_size_mb = 512

# Enable GPU acceleration (requires CUDA/Metal support)
enable_gpu = false

# Maximum concurrent requests for embedding/LLM
max_concurrent_requests = 4
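
# Example: settings for a machine with GPU acceleration available (sketch only;
# the concurrency value is illustrative, not a benchmark-backed recommendation):
# enable_gpu = true
# max_concurrent_requests = 8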

# ============================================================================
# Logging Configuration
# ============================================================================
[logging]
# Log level: "trace", "debug", "info", "warn", "error"
# Use "warn" during indexing for clean TUI output (recommended)
# Use "info" for development/debugging
level = "warn"

# Log format: "pretty", "json", "compact"
format = "pretty"