Skip to content

Commit fc13e06

Browse files
committed
Remove outdated documentation and integration test plans
- Deleted the architecture design and implementation plan for CodeGraph.
- Removed the integration test plan document, which outlined testing strategies and execution commands.
- Eliminated the testing context summary document that provided an overview of the testing environment and constraints.
- Cleared the monitoring setup documentation detailing the Docker-based monitoring stack for the application.
- Removed the Rust SDK development plan for the native Rust client SDK for MCP, including development context and library research.
1 parent e1e678e commit fc13e06

38 files changed

+467
-14504
lines changed

.codegraph/index.json

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
{
22
"project_path": ".",
3-
"indexed_at": "2025-09-11T22:40:30.157223Z",
3+
"indexed_at": "2025-09-12T05:16:15.023218Z",
44
"stats": {
5-
"files": 0,
6-
"lines": 161517,
7-
"functions": 4197,
5+
"files": 315,
6+
"lines": 162442,
7+
"functions": 4209,
88
"classes": 0,
9-
"embeddings": 0,
9+
"embeddings": 13509,
1010
"errors": 0
1111
},
1212
"config": {

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,4 +168,4 @@ CLAUDE.md
168168
AGENTS.md
169169
CRUSH.md
170170
OUROBOROS.md
171-
.codegraph/index.json
171+
.codegraph/index.json

README.md

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -104,16 +104,16 @@ How to use ONNX embeddings
104104
# CPU-only (default)
105105
export CODEGRAPH_EMBEDDING_PROVIDER=onnx
106106
export CODEGRAPH_ONNX_EP=cpu
107-
export CODEGRAPH_LOCAL_MODEL=sentence-transformers/all-MiniLM-L6-v2
107+
export CODEGRAPH_LOCAL_MODEL=/path/to/onnx-file
108108

109109
# CoreML (requires CoreML-enabled ORT build)
110110
export CODEGRAPH_EMBEDDING_PROVIDER=onnx
111111
export CODEGRAPH_ONNX_EP=coreml
112-
export CODEGRAPH_LOCAL_MODEL=sentence-transformers/all-MiniLM-L6-v2
112+
export CODEGRAPH_LOCAL_MODEL=/path/to/onnx-file
113113

114114

115115
# Install codegraph
116-
cargo install --path crates/codegraph-mcp --features "embeddings,codegraph-vector/onnx"
116+
cargo install --path crates/codegraph-mcp --features "embeddings,codegraph-vector/onnx,faiss"
117117
```
118118

119119
Notes
@@ -154,9 +154,9 @@ sudo dnf install cmake clang openssl-devel
154154
### Optional Dependencies
155155

156156
- **FAISS** (for vector search acceleration)
157-
- **Local Embeddings (Hugging Face + Candle + ONNX osx-metal/cuda/cpu)**
157+
- **Local Embeddings (HuggingFace + Candle + ONNX/ORT(coreML) osx-metal/cuda/cpu)**
158158
- Enables on-device embedding generation (no external API calls)
159-
- Downloads models from Hugging Face Hub on first run and caches them locally
159+
- Downloads models from HuggingFace Hub on first run and caches them locally
160160
- Internet access required for the initial model download (or pre-populate cache)
161161
- Default runs on CPU; advanced GPU backends (CUDA/Metal) require appropriate hardware and drivers
162162
- **CUDA** (for GPU-accelerated embeddings)
@@ -260,7 +260,7 @@ codegraph index .
260260
codegraph index . --languages rust,python,typescript
261261

262262
# Or with more options on macOS
263-
RUST_LOG=info,codegraph_vector=debug codegraph index . --workers 10 --batch-size 256 --device metal --max-seq-len 512 --force
263+
RUST_LOG=info,codegraph_vector=debug codegraph index . --workers 10 --batch-size 256 --max-seq-len 512 --force
264264

265265
# Index with file watching
266266
codegraph index . --watch

SESSION-MEMORY.md

Lines changed: 0 additions & 91 deletions
This file was deleted.
File renamed without changes.

benchmarks/.gitkeep

Whitespace-only changes.

benchmarks/README.md

Lines changed: 0 additions & 8 deletions
This file was deleted.

crates/codegraph-vector/src/embedding.rs

Lines changed: 25 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
use codegraph_core::{CodeGraphError, CodeNode, Result};
2-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
2+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
33
use std::sync::Arc;
4-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
4+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
55
use crate::embeddings::generator::TextEmbeddingEngine;
66

77
pub struct EmbeddingGenerator {
88
model_config: ModelConfig,
9-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
9+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
1010
pub(crate) advanced: Option<Arc<crate::embeddings::generator::AdvancedEmbeddingGenerator>>,
1111
}
1212

@@ -31,7 +31,7 @@ impl EmbeddingGenerator {
3131
pub fn new(config: ModelConfig) -> Self {
3232
Self {
3333
model_config: config,
34-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
34+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
3535
advanced: None,
3636
}
3737
}
@@ -40,7 +40,7 @@ impl EmbeddingGenerator {
4040
Self::new(ModelConfig::default())
4141
}
4242

43-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
43+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
4444
pub fn set_advanced_engine(
4545
&mut self,
4646
engine: Arc<crate::embeddings::generator::AdvancedEmbeddingGenerator>,
@@ -55,15 +55,15 @@ impl EmbeddingGenerator {
5555
/// Construct an EmbeddingGenerator that optionally wraps the advanced engine based on env.
5656
/// If CODEGRAPH_EMBEDDING_PROVIDER=local, tries to initialize a local-first engine.
5757
pub async fn with_auto_from_env() -> Self {
58-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
58+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
5959
let mut base = Self::new(ModelConfig::default());
60-
#[cfg(not(any(feature = "local-embeddings", feature = "openai")))]
60+
#[cfg(not(any(feature = "local-embeddings", feature = "openai", feature = "onnx")))]
6161
let base = Self::new(ModelConfig::default());
6262
let provider = std::env::var("CODEGRAPH_EMBEDDING_PROVIDER")
6363
.unwrap_or_default()
6464
.to_lowercase();
6565
if provider == "local" {
66-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
66+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
6767
{
6868
use crate::embeddings::generator::{
6969
AdvancedEmbeddingGenerator, EmbeddingEngineConfig, LocalDeviceTypeCompat,
@@ -85,6 +85,22 @@ impl EmbeddingGenerator {
8585
base.advanced = Some(Arc::new(engine));
8686
}
8787
}
88+
} else if provider == "onnx" {
89+
#[cfg(feature = "onnx")]
90+
{
91+
use crate::embeddings::generator::{AdvancedEmbeddingGenerator, EmbeddingEngineConfig, OnnxConfigCompat};
92+
let mut cfg = EmbeddingEngineConfig::default();
93+
let model_repo = std::env::var("CODEGRAPH_LOCAL_MODEL").unwrap_or_default();
94+
cfg.onnx = Some(OnnxConfigCompat {
95+
model_repo,
96+
model_file: Some("model.onnx".into()),
97+
max_sequence_length: 512,
98+
pooling: "mean".into(),
99+
});
100+
if let Ok(engine) = AdvancedEmbeddingGenerator::new(cfg).await {
101+
base.advanced = Some(Arc::new(engine));
102+
}
103+
}
88104
}
89105
base
90106
}
@@ -95,7 +111,7 @@ impl EmbeddingGenerator {
95111
}
96112

97113
pub async fn generate_embeddings(&self, nodes: &[CodeNode]) -> Result<Vec<Vec<f32>>> {
98-
#[cfg(any(feature = "local-embeddings", feature = "openai"))]
114+
#[cfg(any(feature = "local-embeddings", feature = "openai", feature = "onnx"))]
99115
if let Some(engine) = &self.advanced {
100116
// Use provider's batched path when available
101117
let texts: Vec<String> = nodes.iter().map(|n| self.prepare_text(n)).collect();

0 commit comments

Comments (0)