Skip to content

Commit ed0f688

Browse files
committed
Updates: New features
1 parent 99014bf commit ed0f688

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

44 files changed

+196665
-608
lines changed

Cargo.lock

Lines changed: 1952 additions & 74 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

crates/codegraph-ai/src/anthropic_provider.rs

Lines changed: 6 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -84,8 +84,11 @@ impl AnthropicProvider {
8484
Err(e) => {
8585
last_error = Some(e);
8686
if attempt < self.config.max_retries {
87-
tracing::warn!("Anthropic request failed (attempt {}/{}), retrying...",
88-
attempt + 1, self.config.max_retries + 1);
87+
tracing::warn!(
88+
"Anthropic request failed (attempt {}/{}), retrying...",
89+
attempt + 1,
90+
self.config.max_retries + 1
91+
);
8992
}
9093
}
9194
}
@@ -143,11 +146,7 @@ impl AnthropicProvider {
143146
.await
144147
.unwrap_or_else(|_| "Unknown error".to_string());
145148

146-
return Err(anyhow!(
147-
"Anthropic API error ({}): {}",
148-
status,
149-
error_text
150-
));
149+
return Err(anyhow!("Anthropic API error ({}): {}", status, error_text));
151150
}
152151

153152
response

crates/codegraph-ai/src/lib.rs

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
1-
pub mod llm_provider;
21
pub mod llm_factory;
2+
pub mod llm_provider;
33
pub mod ml;
44
pub mod optimization;
55
pub mod qwen_simple;
@@ -9,12 +9,12 @@ pub mod semantic;
99
// Cloud LLM providers
1010
#[cfg(feature = "anthropic")]
1111
pub mod anthropic_provider;
12-
#[cfg(feature = "openai-llm")]
13-
pub mod openai_llm_provider;
1412
#[cfg(feature = "openai-compatible")]
1513
pub mod openai_compatible_provider;
14+
#[cfg(feature = "openai-llm")]
15+
pub mod openai_llm_provider;
1616

17-
pub use llm_provider::*;
1817
pub use llm_factory::LLMProviderFactory;
18+
pub use llm_provider::*;
1919
pub use qwen_simple::{QwenClient, QwenConfig, QwenResult};
2020
pub use semantic::search::*;

crates/codegraph-ai/src/llm_factory.rs

Lines changed: 19 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -37,9 +37,21 @@ impl LLMProviderFactory {
3737
_ => Err(anyhow!(
3838
"Unsupported LLM provider: {}. Available providers: ollama, lmstudio{}{}{}",
3939
provider_name,
40-
if cfg!(feature = "anthropic") { ", anthropic" } else { "" },
41-
if cfg!(feature = "openai-llm") { ", openai" } else { "" },
42-
if cfg!(feature = "openai-compatible") { ", openai-compatible" } else { "" }
40+
if cfg!(feature = "anthropic") {
41+
", anthropic"
42+
} else {
43+
""
44+
},
45+
if cfg!(feature = "openai-llm") {
46+
", openai"
47+
} else {
48+
""
49+
},
50+
if cfg!(feature = "openai-compatible") {
51+
", openai-compatible"
52+
} else {
53+
""
54+
}
4355
)),
4456
}
4557
}
@@ -136,10 +148,7 @@ impl LLMProviderFactory {
136148
let openai_config = OpenAIConfig {
137149
api_key,
138150
base_url: "https://api.openai.com/v1".to_string(),
139-
model: config
140-
.model
141-
.clone()
142-
.unwrap_or_else(|| "gpt-4o".to_string()),
151+
model: config.model.clone().unwrap_or_else(|| "gpt-4o".to_string()),
143152
context_window: config.context_window,
144153
timeout_secs: config.timeout_secs,
145154
max_retries: 3,
@@ -152,14 +161,9 @@ impl LLMProviderFactory {
152161
/// Create an OpenAI-compatible provider
153162
#[cfg(feature = "openai-compatible")]
154163
fn create_openai_compatible_provider(config: &LLMConfig) -> Result<Arc<dyn LLMProvider>> {
155-
let base_url = config
156-
.openai_compatible_url
157-
.clone()
158-
.ok_or_else(|| {
159-
anyhow!(
160-
"OpenAI-compatible base URL not found. Set 'openai_compatible_url' in config"
161-
)
162-
})?;
164+
let base_url = config.openai_compatible_url.clone().ok_or_else(|| {
165+
anyhow!("OpenAI-compatible base URL not found. Set 'openai_compatible_url' in config")
166+
})?;
163167

164168
let compat_config = OpenAICompatibleConfig {
165169
base_url,

crates/codegraph-ai/src/llm_provider.rs

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -123,7 +123,8 @@ pub trait LLMProvider: Send + Sync {
123123
role: MessageRole::User,
124124
content: prompt.to_string(),
125125
}];
126-
self.generate_chat(&messages, &GenerationConfig::default()).await
126+
self.generate_chat(&messages, &GenerationConfig::default())
127+
.await
127128
}
128129

129130
/// Generate a completion with custom configuration

crates/codegraph-ai/src/openai_compatible_provider.rs

Lines changed: 21 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -190,16 +190,14 @@ impl OpenAICompatibleProvider {
190190

191191
// Add API key if provided
192192
if let Some(api_key) = &self.config.api_key {
193-
request_builder = request_builder.header("Authorization", format!("Bearer {}", api_key));
193+
request_builder =
194+
request_builder.header("Authorization", format!("Bearer {}", api_key));
194195
}
195196

196-
let response = request_builder
197-
.send()
198-
.await
199-
.context(format!(
200-
"Failed to send request to {} Responses API at {}",
201-
self.config.provider_name, self.config.base_url
202-
))?;
197+
let response = request_builder.send().await.context(format!(
198+
"Failed to send request to {} Responses API at {}",
199+
self.config.provider_name, self.config.base_url
200+
))?;
203201

204202
let status = response.status();
205203

@@ -257,16 +255,14 @@ impl OpenAICompatibleProvider {
257255

258256
// Add API key if provided
259257
if let Some(api_key) = &self.config.api_key {
260-
request_builder = request_builder.header("Authorization", format!("Bearer {}", api_key));
258+
request_builder =
259+
request_builder.header("Authorization", format!("Bearer {}", api_key));
261260
}
262261

263-
let response = request_builder
264-
.send()
265-
.await
266-
.context(format!(
267-
"Failed to send request to {} Chat Completions API at {}",
268-
self.config.provider_name, self.config.base_url
269-
))?;
262+
let response = request_builder.send().await.context(format!(
263+
"Failed to send request to {} Chat Completions API at {}",
264+
self.config.provider_name, self.config.base_url
265+
))?;
270266

271267
let status = response.status();
272268

@@ -284,16 +280,16 @@ impl OpenAICompatibleProvider {
284280
));
285281
}
286282

287-
let chat_response: ChatCompletionsResponse = response
288-
.json()
289-
.await
290-
.context(format!(
291-
"Failed to parse {} Chat Completions API response",
292-
self.config.provider_name
293-
))?;
283+
let chat_response: ChatCompletionsResponse = response.json().await.context(format!(
284+
"Failed to parse {} Chat Completions API response",
285+
self.config.provider_name
286+
))?;
294287

295288
// Convert Chat Completions response to Responses API format
296-
let choice = chat_response.choices.first().ok_or_else(|| anyhow!("No choices in response"))?;
289+
let choice = chat_response
290+
.choices
291+
.first()
292+
.ok_or_else(|| anyhow!("No choices in response"))?;
297293

298294
Ok(ResponseAPIResponse {
299295
id: chat_response.id,
@@ -352,7 +348,7 @@ impl LLMProvider for OpenAICompatibleProvider {
352348
ProviderCharacteristics {
353349
max_tokens: self.config.context_window,
354350
avg_latency_ms: 1500, // Local models are typically slower
355-
rpm_limit: None, // No rate limits for local providers
351+
rpm_limit: None, // No rate limits for local providers
356352
tpm_limit: None,
357353
supports_streaming: true,
358354
supports_functions: false, // Most local providers don't support function calling

crates/codegraph-ai/src/openai_llm_provider.rs

Lines changed: 25 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -230,18 +230,21 @@ impl LLMProvider for OpenAIProvider {
230230

231231
fn characteristics(&self) -> ProviderCharacteristics {
232232
// Characteristics vary by model
233-
let (max_tokens, rpm_limit, tpm_limit, supports_functions) = match self.config.model.as_str() {
234-
// Reasoning models
235-
m if m.contains("o1") => (200_000, Some(50), Some(30_000), false),
236-
m if m.contains("o3") || m.contains("o4") => (200_000, Some(50), Some(30_000), false),
237-
m if m.starts_with("gpt-5") => (200_000, Some(50), Some(30_000), false),
238-
// Standard models
239-
"gpt-4o" => (128_000, Some(500), Some(30_000), true),
240-
"gpt-4o-mini" => (128_000, Some(500), Some(200_000), true),
241-
"gpt-4-turbo" => (128_000, Some(500), Some(30_000), true),
242-
"gpt-4" => (8_192, Some(500), Some(10_000), true),
243-
_ => (self.config.context_window, Some(500), Some(30_000), true),
244-
};
233+
let (max_tokens, rpm_limit, tpm_limit, supports_functions) =
234+
match self.config.model.as_str() {
235+
// Reasoning models
236+
m if m.contains("o1") => (200_000, Some(50), Some(30_000), false),
237+
m if m.contains("o3") || m.contains("o4") => {
238+
(200_000, Some(50), Some(30_000), false)
239+
}
240+
m if m.starts_with("gpt-5") => (200_000, Some(50), Some(30_000), false),
241+
// Standard models
242+
"gpt-4o" => (128_000, Some(500), Some(30_000), true),
243+
"gpt-4o-mini" => (128_000, Some(500), Some(200_000), true),
244+
"gpt-4-turbo" => (128_000, Some(500), Some(30_000), true),
245+
"gpt-4" => (8_192, Some(500), Some(10_000), true),
246+
_ => (self.config.context_window, Some(500), Some(30_000), true),
247+
};
245248

246249
ProviderCharacteristics {
247250
max_tokens,
@@ -394,7 +397,11 @@ mod tests {
394397
..Default::default()
395398
};
396399
let provider = OpenAIProvider::new(config).unwrap();
397-
assert!(provider.is_reasoning_model(), "Model {} should be detected as reasoning model", model);
400+
assert!(
401+
provider.is_reasoning_model(),
402+
"Model {} should be detected as reasoning model",
403+
model
404+
);
398405
}
399406
}
400407

@@ -408,7 +415,11 @@ mod tests {
408415
..Default::default()
409416
};
410417
let provider = OpenAIProvider::new(config).unwrap();
411-
assert!(!provider.is_reasoning_model(), "Model {} should NOT be detected as reasoning model", model);
418+
assert!(
419+
!provider.is_reasoning_model(),
420+
"Model {} should NOT be detected as reasoning model",
421+
model
422+
);
412423
}
413424
}
414425
}

crates/codegraph-api/src/bin/codegraph-setup.rs

Lines changed: 4 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -269,12 +269,7 @@ async fn run_setup_wizard() -> Result<SetupConfig> {
269269
.allow_empty(true)
270270
.interact_text()?;
271271

272-
let models = vec![
273-
"gpt-4o",
274-
"gpt-4o-mini",
275-
"gpt-4-turbo",
276-
"gpt-4",
277-
];
272+
let models = vec!["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4"];
278273

279274
let model_selection = Select::with_theme(&theme)
280275
.with_prompt("Select OpenAI model")
@@ -345,18 +340,15 @@ fn save_config(config: &SetupConfig) -> Result<()> {
345340
.join(".codegraph");
346341

347342
// Create config directory if it doesn't exist
348-
fs::create_dir_all(&config_dir)
349-
.context("Failed to create config directory")?;
343+
fs::create_dir_all(&config_dir).context("Failed to create config directory")?;
350344

351345
let config_path = config_dir.join("config.toml");
352346

353347
// Serialize config to TOML
354-
let toml_content = toml::to_string_pretty(config)
355-
.context("Failed to serialize config")?;
348+
let toml_content = toml::to_string_pretty(config).context("Failed to serialize config")?;
356349

357350
// Write to file
358-
fs::write(&config_path, toml_content)
359-
.context("Failed to write config file")?;
351+
fs::write(&config_path, toml_content).context("Failed to write config file")?;
360352

361353
println!("\n📝 Configuration preview:");
362354
println!("─────────────────────────────────────────────────");

crates/codegraph-cli/src/main.rs

Lines changed: 4 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -265,8 +265,7 @@ async fn main() -> Result<()> {
265265
let cli = Cli::parse();
266266

267267
// Initialize configuration
268-
let config = ConfigManager::new(None)
269-
.context("Failed to load configuration")?;
268+
let config = ConfigManager::new(None).context("Failed to load configuration")?;
270269

271270
// Initialize app state (for now, we'll create a temporary one)
272271
// In production, this would connect to the actual storage
@@ -321,8 +320,7 @@ async fn execute_transaction_command(
321320
}
322321

323322
TransactionCommands::Commit { transaction_id } => {
324-
let tx_id = Uuid::parse_str(transaction_id)
325-
.context("Invalid transaction ID format")?;
323+
let tx_id = Uuid::parse_str(transaction_id).context("Invalid transaction ID format")?;
326324

327325
state
328326
.transactional_graph
@@ -341,8 +339,7 @@ async fn execute_transaction_command(
341339
}
342340

343341
TransactionCommands::Rollback { transaction_id } => {
344-
let tx_id = Uuid::parse_str(transaction_id)
345-
.context("Invalid transaction ID format")?;
342+
let tx_id = Uuid::parse_str(transaction_id).context("Invalid transaction ID format")?;
346343

347344
state
348345
.transactional_graph
@@ -668,11 +665,7 @@ fn print_pretty(value: &serde_json::Value) -> Result<()> {
668665
println!("{}: {}", key_colored, n.to_string().yellow());
669666
}
670667
serde_json::Value::Bool(b) => {
671-
let val_colored = if *b {
672-
"true".green()
673-
} else {
674-
"false".red()
675-
};
668+
let val_colored = if *b { "true".green() } else { "false".red() };
676669
println!("{}: {}", key_colored, val_colored);
677670
}
678671
_ => {

crates/codegraph-core/src/advanced_config.rs

Lines changed: 1 addition & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@ use tracing::{debug, info, warn};
1010
use crate::embedding_config::{EmbeddingModelConfig, EmbeddingPreset};
1111
use crate::performance_config::{PerformanceMode, PerformanceModeConfig, PerformanceProfile};
1212

13-
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
13+
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
1414
pub struct AdvancedConfig {
1515
#[serde(default)]
1616
pub embedding: EmbeddingModelConfig,
@@ -28,18 +28,6 @@ pub struct AdvancedConfig {
2828
pub templates: ConfigTemplates,
2929
}
3030

31-
impl Default for AdvancedConfig {
32-
fn default() -> Self {
33-
Self {
34-
embedding: EmbeddingModelConfig::default(),
35-
performance: PerformanceModeConfig::default(),
36-
runtime: RuntimeConfig::default(),
37-
monitoring: MonitoringConfig::default(),
38-
templates: ConfigTemplates::default(),
39-
}
40-
}
41-
}
42-
4331
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
4432
pub struct RuntimeConfig {
4533
#[serde(default = "RuntimeConfig::default_allow_runtime_switching")]

0 commit comments

Comments (0)