Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
58aa7d1
git status -sb
Sahilbhatane Nov 25, 2025
6d6fed7
package and test error fix
Sahilbhatane Nov 25, 2025
131f84c
fixes #42 package conflict resolution UI
Sahilbhatane Nov 25, 2025
0106e10
error fix and conflict resolution
Sahilbhatane Nov 28, 2025
b88ecc7
Package error for test (fix yaml)
Sahilbhatane Nov 28, 2025
53389a5
Fix automation workflow test path
Sahilbhatane Dec 14, 2025
74df07e
Fix CLI env precedence and LLMRouter env override
Sahilbhatane Dec 17, 2025
5dfbdb7
Fix lint (ruff/black)
Sahilbhatane Dec 17, 2025
8fae6e5
Fix LLMRouter None disables env fallback
Sahilbhatane Dec 17, 2025
f603f58
test fix
Sahilbhatane Dec 18, 2025
0a24ccc
Fix Ruff import order in sandbox executor
Sahilbhatane Dec 18, 2025
7b9bf00
lint fix
Sahilbhatane Dec 18, 2025
3349579
fix: resolve rebase conflicts and fix tests for Issue #42
Sahilbhatane Dec 28, 2025
4f0ee99
add dict for duplicate code and cursor mention in .gitignore
Sahilbhatane Jan 1, 2026
78277ce
test fix
Sahilbhatane Jan 1, 2026
e4f4b05
test fix
Sahilbhatane Jan 1, 2026
c6f6ad0
review changes
Sahilbhatane Jan 1, 2026
04da6d2
fix: update test assertion for config list output format
Sahilbhatane Jan 5, 2026
1b55aa9
feat: add config subparser for Issue #42 preferences management
Sahilbhatane Jan 5, 2026
0b0c160
Remove unused variable
Sahilbhatane Jan 8, 2026
fe5d076
suggestion fix
Sahilbhatane Jan 10, 2026
64d1040
linting
Sahilbhatane Jan 10, 2026
dc84970
Error fix
Sahilbhatane Jan 10, 2026
b11e255
Suggestion fix
Sahilbhatane Jan 10, 2026
98b388c
Error fix
Sahilbhatane Jan 10, 2026
e302d17
resolve conversation and lint all files
Sahilbhatane Jan 15, 2026
5bd9483
Merge branch 'main' into issue-42
Sahilbhatane Jan 15, 2026
33c5be7
fix: update integration tests to use pyproject.toml and fix linting i…
Sahilbhatane Jan 15, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/automation.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ jobs:
ANTHROPIC_API_KEY: "test-key-for-ci"
OPENAI_API_KEY: "test-key-for-ci"
run: |
python -m pytest tests/ -v --cov=cortex --cov-report=xml --cov-report=term-missing --timeout=60
python -m pytest tests/ -v --tb=short --cov=cortex --cov-report=xml --cov-report=term-missing --timeout=60

- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5
Expand Down
11 changes: 10 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ dmypy.json
.ropeproject/
.sublime-project
.sublime-workspace
.cursor/

# ==============================
# OS-specific
Expand Down Expand Up @@ -151,10 +152,13 @@ htmlcov/
*.swo

# ==============================
# Cortex specific
# Cortex-specific
# ==============================
# User preferences and configuration
.cortex/
*.yaml.bak
~/.config/cortex/preferences.yaml
~/.config/cortex/*.backup.*
/tmp/
.env

Expand All @@ -178,6 +182,11 @@ cortex-code-stats.csv
# Local scripts (not part of distribution)
*.local.sh

# Data files (except contributors.json which is tracked)
data/*.json
data/*.csv
!data/contributors.json

# Editor config (keep .editorconfig)
.vscode/settings.json
.idea/workspace.xml
Expand Down
28 changes: 22 additions & 6 deletions cortex/api_key_detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@
import os
import re
from pathlib import Path
from typing import Optional

from cortex.branding import console, cx_print

Expand Down Expand Up @@ -397,14 +396,31 @@ def _get_check_locations(self) -> list[tuple]:
Returns:
List of (source, env_vars) tuples
"""
return [
home = Path.home()
locations: list[tuple[str | Path, list[str]]] = [
("environment", ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]),
(Path.home() / CORTEX_DIR / CORTEX_ENV_FILE, ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]),
(Path.home() / ".config" / "anthropic" / "credentials.json", ["ANTHROPIC_API_KEY"]),
(Path.home() / ".config" / "openai" / "credentials.json", ["OPENAI_API_KEY"]),
(Path.cwd() / ".env", ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]),
(home / CORTEX_DIR / CORTEX_ENV_FILE, ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]),
(home / ".config" / "anthropic" / "credentials.json", ["ANTHROPIC_API_KEY"]),
(home / ".config" / "openai" / "credentials.json", ["OPENAI_API_KEY"]),
]

# Only consult the working-directory .env when a real home directory exists, the
# CORTEX_DISABLE_CWD_DOTENV toggle is unset, and the cwd lives under home; this
# prevents accidental pickup of repository .env files when HOME is mocked or missing.
allow_cwd_env = os.environ.get("CORTEX_DISABLE_CWD_DOTENV", "").lower() not in (
"1",
"true",
)

try:
cwd_under_home = Path.cwd().is_relative_to(home)
except ValueError:
cwd_under_home = False

if home.exists() and allow_cwd_env and cwd_under_home:
locations.append((Path.cwd() / ".env", ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]))

return locations

def _extract_key_from_file(self, file_path: Path, env_var: str) -> str | None:
"""
Extract API key from a file.
Expand Down
72 changes: 41 additions & 31 deletions cortex/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from dataclasses import asdict, dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, List, Optional, Tuple
from typing import Any, Optional

from rich import box
from rich.console import Console
Expand Down Expand Up @@ -118,7 +118,9 @@ def _get_system_info(self) -> dict:
elif platform.system() == "Darwin":
result = subprocess.run(
["sysctl", "-n", "machdep.cpu.brand_string"],
capture_output=True, text=True, timeout=5
capture_output=True,
text=True,
timeout=5,
)
if result.returncode == 0:
info["cpu_model"] = result.stdout.strip()
Expand All @@ -139,8 +141,7 @@ def _get_system_info(self) -> dict:
break
elif platform.system() == "Darwin":
result = subprocess.run(
["sysctl", "-n", "hw.memsize"],
capture_output=True, text=True, timeout=5
["sysctl", "-n", "hw.memsize"], capture_output=True, text=True, timeout=5
)
if result.returncode == 0:
mem_bytes = int(result.stdout.strip())
Expand All @@ -160,7 +161,9 @@ def _detect_nvidia_gpu(self) -> bool:
try:
result = subprocess.run(
["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"],
capture_output=True, text=True, timeout=10
capture_output=True,
text=True,
timeout=10,
)
return result.returncode == 0 and result.stdout.strip() != ""
except Exception:
Expand All @@ -171,7 +174,9 @@ def _get_nvidia_vram(self) -> int:
try:
result = subprocess.run(
["nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits"],
capture_output=True, text=True, timeout=10
capture_output=True,
text=True,
timeout=10,
)
if result.returncode == 0:
return int(result.stdout.strip().split("\n")[0])
Expand Down Expand Up @@ -223,7 +228,7 @@ def _benchmark_cpu(self) -> BenchmarkResult:
score=score,
raw_value=round(avg_time * 1000, 2),
unit="ms",
description="Matrix computation speed"
description="Matrix computation speed",
)

def _benchmark_memory(self) -> BenchmarkResult:
Expand All @@ -250,7 +255,7 @@ def _benchmark_memory(self) -> BenchmarkResult:

# Calculate approximate bandwidth (bytes per second)
bytes_processed = size * 8 * 2 # 8 bytes per int, 2 operations
bandwidth_gbps = (bytes_processed / avg_time) / (1024 ** 3)
bandwidth_gbps = (bytes_processed / avg_time) / (1024**3)

# Score based on bandwidth
# Baseline: 10 GB/s = 50, 50 GB/s = 100, 1 GB/s = 10
Expand All @@ -267,7 +272,7 @@ def _benchmark_memory(self) -> BenchmarkResult:
score=score,
raw_value=round(bandwidth_gbps, 2),
unit="GB/s",
description="Memory throughput"
description="Memory throughput",
)

def _benchmark_gpu(self, system_info: dict) -> BenchmarkResult:
Expand Down Expand Up @@ -298,7 +303,7 @@ def _benchmark_gpu(self, system_info: dict) -> BenchmarkResult:
score=score,
raw_value=vram_mb,
unit="MB",
description="NVIDIA GPU VRAM"
description="NVIDIA GPU VRAM",
)

elif system_info.get("has_apple_silicon"):
Expand All @@ -320,7 +325,7 @@ def _benchmark_gpu(self, system_info: dict) -> BenchmarkResult:
score=score,
raw_value=int(ram_gb * 1024),
unit="MB (unified)",
description="Apple Silicon unified memory"
description="Apple Silicon unified memory",
)

else:
Expand All @@ -330,7 +335,7 @@ def _benchmark_gpu(self, system_info: dict) -> BenchmarkResult:
score=15,
raw_value=0,
unit="MB",
description="No dedicated GPU detected"
description="No dedicated GPU detected",
)

def _benchmark_inference_simulation(self) -> BenchmarkResult:
Expand All @@ -348,9 +353,11 @@ def _benchmark_inference_simulation(self) -> BenchmarkResult:
# Simulate embedding lookup (string hashing)
embeddings = [hash(token) % 10000 for token in tokens]
# Simulate attention (nested loops)
attention = sum(embeddings[i] * embeddings[j]
for i in range(min(50, len(embeddings)))
for j in range(min(50, len(embeddings))))
attention = sum(
embeddings[i] * embeddings[j]
for i in range(min(50, len(embeddings)))
for j in range(min(50, len(embeddings)))
)
_ = attention
elapsed = time.perf_counter() - start

Expand All @@ -372,7 +379,7 @@ def _benchmark_inference_simulation(self) -> BenchmarkResult:
score=score,
raw_value=round(tokens_per_sec / 1000, 2),
unit="K tok/s",
description="Simulated inference throughput"
description="Simulated inference throughput",
)

def _benchmark_token_generation(self) -> BenchmarkResult:
Expand All @@ -390,8 +397,10 @@ def _benchmark_token_generation(self) -> BenchmarkResult:
context = [0] * 10
for _ in range(sequence_length):
# Simulate softmax over vocabulary
logits = [(hash((i, tuple(context[-10:]))) % 1000) / 1000
for i in range(min(1000, vocab_size))]
logits = [
(hash((i, tuple(context[-10:]))) % 1000) / 1000
for i in range(min(1000, vocab_size))
]
next_token = max(range(len(logits)), key=lambda i: logits[i])
generated.append(next_token)
context.append(next_token)
Expand All @@ -415,7 +424,7 @@ def _benchmark_token_generation(self) -> BenchmarkResult:
score=score,
raw_value=round(tokens_per_sec, 1),
unit="tok/s",
description="Simulated generation speed"
description="Simulated generation speed",
)

def _calculate_overall_score(self, results: list[BenchmarkResult]) -> tuple[int, str]:
Expand Down Expand Up @@ -579,8 +588,9 @@ def run(self, save_history: bool = True) -> BenchmarkReport:
report.overall_score, report.rating = self._calculate_overall_score(report.results)

# Get model recommendations
report.can_run, report.needs_upgrade, report.upgrade_suggestion = \
report.can_run, report.needs_upgrade, report.upgrade_suggestion = (
self._get_model_recommendations(report.system_info, report.overall_score)
)

# Save to history
if save_history:
Expand Down Expand Up @@ -633,11 +643,7 @@ def display_report(self, report: BenchmarkReport):
else:
score_str = f"[red]{result.score}/100[/red]"

table.add_row(
result.name,
score_str,
f"{result.raw_value} {result.unit}"
)
table.add_row(result.name, score_str, f"{result.raw_value} {result.unit}")

console.print(table)
console.print()
Expand All @@ -650,12 +656,16 @@ def display_report(self, report: BenchmarkReport):
else:
score_color = "red"

score_content = f"[bold {score_color}]{report.overall_score}/100[/bold {score_color}] ({report.rating})"
console.print(Panel(
f"[bold]OVERALL SCORE:[/bold] {score_content}",
border_style="cyan",
box=box.ROUNDED,
))
score_content = (
f"[bold {score_color}]{report.overall_score}/100[/bold {score_color}] ({report.rating})"
)
console.print(
Panel(
f"[bold]OVERALL SCORE:[/bold] {score_content}",
border_style="cyan",
box=box.ROUNDED,
)
)
console.print()

# Model recommendations
Expand Down
6 changes: 4 additions & 2 deletions cortex/branding.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
- Consistent visual language
"""

from typing import List, Optional, Tuple
from typing import Optional

from rich import box
from rich.console import Console
Expand Down Expand Up @@ -318,7 +318,9 @@ def cx_error(message: str) -> None:

def cx_warning(message: str) -> None:
"""Print a warning message with warning icon."""
console.print(f"[{CORTEX_WARNING}]⚠[/{CORTEX_WARNING}] [{CORTEX_WARNING}]{message}[/{CORTEX_WARNING}]")
console.print(
f"[{CORTEX_WARNING}]⚠[/{CORTEX_WARNING}] [{CORTEX_WARNING}]{message}[/{CORTEX_WARNING}]"
)


def cx_info(message: str) -> None:
Expand Down
Loading
Loading