diff --git a/README.md b/README.md index 37407191..1ecf9ba1 100644 --- a/README.md +++ b/README.md @@ -193,6 +193,21 @@ cortex role set | `cortex --version` | Show version information | | `cortex --help` | Display help message | +#### Daemon Commands + +| Command | Description | +|---------|-------------| +| `cortex daemon install --execute` | Install and enable the cortexd daemon | +| `cortex daemon uninstall --execute` | Stop and remove the daemon | +| `cortex daemon ping` | Test daemon connectivity | +| `cortex daemon version` | Show daemon version | +| `cortex daemon config` | Show daemon configuration | +| `cortex daemon reload-config` | Reload daemon configuration | +| `cortex daemon run-tests` | Run daemon test suite | +| `cortex daemon run-tests --unit` | Run only unit tests | +| `cortex daemon run-tests --integration` | Run only integration tests | +| `cortex daemon run-tests -t ` | Run a specific test | + ### Configuration Cortex stores configuration in `~/.cortex/`: @@ -256,20 +271,45 @@ Cortex stores configuration in `~/.cortex/`: ``` cortex/ -├── cortex/ # Main package +├── cortex/ # Main Python package │ ├── cli.py # Command-line interface │ ├── coordinator.py # Installation orchestration │ ├── llm_router.py # Multi-LLM routing +│ ├── daemon_client.py # IPC client for cortexd │ ├── packages.py # Package manager wrapper │ ├── hardware_detection.py │ ├── installation_history.py │ └── utils/ # Utility modules -├── tests/ # Test suite +├── daemon/ # C++ background daemon (cortexd) +│ ├── src/ # Daemon source code +│ ├── include/ # Header files +│ ├── tests/ # Unit & integration tests +│ ├── scripts/ # Build and setup scripts +│ └── README.md # Daemon documentation +├── tests/ # Python test suite ├── docs/ # Documentation ├── examples/ # Example scripts └── scripts/ # Utility scripts ``` +### Background Daemon (cortexd) + +Cortex includes an optional C++ background daemon for system-level operations: + +```bash +# Install the daemon +cortex daemon install --execute + +# Check daemon status +cortex daemon ping +cortex daemon version + +# Run daemon tests (no installation required) +cortex daemon run-tests +``` + +See [daemon/README.md](daemon/README.md) for full documentation. + --- ## Safety & Security @@ -433,11 +473,37 @@ pip install -e ".[dev]" # Install pre-commit hooks pre-commit install +``` + +### Running Tests + +**Python Tests:** -# Run tests +```bash +# Run all Python tests pytest tests/ -v + +# Run with coverage +pytest tests/ -v --cov=cortex +``` + +**Daemon Tests (C++):** + +```bash +# Build daemon with tests +cd daemon && ./scripts/build.sh Release --with-tests + +# Run all daemon tests (no daemon installation required) +cortex daemon run-tests + +# Run specific test types +cortex daemon run-tests --unit # Unit tests only +cortex daemon run-tests --integration # Integration tests only +cortex daemon run-tests -t config # Specific test ``` +> **Note:** Daemon tests run against a static library and don't require the daemon to be installed as a systemd service. They test the code directly. + See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines. --- diff --git a/TESTING.md b/TESTING.md index f77329c4..a0cc9d68 100644 --- a/TESTING.md +++ b/TESTING.md @@ -150,6 +150,64 @@ python -m src.sandbox_executor "echo hello" --dry-run - [ ] Works in fish (if installed) - [ ] Works in tmux/screen +## Daemon Tests (C++) + +The daemon has its own test suite that tests the C++ code directly. 
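+Each test binary links against the daemon's static library (`cortexd_lib`), so a
+test is ordinary GoogleTest code that calls daemon classes directly. A minimal
+sketch of what such a unit test looks like (illustrative only; it assumes the
+`Config` API declared in `daemon/include/cortexd/config.h`, not the actual
+contents of `test_config`):
+
+```cpp
+#include <gtest/gtest.h>
+
+#include "cortexd/config.h"
+
+// The default configuration should pass its own validation
+// (Config::validate() returns an empty string when the config is valid).
+TEST(ConfigDefaults, AreValid) {
+    cortexd::Config cfg = cortexd::Config::defaults();
+    EXPECT_TRUE(cfg.validate().empty());
+}
+```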
+ +### Prerequisites + +```bash +# Build daemon with tests +cd daemon +./scripts/build.sh Release --with-tests +``` + +### Running Daemon Tests + +**Via Cortex CLI:** + +- [ ] `cortex daemon run-tests` - Runs all daemon tests +- [ ] `cortex daemon run-tests --unit` - Runs unit tests only +- [ ] `cortex daemon run-tests --integration` - Runs integration tests only +- [ ] `cortex daemon run-tests -t config` - Runs specific test +- [ ] `cortex daemon run-tests -v` - Verbose output + +**Via ctest:** + +```bash +cd daemon/build +ctest --output-on-failure +``` + +### Test Structure + +| Test | Type | What It Tests | +|------|------|---------------| +| `test_config` | Unit | Configuration loading/validation | +| `test_protocol` | Unit | IPC message serialization | +| `test_rate_limiter` | Unit | Request rate limiting | +| `test_logger` | Unit | Logging subsystem | +| `test_common` | Unit | Common constants/types | +| `test_ipc_server` | Integration | IPC server lifecycle | +| `test_handlers` | Integration | IPC request handlers | +| `test_daemon` | Integration | Daemon lifecycle/services | + +### Important Notes + +> **Tests don't require daemon installation!** +> +> The tests link against a static library (`cortexd_lib`) containing all daemon code. +> They instantiate classes directly in memory and test their behavior without needing +> systemd or an installed binary. + +### Daemon Test Checklist + +- [ ] All unit tests pass +- [ ] All integration tests pass +- [ ] Tests complete in < 30 seconds +- [ ] No memory leaks (run with valgrind if available) +- [ ] Tests pass without root/sudo + ## Notes Record any issues found: diff --git a/cortex/cli.py b/cortex/cli.py index b1cfe4a1..6eff2b07 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -2826,6 +2826,706 @@ def progress_callback(current: int, total: int, step: InstallationStep) -> None: return 1 # -------------------------- + # Daemon Commands + # -------------------------- + + def daemon(self, args: argparse.Namespace) -> int: + """Handle daemon commands: install, uninstall, config, reload-config, version, ping, shutdown. 
+ + PR1 available commands: + - install/uninstall: Manage systemd service files (Python-side) + - config: Get daemon configuration via IPC + - reload-config: Reload daemon configuration via IPC + - version: Get daemon version via IPC + - ping: Test daemon connectivity via IPC + - shutdown: Request daemon shutdown via IPC + """ + action = getattr(args, "daemon_action", None) + + if action == "install": + return self._daemon_install(args) + elif action == "uninstall": + return self._daemon_uninstall(args) + elif action == "config": + return self._daemon_config() + elif action == "reload-config": + return self._daemon_reload_config() + elif action == "version": + return self._daemon_version() + elif action == "ping": + return self._daemon_ping() + elif action == "shutdown": + return self._daemon_shutdown() + elif action == "run-tests": + return self._daemon_run_tests(args) + else: + cx_print("Usage: cortex daemon ", "info") + cx_print("", "info") + cx_print("Available commands:", "info") + cx_print(" install Install and enable the daemon service", "info") + cx_print(" uninstall Remove the daemon service", "info") + cx_print(" config Show daemon configuration", "info") + cx_print(" reload-config Reload daemon configuration", "info") + cx_print(" version Show daemon version", "info") + cx_print(" ping Test daemon connectivity", "info") + cx_print(" shutdown Request daemon shutdown", "info") + cx_print(" run-tests Run daemon test suite", "info") + return 0 + + def _daemon_ipc_call(self, operation_name: str, ipc_func): + """ + Helper method for daemon IPC calls with centralized error handling. + + Args: + operation_name: Human-readable name of the operation for error messages. + ipc_func: A callable that takes a DaemonClient and returns a DaemonResponse. + + Returns: + Tuple of (success: bool, response: DaemonResponse | None) + On error, response is None and an error message is printed. + """ + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + [f"daemon.{operation_name}"], + start_time, + ) + except Exception: + # Continue even if audit logging fails + pass + + try: + from cortex.daemon_client import ( + DaemonClient, + DaemonConnectionError, + DaemonNotInstalledError, + ) + + client = DaemonClient() + response = ipc_func(client) + + # Update history with success/failure + if install_id: + try: + if response and response.success: + history.update_installation(install_id, InstallationStatus.SUCCESS) + else: + error_msg = ( + response.error if response and response.error else "IPC call failed" + ) + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + + return True, response + + except DaemonNotInstalledError as e: + error_msg = str(e) + cx_print(f"{error_msg}", "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return False, None + except DaemonConnectionError as e: + error_msg = str(e) + cx_print(f"{error_msg}", "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return False, None + except ImportError: + error_msg = "Daemon client not available." 
+ cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return False, None + except Exception as e: + error_msg = f"Unexpected error during {operation_name}: {e}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return False, None + + def _daemon_install(self, args: argparse.Namespace) -> int: + """Install the cortexd daemon using setup_daemon.py.""" + import subprocess + from pathlib import Path + + cx_header("Installing Cortex Daemon") + + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["cortex daemon install"], + start_time, + ) + except Exception as e: + cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") + + # Find setup_daemon.py + daemon_dir = Path(__file__).parent.parent / "daemon" + setup_script = daemon_dir / "scripts" / "setup_daemon.py" + + if not setup_script.exists(): + error_msg = f"Setup script not found at {setup_script}" + cx_print(error_msg, "error") + cx_print("Please ensure the daemon directory is present.", "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return 1 + + execute = getattr(args, "execute", False) + + if not execute: + cx_print("This will build and install the cortexd daemon.", "info") + cx_print("", "info") + cx_print("The setup wizard will:", "info") + cx_print(" 1. Check and install build dependencies", "info") + cx_print(" 2. Build the daemon from source", "info") + cx_print(" 3. Install systemd service files", "info") + cx_print(" 4. 
Enable and start the service", "info") + cx_print("", "info") + cx_print("Run with --execute to proceed:", "info") + cx_print(" cortex daemon install --execute", "dim") + if install_id: + try: + history.update_installation( + install_id, + InstallationStatus.FAILED, + "Operation cancelled (no --execute flag)", + ) + except Exception: + pass + return 0 + + # Run setup_daemon.py + cx_print("Running daemon setup wizard...", "info") + try: + result = subprocess.run( + [sys.executable, str(setup_script)], + check=False, + ) + + # Record completion + if install_id: + try: + if result.returncode == 0: + history.update_installation(install_id, InstallationStatus.SUCCESS) + else: + error_msg = f"Setup script returned exit code {result.returncode}" + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + + return result.returncode + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during daemon install: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during daemon install: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return 1 + + def _daemon_uninstall(self, args: argparse.Namespace) -> int: + """Uninstall the cortexd daemon.""" + import subprocess + from pathlib import Path + + cx_header("Uninstalling Cortex Daemon") + + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["cortex daemon uninstall"], + start_time, + ) + except Exception as e: + cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") + + execute = getattr(args, "execute", False) + + if not execute: + cx_print("This will stop and remove the cortexd daemon.", "warning") + cx_print("", "info") + cx_print("This will:", "info") + cx_print(" 1. Stop the cortexd service", "info") + cx_print(" 2. Disable the service", "info") + cx_print(" 3. Remove systemd unit files", "info") + cx_print(" 4. 
Remove the daemon binary", "info") + cx_print("", "info") + cx_print("Run with --execute to proceed:", "info") + cx_print(" cortex daemon uninstall --execute", "dim") + if install_id: + try: + history.update_installation( + install_id, + InstallationStatus.FAILED, + "Operation cancelled (no --execute flag)", + ) + except Exception: + pass + return 0 + + # Find uninstall script + daemon_dir = Path(__file__).parent.parent / "daemon" + uninstall_script = daemon_dir / "scripts" / "uninstall.sh" + + if uninstall_script.exists(): + cx_print("Running uninstall script...", "info") + try: + # Log the uninstall script command + if install_id: + try: + history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + [f"sudo bash {uninstall_script}"], + datetime.now(timezone.utc), + ) + except Exception: + pass + + result = subprocess.run( + ["sudo", "bash", str(uninstall_script)], + check=False, + ) + + # Record completion + if install_id: + try: + if result.returncode == 0: + history.update_installation(install_id, InstallationStatus.SUCCESS) + else: + error_msg = f"Uninstall script returned exit code {result.returncode}" + if result.stderr: + error_msg += f": {result.stderr[:500]}" + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + + return result.returncode + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during daemon uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during daemon uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + else: + # Manual uninstall + cx_print("Running manual uninstall...", "info") + commands = [ + ["sudo", "systemctl", "stop", "cortexd"], + ["sudo", "systemctl", "disable", "cortexd"], + ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.service"], + ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.socket"], + ["sudo", "rm", "-f", "/usr/local/bin/cortexd"], + ["sudo", "systemctl", "daemon-reload"], + ] + + try: + any_failed = False + error_messages = [] + + for cmd in commands: + cmd_str = " ".join(cmd) + cx_print(f" Running: {cmd_str}", "dim") + + # Log each critical command before execution + if install_id: + try: + history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + [cmd_str], + datetime.now(timezone.utc), + ) + except Exception: + pass + + result = subprocess.run(cmd, check=False, capture_output=True, text=True) + + # Track failures + if result.returncode != 0: + any_failed = True + error_msg = ( + f"Command '{cmd_str}' failed with return code {result.returncode}" + ) + if result.stderr: + error_msg += f": {result.stderr[:500]}" + error_messages.append(error_msg) + cx_print(f" Failed: {error_msg}", "error") + + # Update history and return based on overall success + if any_failed: + combined_error = "; ".join(error_messages) + cx_print("Daemon uninstall failed.", "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, combined_error + ) + except Exception: + pass + return 1 + else: + cx_print("Daemon uninstalled.", "success") + # Record success + if install_id: + try: + history.update_installation(install_id, InstallationStatus.SUCCESS) + except 
Exception: + pass + return 0 + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during manual uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during manual uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + + def _daemon_config(self) -> int: + """Get daemon configuration via IPC.""" + from rich.table import Table + + cx_header("Daemon Configuration") + + success, response = self._daemon_ipc_call("config.get", lambda c: c.config_get()) + if not success: + return 1 + + if response.success and response.result: + table = Table(title="Current Configuration", show_header=True) + table.add_column("Setting", style="cyan") + table.add_column("Value", style="green") + + for key, value in response.result.items(): + table.add_row(key, str(value)) + + console.print(table) + return 0 + else: + cx_print(f"Failed to get config: {response.error}", "error") + return 1 + + def _daemon_reload_config(self) -> int: + """Reload daemon configuration via IPC.""" + cx_header("Reloading Daemon Configuration") + + success, response = self._daemon_ipc_call("config.reload", lambda c: c.config_reload()) + if not success: + return 1 + + if response.success: + cx_print("Configuration reloaded successfully!", "success") + return 0 + else: + cx_print(f"Failed to reload config: {response.error}", "error") + return 1 + + def _daemon_version(self) -> int: + """Get daemon version via IPC.""" + cx_header("Daemon Version") + + success, response = self._daemon_ipc_call("version", lambda c: c.version()) + if not success: + return 1 + + if response.success and response.result: + name = response.result.get("name", "cortexd") + version = response.result.get("version", "unknown") + cx_print(f"{name} version {version}", "success") + return 0 + else: + cx_print(f"Failed to get version: {response.error}", "error") + return 1 + + def _daemon_ping(self) -> int: + """Test daemon connectivity via IPC.""" + import time + + cx_header("Daemon Ping") + + start = time.time() + success, response = self._daemon_ipc_call("ping", lambda c: c.ping()) + elapsed = (time.time() - start) * 1000 # ms + + if not success: + return 1 + + if response.success: + cx_print(f"Pong! 
Response time: {elapsed:.1f}ms", "success") + return 0 + else: + cx_print(f"Ping failed: {response.error}", "error") + return 1 + + def _daemon_shutdown(self) -> int: + """Request daemon shutdown via IPC.""" + cx_header("Requesting Daemon Shutdown") + + success, response = self._daemon_ipc_call("shutdown", lambda c: c.shutdown()) + if not success: + return 1 + + if response.success: + cx_print("Daemon shutdown requested successfully!", "success") + return 0 + cx_print(f"Failed to request shutdown: {response.error}", "error") + return 1 + + def _daemon_run_tests(self, args: argparse.Namespace) -> int: + """Run the daemon test suite.""" + import subprocess + from pathlib import Path + + cx_header("Daemon Tests") + + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["daemon.run-tests"], + start_time, + ) + except Exception: + # Continue even if audit logging fails + pass + + # Find daemon directory + daemon_dir = Path(__file__).parent.parent / "daemon" + build_dir = daemon_dir / "build" + tests_dir = build_dir / "tests" # Test binaries are in build/tests/ + + # Define test binaries + unit_tests = [ + "test_config", + "test_protocol", + "test_rate_limiter", + "test_logger", + "test_common", + ] + integration_tests = ["test_ipc_server", "test_handlers", "test_daemon"] + all_tests = unit_tests + integration_tests + + # Check if tests are built + def check_tests_built() -> tuple[bool, list[str]]: + """Check which test binaries exist.""" + existing = [] + for test in all_tests: + if (tests_dir / test).exists(): + existing.append(test) + return len(existing) > 0, existing + + tests_built, existing_tests = check_tests_built() + + if not tests_built: + error_msg = "Tests are not built." + cx_print(error_msg, "warning") + cx_print("", "info") + cx_print("To build tests, run the setup wizard with test building enabled:", "info") + cx_print("", "info") + cx_print(" [bold]python daemon/scripts/setup_daemon.py[/bold]", "info") + cx_print("", "info") + cx_print("When prompted, answer 'yes' to build the test suite.", "info") + cx_print("", "info") + cx_print("Or build manually:", "info") + cx_print(" cd daemon && ./scripts/build.sh Release --with-tests", "dim") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return 1 + + # Determine which tests to run + test_filter = getattr(args, "test", None) + run_unit = getattr(args, "unit", False) + run_integration = getattr(args, "integration", False) + verbose = getattr(args, "verbose", False) + + tests_to_run = [] + + if test_filter: + # Run a specific test + # Allow partial matching (e.g., "config" matches "test_config") + test_name = test_filter if test_filter.startswith("test_") else f"test_{test_filter}" + if test_name in existing_tests: + tests_to_run = [test_name] + else: + error_msg = f"Test '{test_filter}' not found or not built." 
+ cx_print(error_msg, "error") + cx_print("", "info") + cx_print("Available tests:", "info") + for t in existing_tests: + cx_print(f" • {t}", "info") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + elif run_unit and not run_integration: + tests_to_run = [t for t in unit_tests if t in existing_tests] + if not tests_to_run: + error_msg = "No unit tests built." + cx_print(error_msg, "warning") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + elif run_integration and not run_unit: + tests_to_run = [t for t in integration_tests if t in existing_tests] + if not tests_to_run: + error_msg = "No integration tests built." + cx_print(error_msg, "warning") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + pass + return 1 + else: + # Run all available tests + tests_to_run = existing_tests + + # Show what we're running + cx_print(f"Running {len(tests_to_run)} test(s)...", "info") + cx_print("", "info") + + # Use ctest for running tests + ctest_args = ["ctest", "--output-on-failure"] + + if verbose: + ctest_args.append("-V") + + # Filter specific tests if not running all + if test_filter or run_unit or run_integration: + # ctest uses -R for regex filtering + test_regex = "|".join(tests_to_run) + ctest_args.extend(["-R", test_regex]) + + result = subprocess.run( + ctest_args, + cwd=str(build_dir), + check=False, + ) + + if result.returncode == 0: + cx_print("", "info") + cx_print("All tests passed!", "success") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.SUCCESS) + except Exception: + pass + return 0 + else: + error_msg = f"Test execution failed with return code {result.returncode}" + cx_print("", "info") + cx_print("Some tests failed.", "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + pass + return 1 def show_rich_help(): @@ -3465,6 +4165,62 @@ def main(): update_subs.add_parser("backups", help="List available backups for rollback") # -------------------------- + # --- Daemon Commands --- + daemon_parser = subparsers.add_parser("daemon", help="Manage the cortexd background daemon") + daemon_subs = daemon_parser.add_subparsers(dest="daemon_action", help="Daemon actions") + + # daemon install [--execute] + daemon_install_parser = daemon_subs.add_parser( + "install", help="Install and enable the daemon service" + ) + daemon_install_parser.add_argument( + "--execute", action="store_true", help="Actually run the installation" + ) + + # daemon uninstall [--execute] + daemon_uninstall_parser = daemon_subs.add_parser( + "uninstall", help="Stop and remove the daemon service" + ) + daemon_uninstall_parser.add_argument( + "--execute", action="store_true", help="Actually run the uninstallation" + ) + + # daemon config - uses config.get IPC handler + daemon_subs.add_parser("config", help="Show current daemon configuration") + + # daemon reload-config - uses config.reload IPC handler + daemon_subs.add_parser("reload-config", help="Reload daemon configuration from disk") + + # daemon version - uses version IPC handler + daemon_subs.add_parser("version", help="Show daemon version") + + # daemon ping - uses ping IPC handler + daemon_subs.add_parser("ping", help="Test daemon connectivity") + + # daemon shutdown - uses 
shutdown IPC handler + daemon_subs.add_parser("shutdown", help="Request daemon shutdown") + + # daemon run-tests - run daemon test suite + daemon_run_tests_parser = daemon_subs.add_parser( + "run-tests", + help="Run daemon test suite (runs all tests by default when no filters are provided)", + ) + daemon_run_tests_parser.add_argument("--unit", action="store_true", help="Run only unit tests") + daemon_run_tests_parser.add_argument( + "--integration", action="store_true", help="Run only integration tests" + ) + daemon_run_tests_parser.add_argument( + "--test", + "-t", + type=str, + metavar="NAME", + help="Run a specific test (e.g., test_config, test_daemon)", + ) + daemon_run_tests_parser.add_argument( + "--verbose", "-v", action="store_true", help="Show verbose test output" + ) + # -------------------------- + # WiFi/Bluetooth Driver Matcher wifi_parser = subparsers.add_parser("wifi", help="WiFi/Bluetooth driver auto-matcher") wifi_parser.add_argument( @@ -3639,6 +4395,8 @@ def main(): return 0 if activate_license(args.license_key) else 1 elif args.command == "update": return cli.update(args) + elif args.command == "daemon": + return cli.daemon(args) elif args.command == "wifi": from cortex.wifi_driver import run_wifi_driver diff --git a/cortex/daemon_client.py b/cortex/daemon_client.py new file mode 100644 index 00000000..8c75546a --- /dev/null +++ b/cortex/daemon_client.py @@ -0,0 +1,251 @@ +""" +Cortex Daemon IPC Client + +Provides communication with the cortexd daemon via Unix socket IPC. +Supports the PR1 commands: ping, version, config.get, config.reload, shutdown. +""" + +import json +import socket +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +# Default socket path (matches daemon config) +DEFAULT_SOCKET_PATH = "/run/cortex/cortex.sock" +SOCKET_TIMEOUT = 5.0 # seconds +MAX_RESPONSE_SIZE = 65536 # 64KB + +# Paths to check if daemon is installed +DAEMON_BINARY_PATH = "/usr/local/bin/cortexd" +DAEMON_SERVICE_PATH = "/etc/systemd/system/cortexd.service" + + +def is_daemon_installed() -> bool: + """ + Check if the daemon is installed on the system. + + Returns: + True if daemon binary or service file exists, False otherwise. + """ + return Path(DAEMON_BINARY_PATH).exists() or Path(DAEMON_SERVICE_PATH).exists() + + +@dataclass +class DaemonResponse: + """Response from the daemon.""" + + success: bool + result: dict[str, Any] | None = None + error: str | None = None + error_code: int | None = None + timestamp: int | None = None + + @classmethod + def from_json(cls, data: dict[str, Any]) -> "DaemonResponse": + """Parse a JSON response from the daemon.""" + return cls( + success=data.get("success", False), + result=data.get("result"), + error=data.get("error", {}).get("message") if "error" in data else None, + error_code=data.get("error", {}).get("code") if "error" in data else None, + timestamp=data.get("timestamp"), + ) + + +class DaemonClient: + """ + IPC client for communicating with the cortexd daemon. + + Uses Unix domain sockets for local communication. + """ + + def __init__(self, socket_path: str = DEFAULT_SOCKET_PATH): + """ + Initialize the daemon client. + + Args: + socket_path: Path to the Unix socket. + """ + self.socket_path = socket_path + + def is_daemon_running(self) -> bool: + """ + Check if the daemon is running by testing socket connectivity. + + Returns: + True if daemon is reachable, False otherwise. 
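+
+        Example (illustrative; assumes cortexd is installed and running):
+            client = DaemonClient()
+            if client.is_daemon_running():
+                print("cortexd is up")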
+ """ + if not Path(self.socket_path).exists(): + return False + + try: + response = self.ping() + return response.success + except DaemonConnectionError: + return False + + def _send_request(self, method: str, params: dict[str, Any] | None = None) -> DaemonResponse: + """ + Send a request to the daemon and receive the response. + + Args: + method: The IPC method to call. + params: Optional parameters for the method. + + Returns: + DaemonResponse containing the result or error. + + Raises: + DaemonConnectionError: If unable to connect to daemon. + DaemonProtocolError: If response is invalid. + """ + request = { + "method": method, + "params": params or {}, + } + + try: + # Create Unix socket and use context manager for automatic cleanup + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: + sock.settimeout(SOCKET_TIMEOUT) + + # Connect to daemon + sock.connect(self.socket_path) + + # Send request + request_json = json.dumps(request) + sock.sendall(request_json.encode("utf-8")) + + # Receive response - loop to handle partial reads + # TCP is stream-based, so data may arrive in multiple chunks + chunks: list[bytes] = [] + total_received = 0 + + while total_received < MAX_RESPONSE_SIZE: + chunk = sock.recv(4096) + if not chunk: + # Connection closed by server + break + chunks.append(chunk) + total_received += len(chunk) + + # Try to parse - if valid JSON, we're done + # This handles the common case where the full message arrives + try: + response_data = b"".join(chunks) + response_json = json.loads(response_data.decode("utf-8")) + return DaemonResponse.from_json(response_json) + except json.JSONDecodeError: + # Incomplete JSON, continue receiving + continue + + # If we get here, either connection closed or max size reached + if not chunks: + raise DaemonProtocolError("Empty response from daemon") + + # Final attempt to parse + response_data = b"".join(chunks) + response_json = json.loads(response_data.decode("utf-8")) + return DaemonResponse.from_json(response_json) + + except FileNotFoundError: + # Check if daemon is installed at all + if not is_daemon_installed(): + raise DaemonNotInstalledError( + "The cortexd daemon is not installed. " + "Install it with: cortex daemon install --execute" + ) + raise DaemonConnectionError( + f"Daemon socket not found at {self.socket_path}. " + "The daemon is installed but not running. Try: sudo systemctl start cortexd" + ) + except ConnectionRefusedError: + raise DaemonConnectionError( + "Connection refused. The daemon is not running. Try: sudo systemctl start cortexd" + ) + except TimeoutError: + raise DaemonConnectionError("Connection timed out. The daemon may be unresponsive.") + except json.JSONDecodeError as e: + raise DaemonProtocolError(f"Invalid JSON response: {e}") + + # ========================================================================= + # PR1 IPC Methods + # ========================================================================= + + def ping(self) -> DaemonResponse: + """ + Ping the daemon to check connectivity. + + Returns: + DaemonResponse with {"pong": true} on success. + """ + return self._send_request("ping") + + def version(self) -> DaemonResponse: + """ + Get daemon version information. + + Returns: + DaemonResponse with {"version": "x.x.x", "name": "cortexd"}. + """ + return self._send_request("version") + + def config_get(self) -> DaemonResponse: + """ + Get current daemon configuration. + + Returns: + DaemonResponse with configuration key-value pairs. 
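+
+        Example (illustrative; mirrors how `cortex daemon config` renders the result):
+            resp = DaemonClient().config_get()
+            if resp.success and resp.result:
+                for key, value in resp.result.items():
+                    print(f"{key}: {value}")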
+ """ + return self._send_request("config.get") + + def config_reload(self) -> DaemonResponse: + """ + Reload daemon configuration from disk. + + Returns: + DaemonResponse with {"reloaded": true} on success. + """ + return self._send_request("config.reload") + + def shutdown(self) -> DaemonResponse: + """ + Request daemon shutdown. + + Returns: + DaemonResponse with {"shutdown": "initiated"} on success. + """ + return self._send_request("shutdown") + + +class DaemonNotInstalledError(Exception): + """Raised when the daemon is not installed.""" + + pass + + +class DaemonConnectionError(Exception): + """Raised when unable to connect to the daemon (but it is installed).""" + + pass + + +class DaemonProtocolError(Exception): + """Raised when the daemon response is invalid.""" + + pass + + +# Convenience function for quick checks +def get_daemon_client(socket_path: str = DEFAULT_SOCKET_PATH) -> DaemonClient: + """ + Get a daemon client instance. + + Args: + socket_path: Path to the Unix socket. + + Returns: + DaemonClient instance. + """ + return DaemonClient(socket_path) diff --git a/daemon/CMakeLists.txt b/daemon/CMakeLists.txt new file mode 100644 index 00000000..ab227033 --- /dev/null +++ b/daemon/CMakeLists.txt @@ -0,0 +1,163 @@ +cmake_minimum_required(VERSION 3.20) +project(cortexd VERSION 1.0.0 LANGUAGES CXX) + +# CMake policies +cmake_policy(SET CMP0135 NEW) + +# Require C++17 +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +# Build options +option(BUILD_TESTS "Build test suite" OFF) +option(BUILD_STATIC "Build static binary" OFF) +option(ENABLE_SANITIZERS "Enable address/undefined sanitizers" OFF) + +# Build type defaults to Release +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +# Compiler flags +add_compile_options(-Wall -Wextra -Wpedantic) + +if(CMAKE_BUILD_TYPE STREQUAL "Release") + add_compile_options(-O3 -DNDEBUG) +endif() + +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + add_compile_options(-g3 -O0) +endif() + +if(ENABLE_SANITIZERS) + add_compile_options(-fsanitize=address,undefined -fno-omit-frame-pointer) + add_link_options(-fsanitize=address,undefined) +endif() + +# Suppress harmless linker warnings +if(NOT APPLE) + string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--no-warnings") +endif() + +# Find required packages +find_package(PkgConfig REQUIRED) +pkg_check_modules(SYSTEMD REQUIRED libsystemd) +pkg_check_modules(OPENSSL REQUIRED openssl) +pkg_check_modules(UUID REQUIRED uuid) + +# Fetch nlohmann/json +include(FetchContent) +FetchContent_Declare(json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.3 + GIT_SHALLOW TRUE +) +FetchContent_MakeAvailable(json) + +# Fetch yaml-cpp +# Note: yaml-cpp 0.8.0 already has cmake_minimum_required(VERSION 3.5), no patching needed +FetchContent_Declare(yaml-cpp + GIT_REPOSITORY https://github.com/jbeder/yaml-cpp.git + GIT_TAG 0.8.0 + GIT_SHALLOW TRUE +) +set(YAML_CPP_BUILD_TESTS OFF CACHE BOOL "" FORCE) +set(YAML_CPP_BUILD_TOOLS OFF CACHE BOOL "" FORCE) +FetchContent_MakeAvailable(yaml-cpp) + +# Include directories +include_directories( + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${SYSTEMD_INCLUDE_DIRS} + ${OPENSSL_INCLUDE_DIRS} + ${UUID_INCLUDE_DIRS} +) + +# Source files +set(DAEMON_SOURCES + # Core + src/main.cpp + src/core/daemon.cpp + + # Config + src/config/config.cpp + + # IPC + src/ipc/server.cpp + src/ipc/protocol.cpp + src/ipc/handlers.cpp + + # Utils + src/utils/logger.cpp +) + +# Main daemon executable +add_executable(cortexd ${DAEMON_SOURCES}) + +# 
Compile definitions +target_compile_definitions(cortexd PRIVATE + CORTEXD_VERSION="${PROJECT_VERSION}" +) + +# Link libraries +target_link_libraries(cortexd + PRIVATE + ${SYSTEMD_LIBRARIES} + ${OPENSSL_LIBRARIES} + ${UUID_LIBRARIES} + nlohmann_json::nlohmann_json + yaml-cpp::yaml-cpp + pthread +) + +# Static build option +# Use partial static linking to allow libsystemd to use dlopen +if(BUILD_STATIC) + target_link_options(cortexd PRIVATE -static-libgcc -static-libstdc++) +endif() + +# Position independent code +set_target_properties(cortexd PROPERTIES + POSITION_INDEPENDENT_CODE ON +) + +# Installation +install(TARGETS cortexd + RUNTIME DESTINATION bin +) + +install(FILES + systemd/cortexd.service + DESTINATION lib/systemd/system +) + +install(FILES + config/cortexd.yaml.example + DESTINATION share/cortex +) + +# Print build summary +message(STATUS "") +message(STATUS "=== cortexd ${PROJECT_VERSION} build configuration ===") +message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +message(STATUS "C++ Standard: ${CMAKE_CXX_STANDARD}") +message(STATUS "Static build: ${BUILD_STATIC}") +message(STATUS "Tests: ${BUILD_TESTS}") +message(STATUS "Sanitizers: ${ENABLE_SANITIZERS}") +message(STATUS "") + +# Tests (optional) +if(BUILD_TESTS) + enable_testing() + + # Fetch Google Test + FetchContent_Declare(googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.14.0 + GIT_SHALLOW TRUE + ) + FetchContent_MakeAvailable(googletest) + + add_subdirectory(tests) +endif() diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 00000000..694748de --- /dev/null +++ b/daemon/README.md @@ -0,0 +1,416 @@ +# Cortexd - Core Daemon + +**cortexd** is the core daemon foundation for the Cortex AI Package Manager. The essential daemon infrastructure with Unix socket IPC and basic handlers are implemented. + +## Features + +- 🚀 **Fast Startup**: < 1 second startup time +- 💾 **Low Memory**: < 30MB idle +- 🔌 **Unix Socket IPC**: JSON-RPC protocol at `/run/cortex/cortex.sock` +- ⚙️ **systemd Integration**: Type=notify, watchdog, journald logging +- 📝 **Configuration Management**: YAML-based configuration with hot reload +- 🔧 **Basic IPC Handlers**: ping, version, config, shutdown + +## Quick Start + +### Recommended: Interactive Setup (Handles Everything) + +```bash +# Run the interactive setup wizard +python daemon/scripts/setup_daemon.py +``` + +The setup wizard will: +1. ✅ Check and install required system dependencies (cmake, build-essential, etc.) +2. ✅ Build the daemon from source +3. ✅ Install the systemd service + +### Manual Setup + +If you prefer manual installation: + +#### 1. Install System Dependencies + +```bash +sudo apt-get install -y \ + cmake build-essential libsystemd-dev \ + libssl-dev uuid-dev pkg-config libcap-dev +``` + +#### 2. Build + +```bash +cd daemon +./scripts/build.sh Release +``` + +#### 3. 
Install + +```bash +sudo ./scripts/install.sh +``` + +### Verify + +```bash +# Check status +systemctl status cortexd + +# View logs +journalctl -u cortexd -f + +# Test socket +echo '{"method":"ping"}' | socat - UNIX-CONNECT:/run/cortex/cortex.sock +``` + +## Architecture + +```text +┌─────────────────────────────────────────────────────────────┐ +│ cortex CLI (Python) │ +└───────────────────────────┬─────────────────────────────────┘ + │ Unix Socket (/run/cortex/cortex.sock) + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ cortexd (C++) │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ IPC Server │ │ +│ │ ─────────── │ │ +│ │ JSON-RPC Protocol │ │ +│ │ Basic Handlers: ping, version, config, shutdown │ │ +│ └─────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Config Manager (YAML) │ Logger │ Daemon Lifecycle │ │ +│ └─────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Directory Structure + +```text +daemon/ +├── include/cortexd/ # Public headers +│ ├── common.h # Types, constants +│ ├── config.h # Configuration +│ ├── logger.h # Logging +│ ├── core/ # Daemon core +│ │ ├── daemon.h +│ │ └── service.h +│ └── ipc/ # IPC layer +│ ├── server.h +│ ├── protocol.h +│ └── handlers.h # Basic handlers only +├── src/ # Implementation +│ ├── core/ # Daemon lifecycle +│ ├── config/ # Configuration management +│ ├── ipc/ # IPC server and handlers +│ └── utils/ # Logging utilities +├── systemd/ # Service files +├── config/ # Config templates +└── scripts/ # Build scripts +``` + +## CLI Commands + +Cortex provides integrated CLI commands to interact with the daemon: + +```bash +# Basic daemon commands +cortex daemon ping # Health check +cortex daemon version # Get daemon version +cortex daemon config # Show configuration +cortex daemon reload-config # Reload configuration +cortex daemon shutdown # Request daemon shutdown + +# Install/uninstall daemon +cortex daemon install +cortex daemon install --execute +cortex daemon uninstall +``` + +``` + +## IPC API + +### Available Methods + +| Method | Description | +|--------|-------------| +| `ping` | Health check | +| `version` | Get version info | +| `config.get` | Get configuration | +| `config.reload` | Reload config file | +| `shutdown` | Request shutdown | + +### Example + +```bash +# Ping the daemon +echo '{"method":"ping"}' | socat - UNIX-CONNECT:/run/cortex/cortex.sock + +# Response: +# { +# "success": true, +# "result": {"pong": true} +# } + +# Get version +echo '{"method":"version"}' | socat - UNIX-CONNECT:/run/cortex/cortex.sock + +# Response: +# { +# "success": true, +# "result": { +# "version": "1.0.0", +# "name": "cortexd" +# } +# } + +# Get configuration +echo '{"method":"config.get"}' | socat - UNIX-CONNECT:/run/cortex/cortex.sock +``` + +## Configuration + +Default config: `/etc/cortex/daemon.yaml` + +```yaml +socket: + path: /run/cortex/cortex.sock + timeout_ms: 5000 + +log_level: 1 # 0=DEBUG, 1=INFO, 2=WARN, 3=ERROR +``` + + +## Building from Source + +### Prerequisites + +The easiest way to install all prerequisites is using the setup wizard: + +```bash +python daemon/scripts/setup_daemon.py +``` + +The wizard automatically checks and installs these required system packages: + +| Package | Purpose | +|---------|---------| +| `cmake` | Build system generator | +| `build-essential` | GCC, G++, make, and other build tools | +| 
`libsystemd-dev` | systemd integration headers | +| `libssl-dev` | OpenSSL development libraries | +| `uuid-dev` | UUID generation libraries | +| `pkg-config` | Package configuration tool | +| `libcap-dev` | Linux capabilities library | + +#### Manual Prerequisite Installation + +If you prefer to install dependencies manually: + +```bash +# Ubuntu/Debian - Core dependencies +sudo apt-get update +sudo apt-get install -y \ + cmake \ + build-essential \ + libsystemd-dev \ + libssl-dev \ + uuid-dev \ + pkg-config \ + libcap-dev +``` + +### Build + +```bash +# Release build +./scripts/build.sh Release + +# Debug build +./scripts/build.sh Debug + +# Build with tests +./scripts/build.sh Release --with-tests + +# Manual build +mkdir build && cd build +cmake -DCMAKE_BUILD_TYPE=Release .. +make -j$(nproc) +``` + +## Testing + +### How Tests Work + +Tests run against a **static library** (`cortexd_lib`) containing all daemon code, allowing testing without installing the daemon as a systemd service. + +```text +┌──────────────────────────────────────────────────────────┐ +│ Test Executable │ +│ (e.g., test_config) │ +└──────────────────────────────────────────────────────────┘ + │ + ▼ +┌──────────────────────────────────────────────────────────┐ +│ cortexd_lib │ +│ (Static library with all daemon code) │ +│ │ +│ • Config, Logger, Daemon, IPCServer, Handlers... │ +│ • Same code that runs in the actual daemon │ +└──────────────────────────────────────────────────────────┘ +``` + +**Key Points:** +- **No daemon installation required** - Tests instantiate classes directly +- **No systemd needed** - Tests run in user space +- **Same code tested** - The library contains identical code to the daemon binary +- **Fast execution** - No service startup overhead + +### Test Types + +| Type | Purpose | Daemon Required? | +|------|---------|------------------| +| **Unit Tests** | Test individual classes/functions in isolation | No | +| **Integration Tests** | Test component interactions (IPC, handlers) | No | +| **End-to-End Tests** | Test the running daemon service | Yes (not yet implemented) | + +### Building Tests + +Tests are built separately from the main daemon. 
Use the `--with-tests` flag: + +```bash +./scripts/build.sh Release --with-tests +``` + +Or use the setup wizard and select "yes" when asked to build tests: + +```bash +python daemon/scripts/setup_daemon.py +``` + +### Running Tests + +**Using Cortex CLI (recommended):** + +```bash +# Run all tests +cortex daemon run-tests + +# Run only unit tests +cortex daemon run-tests --unit + +# Run only integration tests +cortex daemon run-tests --integration + +# Run a specific test +cortex daemon run-tests --test config +cortex daemon run-tests -t daemon + +# Verbose output +cortex daemon run-tests -v +``` + +**Using ctest directly:** + +```bash +cd daemon/build + +# Run all tests +ctest --output-on-failure + +# Run specific tests +ctest -R test_config --output-on-failure + +# Verbose output +ctest -V +``` + +### Test Structure + +| Test | Type | Description | +|------|------|-------------| +| `test_config` | Unit | Configuration loading and validation | +| `test_protocol` | Unit | IPC message serialization | +| `test_rate_limiter` | Unit | Request rate limiting | +| `test_logger` | Unit | Logging subsystem | +| `test_common` | Unit | Common constants and types | +| `test_ipc_server` | Integration | IPC server lifecycle | +| `test_handlers` | Integration | IPC request handlers | +| `test_daemon` | Integration | Daemon lifecycle and services | + +### Example: How Integration Tests Work + +```cpp +// test_daemon.cpp - Tests Daemon class without systemd + +TEST_F(DaemonTest, InitializeWithValidConfig) { + // Instantiate Daemon directly (no systemd) + auto& daemon = cortexd::Daemon::instance(); + + // Call methods and verify behavior + daemon.initialize(config_path_); + EXPECT_TRUE(daemon.is_initialized()); + + // Test config was loaded + auto config = daemon.config(); + EXPECT_EQ(config.socket_path, expected_path); +} +``` + +The test creates a temporary config file, instantiates the `Daemon` class directly in memory, and verifies its behavior - all without touching systemd or installing anything. + +## systemd Management + +```bash +# Start daemon +sudo systemctl start cortexd + +# Stop daemon +sudo systemctl stop cortexd + +# View status +sudo systemctl status cortexd + +# View logs +journalctl -u cortexd -f + +# Reload config +sudo systemctl reload cortexd + +# Enable at boot +sudo systemctl enable cortexd +``` + +## Performance + +| Metric | Target | Actual | +|--------|--------|--------| +| Startup time | < 1s | ~0.2-0.4s | +| Idle memory | < 30MB | ~20-30MB | +| Socket latency | < 50ms | ~5-15ms | + +## Security + +- Unix socket with 0666 permissions (local access only, not network accessible) +- No network exposure +- systemd hardening (NoNewPrivileges, ProtectSystem, etc.) +- Minimal attack surface (core daemon only) + +## Contributing + +1. Follow C++17 style +2. Add tests for new features +3. Update documentation +4. 
Test on Ubuntu 22.04+ + +## License + +Apache 2.0 - See [LICENSE](../LICENSE) + +## Support + +- Issues: [GitHub Issues](https://github.com/cortexlinux/cortex/issues) +- Discord: [Discord](https://discord.gg/uCqHvxjU83) diff --git a/daemon/config/cortexd.yaml.example b/daemon/config/cortexd.yaml.example new file mode 100644 index 00000000..406f5d0a --- /dev/null +++ b/daemon/config/cortexd.yaml.example @@ -0,0 +1,20 @@ +# Cortexd Daemon Configuration +# Copy this file to /etc/cortex/daemon.yaml or ~/.cortex/daemon.yaml +# +# - Socket and logging config are used in PR 1 +# - LLM, monitoring, alerts config will be used in PR 2 & PR 3 + +# Socket configuration +socket: + path: /run/cortex/cortex.sock + backlog: 16 + timeout_ms: 5000 + +# Rate limiting +rate_limit: + # Maximum IPC requests per second + max_requests_per_sec: 100 + +# Logging level +# 0=DEBUG, 1=INFO, 2=WARN, 3=ERROR +log_level: 1 diff --git a/daemon/include/cortexd/common.h b/daemon/include/cortexd/common.h new file mode 100644 index 00000000..4de71692 --- /dev/null +++ b/daemon/include/cortexd/common.h @@ -0,0 +1,103 @@ +/** + * @file common.h + * @brief Common types and constants for cortexd + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cortexd { + +// JSON type alias +using json = nlohmann::json; + +// Version info - CORTEXD_VERSION is defined by CMake from PROJECT_VERSION +#ifndef CORTEXD_VERSION +#define CORTEXD_VERSION "1.0.0" // Fallback for non-CMake builds +#endif +constexpr const char* VERSION = CORTEXD_VERSION; +constexpr const char* NAME = "cortexd"; + +// Socket constants +constexpr const char* DEFAULT_SOCKET_PATH = "/run/cortex/cortex.sock"; +constexpr int SOCKET_BACKLOG = 16; +constexpr int SOCKET_TIMEOUT_MS = 5000; +constexpr size_t MAX_MESSAGE_SIZE = 65536; // 64KB + +// Memory constraints (in MB) +constexpr int IDLE_MEMORY_MB = 50; +constexpr int ACTIVE_MEMORY_MB = 150; + +// Performance targets +constexpr int STARTUP_TIME_MS = 1000; +constexpr int CACHED_INFERENCE_MS = 100; + +// Monitoring intervals +constexpr int MONITORING_INTERVAL_SECONDS = 300; // 5 minutes +constexpr int ALERT_RETENTION_DAYS = 7; + +// Thresholds +constexpr double DISK_USAGE_THRESHOLD = 0.80; // 80% +constexpr double MEMORY_USAGE_THRESHOLD = 0.85; // 85% + +// Alert severity levels +enum class AlertSeverity { + INFO, + WARNING, + ERROR, + CRITICAL +}; + +// Alert types +enum class AlertType { + APT_UPDATES, + DISK_USAGE, + MEMORY_USAGE, + CVE_FOUND, + DEPENDENCY_CONFLICT, + SYSTEM_ERROR, + DAEMON_STATUS +}; + +// IPC command types +enum class CommandType { + STATUS, + ALERTS, + SHUTDOWN, + CONFIG_RELOAD, + HEALTH, + UNKNOWN +}; + +// Helper functions +std::string to_string(AlertSeverity severity); +std::string to_string(AlertType type); +AlertSeverity severity_from_string(const std::string& s); +AlertType alert_type_from_string(const std::string& s); +CommandType command_from_string(const std::string& cmd); + +// Struct for system health snapshot +struct HealthSnapshot { + std::chrono::system_clock::time_point timestamp; + double cpu_usage; + double memory_usage; + double disk_usage; + int active_processes; + int open_files; + bool llm_loaded; + int inference_queue_size; + int alerts_count; +}; + +// Clock type alias for consistency +using Clock = std::chrono::system_clock; + +} // namespace cortexd diff --git a/daemon/include/cortexd/config.h b/daemon/include/cortexd/config.h new file mode 100644 index 00000000..c25c0265 --- /dev/null +++ 
b/daemon/include/cortexd/config.h @@ -0,0 +1,148 @@ +/** + * @file config.h + * @brief Configuration management for cortexd (PR 1: Core Daemon) + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace cortexd { + +// Default configuration path +constexpr const char* DEFAULT_CONFIG_PATH = "/etc/cortex/daemon.yaml"; + +/** + * @brief Expand ~ to home directory in paths + */ +inline std::string expand_path(const std::string& path) { + if (path.empty() || path[0] != '~') { + return path; + } + const char* home = std::getenv("HOME"); + if (!home) { + return path; + } + return std::string(home) + path.substr(1); +} + +/** + * @brief Daemon configuration structure (PR 1: Core fields only) + */ +struct Config { + // Socket configuration + std::string socket_path = "/run/cortex/cortex.sock"; + int socket_backlog = 16; + int socket_timeout_ms = 5000; + + // Rate limiting + int max_requests_per_sec = 100; + + // Logging + int log_level = 1; // 0=DEBUG, 1=INFO, 2=WARN, 3=ERROR + + /** + * @brief Load configuration from YAML file + * @param path Path to configuration file + * @return Config if successful, nullopt on error + */ + static std::optional load(const std::string& path); + + /** + * @brief Save configuration to YAML file + * @param path Path to save to + * @return true if successful + */ + bool save(const std::string& path) const; + + /** + * @brief Get default configuration + */ + static Config defaults(); + + /** + * @brief Expand ~ in all path fields + */ + void expand_paths(); + + /** + * @brief Validate configuration + * @return Empty string if valid, error message otherwise + */ + std::string validate() const; +}; + +/** + * @brief Configuration manager singleton + * + * Thread-safe configuration management with change notification support. + */ +class ConfigManager { +public: + using ChangeCallback = std::function; + + /** + * @brief Get singleton instance + */ + static ConfigManager& instance(); + + /** + * @brief Load configuration from file + * @param path Path to configuration file + * @return true if loaded successfully + */ + bool load(const std::string& path); + + /** + * @brief Reload configuration from previously loaded path + * @return true if reloaded successfully + */ + bool reload(); + + /** + * @brief Get current configuration (returns copy for thread safety) + */ + Config get() const; + + /** + * @brief Register callback for configuration changes + * @param callback Function to call when config changes + */ + void on_change(ChangeCallback callback); + + // Delete copy/move + ConfigManager(const ConfigManager&) = delete; + ConfigManager& operator=(const ConfigManager&) = delete; + +private: + ConfigManager() = default; + + Config config_; + std::string config_path_; + mutable std::mutex mutex_; + std::vector callbacks_; + + /** + * @brief Notify all registered callbacks (acquires mutex internally) + */ + void notify_callbacks(); + + /** + * @brief Notify callbacks without acquiring mutex + * @param callbacks Copy of callbacks to invoke + * @param config Copy of config to pass to callbacks + * + * This method is used to invoke callbacks outside the lock to prevent + * deadlock if a callback calls ConfigManager::get() or other methods. 
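+     *
+     * Illustrative use of the callback API (assumed signature; not taken from
+     * an existing caller):
+     * @code
+     *   ConfigManager::instance().on_change([](const Config& new_cfg) {
+     *       // Re-entrant calls are safe here because callbacks run after
+     *       // the internal mutex is released.
+     *       auto snapshot = ConfigManager::instance().get();
+     *   });
+     * @endcode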
+ */ + void notify_callbacks_unlocked( + const std::vector& callbacks, + const Config& config); +}; + +} // namespace cortexd diff --git a/daemon/include/cortexd/core/daemon.h b/daemon/include/cortexd/core/daemon.h new file mode 100644 index 00000000..7f7a8bef --- /dev/null +++ b/daemon/include/cortexd/core/daemon.h @@ -0,0 +1,162 @@ +/** + * @file daemon.h + * @brief Main daemon class - coordinates all services + */ + + #pragma once + + #include "cortexd/core/service.h" + #include "cortexd/config.h" + #include "cortexd/common.h" + #include + #include + #include + #include + #include + + namespace cortexd { + + // Forward declarations + class IPCServer; + class SystemMonitor; + class LLMEngine; + class AlertManager; + + /** + * @brief Main daemon coordinator + * + * The Daemon class is a singleton that manages the lifecycle of all services, + * handles signals, and coordinates graceful shutdown. + */ + class Daemon { + public: + /** + * @brief Get singleton instance + */ + static Daemon& instance(); + + /** + * @brief Initialize the daemon with configuration + * @param config_path Path to YAML configuration file + * @return true if initialization successful + */ + bool initialize(const std::string& config_path); + + /** + * @brief Run the daemon main loop + * @return Exit code (0 = success) + * + * This method blocks until shutdown is requested. + */ + int run(); + + /** + * @brief Request graceful shutdown + */ + void request_shutdown(); + + /** + * @brief Check if daemon is running + */ + bool is_running() const { return running_.load(); } + + /** + * @brief Check if shutdown was requested + */ + bool shutdown_requested() const { return shutdown_requested_.load(); } + + /** + * @brief Register a service with the daemon + * @param service Service to register + */ + void register_service(std::unique_ptr service); + + /** + * @brief Get service by type + * @return Pointer to service or nullptr if not found + */ + template + T* get_service() { + for (auto& svc : services_) { + if (auto* ptr = dynamic_cast(svc.get())) { + return ptr; + } + } + return nullptr; + } + + /** + * @brief Get current configuration (returns copy for thread safety) + */ + Config config() const; + + /** + * @brief Get daemon uptime + */ + std::chrono::seconds uptime() const; + + /** + * @brief Notify systemd that daemon is ready + */ + void notify_ready(); + + /** + * @brief Notify systemd that daemon is stopping + */ + void notify_stopping(); + + /** + * @brief Send watchdog keepalive to systemd + */ + void notify_watchdog(); + + /** + * @brief Reload configuration + * @return true if successful + */ + bool reload_config(); + + /** + * @brief Reset daemon state for testing + * + * Clears all registered services and resets shutdown flag. + * Should only be used in test teardown to ensure clean state between tests. 
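+     *
+     * Typical use in a GoogleTest fixture (illustrative; actual test fixtures
+     * may differ):
+     * @code
+     *   void TearDown() override {
+     *       cortexd::Daemon::instance().reset();
+     *   }
+     * @endcode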
+ */ + void reset(); + + // Delete copy/move + Daemon(const Daemon&) = delete; + Daemon& operator=(const Daemon&) = delete; + + private: + Daemon() = default; + + std::vector> services_; + std::atomic running_{false}; + std::atomic shutdown_requested_{false}; + std::chrono::steady_clock::time_point start_time_; + + /** + * @brief Setup signal handlers + */ + void setup_signals(); + + /** + * @brief Start all registered services + * @return true if all services started + */ + bool start_services(); + + /** + * @brief Stop all running services + */ + void stop_services(); + + /** + * @brief Main event loop iteration + */ + void event_loop(); + }; + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/include/cortexd/core/service.h b/daemon/include/cortexd/core/service.h new file mode 100644 index 00000000..f53a771f --- /dev/null +++ b/daemon/include/cortexd/core/service.h @@ -0,0 +1,52 @@ +/** + * @file service.h + * @brief Base class for daemon services + */ + +#pragma once + +namespace cortexd { + +/** + * @brief Abstract base class for daemon services + * + * All daemon services (IPC server, system monitor, etc.) should inherit + * from this class to participate in the daemon lifecycle. + */ +class Service { +public: + virtual ~Service() = default; + + /** + * @brief Start the service + * @return true if started successfully + */ + virtual bool start() = 0; + + /** + * @brief Stop the service + */ + virtual void stop() = 0; + + /** + * @brief Get service name for logging + */ + virtual const char* name() const = 0; + + /** + * @brief Get service priority (higher = start earlier) + */ + virtual int priority() const { return 0; } + + /** + * @brief Check if service is currently running + */ + virtual bool is_running() const = 0; + + /** + * @brief Check if service is healthy + */ + virtual bool is_healthy() const { return is_running(); } +}; + +} // namespace cortexd diff --git a/daemon/include/cortexd/ipc/handlers.h b/daemon/include/cortexd/ipc/handlers.h new file mode 100644 index 00000000..0bf3eebb --- /dev/null +++ b/daemon/include/cortexd/ipc/handlers.h @@ -0,0 +1,36 @@ +/** + * @file handlers.h + * @brief IPC request handlers + */ + +#pragma once + +#include "cortexd/ipc/server.h" +#include "cortexd/ipc/protocol.h" + +namespace cortexd { + +/** + * @brief IPC request handlers + */ +class Handlers { +public: + /** + * @brief Register all handlers with IPC server + */ + static void register_all(IPCServer& server); + +private: + // Handler implementations + static Response handle_ping(const Request& req); + static Response handle_version(const Request& req); + + // Config handlers + static Response handle_config_get(const Request& req); + static Response handle_config_reload(const Request& req); + + // Daemon control + static Response handle_shutdown(const Request& req); +}; + +} // namespace cortexd \ No newline at end of file diff --git a/daemon/include/cortexd/ipc/protocol.h b/daemon/include/cortexd/ipc/protocol.h new file mode 100644 index 00000000..a35fdd27 --- /dev/null +++ b/daemon/include/cortexd/ipc/protocol.h @@ -0,0 +1,113 @@ +/** + * @file protocol.h + * @brief JSON-RPC protocol definitions for IPC + */ + + #pragma once + + #include "cortexd/common.h" + #include + #include + + namespace cortexd { + + /** + * @brief IPC request structure + */ + struct Request { + std::string method; + json params; + std::optional id; + + /** + * @brief Parse request from JSON string + * @param raw Raw JSON string + * @return Request if valid, std::nullopt on parse error + 
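+ *
+ * Example wire format (illustrative; field names follow the parser in
+ * protocol.cpp — "method" is required, "params" and "id" are optional):
+ * @code
+ * {"method": "ping", "params": {}, "id": "1"}
+ * @endcode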
*/ + static std::optional parse(const std::string& raw); + + /** + * @brief Serialize to JSON string + */ + std::string to_json() const; + }; + + /** + * @brief IPC response structure + */ + struct Response { + bool success = false; + json result; + std::string error; + int error_code = 0; + + /** + * @brief Serialize to JSON string + */ + std::string to_json() const; + + /** + * @brief Create success response + */ + static Response ok(json result = json::object()); + + /** + * @brief Create error response + */ + static Response err(const std::string& message, int code = -1); + }; + + /** + * @brief Supported IPC methods + */ + namespace Methods { + // Status and health + constexpr const char* STATUS = "status"; + constexpr const char* HEALTH = "health"; + constexpr const char* VERSION = "version"; + + // Alert management + constexpr const char* ALERTS = "alerts"; + constexpr const char* ALERTS_GET = "alerts.get"; + constexpr const char* ALERTS_ACK = "alerts.acknowledge"; + constexpr const char* ALERTS_DISMISS = "alerts.dismiss"; + + // Configuration + constexpr const char* CONFIG_GET = "config.get"; + constexpr const char* CONFIG_RELOAD = "config.reload"; + + // LLM operations + constexpr const char* LLM_STATUS = "llm.status"; + constexpr const char* LLM_LOAD = "llm.load"; + constexpr const char* LLM_UNLOAD = "llm.unload"; + constexpr const char* LLM_INFER = "llm.infer"; + + // Daemon control + constexpr const char* SHUTDOWN = "shutdown"; + constexpr const char* PING = "ping"; + } + + /** + * @brief Error codes for IPC responses + * + * JSON-RPC reserves -32768 to -32000 for standard errors. + * Custom application errors use positive integers (1-999). + */ + namespace ErrorCodes { + // JSON-RPC standard errors (reserved range: -32768 to -32000) + constexpr int PARSE_ERROR = -32700; + constexpr int INVALID_REQUEST = -32600; + constexpr int METHOD_NOT_FOUND = -32601; + constexpr int INVALID_PARAMS = -32602; + constexpr int INTERNAL_ERROR = -32603; + + // Custom application errors (non-reserved range: 1-999) + constexpr int LLM_NOT_LOADED = 100; + constexpr int LLM_BUSY = 101; + constexpr int RATE_LIMITED = 102; + constexpr int ALERT_NOT_FOUND = 103; + constexpr int CONFIG_ERROR = 104; + } + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/include/cortexd/ipc/server.h b/daemon/include/cortexd/ipc/server.h new file mode 100644 index 00000000..049b2f63 --- /dev/null +++ b/daemon/include/cortexd/ipc/server.h @@ -0,0 +1,139 @@ +/** + * @file server.h + * @brief Unix socket IPC server + */ + + #pragma once + + #include "cortexd/core/service.h" + #include "cortexd/ipc/protocol.h" + #include + #include + #include + #include + #include + #include + #include + #include + + namespace cortexd { + + /** + * @brief Request handler function type + */ + using RequestHandler = std::function; + + /** + * @brief Rate limiter for request throttling + */ + class RateLimiter { + public: + explicit RateLimiter(int max_per_second); + + /** + * @brief Check if request is allowed + * @return true if allowed, false if rate limited + */ + bool allow(); + + /** + * @brief Reset the rate limiter + */ + void reset(); + + private: + int max_per_second_; + int count_ = 0; + std::chrono::steady_clock::time_point window_start_; + std::mutex mutex_; + }; + + /** + * @brief Unix socket IPC server + */ + class IPCServer : public Service { + public: + /** + * @brief Construct server with socket path + * @param socket_path Path to Unix socket + * @param max_requests_per_sec Rate limit for requests + */ + 
explicit IPCServer(const std::string& socket_path, int max_requests_per_sec = 100); + ~IPCServer() override; + + // Service interface + bool start() override; + void stop() override; + const char* name() const override { return "IPCServer"; } + int priority() const override { return 100; } // Start first + bool is_running() const override { return running_.load(); } + bool is_healthy() const override; + + /** + * @brief Register a request handler for a method + * @param method Method name + * @param handler Handler function + */ + void register_handler(const std::string& method, RequestHandler handler); + + /** + * @brief Get number of connections served + */ + size_t connections_served() const { return connections_served_.load(); } + + /** + * @brief Get number of active connections + */ + size_t active_connections() const { return active_connections_.load(); } + + private: + std::string socket_path_; + int server_fd_ = -1; + std::atomic running_{false}; + std::unique_ptr accept_thread_; + + std::unordered_map handlers_; + std::mutex handlers_mutex_; + + RateLimiter rate_limiter_; + + std::atomic connections_served_{0}; + std::atomic active_connections_{0}; + + // Condition variable for waiting on in-flight handlers during stop() + std::condition_variable connections_cv_; + std::mutex connections_mutex_; + + /** + * @brief Create and bind the socket + */ + bool create_socket(); + + /** + * @brief Set socket permissions + */ + bool setup_permissions(); + + /** + * @brief Clean up socket file + */ + void cleanup_socket(); + + /** + * @brief Accept loop running in thread + */ + void accept_loop(); + + /** + * @brief Handle a single client connection + */ + void handle_client(int client_fd); + + /** + * @brief Dispatch request to handler + */ + Response dispatch(const Request& request); + }; + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/include/cortexd/logger.h b/daemon/include/cortexd/logger.h new file mode 100644 index 00000000..eb242e44 --- /dev/null +++ b/daemon/include/cortexd/logger.h @@ -0,0 +1,122 @@ +/** + * @file logger.h + * @brief Logging utilities for cortexd with journald support + */ + +#pragma once + +#include +#include + +namespace cortexd { + +// Syslog priority constants (from syslog.h) +namespace internal { + constexpr int SYSLOG_DEBUG = 7; + constexpr int SYSLOG_INFO = 6; + constexpr int SYSLOG_WARNING = 4; + constexpr int SYSLOG_ERR = 3; + constexpr int SYSLOG_CRIT = 2; +} + +// Logging levels +enum class LogLevel { + DEBUG = 0, + INFO = 1, + WARN = 2, + ERROR = 3, + CRITICAL = 4 +}; + +/** + * @brief Logging utilities with journald and stderr support + */ +class Logger { +public: + /** + * @brief Initialize the logger + * @param min_level Minimum log level to output + * @param use_journald If true, log to systemd journal; otherwise stderr + */ + static void init(LogLevel min_level = LogLevel::INFO, bool use_journald = true); + + /** + * @brief Shutdown the logger + */ + static void shutdown(); + + /** + * @brief Log a debug message + */ + static void debug(const std::string& component, const std::string& message); + + /** + * @brief Log an info message + */ + static void info(const std::string& component, const std::string& message); + + /** + * @brief Log a warning message + */ + static void warn(const std::string& component, const std::string& message); + + /** + * @brief Log an error message + */ + static void error(const std::string& component, const std::string& message); + + /** + * @brief Log a critical message + */ + static void 
critical(const std::string& component, const std::string& message); + + /** + * @brief Set the minimum log level + */ + static void set_level(LogLevel level); + + /** + * @brief Get the current log level + */ + static LogLevel get_level(); + +private: + static LogLevel min_level_; + static bool use_journald_; + static std::mutex mutex_; + static bool initialized_; + + /** + * @brief Log a message at specified level + */ + static void log(LogLevel level, const std::string& component, const std::string& message); + + /** + * @brief Log to systemd journal + */ + static void log_to_journald(LogLevel level, const std::string& component, const std::string& message); + + /** + * @brief Log to stderr + */ + static void log_to_stderr(LogLevel level, const std::string& component, const std::string& message); + + /** + * @brief Convert log level to syslog priority + */ + static int level_to_priority(LogLevel level); + + /** + * @brief Convert log level to string + */ + static const char* level_to_string(LogLevel level); +}; + +// Convenience macros for logging +#define LOG_DEBUG(component, message) cortexd::Logger::debug(component, message) +#define LOG_INFO(component, message) cortexd::Logger::info(component, message) +#define LOG_WARN(component, message) cortexd::Logger::warn(component, message) +#define LOG_ERROR(component, message) cortexd::Logger::error(component, message) +#define LOG_CRITICAL(component, message) cortexd::Logger::critical(component, message) + +} // namespace cortexd \ No newline at end of file diff --git a/daemon/scripts/build.sh b/daemon/scripts/build.sh new file mode 100755 index 00000000..72119526 --- /dev/null +++ b/daemon/scripts/build.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# Build script for cortexd daemon +# Usage: ./scripts/build.sh [Release|Debug] [--with-tests] + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +BUILD_TYPE="${1:-Release}" +BUILD_TESTS="OFF" +BUILD_DIR="${SCRIPT_DIR}/build" + +# Check for --with-tests flag +for arg in "$@"; do + if [ "$arg" = "--with-tests" ]; then + BUILD_TESTS="ON" + fi +done + +echo "=== Building cortexd ===" +echo "Build Type: $BUILD_TYPE" +echo "Build Tests: $BUILD_TESTS" +echo "Build Directory: $BUILD_DIR" +echo "" + +# Check for required tools +check_tool() { + if ! command -v "$1" &> /dev/null; then + echo "Error: $1 not found. Install with: $2" + exit 1 + fi +} + +echo "Checking build tools..." +check_tool cmake "sudo apt install cmake" +check_tool pkg-config "sudo apt install pkg-config" +check_tool g++ "sudo apt install build-essential" + +# Check for required libraries +check_lib() { + if ! pkg-config --exists "$1" 2>/dev/null; then + echo "Error: $1 not found. Install with: sudo apt install $2" + exit 1 + fi +} + +echo "Checking dependencies..." +check_lib libsystemd libsystemd-dev +check_lib openssl libssl-dev +check_lib uuid uuid-dev + +echo "" + +# Create build directory +mkdir -p "$BUILD_DIR" +cd "$BUILD_DIR" + +# Run CMake +echo "Running CMake..." +cmake -DCMAKE_BUILD_TYPE="$BUILD_TYPE" \ + -DBUILD_TESTS="$BUILD_TESTS" \ + "$SCRIPT_DIR" + +# Build +echo "" +echo "Building..." +make -j"$(nproc)" + +# Show result +echo "" +echo "=== Build Complete ===" +echo "" +echo "Binary: $BUILD_DIR/cortexd" +ls -lh "$BUILD_DIR/cortexd" +echo "" + +if [ "$BUILD_TESTS" = "ON" ]; then + echo "Tests built successfully!" 
+ echo "" + echo "To run tests:" + echo " cd $BUILD_DIR && ctest --output-on-failure" + echo " # Or: cd $BUILD_DIR && make run_tests" + echo "" +fi + +echo "To install: sudo ./scripts/install.sh" \ No newline at end of file diff --git a/daemon/scripts/install.sh b/daemon/scripts/install.sh new file mode 100755 index 00000000..9138cb52 --- /dev/null +++ b/daemon/scripts/install.sh @@ -0,0 +1,133 @@ +#!/bin/bash +# Installation script for cortexd daemon + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +BUILD_DIR="${SCRIPT_DIR}/build" + +echo "=== Installing cortexd ===" + +# Check if built +if [ ! -f "$BUILD_DIR/cortexd" ]; then + echo "Error: cortexd binary not found." + echo "Run: ./scripts/build.sh" + exit 1 +fi + +# Check if running as root +if [ "$EUID" -ne 0 ]; then + echo "Error: Installation requires root privileges" + echo "Please run: sudo ./scripts/install.sh" + exit 1 +fi + +# Get the actual user who invoked sudo (not root) +INSTALL_USER="${SUDO_USER:-$USER}" +if [ "$INSTALL_USER" = "root" ]; then + # Try to get the user from logname if SUDO_USER is not set + INSTALL_USER=$(logname 2>/dev/null || echo "root") +fi + +# Stop existing service if running +if systemctl is-active --quiet cortexd 2>/dev/null; then + echo "Stopping existing cortexd service..." + systemctl stop cortexd +fi + +# Install binary +echo "Installing binary to /usr/local/bin..." +install -m 0755 "$BUILD_DIR/cortexd" /usr/local/bin/cortexd + +# Install systemd files +# Note: We only install the service file, not a socket file. +# The daemon manages its own socket to avoid conflicts with systemd socket activation. +echo "Installing systemd service files..." +install -m 0644 "$SCRIPT_DIR/systemd/cortexd.service" /etc/systemd/system/ + +# Create config directory +echo "Creating configuration directory..." +mkdir -p /etc/cortex +if [ ! -f /etc/cortex/daemon.yaml ]; then + # SCRIPT_DIR points to daemon/, so config is at daemon/config/ + install -m 0644 "$SCRIPT_DIR/config/cortexd.yaml.example" /etc/cortex/daemon.yaml + echo " Created default config: /etc/cortex/daemon.yaml" +fi + +# Create cortex group for socket access +echo "Setting up cortex group for socket access..." +if ! getent group cortex >/dev/null 2>&1; then + groupadd cortex + echo " Created 'cortex' group" +else + echo " Group 'cortex' already exists" +fi + +# Add the installing user to the cortex group +if [ "$INSTALL_USER" != "root" ]; then + if id -nG "$INSTALL_USER" | grep -qw cortex; then + echo " User '$INSTALL_USER' is already in 'cortex' group" + else + usermod -aG cortex "$INSTALL_USER" + echo " Added user '$INSTALL_USER' to 'cortex' group" + GROUP_ADDED=1 + fi +fi + +# Create state directories +echo "Creating state directories..." +mkdir -p /var/lib/cortex +chown root:cortex /var/lib/cortex +chmod 0750 /var/lib/cortex + +mkdir -p /run/cortex +chown root:cortex /run/cortex +chmod 0755 /run/cortex + +# Create user config directory for installing user +if [ "$INSTALL_USER" != "root" ]; then + INSTALL_USER_HOME=$(getent passwd "$INSTALL_USER" | cut -d: -f6) + if [ -n "$INSTALL_USER_HOME" ]; then + mkdir -p "$INSTALL_USER_HOME/.cortex" + chown "$INSTALL_USER:$INSTALL_USER" "$INSTALL_USER_HOME/.cortex" + chmod 0700 "$INSTALL_USER_HOME/.cortex" + fi +fi + +# Also create root's config directory +mkdir -p /root/.cortex +chmod 0700 /root/.cortex + +# Reload systemd +echo "Reloading systemd daemon..." +systemctl daemon-reload + +# Enable service +echo "Enabling cortexd service..." 
+systemctl enable cortexd + +# Start service +echo "Starting cortexd service..." +if systemctl start cortexd; then + echo "" + echo "=== Installation Complete ===" + echo "" + systemctl status cortexd --no-pager || true + echo "" + echo "Commands:" + echo " Status: systemctl status cortexd" + echo " Logs: journalctl -u cortexd -f" + echo " Stop: systemctl stop cortexd" + echo " Config: /etc/cortex/daemon.yaml" + +else + echo "" + echo "=== Installation Complete (service failed to start) ===" + echo "" + echo "Troubleshooting:" + echo " 1. Check logs: journalctl -xeu cortexd -n 50" + echo " 2. Verify binary: /usr/local/bin/cortexd --version" + echo " 3. Check config: cat /etc/cortex/daemon.yaml" + echo "" + exit 1 +fi diff --git a/daemon/scripts/setup_daemon.py b/daemon/scripts/setup_daemon.py new file mode 100755 index 00000000..bee1a12d --- /dev/null +++ b/daemon/scripts/setup_daemon.py @@ -0,0 +1,545 @@ +import sqlite3 +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path + +from rich.console import Console +from rich.prompt import Confirm +from rich.table import Table + +console = Console() + +# Audit logging database path +AUDIT_DB_PATH = Path.home() / ".cortex" / "history.db" + + +def init_audit_db() -> bool: + """ + Initialize the audit database for installer actions. + + Creates ~/.cortex directory if needed and sets up a SQLite database + with an events table for logging installer actions. + + Returns: + bool: True if initialization succeeded, False otherwise. + """ + try: + # Create ~/.cortex directory + audit_dir = AUDIT_DB_PATH.parent + audit_dir.mkdir(parents=True, exist_ok=True) + + # Create/connect to database + conn = sqlite3.connect(str(AUDIT_DB_PATH)) + cursor = conn.cursor() + + # Create events table if it doesn't exist + cursor.execute( + """ + CREATE TABLE IF NOT EXISTS events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TEXT NOT NULL, + event_type TEXT NOT NULL, + details TEXT, + success INTEGER DEFAULT 1 + ) + """ + ) + + conn.commit() + conn.close() + return True + except (sqlite3.Error, OSError) as e: + console.print(f"[dim]Warning: Could not initialize audit database: {e}[/dim]") + return False + + +def log_audit_event(event_type: str, details: str, success: bool = True) -> None: + """ + Log an audit event to the history database. + + Inserts a timestamped row into the events table. Handles errors gracefully + without crashing the installer. + + Args: + event_type: Type of event (e.g., "install_dependencies", "build_daemon"). + details: Human-readable description of the event. + success: Whether the action succeeded (default True). 
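+
+    Example (illustrative; mirrors how the installer calls this helper):
+        log_audit_event("build_daemon", "Build daemon succeeded", success=True)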
+ """ + try: + # Ensure the database exists + if not AUDIT_DB_PATH.exists(): + if not init_audit_db(): + return + + conn = sqlite3.connect(str(AUDIT_DB_PATH)) + cursor = conn.cursor() + + timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") + cursor.execute( + "INSERT INTO events (timestamp, event_type, details, success) VALUES (?, ?, ?, ?)", + (timestamp, event_type, details, 1 if success else 0), + ) + + conn.commit() + conn.close() + except (sqlite3.Error, OSError) as e: + # Log to console but don't crash the installer + console.print(f"[dim]Warning: Could not log audit event: {e}[/dim]") + + +DAEMON_DIR = Path(__file__).parent.parent +BUILD_SCRIPT = DAEMON_DIR / "scripts" / "build.sh" +INSTALL_SCRIPT = DAEMON_DIR / "scripts" / "install.sh" +CONFIG_FILE = "/etc/cortex/daemon.yaml" +CONFIG_EXAMPLE = DAEMON_DIR / "config" / "cortexd.yaml.example" +CORTEX_ENV_FILE = Path.home() / ".cortex" / ".env" + +# System dependencies required to build the daemon (apt packages) +DAEMON_SYSTEM_DEPENDENCIES = [ + "cmake", + "build-essential", + "libsystemd-dev", + "libssl-dev", + "uuid-dev", + "pkg-config", + "libcap-dev", +] + + +def check_package_installed(package: str) -> bool: + """ + Check if a system package is installed via dpkg. + + Args: + package: Name of the apt package to check. + + Returns: + bool: True if the package is installed, False otherwise. + """ + result = subprocess.run( + ["dpkg", "-s", package], + capture_output=True, + text=True, + check=False, + ) + return result.returncode == 0 + + +def check_system_dependencies() -> tuple[list[str], list[str]]: + """ + Check which system dependencies are installed and which are missing. + + Returns: + tuple: (installed_packages, missing_packages) + """ + installed = [] + missing = [] + + for package in DAEMON_SYSTEM_DEPENDENCIES: + if check_package_installed(package): + installed.append(package) + else: + missing.append(package) + + return installed, missing + + +def install_system_dependencies(packages: list[str]) -> bool: + """ + Install system dependencies using apt-get. + + Args: + packages: List of package names to install. + + Returns: + bool: True if installation succeeded, False otherwise. + """ + if not packages: + return True + + console.print(f"\n[cyan]Installing {len(packages)} system package(s)...[/cyan]") + console.print(f"[dim]Packages: {', '.join(packages)}[/dim]\n") + + # Update package list first + console.print("[cyan]Updating package list...[/cyan]") + update_result = subprocess.run( + ["sudo", "apt-get", "update"], + check=False, + ) + if update_result.returncode != 0: + console.print("[yellow]Warning: apt-get update failed, continuing anyway...[/yellow]") + + # Install packages + install_cmd = ["sudo", "apt-get", "install", "-y"] + packages + result = subprocess.run(install_cmd, check=False) + + if result.returncode == 0: + console.print(f"[green]✓ Successfully installed {len(packages)} package(s)[/green]") + log_audit_event( + "install_system_dependencies", + f"Installed {len(packages)} package(s): {', '.join(packages)}", + success=True, + ) + return True + else: + console.print("[red]✗ Failed to install some packages[/red]") + log_audit_event( + "install_system_dependencies", + f"Failed to install package(s): {', '.join(packages)}", + success=False, + ) + return False + + +def setup_system_dependencies() -> bool: + """ + Check and install required system dependencies for building the daemon. + + Displays a table of dependencies with their status and prompts the user + to install missing ones. 
+ + Returns: + bool: True if all dependencies are satisfied, False otherwise. + """ + console.print("\n[bold cyan]Checking System Dependencies[/bold cyan]\n") + + installed, missing = check_system_dependencies() + + # Display dependency status table + table = Table(title="Build Dependencies") + table.add_column("Package", style="cyan") + table.add_column("Status", style="green") + table.add_column("Description") + + package_descriptions = { + "cmake": "Build system generator", + "build-essential": "GCC, G++, make, and other build tools", + "libsystemd-dev": "systemd integration headers", + "libssl-dev": "OpenSSL development libraries", + "uuid-dev": "UUID generation libraries", + "pkg-config": "Package configuration tool", + "libcap-dev": "Linux capabilities library", + } + + for package in DAEMON_SYSTEM_DEPENDENCIES: + status = "[green]✓ Installed[/green]" if package in installed else "[red]✗ Missing[/red]" + description = package_descriptions.get(package, "") + table.add_row(package, status, description) + + console.print(table) + + if not missing: + console.print("\n[green]✓ All system dependencies are installed![/green]") + return True + + console.print( + f"\n[yellow]⚠ Missing {len(missing)} required package(s): {', '.join(missing)}[/yellow]" + ) + + if Confirm.ask("\nDo you want to install the missing dependencies now?", default=True): + if install_system_dependencies(missing): + # Verify installation + _, still_missing = check_system_dependencies() + if still_missing: + console.print(f"[red]Some packages still missing: {', '.join(still_missing)}[/red]") + return False + return True + else: + return False + else: + console.print("[yellow]Cannot build daemon without required dependencies.[/yellow]") + console.print("\n[cyan]You can install them manually with:[/cyan]") + console.print(f"[dim] sudo apt-get install -y {' '.join(missing)}[/dim]\n") + return False + + +def check_daemon_built() -> bool: + """ + Check if the cortexd daemon binary has been built. + + Checks for the existence of the cortexd binary at DAEMON_DIR / "build" / "cortexd". + + Returns: + bool: True if the daemon binary exists, False otherwise. + """ + return (DAEMON_DIR / "build" / "cortexd").exists() + + +def clean_build() -> None: + """ + Remove the previous build directory to ensure a clean build. + + Removes DAEMON_DIR / "build" using sudo rm -rf. Prints status messages + to console. On failure, logs an error and calls sys.exit(1) to terminate. 
+ + Returns: + None + """ + build_dir = DAEMON_DIR / "build" + if not build_dir.exists(): + # Log cancelled operation + log_audit_event( + "clean_build", + f"Build directory does not exist: {build_dir}", + success=True, + ) + return + + console.print(f"[cyan]Removing previous build directory: {build_dir}[/cyan]") + result = subprocess.run( + ["sudo", "rm", "-rf", str(build_dir)], + capture_output=True, + text=True, + check=False, + ) + if result.returncode == 0: + # Log successful removal + log_audit_event( + "clean_build", + f"Successfully removed build directory: {build_dir}", + success=True, + ) + else: + # Log failure before exiting + error_details = f"returncode={result.returncode}" + if result.stderr: + error_details += f", stderr={result.stderr[:500]}" + log_audit_event( + "clean_build", + f"Failed to remove build directory: {build_dir}, {error_details}", + success=False, + ) + console.print("[red]Failed to remove previous build directory.[/red]") + if result.stderr: + console.print(f"[dim]Error: {result.stderr.strip()}[/dim]") + sys.exit(1) + + +def build_daemon(with_tests: bool = False) -> bool: + """ + Build the cortexd daemon from source. + + Runs the BUILD_SCRIPT (daemon/scripts/build.sh) with "Release" argument + using subprocess.run. Optionally builds tests. + + Args: + with_tests: If True, also build the test suite. + + Returns: + bool: True if the build completed successfully (exit code 0), False otherwise. + """ + if with_tests: + console.print("[cyan]Building the daemon with tests...[/cyan]") + cmd = ["bash", str(BUILD_SCRIPT), "Release", "--with-tests"] + else: + console.print("[cyan]Building the daemon...[/cyan]") + cmd = ["bash", str(BUILD_SCRIPT), "Release"] + + result = subprocess.run(cmd, check=False) + success = result.returncode == 0 + log_audit_event( + "build_daemon", + f"Build daemon {'with tests ' if with_tests else ''}{'succeeded' if success else 'failed'}", + success=success, + ) + return success + + +def run_tests() -> bool: + """ + Run the daemon test suite using ctest. + + Returns: + bool: True if all tests passed, False otherwise. + """ + build_dir = DAEMON_DIR / "build" + tests_dir = build_dir / "tests" + + if not (tests_dir / "test_config").exists(): + console.print("[yellow]Tests not built. Please rebuild with tests enabled.[/yellow]") + return False + + console.print("\n[cyan]Running daemon tests...[/cyan]\n") + result = subprocess.run( + ["ctest", "--output-on-failure"], + cwd=str(build_dir), + check=False, + ) + + success = result.returncode == 0 + log_audit_event( + "run_tests", + f"Test suite {'passed' if success else 'failed'}", + success=success, + ) + + if success: + console.print("\n[green]✓ All tests passed![/green]") + else: + console.print("\n[red]✗ Some tests failed.[/red]") + + return success + + +def check_tests_built() -> bool: + """ + Check if the test binaries have been built. + + Returns: + bool: True if test binaries exist, False otherwise. + """ + # Test binaries are in daemon/build/tests/ + return (DAEMON_DIR / "build" / "tests" / "test_config").exists() + + +def install_daemon() -> bool: + """ + Install the cortexd daemon system-wide. + + Runs the INSTALL_SCRIPT (daemon/scripts/install.sh) with sudo using + subprocess.run. + + Returns: + bool: True if the installation completed successfully (exit code 0), + False otherwise. 
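+
+    Example (illustrative; mirrors the call in main() below):
+        if not install_daemon():
+            sys.exit(1)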
+ """ + console.print("[cyan]Installing the daemon...[/cyan]") + result = subprocess.run(["sudo", str(INSTALL_SCRIPT)], check=False) + success = result.returncode == 0 + log_audit_event( + "install_daemon", + f"Install daemon {'succeeded' if success else 'failed'}", + success=success, + ) + return success + + +def main() -> int: + """ + Interactive setup wizard for the Cortex daemon. + + Guides the user through building and installing the cortexd daemon. + + Returns: + int: Exit code (0 for success, 1 for failure). The function calls sys.exit() + directly on failures, so the return value is primarily for documentation + and potential future refactoring. + """ + console.print( + "\n[bold cyan]╔══════════════════════════════════════════════════════════════╗[/bold cyan]" + ) + console.print( + "[bold cyan]║ Cortex Daemon Interactive Setup ║[/bold cyan]" + ) + console.print( + "[bold cyan]╚══════════════════════════════════════════════════════════════╝[/bold cyan]\n" + ) + + # Initialize audit database + init_audit_db() + log_audit_event("setup_started", "Cortex daemon interactive setup started") + + # Step 0: Check and install system dependencies + if not setup_system_dependencies(): + console.print("[red]Cannot proceed without required system dependencies.[/red]") + sys.exit(1) + + # Step 1: Build daemon + build_tests = False + + if not check_daemon_built(): + if Confirm.ask("Daemon not built. Do you want to build it now?"): + build_tests = Confirm.ask("Do you also want to build the test suite?", default=False) + if not build_daemon(with_tests=build_tests): + console.print("[red]Failed to build the daemon.[/red]") + sys.exit(1) + else: + console.print("[yellow]Cannot proceed without building the daemon.[/yellow]") + sys.exit(1) + else: + if Confirm.ask("Daemon already built. Do you want to rebuild it?"): + build_tests = Confirm.ask("Do you also want to build the test suite?", default=False) + clean_build() + if not build_daemon(with_tests=build_tests): + console.print("[red]Failed to build the daemon.[/red]") + sys.exit(1) + + # Step 1.5: Run tests if they were built or user wants to build them + if Confirm.ask("\nDo you want to run the test suite?", default=False): + if not check_tests_built(): + console.print("\n[yellow]Tests are not built.[/yellow]") + if Confirm.ask( + "Would you like to rebuild the daemon with tests enabled?", default=True + ): + clean_build() + if not build_daemon(with_tests=True): + console.print("[red]Failed to build the daemon with tests.[/red]") + sys.exit(1) + + # Verify tests were built successfully + if not check_tests_built(): + console.print( + "[red]Tests were not built successfully. Check the build output above.[/red]" + ) + sys.exit(1) + + # Run the tests now that they're built + console.print("\n[green]✓ Tests built successfully![/green]") + if not run_tests(): + if not Confirm.ask( + "[yellow]Some tests failed. Continue with installation anyway?[/yellow]", + default=False, + ): + console.print("[yellow]Installation cancelled.[/yellow]") + sys.exit(1) + else: + console.print("[dim]Skipping tests.[/dim]") + else: + # Tests are already built, just run them + if not run_tests(): + if not Confirm.ask( + "[yellow]Some tests failed. 
Continue with installation anyway?[/yellow]", + default=False, + ): + console.print("[yellow]Installation cancelled.[/yellow]") + sys.exit(1) + + # Step 2: Install daemon + if not install_daemon(): + console.print("[red]Failed to install the daemon.[/red]") + sys.exit(1) + + console.print( + "\n[bold green]╔══════════════════════════════════════════════════════════════╗[/bold green]" + ) + console.print( + "[bold green]║ Setup Completed Successfully! ║[/bold green]" + ) + console.print( + "[bold green]╚══════════════════════════════════════════════════════════════╝[/bold green]" + ) + console.print("\n[green]✓ Daemon installed successfully![/green]") + console.print("\n[cyan]Useful commands:[/cyan]") + console.print("[cyan] systemctl status cortexd # Check daemon status[/cyan]") + console.print("[cyan] journalctl -u cortexd -f # View daemon logs[/cyan]") + console.print("[cyan] cortex daemon ping # Test daemon connection[/cyan]") + console.print("[cyan] cortex daemon version # Get daemon version[/cyan]") + + if check_tests_built(): + console.print("\n[cyan]Test commands:[/cyan]") + console.print("[cyan] cortex daemon run-tests # Run all tests[/cyan]") + console.print("[cyan] cortex daemon run-tests --unit # Run unit tests only[/cyan]") + console.print( + "[cyan] cortex daemon run-tests --integration # Run integration tests[/cyan]" + ) + console.print("[cyan] cortex daemon run-tests -t config # Run specific test[/cyan]") + + console.print("") + + log_audit_event("setup_completed", "Setup completed successfully") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/daemon/src/config/config.cpp b/daemon/src/config/config.cpp new file mode 100644 index 00000000..2bda89db --- /dev/null +++ b/daemon/src/config/config.cpp @@ -0,0 +1,247 @@ +/** + * @file config.cpp + * @brief Configuration implementation with YAML support (PR 1: Core Daemon) + */ + +#include "cortexd/config.h" +#include "cortexd/logger.h" +#include +#include +#include + +namespace cortexd { + +std::optional Config::load(const std::string& path) { + try { + std::string expanded_path = expand_path(path); + + // Check if file exists + std::ifstream file(expanded_path); + if (!file.good()) { + LOG_WARN("Config", "Configuration file not found: " + expanded_path); + return std::nullopt; + } + + YAML::Node yaml = YAML::LoadFile(expanded_path); + Config config; + + // Socket configuration + if (yaml["socket"]) { + auto socket = yaml["socket"]; + if (socket["path"]) config.socket_path = socket["path"].as(); + if (socket["backlog"]) config.socket_backlog = socket["backlog"].as(); + if (socket["timeout_ms"]) config.socket_timeout_ms = socket["timeout_ms"].as(); + } + + // Rate limiting + if (yaml["rate_limit"]) { + auto rate = yaml["rate_limit"]; + if (rate["max_requests_per_sec"]) config.max_requests_per_sec = rate["max_requests_per_sec"].as(); + } + + // Logging + if (yaml["log_level"]) { + config.log_level = yaml["log_level"].as(); + } + + // Expand paths and validate + config.expand_paths(); + std::string error = config.validate(); + if (!error.empty()) { + LOG_ERROR("Config", "Configuration validation failed: " + error); + return std::nullopt; + } + + LOG_INFO("Config", "Configuration loaded from " + expanded_path); + return config; + + } catch (const YAML::Exception& e) { + LOG_ERROR("Config", "YAML parse error: " + std::string(e.what())); + return std::nullopt; + } catch (const std::exception& e) { + LOG_ERROR("Config", "Error loading config: " + std::string(e.what())); + return std::nullopt; + } +} + +bool 
Config::save(const std::string& path) const { + try { + std::string expanded_path = expand_path(path); + + YAML::Emitter out; + out << YAML::BeginMap; + + // Socket + out << YAML::Key << "socket" << YAML::Value << YAML::BeginMap; + out << YAML::Key << "path" << YAML::Value << socket_path; + out << YAML::Key << "backlog" << YAML::Value << socket_backlog; + out << YAML::Key << "timeout_ms" << YAML::Value << socket_timeout_ms; + out << YAML::EndMap; + + // Rate limiting + out << YAML::Key << "rate_limit" << YAML::Value << YAML::BeginMap; + out << YAML::Key << "max_requests_per_sec" << YAML::Value << max_requests_per_sec; + out << YAML::EndMap; + + // Logging + out << YAML::Key << "log_level" << YAML::Value << log_level; + + out << YAML::EndMap; + + std::ofstream file(expanded_path); + if (!file.good()) { + LOG_ERROR("Config", "Cannot write to " + expanded_path); + return false; + } + + file << out.c_str(); + LOG_INFO("Config", "Configuration saved to " + expanded_path); + return true; + + } catch (const std::exception& e) { + LOG_ERROR("Config", "Error saving config: " + std::string(e.what())); + return false; + } +} + +void Config::expand_paths() { + socket_path = expand_path(socket_path); +} + +std::string Config::validate() const { + if (socket_backlog <= 0) { + return "socket_backlog must be positive"; + } + if (socket_timeout_ms <= 0) { + return "socket_timeout_ms must be positive"; + } + if (max_requests_per_sec <= 0) { + return "max_requests_per_sec must be positive"; + } + if (log_level < 0 || log_level > 4) { + return "log_level must be between 0 and 4"; + } + return ""; // Valid +} + +Config Config::defaults() { + return Config{}; +} + +// ConfigManager implementation + +ConfigManager& ConfigManager::instance() { + static ConfigManager instance; + return instance; +} + +bool ConfigManager::load(const std::string& path) { + Config config_copy; + std::vector callbacks_copy; + + { + std::lock_guard lock(mutex_); + + auto loaded = Config::load(path); + if (!loaded) { + LOG_WARN("ConfigManager", "Using default configuration"); + config_ = Config::defaults(); + config_.expand_paths(); + return false; + } + + config_ = *loaded; + config_path_ = path; + + // Copy for callback invocation outside the lock + config_copy = config_; + callbacks_copy = callbacks_; + } + + // Invoke callbacks outside the lock to prevent deadlock + notify_callbacks_unlocked(callbacks_copy, config_copy); + return true; +} + +bool ConfigManager::reload() { + std::string path_copy; + Config config_copy; + std::vector callbacks_copy; + + { + std::lock_guard lock(mutex_); + + // Copy config_path_ while holding mutex to avoid TOCTOU race + if (config_path_.empty()) { + LOG_WARN("ConfigManager", "No config path set, cannot reload"); + return false; + } + path_copy = config_path_; + } + + // Load config outside the lock (Config::load is self-contained) + auto loaded = Config::load(path_copy); + if (!loaded) { + LOG_ERROR("ConfigManager", "Failed to reload configuration"); + return false; + } + + { + std::lock_guard lock(mutex_); + if (config_path_ != path_copy) { + LOG_WARN("ConfigManager", "Config path changed during reload; aborting"); + return false; + } + config_ = *loaded; + + // Copy for callback invocation outside the lock + config_copy = config_; + callbacks_copy = callbacks_; + } + + // Invoke callbacks outside the lock to prevent deadlock + notify_callbacks_unlocked(callbacks_copy, config_copy); + LOG_INFO("ConfigManager", "Configuration reloaded"); + return true; +} + +Config ConfigManager::get() const { + 
std::lock_guard lock(mutex_); + return config_; // Return copy for thread safety +} + +void ConfigManager::on_change(ChangeCallback callback) { + std::lock_guard lock(mutex_); + callbacks_.push_back(std::move(callback)); +} + +void ConfigManager::notify_callbacks() { + // This method should only be called while NOT holding the mutex + // For internal use, prefer notify_callbacks_unlocked + Config config_copy; + std::vector callbacks_copy; + + { + std::lock_guard lock(mutex_); + config_copy = config_; + callbacks_copy = callbacks_; + } + + notify_callbacks_unlocked(callbacks_copy, config_copy); +} + +void ConfigManager::notify_callbacks_unlocked( + const std::vector& callbacks, + const Config& config) { + // Invoke callbacks outside the lock to prevent deadlock if a callback + // calls ConfigManager::get() or other mutex-guarded methods + for (const auto& callback : callbacks) { + try { + callback(config); + } catch (const std::exception& e) { + LOG_ERROR("ConfigManager", "Callback error: " + std::string(e.what())); + } + } +} + +} // namespace cortexd diff --git a/daemon/src/core/daemon.cpp b/daemon/src/core/daemon.cpp new file mode 100644 index 00000000..51b3da4c --- /dev/null +++ b/daemon/src/core/daemon.cpp @@ -0,0 +1,254 @@ +/** + * @file daemon.cpp + * @brief Main daemon implementation + */ + + #include "cortexd/core/daemon.h" + #include "cortexd/logger.h" + #include + #include + #include + #include + + namespace cortexd { + + // Global daemon pointer for signal handler + static Daemon* g_daemon = nullptr; + + // Volatile flags for async-signal-safe signal handling + // Signal handlers should only set flags, not call complex functions + static volatile sig_atomic_t g_shutdown_requested = 0; + static volatile sig_atomic_t g_reload_requested = 0; + + // Signal handler function - only sets flags (async-signal-safe) + static void signal_handler(int sig) { + if (sig == SIGTERM || sig == SIGINT) { + g_shutdown_requested = 1; + } else if (sig == SIGHUP) { + g_reload_requested = 1; + } + } + + Daemon& Daemon::instance() { + static Daemon instance; + return instance; + } + + bool Daemon::initialize(const std::string& config_path) { + LOG_INFO("Daemon", "Initializing cortexd version " + std::string(VERSION)); + + // Load configuration + auto& config_mgr = ConfigManager::instance(); + if (!config_mgr.load(config_path)) { + LOG_WARN("Daemon", "Using default configuration"); + } + + // Set log level from config + const auto& config = config_mgr.get(); + switch (config.log_level) { + case 0: Logger::set_level(LogLevel::DEBUG); break; + case 1: Logger::set_level(LogLevel::INFO); break; + case 2: Logger::set_level(LogLevel::WARN); break; + case 3: Logger::set_level(LogLevel::ERROR); break; + default: Logger::set_level(LogLevel::INFO); break; + } + + // Setup signal handlers + setup_signals(); + + LOG_INFO("Daemon", "Initialization complete"); + return true; + } + + int Daemon::run() { + LOG_INFO("Daemon", "Starting daemon"); + start_time_ = std::chrono::steady_clock::now(); + + // Start all services + if (!start_services()) { + LOG_ERROR("Daemon", "Failed to start services"); + return 1; + } + + running_ = true; + + // Notify systemd that we're ready + notify_ready(); + + LOG_INFO("Daemon", "Daemon started successfully"); + + // Main event loop + while (!shutdown_requested_.load(std::memory_order_relaxed)) { + event_loop(); + } + + LOG_INFO("Daemon", "Shutdown requested, stopping services"); + + // Notify systemd we're stopping + notify_stopping(); + + // Stop all services + stop_services(); + + 
running_ = false; + + LOG_INFO("Daemon", "Daemon stopped"); + return 0; + } + +void Daemon::request_shutdown() { + shutdown_requested_.store(true, std::memory_order_relaxed); +} + + void Daemon::register_service(std::unique_ptr service) { + LOG_DEBUG("Daemon", "Registering service: " + std::string(service->name())); + services_.push_back(std::move(service)); + } + + Config Daemon::config() const { + return ConfigManager::instance().get(); + } + + std::chrono::seconds Daemon::uptime() const { + auto now = std::chrono::steady_clock::now(); + return std::chrono::duration_cast(now - start_time_); + } + + void Daemon::notify_ready() { + sd_notify(0, "READY=1\nSTATUS=Running"); + LOG_DEBUG("Daemon", "Notified systemd: READY"); + } + + void Daemon::notify_stopping() { + sd_notify(0, "STOPPING=1\nSTATUS=Shutting down"); + LOG_DEBUG("Daemon", "Notified systemd: STOPPING"); + } + + void Daemon::notify_watchdog() { + sd_notify(0, "WATCHDOG=1"); + } + +bool Daemon::reload_config() { + LOG_INFO("Daemon", "Reloading configuration"); + if (ConfigManager::instance().reload()) { + // Reapply log level from config + const auto& config = ConfigManager::instance().get(); + switch (config.log_level) { + case 0: Logger::set_level(LogLevel::DEBUG); break; + case 1: Logger::set_level(LogLevel::INFO); break; + case 2: Logger::set_level(LogLevel::WARN); break; + case 3: Logger::set_level(LogLevel::ERROR); break; + default: Logger::set_level(LogLevel::INFO); break; + } + LOG_INFO("Daemon", "Configuration reloaded successfully"); + return true; + } + LOG_ERROR("Daemon", "Failed to reload configuration"); + return false; +} + +void Daemon::reset() { + // Reset all singleton state for test isolation + // This ensures each test starts with a clean daemon state + + // Stop any running services first + stop_services(); + + // Clear all registered services + services_.clear(); + + // Reset state flags + shutdown_requested_.store(false, std::memory_order_relaxed); + running_.store(false, std::memory_order_relaxed); + + // Reset start time + start_time_ = std::chrono::steady_clock::time_point{}; + + LOG_DEBUG("Daemon", "Daemon state reset for testing"); +} + +void Daemon::setup_signals() { + g_daemon = this; + + struct sigaction sa; + sa.sa_handler = signal_handler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; + + sigaction(SIGTERM, &sa, nullptr); + sigaction(SIGINT, &sa, nullptr); + sigaction(SIGHUP, &sa, nullptr); + + // Ignore SIGPIPE (broken pipe from socket) + signal(SIGPIPE, SIG_IGN); + + LOG_DEBUG("Daemon", "Signal handlers installed"); + } + + bool Daemon::start_services() { + // Sort services by priority (higher first) + std::sort(services_.begin(), services_.end(), + [](const auto& a, const auto& b) { + return a->priority() > b->priority(); + }); + + for (auto& service : services_) { + LOG_INFO("Daemon", "Starting service: " + std::string(service->name())); + + if (!service->start()) { + LOG_ERROR("Daemon", "Failed to start service: " + std::string(service->name())); + // Stop already started services + stop_services(); + return false; + } + + LOG_INFO("Daemon", "Service started: " + std::string(service->name())); + } + + return true; + } + + void Daemon::stop_services() { + // Stop services in reverse order (lower priority first) + for (auto it = services_.rbegin(); it != services_.rend(); ++it) { + auto& service = *it; + if (service->is_running()) { + LOG_INFO("Daemon", "Stopping service: " + std::string(service->name())); + service->stop(); + LOG_INFO("Daemon", "Service stopped: " + 
std::string(service->name())); + } + } + } + + void Daemon::event_loop() { + // Check signal flags set by the async-signal-safe handler + // Perform the actual operations here in a normal thread context + if (g_shutdown_requested) { + g_shutdown_requested = 0; + LOG_INFO("Daemon", "Received shutdown signal"); + request_shutdown(); + return; + } + + if (g_reload_requested) { + g_reload_requested = 0; + LOG_INFO("Daemon", "Received SIGHUP, reloading configuration"); + reload_config(); + } + + // Check service health + for (auto& service : services_) { + if (service->is_running() && !service->is_healthy()) { + LOG_WARN("Daemon", "Service unhealthy: " + std::string(service->name())); + } + } + + // Send watchdog keepalive + notify_watchdog(); + + // Sleep for a short interval + std::this_thread::sleep_for(std::chrono::seconds(5)); + } + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/src/ipc/handlers.cpp b/daemon/src/ipc/handlers.cpp new file mode 100644 index 00000000..a789064b --- /dev/null +++ b/daemon/src/ipc/handlers.cpp @@ -0,0 +1,79 @@ +/** + * @file handlers.cpp + * @brief IPC request handler implementations + */ + +#include "cortexd/ipc/handlers.h" +#include "cortexd/core/daemon.h" +#include "cortexd/config.h" +#include "cortexd/logger.h" + +namespace cortexd { + +void Handlers::register_all(IPCServer& server) { + // Basic handlers only + server.register_handler(Methods::PING, [](const Request& req) { + return handle_ping(req); + }); + + server.register_handler(Methods::VERSION, [](const Request& req) { + return handle_version(req); + }); + + // Config handlers + server.register_handler(Methods::CONFIG_GET, [](const Request& req) { + return handle_config_get(req); + }); + + server.register_handler(Methods::CONFIG_RELOAD, [](const Request& req) { + return handle_config_reload(req); + }); + + // Daemon control + server.register_handler(Methods::SHUTDOWN, [](const Request& req) { + return handle_shutdown(req); + }); + + LOG_INFO("Handlers", "Registered 5 core IPC handlers"); +} + +Response Handlers::handle_ping(const Request& /*req*/) { + return Response::ok({{"pong", true}}); +} + +Response Handlers::handle_version(const Request& /*req*/) { + return Response::ok({ + {"version", VERSION}, + {"name", NAME} + }); +} + +Response Handlers::handle_config_get(const Request& /*req*/) { + const auto& config = ConfigManager::instance().get(); + + // PR 1: Return only core daemon configuration + json result = { + {"socket_path", config.socket_path}, + {"socket_backlog", config.socket_backlog}, + {"socket_timeout_ms", config.socket_timeout_ms}, + {"max_requests_per_sec", config.max_requests_per_sec}, + {"log_level", config.log_level} + }; + + return Response::ok(result); +} + +Response Handlers::handle_config_reload(const Request& /*req*/) { + if (Daemon::instance().reload_config()) { + return Response::ok({{"reloaded", true}}); + } + return Response::err("Failed to reload configuration", ErrorCodes::CONFIG_ERROR); +} + +Response Handlers::handle_shutdown(const Request& /*req*/) { + LOG_INFO("Handlers", "Shutdown requested via IPC"); + Daemon::instance().request_shutdown(); + return Response::ok({{"shutdown", "initiated"}}); +} + +} // namespace cortexd \ No newline at end of file diff --git a/daemon/src/ipc/protocol.cpp b/daemon/src/ipc/protocol.cpp new file mode 100644 index 00000000..ea503eed --- /dev/null +++ b/daemon/src/ipc/protocol.cpp @@ -0,0 +1,91 @@ +/** + * @file protocol.cpp + * @brief IPC protocol implementation + */ + + #include "cortexd/ipc/protocol.h" + 
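+ // Example wire-level responses (added commentary; shapes follow
+ // Response::to_json() below, timestamp value is arbitrary):
+ //   {"success": true,  "timestamp": 1700000000, "result": {"pong": true}}
+ //   {"success": false, "timestamp": 1700000000,
+ //    "error": {"message": "Method not found: foo", "code": -32601}}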
#include "cortexd/logger.h" + + namespace cortexd { + + std::optional Request::parse(const std::string& raw) { + try { + auto j = json::parse(raw); + + Request req; + + // Method is required + if (!j.contains("method") || !j["method"].is_string()) { + LOG_WARN("Protocol", "Request missing 'method' field"); + return std::nullopt; + } + req.method = j["method"].get(); + + // Params are optional + if (j.contains("params")) { + req.params = j["params"]; + } else { + req.params = json::object(); + } + + // ID is optional + if (j.contains("id")) { + if (j["id"].is_string()) { + req.id = j["id"].get(); + } else if (j["id"].is_number()) { + req.id = std::to_string(j["id"].get()); + } + } + + return req; + + } catch (const json::exception& e) { + LOG_WARN("Protocol", "JSON parse error: " + std::string(e.what())); + return std::nullopt; + } + } + + std::string Request::to_json() const { + json j; + j["method"] = method; + j["params"] = params; + if (id) { + j["id"] = *id; + } + return j.dump(); + } + + std::string Response::to_json() const { + json j; + j["success"] = success; + j["timestamp"] = Clock::to_time_t(Clock::now()); + + if (success) { + j["result"] = result; + } else { + j["error"] = { + {"message", error}, + {"code", error_code} + }; + } + + return j.dump(); + } + + Response Response::ok(json result) { + Response resp; + resp.success = true; + resp.result = std::move(result); + return resp; + } + + Response Response::err(const std::string& message, int code) { + Response resp; + resp.success = false; + resp.error = message; + resp.error_code = code; + return resp; + } + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/src/ipc/server.cpp b/daemon/src/ipc/server.cpp new file mode 100644 index 00000000..1507034b --- /dev/null +++ b/daemon/src/ipc/server.cpp @@ -0,0 +1,322 @@ +/** + * @file server.cpp + * @brief Unix socket IPC server implementation + */ + + #include "cortexd/ipc/server.h" + #include "cortexd/logger.h" + #include + #include + #include + #include + #include + #include + #include + + namespace cortexd { + + // RateLimiter implementation + + RateLimiter::RateLimiter(int max_per_second) + : max_per_second_(max_per_second) + , window_start_(std::chrono::steady_clock::now()) { + } + + bool RateLimiter::allow() { + std::lock_guard lock(mutex_); + + auto now = std::chrono::steady_clock::now(); + auto elapsed = std::chrono::duration_cast(now - window_start_); + + // Reset window every second + if (elapsed.count() >= 1000) { + count_ = 0; + window_start_ = now; + } + + if (count_ >= max_per_second_) { + return false; + } + + count_++; + return true; + } + + void RateLimiter::reset() { + std::lock_guard lock(mutex_); + count_ = 0; + window_start_ = std::chrono::steady_clock::now(); + } + + // IPCServer implementation + + IPCServer::IPCServer(const std::string& socket_path, int max_requests_per_sec) + : socket_path_(socket_path) + , rate_limiter_(max_requests_per_sec) { + } + + IPCServer::~IPCServer() { + stop(); + } + + bool IPCServer::start() { + if (running_) { + return true; + } + + if (!create_socket()) { + return false; + } + + running_ = true; + accept_thread_ = std::make_unique([this] { accept_loop(); }); + + LOG_INFO("IPCServer", "Started on " + socket_path_); + return true; + } + + void IPCServer::stop() { + if (!running_) { + return; + } + + running_ = false; + + // Shutdown socket to unblock accept() and stop new connections + if (server_fd_ != -1) { + shutdown(server_fd_, SHUT_RDWR); + } + + // Wait for accept thread + if (accept_thread_ && 
accept_thread_->joinable()) { + accept_thread_->join(); + } + + // Wait for all in-flight handlers to finish before cleanup + // This prevents dangling references to server state + { + std::unique_lock lock(connections_mutex_); + connections_cv_.wait(lock, [this] { + return active_connections_.load() == 0; + }); + } + + cleanup_socket(); + LOG_INFO("IPCServer", "Stopped"); + } + + bool IPCServer::is_healthy() const { + return running_.load() && server_fd_ != -1; + } + + void IPCServer::register_handler(const std::string& method, RequestHandler handler) { + std::lock_guard lock(handlers_mutex_); + handlers_[method] = std::move(handler); + LOG_DEBUG("IPCServer", "Registered handler for: " + method); + } + + bool IPCServer::create_socket() { + // Create socket + server_fd_ = socket(AF_UNIX, SOCK_STREAM, 0); + if (server_fd_ == -1) { + LOG_ERROR("IPCServer", "Failed to create socket: " + std::string(strerror(errno))); + return false; + } + + // Set socket options + int opt = 1; + setsockopt(server_fd_, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); + + // Remove existing socket file + if (std::filesystem::exists(socket_path_)) { + std::filesystem::remove(socket_path_); + LOG_DEBUG("IPCServer", "Removed existing socket file"); + } + + // Create parent directory if needed + auto parent = std::filesystem::path(socket_path_).parent_path(); + if (!parent.empty() && !std::filesystem::exists(parent)) { + std::filesystem::create_directories(parent); + } + + // Bind socket + struct sockaddr_un addr; + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + + // Check socket path length before copying to prevent silent truncation + if (socket_path_.size() > sizeof(addr.sun_path) - 1) { + LOG_ERROR("IPCServer", "Socket path too long: " + socket_path_ + " (max " + + std::to_string(sizeof(addr.sun_path) - 1) + " bytes)"); + close(server_fd_); + server_fd_ = -1; + return false; + } + + strncpy(addr.sun_path, socket_path_.c_str(), sizeof(addr.sun_path) - 1); + addr.sun_path[sizeof(addr.sun_path) - 1] = '\0'; // Ensure null termination + + if (bind(server_fd_, (struct sockaddr*)&addr, sizeof(addr)) == -1) { + LOG_ERROR("IPCServer", "Failed to bind socket: " + std::string(strerror(errno))); + close(server_fd_); + server_fd_ = -1; + return false; + } + + // Listen + if (listen(server_fd_, SOCKET_BACKLOG) == -1) { + LOG_ERROR("IPCServer", "Failed to listen: " + std::string(strerror(errno))); + close(server_fd_); + server_fd_ = -1; + return false; + } + + return setup_permissions(); + } + +bool IPCServer::setup_permissions() { + // Set socket permissions to 0666 (world read/write) + // This is safe for Unix domain sockets as they are local-only (not network accessible). + // The socket directory (/run/cortex/) provides additional access control if needed. 
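+ // Note (added commentary, not in the original source): install.sh in this PR
+ // creates /run/cortex owned root:cortex, so a stricter alternative would be
+ // 0660 here plus requiring clients to be members of the "cortex" group.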
+ if (chmod(socket_path_.c_str(), 0666) == -1) { + LOG_WARN("IPCServer", "Failed to set socket permissions: " + std::string(strerror(errno))); + // Continue anyway + } + return true; +} + + void IPCServer::cleanup_socket() { + if (server_fd_ != -1) { + close(server_fd_); + server_fd_ = -1; + } + + if (std::filesystem::exists(socket_path_)) { + std::filesystem::remove(socket_path_); + } + } + + void IPCServer::accept_loop() { + LOG_DEBUG("IPCServer", "Accept loop started"); + + while (running_) { + int client_fd = accept(server_fd_, nullptr, nullptr); + + if (client_fd == -1) { + if (running_) { + LOG_ERROR("IPCServer", "Accept failed: " + std::string(strerror(errno))); + } + continue; + } + + // Set socket timeout + struct timeval timeout; + timeout.tv_sec = SOCKET_TIMEOUT_MS / 1000; + timeout.tv_usec = (SOCKET_TIMEOUT_MS % 1000) * 1000; + setsockopt(client_fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout)); + setsockopt(client_fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + + // Handle client (could be async in future) + handle_client(client_fd); + } + + LOG_DEBUG("IPCServer", "Accept loop ended"); + } + + void IPCServer::handle_client(int client_fd) { + { + std::lock_guard lock(connections_mutex_); + active_connections_++; + connections_served_++; + } + + try { + // Read request + char buffer[MAX_MESSAGE_SIZE]; + ssize_t bytes = recv(client_fd, buffer, sizeof(buffer) - 1, 0); + + if (bytes <= 0) { + LOG_DEBUG("IPCServer", "Client disconnected without data"); + close(client_fd); + { + std::lock_guard lock(connections_mutex_); + active_connections_--; + } + connections_cv_.notify_all(); + return; + } + + buffer[bytes] = '\0'; + std::string raw_request(buffer); + LOG_DEBUG("IPCServer", "Received: " + raw_request); + + // Check rate limit + if (!rate_limiter_.allow()) { + LOG_WARN("IPCServer", "Rate limit exceeded"); + auto resp = Response::err("Rate limit exceeded", ErrorCodes::RATE_LIMITED); + std::string response_str = resp.to_json(); + send(client_fd, response_str.c_str(), response_str.length(), 0); + close(client_fd); + { + std::lock_guard lock(connections_mutex_); + active_connections_--; + } + connections_cv_.notify_all(); + return; + } + + // Parse request + auto request = Request::parse(raw_request); + Response response; + + if (!request) { + response = Response::err("Invalid request format", ErrorCodes::PARSE_ERROR); + } else { + response = dispatch(*request); + } + + // Send response + std::string response_str = response.to_json(); + LOG_DEBUG("IPCServer", "Sending: " + response_str); + + if (send(client_fd, response_str.c_str(), response_str.length(), 0) == -1) { + LOG_ERROR("IPCServer", "Failed to send response: " + std::string(strerror(errno))); + } + + } catch (const std::exception& e) { + LOG_ERROR("IPCServer", "Exception handling client: " + std::string(e.what())); + auto resp = Response::err(e.what(), ErrorCodes::INTERNAL_ERROR); + std::string response_str = resp.to_json(); + send(client_fd, response_str.c_str(), response_str.length(), 0); + } + + close(client_fd); + { + std::lock_guard lock(connections_mutex_); + active_connections_--; + } + connections_cv_.notify_all(); + } + + Response IPCServer::dispatch(const Request& request) { + std::lock_guard lock(handlers_mutex_); + + auto it = handlers_.find(request.method); + if (it == handlers_.end()) { + LOG_WARN("IPCServer", "Unknown method: " + request.method); + return Response::err("Method not found: " + request.method, ErrorCodes::METHOD_NOT_FOUND); + } + + LOG_INFO("IPCServer", "Handler found, 
invoking..."); + try { + Response resp = it->second(request); + LOG_INFO("IPCServer", "Handler completed successfully"); + return resp; + } catch (const std::exception& e) { + LOG_ERROR("IPCServer", "Handler error for " + request.method + ": " + e.what()); + return Response::err(e.what(), ErrorCodes::INTERNAL_ERROR); + } + } + + } // namespace cortexd + \ No newline at end of file diff --git a/daemon/src/main.cpp b/daemon/src/main.cpp new file mode 100644 index 00000000..98d6cd9b --- /dev/null +++ b/daemon/src/main.cpp @@ -0,0 +1,125 @@ +/** + * @file main.cpp + * @brief cortexd daemon entry point + */ + +#include "cortexd/core/daemon.h" +#include "cortexd/ipc/server.h" +#include "cortexd/ipc/handlers.h" +#include "cortexd/logger.h" +#include "cortexd/config.h" +#include "cortexd/common.h" +#include +#include + + using namespace cortexd; + + void print_version() { + std::cout << NAME << " " << VERSION << std::endl; + } + + void print_usage(const char* prog) { + std::cout << "Usage: " << prog << " [options]\n\n" + << "Cortex AI Package Manager Daemon\n\n" + << "Options:\n" + << " -c, --config PATH Configuration file path\n" + << " (default: " << DEFAULT_CONFIG_PATH << ")\n" + << " -v, --verbose Enable debug logging\n" + << " -f, --foreground Run in foreground (don't daemonize)\n" + << " -h, --help Show this help message\n" + << " --version Show version information\n" + << "\n" + << "Examples:\n" + << " " << prog << " Start with default config\n" + << " " << prog << " -c /etc/cortex/custom.yaml\n" + << " " << prog << " -v Start with debug logging\n" + << "\n" + << "systemd integration:\n" + << " systemctl start cortexd Start the daemon\n" + << " systemctl stop cortexd Stop the daemon\n" + << " systemctl status cortexd Check status\n" + << " journalctl -u cortexd -f View logs\n" + << std::endl; + } + + int main(int argc, char* argv[]) { + std::string config_path = DEFAULT_CONFIG_PATH; + bool verbose = false; + bool foreground = false; + + // Parse command line options + static struct option long_options[] = { + {"config", required_argument, nullptr, 'c'}, + {"verbose", no_argument, nullptr, 'v'}, + {"foreground", no_argument, nullptr, 'f'}, + {"help", no_argument, nullptr, 'h'}, + {"version", no_argument, nullptr, 'V'}, + {nullptr, 0, nullptr, 0} + }; + + int opt; + while ((opt = getopt_long(argc, argv, "c:vfhV", long_options, nullptr)) != -1) { + switch (opt) { + case 'c': + config_path = optarg; + break; + case 'v': + verbose = true; + break; + case 'f': + foreground = true; + break; + case 'h': + print_usage(argv[0]); + return 0; + case 'V': + print_version(); + return 0; + default: + print_usage(argv[0]); + return 1; + } + } + + // Initialize logging + // Use journald unless in foreground mode + Logger::init( + verbose ? 
LogLevel::DEBUG : LogLevel::INFO, + !foreground // Use journald when not in foreground + ); + + LOG_INFO("main", "cortexd starting - version " + std::string(VERSION)); + + // Get daemon instance + auto& daemon = Daemon::instance(); + + // Initialize daemon with config + if (!daemon.initialize(config_path)) { + LOG_ERROR("main", "Failed to initialize daemon"); + return 1; + } + + // Get configuration + const auto& config = ConfigManager::instance().get(); + + // Create IPC server + auto ipc_server = std::make_unique( + config.socket_path, + config.max_requests_per_sec + ); + + // Register IPC handlers + Handlers::register_all(*ipc_server); + + // Register services with daemon + daemon.register_service(std::move(ipc_server)); + + // Run daemon (blocks until shutdown) + int exit_code = daemon.run(); + + LOG_INFO("main", "cortexd shutdown complete"); + Logger::shutdown(); + + return exit_code; + } + \ No newline at end of file diff --git a/daemon/src/utils/logger.cpp b/daemon/src/utils/logger.cpp new file mode 100644 index 00000000..c07b7729 --- /dev/null +++ b/daemon/src/utils/logger.cpp @@ -0,0 +1,139 @@ +/** + * @file logger.cpp + * @brief Logger implementation with journald and stderr support + */ + + #include "cortexd/logger.h" + #include + #include + #include + #include + + namespace cortexd { + + // Static member initialization + LogLevel Logger::min_level_ = LogLevel::INFO; + bool Logger::use_journald_ = true; + std::mutex Logger::mutex_; + bool Logger::initialized_ = false; + + void Logger::init(LogLevel min_level, bool use_journald) { + std::lock_guard lock(mutex_); + min_level_ = min_level; + use_journald_ = use_journald; + initialized_ = true; + + if (!use_journald_) { + std::cerr << "[cortexd] Logging initialized (stderr mode, level=" + << level_to_string(min_level_) << ")" << std::endl; + } + } + + void Logger::shutdown() { + std::lock_guard lock(mutex_); + if (initialized_ && !use_journald_) { + std::cerr << "[cortexd] Logging shutdown" << std::endl; + } + initialized_ = false; + } + + void Logger::set_level(LogLevel level) { + std::lock_guard lock(mutex_); + min_level_ = level; + } + + LogLevel Logger::get_level() { + std::lock_guard lock(mutex_); + return min_level_; + } + + void Logger::debug(const std::string& component, const std::string& message) { + log(LogLevel::DEBUG, component, message); + } + + void Logger::info(const std::string& component, const std::string& message) { + log(LogLevel::INFO, component, message); + } + + void Logger::warn(const std::string& component, const std::string& message) { + log(LogLevel::WARN, component, message); + } + + void Logger::error(const std::string& component, const std::string& message) { + log(LogLevel::ERROR, component, message); + } + + void Logger::critical(const std::string& component, const std::string& message) { + log(LogLevel::CRITICAL, component, message); + } + +void Logger::log(LogLevel level, const std::string& component, const std::string& message) { + std::lock_guard lock(mutex_); + + // Check log level while holding the lock to avoid race condition + if (static_cast(level) < static_cast(min_level_)) { + return; + } + + if (use_journald_) { + log_to_journald(level, component, message); + } else { + log_to_stderr(level, component, message); + } +} + + void Logger::log_to_journald(LogLevel level, const std::string& component, const std::string& message) { + sd_journal_send( + "MESSAGE=%s", message.c_str(), + "PRIORITY=%d", level_to_priority(level), + "SYSLOG_IDENTIFIER=cortexd", + "CORTEXD_COMPONENT=%s", 
component.c_str(), + "CODE_FUNC=%s", component.c_str(), + NULL + ); + } + +void Logger::log_to_stderr(LogLevel level, const std::string& component, const std::string& message) { + // Get current time using thread-safe localtime_r (POSIX) + auto now = std::time(nullptr); + std::tm tm_buf{}; + std::tm* tm = localtime_r(&now, &tm_buf); + + // Format: [TIMESTAMP] [LEVEL] component: message + if (tm) { + std::cerr << std::put_time(tm, "[%Y-%m-%d %H:%M:%S]") + << " [" << level_to_string(level) << "]" + << " " << component << ": " + << message << std::endl; + } else { + // Fallback if localtime_r fails + std::cerr << "[XXXX-XX-XX XX:XX:XX]" + << " [" << level_to_string(level) << "]" + << " " << component << ": " + << message << std::endl; + } +} + + int Logger::level_to_priority(LogLevel level) { + switch (level) { + case LogLevel::DEBUG: return internal::SYSLOG_DEBUG; + case LogLevel::INFO: return internal::SYSLOG_INFO; + case LogLevel::WARN: return internal::SYSLOG_WARNING; + case LogLevel::ERROR: return internal::SYSLOG_ERR; + case LogLevel::CRITICAL: return internal::SYSLOG_CRIT; + default: return internal::SYSLOG_INFO; + } + } + + const char* Logger::level_to_string(LogLevel level) { + switch (level) { + case LogLevel::DEBUG: return "DEBUG"; + case LogLevel::INFO: return "INFO"; + case LogLevel::WARN: return "WARN"; + case LogLevel::ERROR: return "ERROR"; + case LogLevel::CRITICAL: return "CRITICAL"; + default: return "UNKNOWN"; + } + } + + } // namespace cortexd \ No newline at end of file diff --git a/daemon/systemd/cortexd.service b/daemon/systemd/cortexd.service new file mode 100644 index 00000000..6231a328 --- /dev/null +++ b/daemon/systemd/cortexd.service @@ -0,0 +1,62 @@ +[Unit] +Description=Cortex AI Package Manager Daemon +Documentation=https://github.com/cortexlinux/cortex +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +ExecStart=/usr/local/bin/cortexd +ExecReload=/bin/kill -HUP $MAINPID +Restart=on-failure +RestartSec=5 +WatchdogSec=30 + +# Environment +Environment=HOME=/root + +# Security hardening +NoNewPrivileges=yes +ProtectSystem=strict +ProtectHome=read-only +PrivateTmp=yes +PrivateDevices=yes +ProtectKernelTunables=yes +ProtectKernelModules=yes +ProtectControlGroups=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes + +# Allow memory mapping for llama.cpp +MemoryDenyWriteExecute=no + +# Resource limits +MemoryMax=256M +MemoryHigh=200M +TasksMax=64 + +# Paths +RuntimeDirectory=cortex +RuntimeDirectoryMode=0755 +StateDirectory=cortex +StateDirectoryMode=0750 +ConfigurationDirectory=cortex + +# Read/Write paths +ReadWritePaths=/var/lib/cortex +ReadWritePaths=/run/cortex +ReadWritePaths=/root/.cortex + +# Logging +StandardOutput=journal +StandardError=journal +SyslogIdentifier=cortexd + +# Graceful shutdown +TimeoutStopSec=30 +KillMode=mixed +KillSignal=SIGTERM +FinalKillSignal=SIGKILL + +[Install] +WantedBy=multi-user.target diff --git a/daemon/tests/CMakeLists.txt b/daemon/tests/CMakeLists.txt new file mode 100644 index 00000000..9406865b --- /dev/null +++ b/daemon/tests/CMakeLists.txt @@ -0,0 +1,90 @@ +# Tests CMakeLists.txt for cortexd + +# Create a library with daemon sources (excluding main.cpp) for testing +set(DAEMON_TEST_SOURCES + ${CMAKE_SOURCE_DIR}/src/core/daemon.cpp + ${CMAKE_SOURCE_DIR}/src/config/config.cpp + ${CMAKE_SOURCE_DIR}/src/ipc/server.cpp + ${CMAKE_SOURCE_DIR}/src/ipc/protocol.cpp + ${CMAKE_SOURCE_DIR}/src/ipc/handlers.cpp + ${CMAKE_SOURCE_DIR}/src/utils/logger.cpp +) + +add_library(cortexd_lib STATIC 
${DAEMON_TEST_SOURCES}) + +target_include_directories(cortexd_lib PUBLIC + ${CMAKE_SOURCE_DIR}/include + ${SYSTEMD_INCLUDE_DIRS} + ${OPENSSL_INCLUDE_DIRS} + ${UUID_INCLUDE_DIRS} +) + +target_link_libraries(cortexd_lib PUBLIC + ${SYSTEMD_LIBRARIES} + ${OPENSSL_LIBRARIES} + ${UUID_LIBRARIES} + nlohmann_json::nlohmann_json + yaml-cpp::yaml-cpp + pthread +) + +target_compile_definitions(cortexd_lib PUBLIC + CORTEXD_VERSION="${PROJECT_VERSION}" +) + +# Unit tests +add_executable(test_config + unit/test_config.cpp +) +target_link_libraries(test_config PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_config COMMAND test_config) + +add_executable(test_protocol + unit/test_protocol.cpp +) +target_link_libraries(test_protocol PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_protocol COMMAND test_protocol) + +add_executable(test_rate_limiter + unit/test_rate_limiter.cpp +) +target_link_libraries(test_rate_limiter PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_rate_limiter COMMAND test_rate_limiter) + +add_executable(test_logger + unit/test_logger.cpp +) +target_link_libraries(test_logger PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_logger COMMAND test_logger) + +add_executable(test_common + unit/test_common.cpp +) +target_link_libraries(test_common PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_common COMMAND test_common) + +# Integration tests +add_executable(test_ipc_server + integration/test_ipc_server.cpp +) +target_link_libraries(test_ipc_server PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_ipc_server COMMAND test_ipc_server) + +add_executable(test_handlers + integration/test_handlers.cpp +) +target_link_libraries(test_handlers PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_handlers COMMAND test_handlers) + +add_executable(test_daemon + integration/test_daemon.cpp +) +target_link_libraries(test_daemon PRIVATE cortexd_lib GTest::gtest_main) +add_test(NAME test_daemon COMMAND test_daemon) + +# Add custom target to run all tests +add_custom_target(run_tests + COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure + DEPENDS test_config test_protocol test_rate_limiter test_logger test_common test_ipc_server test_handlers test_daemon + COMMENT "Running all cortexd tests" +) diff --git a/daemon/tests/integration/test_daemon.cpp b/daemon/tests/integration/test_daemon.cpp new file mode 100644 index 00000000..c92c4e43 --- /dev/null +++ b/daemon/tests/integration/test_daemon.cpp @@ -0,0 +1,410 @@ +/** + * @file test_daemon.cpp + * @brief Integration tests for Daemon lifecycle and service management + */ + +#include +#include +#include +#include +#include +#include +#include +#include "cortexd/core/daemon.h" +#include "cortexd/core/service.h" +#include "cortexd/config.h" +#include "cortexd/logger.h" +#include "cortexd/ipc/server.h" + +namespace fs = std::filesystem; + +/** + * @brief Mock service for testing service lifecycle + */ +class MockService : public cortexd::Service { +public: + MockService(const std::string& name, int priority = 0) + : name_(name), priority_(priority) {} + + bool start() override { + if (should_fail_start_) return false; + running_ = true; + start_count_++; + return true; + } + + void stop() override { + running_ = false; + stop_count_++; + } + + const char* name() const override { return name_.c_str(); } + int priority() const override { return priority_; } + bool is_running() const override { return running_; } + bool is_healthy() const override { return healthy_ && running_; } + + void 
set_should_fail_start(bool fail) { should_fail_start_ = fail; } + void set_healthy(bool healthy) { healthy_ = healthy; } + + int start_count() const { return start_count_; } + int stop_count() const { return stop_count_; } + +private: + std::string name_; + int priority_; + std::atomic running_{false}; + bool should_fail_start_ = false; + bool healthy_ = true; + int start_count_ = 0; + int stop_count_ = 0; +}; + +class DaemonTest : public ::testing::Test { +protected: + void SetUp() override { + cortexd::Logger::init(cortexd::LogLevel::ERROR, false); + + // Create temp directory for test files + temp_dir_ = fs::temp_directory_path() / ("cortexd_daemon_test_" + std::to_string(getpid())); + fs::create_directories(temp_dir_); + + config_path_ = (temp_dir_ / "config.yaml").string(); + socket_path_ = (temp_dir_ / "test.sock").string(); + + // Create a minimal config file + std::ofstream config_file(config_path_); + config_file << R"( +socket: + path: )" << socket_path_ << R"( + backlog: 16 + timeout_ms: 5000 + +rate_limit: + max_requests_per_sec: 100 + +log_level: 1 +)"; + config_file.close(); + } + + void TearDown() override { + // Reset daemon singleton state to ensure clean state between tests + cortexd::Daemon::instance().reset(); + + fs::remove_all(temp_dir_); + cortexd::Logger::shutdown(); + } + + fs::path temp_dir_; + std::string config_path_; + std::string socket_path_; +}; + +// ============================================================================ +// Singleton tests +// ============================================================================ + +TEST_F(DaemonTest, InstanceReturnsSameDaemon) { + auto& daemon1 = cortexd::Daemon::instance(); + auto& daemon2 = cortexd::Daemon::instance(); + + EXPECT_EQ(&daemon1, &daemon2); +} + +// ============================================================================ +// Initialization tests +// ============================================================================ + +TEST_F(DaemonTest, InitializeWithValidConfig) { + auto& daemon = cortexd::Daemon::instance(); + + EXPECT_TRUE(daemon.initialize(config_path_)); +} + +TEST_F(DaemonTest, InitializeWithNonexistentConfigUsesDefaults) { + auto& daemon = cortexd::Daemon::instance(); + + // Should still initialize (with defaults) + EXPECT_TRUE(daemon.initialize("/nonexistent/config.yaml")); +} + +TEST_F(DaemonTest, ConfigIsLoadedAfterInitialize) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + auto config = daemon.config(); + EXPECT_EQ(config.socket_path, socket_path_); +} + +// ============================================================================ +// Shutdown request tests +// ============================================================================ + +TEST_F(DaemonTest, RequestShutdownSetsFlag) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // The test fixture resets the Daemon in TearDown(), so prior-test state is not possible. + // This test verifies that request_shutdown() sets shutdown_requested_ to true and + // is idempotent on a freshly reset singleton. 
+ daemon.request_shutdown(); + + EXPECT_TRUE(daemon.shutdown_requested()); +} + +// ============================================================================ +// Service registration tests +// ============================================================================ + +TEST_F(DaemonTest, RegisterServiceAddsService) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + auto mock = std::make_unique("TestService", 50); + MockService* mock_ptr = mock.get(); + + daemon.register_service(std::move(mock)); + + // Verify service is registered + auto* retrieved = daemon.get_service(); + EXPECT_EQ(retrieved, mock_ptr); +} + +TEST_F(DaemonTest, GetServiceReturnsNullptrForUnregistered) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // No services registered, should return nullptr + auto* service = daemon.get_service(); + EXPECT_EQ(service, nullptr); +} + +// ============================================================================ +// Uptime tests +// ============================================================================ + +TEST_F(DaemonTest, UptimeIsZeroBeforeRun) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // Before running, uptime calculation may not be meaningful + // but it shouldn't crash + auto uptime = daemon.uptime(); + EXPECT_GE(uptime.count(), 0); +} + +// ============================================================================ +// Config reload tests +// ============================================================================ + +TEST_F(DaemonTest, ReloadConfigWorks) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // Initial config + auto initial_config = daemon.config(); + EXPECT_EQ(initial_config.log_level, 1); + + // Modify config file + std::ofstream config_file(config_path_); + config_file << R"( +socket: + path: )" << socket_path_ << R"( + backlog: 16 + timeout_ms: 5000 + +rate_limit: + max_requests_per_sec: 100 + +log_level: 2 +)"; + config_file.close(); + + // Reload + EXPECT_TRUE(daemon.reload_config()); + + // Verify change + auto reloaded_config = daemon.config(); + EXPECT_EQ(reloaded_config.log_level, 2); +} + +// ============================================================================ +// Run loop tests (limited scope due to blocking nature) +// ============================================================================ + +TEST_F(DaemonTest, RunReturnsOnShutdownRequest) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // Request shutdown immediately so run() doesn't block forever + daemon.request_shutdown(); + + // Run should return quickly since shutdown is already requested + // Note: This test verifies basic shutdown flow without blocking + EXPECT_TRUE(daemon.shutdown_requested()); +} + +TEST_F(DaemonTest, RunWithServicesThatFailToStart) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + auto failing_service = std::make_unique("FailingService"); + failing_service->set_should_fail_start(true); + + daemon.register_service(std::move(failing_service)); + daemon.request_shutdown(); // Prevent blocking + + // The run will fail due to service start failure + // We can't easily test this without modifying daemon internals +} + +// ============================================================================ +// Multiple service tests +// ============================================================================ + 
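+// These tests register MockService instances with distinct priorities (10/20/30).
+// Only registration and retrieval are verified here; start ordering by priority is
+// presumably handled by Daemon::run() and is not exercised in this fixture.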
+TEST_F(DaemonTest, RegisterMultipleServices) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + daemon.register_service(std::make_unique("Service1", 10)); + daemon.register_service(std::make_unique("Service2", 20)); + daemon.register_service(std::make_unique("Service3", 30)); + + // All services should be retrievable + auto* svc = daemon.get_service(); + EXPECT_NE(svc, nullptr); +} + +// ============================================================================ +// Running state tests +// ============================================================================ + +TEST_F(DaemonTest, IsRunningInitiallyFalse) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // Before run() is called + EXPECT_FALSE(daemon.is_running()); +} + +// ============================================================================ +// Config access tests +// ============================================================================ + +TEST_F(DaemonTest, ConfigReturnsValidConfig) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + auto config = daemon.config(); + + // Verify config has expected values + EXPECT_FALSE(config.socket_path.empty()); + EXPECT_GT(config.socket_backlog, 0); + EXPECT_GT(config.socket_timeout_ms, 0); + EXPECT_GT(config.max_requests_per_sec, 0); +} + +// ============================================================================ +// Thread safety tests +// ============================================================================ + +TEST_F(DaemonTest, ConfigAccessIsThreadSafe) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + std::atomic read_count{0}; + std::vector threads; + + // Multiple threads reading config concurrently + for (int t = 0; t < 10; ++t) { + threads.emplace_back([&]() { + for (int i = 0; i < 100; ++i) { + auto config = daemon.config(); + // Access some fields to ensure no crashes + (void)config.socket_path; + (void)config.log_level; + read_count++; + } + }); + } + + for (auto& thread : threads) { + thread.join(); + } + + EXPECT_EQ(read_count.load(), 1000); +} + +TEST_F(DaemonTest, ShutdownRequestIsThreadSafe) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + std::vector threads; + + // Multiple threads requesting shutdown + for (int t = 0; t < 10; ++t) { + threads.emplace_back([&]() { + daemon.request_shutdown(); + }); + } + + for (auto& thread : threads) { + thread.join(); + } + + EXPECT_TRUE(daemon.shutdown_requested()); +} + +// ============================================================================ +// systemd notification tests (mock verification) +// ============================================================================ + +TEST_F(DaemonTest, NotifyReadyDoesNotCrash) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + // These should not crash (systemd may not be available in test env) + daemon.notify_ready(); + SUCCEED(); +} + +TEST_F(DaemonTest, NotifyStoppingDoesNotCrash) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + daemon.notify_stopping(); + SUCCEED(); +} + +TEST_F(DaemonTest, NotifyWatchdogDoesNotCrash) { + auto& daemon = cortexd::Daemon::instance(); + daemon.initialize(config_path_); + + daemon.notify_watchdog(); + SUCCEED(); +} + +// ============================================================================ +// Edge case tests +// 
============================================================================ + +TEST_F(DaemonTest, DoubleInitialize) { + auto& daemon = cortexd::Daemon::instance(); + + EXPECT_TRUE(daemon.initialize(config_path_)); + EXPECT_TRUE(daemon.initialize(config_path_)); // Should not crash +} + +TEST_F(DaemonTest, ReloadBeforeInit) { + auto& daemon = cortexd::Daemon::instance(); + + // reload without init - should handle gracefully + // (depends on implementation, may return false) + daemon.reload_config(); // Should not crash + SUCCEED(); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/daemon/tests/integration/test_handlers.cpp b/daemon/tests/integration/test_handlers.cpp new file mode 100644 index 00000000..4675eaeb --- /dev/null +++ b/daemon/tests/integration/test_handlers.cpp @@ -0,0 +1,389 @@ +/** + * @file test_handlers.cpp + * @brief Integration tests for IPC handlers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cortexd/ipc/server.h" +#include "cortexd/ipc/handlers.h" +#include "cortexd/ipc/protocol.h" +#include "cortexd/config.h" +#include "cortexd/core/daemon.h" +#include "cortexd/logger.h" + +namespace fs = std::filesystem; + +class HandlersTest : public ::testing::Test { +protected: + void SetUp() override { + cortexd::Logger::init(cortexd::LogLevel::ERROR, false); + + // Create temp directory for test files + temp_dir_ = fs::temp_directory_path() / ("cortexd_handlers_test_" + std::to_string(getpid())); + fs::create_directories(temp_dir_); + + socket_path_ = (temp_dir_ / "test.sock").string(); + config_path_ = (temp_dir_ / "config.yaml").string(); + + // Create a test config file + std::ofstream config_file(config_path_); + config_file << R"( +socket: + path: )" << socket_path_ << R"( + backlog: 16 + timeout_ms: 5000 + +rate_limit: + max_requests_per_sec: 100 + +log_level: 1 +)"; + config_file.close(); + + // Load config + cortexd::ConfigManager::instance().load(config_path_); + } + + void TearDown() override { + if (server_) { + server_->stop(); + server_.reset(); + } + + fs::remove_all(temp_dir_); + cortexd::Logger::shutdown(); + } + + void start_server_with_handlers() { + auto config = cortexd::ConfigManager::instance().get(); + server_ = std::make_unique(socket_path_, config.max_requests_per_sec); + cortexd::Handlers::register_all(*server_); + ASSERT_TRUE(server_->start()); + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + + std::string send_request(const std::string& request) { + int sock = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock == -1) return ""; + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, socket_path_.c_str(), sizeof(addr.sun_path) - 1); + + if (connect(sock, (struct sockaddr*)&addr, sizeof(addr)) == -1) { + close(sock); + return ""; + } + + // Check send() return value to ensure data was sent successfully + ssize_t sent = send(sock, request.c_str(), request.length(), 0); + if (sent <= 0 || static_cast(sent) < request.length()) { + close(sock); + return ""; // Send failed or partial send + } + + char buffer[65536]; + ssize_t bytes = recv(sock, buffer, sizeof(buffer) - 1, 0); + close(sock); + + if (bytes <= 0) return ""; + + buffer[bytes] = '\0'; + return std::string(buffer); + } + + cortexd::json send_json_request(const std::string& method, + const cortexd::json& params = cortexd::json::object()) { + cortexd::json request = { + {"method", method}, + {"params", 
params} + }; + + std::string response = send_request(request.dump()); + if (response.empty()) { + return cortexd::json{{"error", "empty response"}}; + } + + return cortexd::json::parse(response); + } + + fs::path temp_dir_; + std::string socket_path_; + std::string config_path_; + std::unique_ptr server_; +}; + +// ============================================================================ +// Ping handler tests +// ============================================================================ + +TEST_F(HandlersTest, PingReturnsSuccess) { + start_server_with_handlers(); + + auto response = send_json_request("ping"); + + EXPECT_TRUE(response["success"]); + EXPECT_TRUE(response["result"]["pong"]); +} + +TEST_F(HandlersTest, PingIgnoresParams) { + start_server_with_handlers(); + + auto response = send_json_request("ping", {{"ignored", "param"}}); + + EXPECT_TRUE(response["success"]); + EXPECT_TRUE(response["result"]["pong"]); +} + +// ============================================================================ +// Version handler tests +// ============================================================================ + +TEST_F(HandlersTest, VersionReturnsVersionAndName) { + start_server_with_handlers(); + + auto response = send_json_request("version"); + + EXPECT_TRUE(response["success"]); + EXPECT_TRUE(response["result"].contains("version")); + EXPECT_TRUE(response["result"].contains("name")); + EXPECT_EQ(response["result"]["name"], "cortexd"); +} + +TEST_F(HandlersTest, VersionReturnsNonEmptyVersion) { + start_server_with_handlers(); + + auto response = send_json_request("version"); + + std::string version = response["result"]["version"]; + EXPECT_FALSE(version.empty()); +} + +// ============================================================================ +// Config.get handler tests +// ============================================================================ + +TEST_F(HandlersTest, ConfigGetReturnsConfig) { + start_server_with_handlers(); + + auto response = send_json_request("config.get"); + + EXPECT_TRUE(response["success"]); + EXPECT_TRUE(response["result"].contains("socket_path")); + EXPECT_TRUE(response["result"].contains("socket_backlog")); + EXPECT_TRUE(response["result"].contains("socket_timeout_ms")); + EXPECT_TRUE(response["result"].contains("max_requests_per_sec")); + EXPECT_TRUE(response["result"].contains("log_level")); +} + +TEST_F(HandlersTest, ConfigGetReturnsCorrectValues) { + start_server_with_handlers(); + + auto response = send_json_request("config.get"); + + EXPECT_TRUE(response["success"]); + EXPECT_EQ(response["result"]["socket_path"], socket_path_); + EXPECT_EQ(response["result"]["socket_backlog"], 16); + EXPECT_EQ(response["result"]["socket_timeout_ms"], 5000); + EXPECT_EQ(response["result"]["max_requests_per_sec"], 100); + EXPECT_EQ(response["result"]["log_level"], 1); +} + +// ============================================================================ +// Config.reload handler tests +// ============================================================================ + +TEST_F(HandlersTest, ConfigReloadSucceeds) { + start_server_with_handlers(); + + auto response = send_json_request("config.reload"); + + EXPECT_TRUE(response["success"]); + EXPECT_TRUE(response["result"]["reloaded"]); +} + +TEST_F(HandlersTest, ConfigReloadPicksUpChanges) { + start_server_with_handlers(); + + // Verify initial value + auto initial = send_json_request("config.get"); + EXPECT_EQ(initial["result"]["log_level"], 1); + + // Modify config file + std::ofstream config_file(config_path_); + 
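+    // Opening with std::ofstream's default flags truncates the existing file, so the
+    // config is rewritten in place with log_level bumped to 2 before the reload call.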
config_file << R"( +socket: + path: )" << socket_path_ << R"( + backlog: 16 + timeout_ms: 5000 + +rate_limit: + max_requests_per_sec: 100 + +log_level: 2 +)"; + config_file.close(); + + // Reload config + auto reload_response = send_json_request("config.reload"); + EXPECT_TRUE(reload_response["success"]); + + // Verify new value + auto updated = send_json_request("config.get"); + EXPECT_EQ(updated["result"]["log_level"], 2); +} + +// ============================================================================ +// Shutdown handler tests +// ============================================================================ + +TEST_F(HandlersTest, ShutdownReturnsInitiated) { + start_server_with_handlers(); + + auto response = send_json_request("shutdown"); + + EXPECT_TRUE(response["success"]); + EXPECT_EQ(response["result"]["shutdown"], "initiated"); +} + +// Note: We can't easily test that shutdown actually stops the daemon +// in this test environment since we're not running the full daemon + +// ============================================================================ +// Unknown method tests +// ============================================================================ + +TEST_F(HandlersTest, UnknownMethodReturnsError) { + start_server_with_handlers(); + + auto response = send_json_request("unknown.method"); + + EXPECT_FALSE(response["success"]); + EXPECT_EQ(response["error"]["code"], cortexd::ErrorCodes::METHOD_NOT_FOUND); +} + +TEST_F(HandlersTest, StatusMethodNotAvailableInPR1) { + start_server_with_handlers(); + + // Status handler is not registered in PR 1 + auto response = send_json_request("status"); + + EXPECT_FALSE(response["success"]); + EXPECT_EQ(response["error"]["code"], cortexd::ErrorCodes::METHOD_NOT_FOUND); +} + +TEST_F(HandlersTest, HealthMethodNotAvailableInPR1) { + start_server_with_handlers(); + + // Health handler is not registered in PR 1 + auto response = send_json_request("health"); + + EXPECT_FALSE(response["success"]); + EXPECT_EQ(response["error"]["code"], cortexd::ErrorCodes::METHOD_NOT_FOUND); +} + +TEST_F(HandlersTest, AlertsMethodNotAvailableInPR1) { + start_server_with_handlers(); + + // Alerts handler is not registered in PR 1 + auto response = send_json_request("alerts"); + + EXPECT_FALSE(response["success"]); + EXPECT_EQ(response["error"]["code"], cortexd::ErrorCodes::METHOD_NOT_FOUND); +} + +// ============================================================================ +// Response format tests +// ============================================================================ + +TEST_F(HandlersTest, AllResponsesHaveTimestamp) { + start_server_with_handlers(); + + std::vector methods = {"ping", "version", "config.get"}; + + for (const auto& method : methods) { + auto response = send_json_request(method); + EXPECT_TRUE(response.contains("timestamp")) + << "Method " << method << " should include timestamp"; + } +} + +TEST_F(HandlersTest, SuccessResponsesHaveResult) { + start_server_with_handlers(); + + std::vector methods = {"ping", "version", "config.get"}; + + for (const auto& method : methods) { + auto response = send_json_request(method); + EXPECT_TRUE(response["success"]) << "Method " << method << " should succeed"; + EXPECT_TRUE(response.contains("result")) + << "Method " << method << " should include result"; + } +} + +// ============================================================================ +// Multiple requests tests +// ============================================================================ + +TEST_F(HandlersTest, 
HandlesMultipleSequentialRequests) { + start_server_with_handlers(); + + for (int i = 0; i < 10; ++i) { + auto response = send_json_request("ping"); + EXPECT_TRUE(response["success"]) << "Request " << i << " should succeed"; + } +} + +TEST_F(HandlersTest, HandlesMixedRequests) { + start_server_with_handlers(); + + EXPECT_TRUE(send_json_request("ping")["success"]); + EXPECT_TRUE(send_json_request("version")["success"]); + EXPECT_TRUE(send_json_request("config.get")["success"]); + EXPECT_TRUE(send_json_request("ping")["success"]); + EXPECT_FALSE(send_json_request("unknown")["success"]); + EXPECT_TRUE(send_json_request("version")["success"]); +} + +// ============================================================================ +// Concurrent handler tests +// ============================================================================ + +TEST_F(HandlersTest, HandlesConcurrentRequests) { + start_server_with_handlers(); + + std::atomic success_count{0}; + std::vector threads; + + for (int t = 0; t < 5; ++t) { + threads.emplace_back([&, t]() { + std::vector methods = {"ping", "version", "config.get"}; + for (int i = 0; i < 10; ++i) { + auto response = send_json_request(methods[i % methods.size()]); + if (response["success"]) { + success_count++; + } + } + }); + } + + for (auto& thread : threads) { + thread.join(); + } + + // Most requests should succeed + EXPECT_GT(success_count.load(), 40); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/daemon/tests/integration/test_ipc_server.cpp b/daemon/tests/integration/test_ipc_server.cpp new file mode 100644 index 00000000..bf55e111 --- /dev/null +++ b/daemon/tests/integration/test_ipc_server.cpp @@ -0,0 +1,362 @@ +/** + * @file test_ipc_server.cpp + * @brief Integration tests for IPCServer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cortexd/ipc/server.h" +#include "cortexd/ipc/protocol.h" +#include "cortexd/logger.h" + +namespace fs = std::filesystem; + +class IPCServerTest : public ::testing::Test { +protected: + void SetUp() override { + cortexd::Logger::init(cortexd::LogLevel::ERROR, false); + + // Create a unique socket path for each test + socket_path_ = "/tmp/cortexd_test_" + std::to_string(getpid()) + ".sock"; + + // Clean up any existing socket + if (fs::exists(socket_path_)) { + fs::remove(socket_path_); + } + } + + void TearDown() override { + // Stop server if running + if (server_) { + server_->stop(); + server_.reset(); + } + + // Clean up socket file + if (fs::exists(socket_path_)) { + fs::remove(socket_path_); + } + + cortexd::Logger::shutdown(); + } + + // Create and start the server + void start_server(int max_requests_per_sec = 100) { + server_ = std::make_unique(socket_path_, max_requests_per_sec); + ASSERT_TRUE(server_->start()); + + // Give server time to start + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + + // Connect to the server and send a request + std::string send_request(const std::string& request) { + int sock = socket(AF_UNIX, SOCK_STREAM, 0); + EXPECT_NE(sock, -1); + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, socket_path_.c_str(), sizeof(addr.sun_path) - 1); + + int result = connect(sock, (struct sockaddr*)&addr, sizeof(addr)); + if (result == -1) { + close(sock); + return ""; + } + + // Send request + send(sock, request.c_str(), request.length(), 0); + + // Receive response + char buffer[65536]; + 
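+        // A typical exchange, using the "ping" handler registered by these tests:
+        //   request:  {"method": "ping"}
+        //   response: {"success": true, "result": {"pong": true}, "timestamp": <number>}
+        // One recv() is enough because the server writes a single small JSON response
+        // and then closes the connection; a long-lived client would read in a loop.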
ssize_t bytes = recv(sock, buffer, sizeof(buffer) - 1, 0); + close(sock); + + if (bytes <= 0) { + return ""; + } + + buffer[bytes] = '\0'; + return std::string(buffer); + } + + std::string socket_path_; + std::unique_ptr server_; +}; + +// ============================================================================ +// Server lifecycle tests +// ============================================================================ + +TEST_F(IPCServerTest, StartsSuccessfully) { + server_ = std::make_unique(socket_path_); + + EXPECT_TRUE(server_->start()); + EXPECT_TRUE(server_->is_running()); + EXPECT_TRUE(server_->is_healthy()); + + // Socket file should exist + EXPECT_TRUE(fs::exists(socket_path_)); +} + +TEST_F(IPCServerTest, StopsCleanly) { + start_server(); + + EXPECT_TRUE(server_->is_running()); + + server_->stop(); + + EXPECT_FALSE(server_->is_running()); + // Socket file should be cleaned up + EXPECT_FALSE(fs::exists(socket_path_)); +} + +TEST_F(IPCServerTest, CanRestartAfterStop) { + start_server(); + server_->stop(); + + // Start again + EXPECT_TRUE(server_->start()); + EXPECT_TRUE(server_->is_running()); +} + +TEST_F(IPCServerTest, StartTwiceReturnsTrue) { + start_server(); + + // Starting again should return true (already running) + EXPECT_TRUE(server_->start()); +} + +TEST_F(IPCServerTest, StopTwiceIsSafe) { + start_server(); + + server_->stop(); + server_->stop(); // Should not crash + + EXPECT_FALSE(server_->is_running()); +} + +// ============================================================================ +// Handler registration tests +// ============================================================================ + +TEST_F(IPCServerTest, RegisterHandlerWorks) { + start_server(); + + // Register a simple handler + server_->register_handler("test.echo", [](const cortexd::Request& req) { + return cortexd::Response::ok(req.params); + }); + + // Send a request + std::string request = R"({"method": "test.echo", "params": {"message": "hello"}})"; + std::string response = send_request(request); + + ASSERT_FALSE(response.empty()); + + auto json = cortexd::json::parse(response); + EXPECT_TRUE(json["success"]); + EXPECT_EQ(json["result"]["message"], "hello"); +} + +TEST_F(IPCServerTest, UnknownMethodReturnsError) { + start_server(); + + std::string request = R"({"method": "unknown.method"})"; + std::string response = send_request(request); + + ASSERT_FALSE(response.empty()); + + auto json = cortexd::json::parse(response); + EXPECT_FALSE(json["success"]); + EXPECT_EQ(json["error"]["code"], cortexd::ErrorCodes::METHOD_NOT_FOUND); +} + +TEST_F(IPCServerTest, InvalidJsonReturnsParseError) { + start_server(); + + std::string request = "not valid json"; + std::string response = send_request(request); + + ASSERT_FALSE(response.empty()); + + auto json = cortexd::json::parse(response); + EXPECT_FALSE(json["success"]); + EXPECT_EQ(json["error"]["code"], cortexd::ErrorCodes::PARSE_ERROR); +} + +TEST_F(IPCServerTest, MissingMethodReturnsParseError) { + start_server(); + + std::string request = R"({"params": {"key": "value"}})"; + std::string response = send_request(request); + + ASSERT_FALSE(response.empty()); + + auto json = cortexd::json::parse(response); + EXPECT_FALSE(json["success"]); +} + +// ============================================================================ +// Rate limiting tests +// ============================================================================ + +TEST_F(IPCServerTest, RateLimitingWorks) { + // Create server with low rate limit + server_ = std::make_unique(socket_path_, 
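+        // The second constructor argument is max_requests_per_sec (see main.cpp);
+        // a limit of 3 keeps it low enough that the fourth request below is rejected.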
3); + server_->register_handler("ping", [](const cortexd::Request&) { + return cortexd::Response::ok({{"pong", true}}); + }); + ASSERT_TRUE(server_->start()); + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + + // First 3 requests should succeed + for (int i = 0; i < 3; ++i) { + std::string response = send_request(R"({"method": "ping"})"); + auto json = cortexd::json::parse(response); + EXPECT_TRUE(json["success"]) << "Request " << i << " should succeed"; + } + + // 4th request should be rate limited + std::string response = send_request(R"({"method": "ping"})"); + auto json = cortexd::json::parse(response); + EXPECT_FALSE(json["success"]); + EXPECT_EQ(json["error"]["code"], cortexd::ErrorCodes::RATE_LIMITED); +} + +// ============================================================================ +// Connection counting tests +// ============================================================================ + +TEST_F(IPCServerTest, TracksConnectionsServed) { + start_server(); + server_->register_handler("ping", [](const cortexd::Request&) { + return cortexd::Response::ok({{"pong", true}}); + }); + + EXPECT_EQ(server_->connections_served(), 0); + + // Make some requests + for (int i = 0; i < 5; ++i) { + send_request(R"({"method": "ping"})"); + } + + // Give time for connections to be processed + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + EXPECT_EQ(server_->connections_served(), 5); +} + +// ============================================================================ +// Concurrent connection tests +// ============================================================================ + +TEST_F(IPCServerTest, HandlesConcurrentConnections) { + start_server(); + + std::atomic success_count{0}; + server_->register_handler("ping", [&](const cortexd::Request&) { + return cortexd::Response::ok({{"pong", true}}); + }); + + // Launch multiple threads making requests + std::vector threads; + for (int t = 0; t < 5; ++t) { + threads.emplace_back([&]() { + for (int i = 0; i < 10; ++i) { + std::string response = send_request(R"({"method": "ping"})"); + if (!response.empty()) { + auto json = cortexd::json::parse(response); + if (json["success"]) { + success_count++; + } + } + } + }); + } + + for (auto& thread : threads) { + thread.join(); + } + + // Most requests should succeed (some might fail due to timing) + EXPECT_GT(success_count.load(), 30); +} + +// ============================================================================ +// Handler exception tests +// ============================================================================ + +TEST_F(IPCServerTest, HandlerExceptionReturnsInternalError) { + start_server(); + + server_->register_handler("throw", [](const cortexd::Request&) -> cortexd::Response { + throw std::runtime_error("Test exception"); + }); + + std::string response = send_request(R"({"method": "throw"})"); + + ASSERT_FALSE(response.empty()); + + auto json = cortexd::json::parse(response); + EXPECT_FALSE(json["success"]); + EXPECT_EQ(json["error"]["code"], cortexd::ErrorCodes::INTERNAL_ERROR); +} + +// ============================================================================ +// Socket path tests +// ============================================================================ + +TEST_F(IPCServerTest, CreatesParentDirectoryIfNeeded) { + std::string nested_path = "/tmp/cortexd_test_nested_" + std::to_string(getpid()) + "/test.sock"; + + // Ensure parent doesn't exist + fs::remove_all(fs::path(nested_path).parent_path()); + + auto server = 
std::make_unique(nested_path); + EXPECT_TRUE(server->start()); + EXPECT_TRUE(fs::exists(nested_path)); + + server->stop(); + fs::remove_all(fs::path(nested_path).parent_path()); +} + +TEST_F(IPCServerTest, RemovesExistingSocketOnStart) { + // Create a file at the socket path + std::ofstream(socket_path_) << "dummy"; + EXPECT_TRUE(fs::exists(socket_path_)); + + // Server should remove it and create a socket + start_server(); + + EXPECT_TRUE(server_->is_running()); +} + +// ============================================================================ +// Response format tests +// ============================================================================ + +TEST_F(IPCServerTest, ResponseIncludesTimestamp) { + start_server(); + server_->register_handler("ping", [](const cortexd::Request&) { + return cortexd::Response::ok({{"pong", true}}); + }); + + std::string response = send_request(R"({"method": "ping"})"); + + auto json = cortexd::json::parse(response); + EXPECT_TRUE(json.contains("timestamp")); + EXPECT_TRUE(json["timestamp"].is_number()); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/daemon/tests/unit/test_common.cpp b/daemon/tests/unit/test_common.cpp new file mode 100644 index 00000000..7752a157 --- /dev/null +++ b/daemon/tests/unit/test_common.cpp @@ -0,0 +1,120 @@ +/** + * @file test_common.cpp + * @brief Unit tests for common.h constants and types (PR1 scope only) + * + * PR1 includes: Core daemon, IPC server, config management + * PR2 adds: Monitoring, alerts (AlertSeverity, AlertType, HealthSnapshot) + * PR3 adds: LLM integration + */ + +#include +#include +#include "cortexd/common.h" + +class CommonTest : public ::testing::Test { +protected: + void SetUp() override {} + void TearDown() override {} +}; + +// ============================================================================ +// Version and Name constants (PR1) +// ============================================================================ + +TEST_F(CommonTest, VersionIsDefined) { + EXPECT_NE(cortexd::VERSION, nullptr); + EXPECT_STRNE(cortexd::VERSION, ""); +} + +TEST_F(CommonTest, NameIsDefined) { + EXPECT_NE(cortexd::NAME, nullptr); + EXPECT_STREQ(cortexd::NAME, "cortexd"); +} + +// ============================================================================ +// Socket constants (PR1 - used by IPC server) +// ============================================================================ + +TEST_F(CommonTest, DefaultSocketPathIsDefined) { + EXPECT_NE(cortexd::DEFAULT_SOCKET_PATH, nullptr); + EXPECT_STREQ(cortexd::DEFAULT_SOCKET_PATH, "/run/cortex/cortex.sock"); +} + +TEST_F(CommonTest, SocketBacklogIsPositive) { + EXPECT_GT(cortexd::SOCKET_BACKLOG, 0); +} + +TEST_F(CommonTest, SocketTimeoutIsPositive) { + EXPECT_GT(cortexd::SOCKET_TIMEOUT_MS, 0); +} + +TEST_F(CommonTest, MaxMessageSizeIsPositive) { + EXPECT_GT(cortexd::MAX_MESSAGE_SIZE, 0); + // Should be at least 1KB for reasonable messages + EXPECT_GE(cortexd::MAX_MESSAGE_SIZE, 1024); +} + +// ============================================================================ +// CommandType enum tests (PR1 - shutdown and config_reload are available) +// ============================================================================ + +TEST_F(CommonTest, CommandTypeEnumValuesAreDistinct) { + std::set values; + values.insert(static_cast(cortexd::CommandType::STATUS)); + values.insert(static_cast(cortexd::CommandType::ALERTS)); + values.insert(static_cast(cortexd::CommandType::SHUTDOWN)); + 
values.insert(static_cast(cortexd::CommandType::CONFIG_RELOAD)); + values.insert(static_cast(cortexd::CommandType::HEALTH)); + values.insert(static_cast(cortexd::CommandType::UNKNOWN)); + + EXPECT_EQ(values.size(), 6); +} + +TEST_F(CommonTest, CommandTypeUnknownExists) { + // UNKNOWN should be a valid enum value for unrecognized commands + cortexd::CommandType cmd = cortexd::CommandType::UNKNOWN; + EXPECT_EQ(cmd, cortexd::CommandType::UNKNOWN); +} + +TEST_F(CommonTest, CommandTypeShutdownExists) { + // SHUTDOWN is available in PR1 + cortexd::CommandType cmd = cortexd::CommandType::SHUTDOWN; + EXPECT_EQ(cmd, cortexd::CommandType::SHUTDOWN); +} + +TEST_F(CommonTest, CommandTypeConfigReloadExists) { + // CONFIG_RELOAD is available in PR1 + cortexd::CommandType cmd = cortexd::CommandType::CONFIG_RELOAD; + EXPECT_EQ(cmd, cortexd::CommandType::CONFIG_RELOAD); +} + +// ============================================================================ +// Memory constraints (PR1 - daemon memory footprint targets) +// ============================================================================ + +TEST_F(CommonTest, IdleMemoryConstraintIsDefined) { + EXPECT_GT(cortexd::IDLE_MEMORY_MB, 0); +} + +TEST_F(CommonTest, ActiveMemoryConstraintIsDefined) { + EXPECT_GT(cortexd::ACTIVE_MEMORY_MB, 0); +} + +TEST_F(CommonTest, ActiveMemoryGreaterThanIdle) { + EXPECT_GT(cortexd::ACTIVE_MEMORY_MB, cortexd::IDLE_MEMORY_MB); +} + +// ============================================================================ +// Startup time target (PR1 - daemon startup performance) +// ============================================================================ + +TEST_F(CommonTest, StartupTimeTargetIsDefined) { + EXPECT_GT(cortexd::STARTUP_TIME_MS, 0); + // Should be reasonable (less than 10 seconds) + EXPECT_LT(cortexd::STARTUP_TIME_MS, 10000); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/daemon/tests/unit/test_config.cpp b/daemon/tests/unit/test_config.cpp new file mode 100644 index 00000000..311e0dbc --- /dev/null +++ b/daemon/tests/unit/test_config.cpp @@ -0,0 +1,346 @@ +/** + * @file test_config.cpp + * @brief Unit tests for Config and ConfigManager + */ + +#include +#include +#include +#include +#include +#include "cortexd/config.h" +#include "cortexd/logger.h" + +namespace fs = std::filesystem; + +class ConfigTest : public ::testing::Test { +protected: + void SetUp() override { + // Initialize logger in non-journald mode for tests + cortexd::Logger::init(cortexd::LogLevel::ERROR, false); + + // Create a temp directory for test files with unique suffix to avoid collisions + temp_dir_ = fs::temp_directory_path() / ("cortexd_test_" + std::to_string(getpid()) + "_" + std::to_string(std::chrono::steady_clock::now().time_since_epoch().count())); + fs::create_directories(temp_dir_); + } + + void TearDown() override { + // Clean up temp directory + fs::remove_all(temp_dir_); + cortexd::Logger::shutdown(); + } + + fs::path temp_dir_; + + // Helper to write a config file + void write_config(const std::string& filename, const std::string& content) { + std::ofstream file(temp_dir_ / filename); + file << content; + file.close(); + } +}; + +// ============================================================================ +// Config::defaults() tests +// ============================================================================ + +TEST_F(ConfigTest, DefaultsReturnsValidConfig) { + auto config = cortexd::Config::defaults(); + + EXPECT_EQ(config.socket_path, 
"/run/cortex/cortex.sock"); + EXPECT_EQ(config.socket_backlog, 16); + EXPECT_EQ(config.socket_timeout_ms, 5000); + EXPECT_EQ(config.max_requests_per_sec, 100); + EXPECT_EQ(config.log_level, 1); +} + +TEST_F(ConfigTest, DefaultsPassesValidation) { + auto config = cortexd::Config::defaults(); + std::string error = config.validate(); + + EXPECT_TRUE(error.empty()) << "Validation error: " << error; +} + +// ============================================================================ +// Config::validate() tests +// ============================================================================ + +TEST_F(ConfigTest, ValidateRejectsZeroSocketBacklog) { + auto config = cortexd::Config::defaults(); + config.socket_backlog = 0; + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); + EXPECT_TRUE(error.find("socket_backlog") != std::string::npos); +} + +TEST_F(ConfigTest, ValidateRejectsNegativeSocketBacklog) { + auto config = cortexd::Config::defaults(); + config.socket_backlog = -5; + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); +} + +TEST_F(ConfigTest, ValidateRejectsZeroSocketTimeout) { + auto config = cortexd::Config::defaults(); + config.socket_timeout_ms = 0; + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); + EXPECT_TRUE(error.find("socket_timeout_ms") != std::string::npos); +} + +TEST_F(ConfigTest, ValidateRejectsZeroMaxRequests) { + auto config = cortexd::Config::defaults(); + config.max_requests_per_sec = 0; + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); + EXPECT_TRUE(error.find("max_requests_per_sec") != std::string::npos); +} + +TEST_F(ConfigTest, ValidateRejectsInvalidLogLevel) { + auto config = cortexd::Config::defaults(); + config.log_level = 5; // Valid range is 0-4 (DEBUG=0, INFO=1, WARN=2, ERROR=3, CRITICAL=4) + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); + EXPECT_TRUE(error.find("log_level") != std::string::npos); +} + +TEST_F(ConfigTest, ValidateRejectsNegativeLogLevel) { + auto config = cortexd::Config::defaults(); + config.log_level = -1; + + std::string error = config.validate(); + EXPECT_FALSE(error.empty()); +} + +TEST_F(ConfigTest, ValidateAcceptsAllValidLogLevels) { + auto config = cortexd::Config::defaults(); + + // Valid range is 0-4 (DEBUG=0, INFO=1, WARN=2, ERROR=3, CRITICAL=4) + for (int level = 0; level <= 4; ++level) { + config.log_level = level; + std::string error = config.validate(); + EXPECT_TRUE(error.empty()) << "Log level " << level << " should be valid"; + } +} + +// ============================================================================ +// Config::load() tests +// ============================================================================ + +TEST_F(ConfigTest, LoadReturnsNulloptForNonexistentFile) { + auto result = cortexd::Config::load("/nonexistent/path/config.yaml"); + EXPECT_FALSE(result.has_value()); +} + +TEST_F(ConfigTest, LoadParsesValidYaml) { + write_config("valid.yaml", R"( +socket: + path: /tmp/test.sock + backlog: 32 + timeout_ms: 10000 + +rate_limit: + max_requests_per_sec: 200 + +log_level: 2 +)"); + + auto result = cortexd::Config::load((temp_dir_ / "valid.yaml").string()); + + ASSERT_TRUE(result.has_value()); + EXPECT_EQ(result->socket_path, "/tmp/test.sock"); + EXPECT_EQ(result->socket_backlog, 32); + EXPECT_EQ(result->socket_timeout_ms, 10000); + EXPECT_EQ(result->max_requests_per_sec, 200); + EXPECT_EQ(result->log_level, 2); +} + +TEST_F(ConfigTest, LoadUsesDefaultsForMissingFields) { + 
write_config("partial.yaml", R"( +socket: + path: /tmp/partial.sock +)"); + + auto result = cortexd::Config::load((temp_dir_ / "partial.yaml").string()); + + ASSERT_TRUE(result.has_value()); + EXPECT_EQ(result->socket_path, "/tmp/partial.sock"); + // Other fields should have defaults + EXPECT_EQ(result->socket_backlog, 16); + EXPECT_EQ(result->socket_timeout_ms, 5000); + EXPECT_EQ(result->max_requests_per_sec, 100); + EXPECT_EQ(result->log_level, 1); +} + +TEST_F(ConfigTest, LoadReturnsNulloptForInvalidYaml) { + write_config("invalid.yaml", R"( +socket: + path: [this is not valid yaml + backlog: "not a number" +)"); + + auto result = cortexd::Config::load((temp_dir_ / "invalid.yaml").string()); + EXPECT_FALSE(result.has_value()); +} + +TEST_F(ConfigTest, LoadReturnsNulloptForInvalidConfig) { + write_config("invalid_values.yaml", R"( +socket: + path: /tmp/test.sock + backlog: -1 + +log_level: 1 +)"); + + auto result = cortexd::Config::load((temp_dir_ / "invalid_values.yaml").string()); + EXPECT_FALSE(result.has_value()); +} + +// ============================================================================ +// Config::save() tests +// ============================================================================ + +TEST_F(ConfigTest, SaveCreatesValidYamlFile) { + auto config = cortexd::Config::defaults(); + config.socket_path = "/tmp/saved.sock"; + config.max_requests_per_sec = 50; + + std::string save_path = (temp_dir_ / "saved.yaml").string(); + ASSERT_TRUE(config.save(save_path)); + + // Verify file exists + EXPECT_TRUE(fs::exists(save_path)); + + // Reload and verify + auto reloaded = cortexd::Config::load(save_path); + ASSERT_TRUE(reloaded.has_value()); + EXPECT_EQ(reloaded->socket_path, "/tmp/saved.sock"); + EXPECT_EQ(reloaded->max_requests_per_sec, 50); +} + +// ============================================================================ +// Config::expand_paths() tests +// ============================================================================ + +TEST_F(ConfigTest, ExpandPathsExpandsTilde) { + auto config = cortexd::Config::defaults(); + config.socket_path = "~/test.sock"; + + config.expand_paths(); + + // Should start with home directory, not ~ + EXPECT_NE(config.socket_path[0], '~'); + EXPECT_TRUE(config.socket_path.find("/test.sock") != std::string::npos); +} + +TEST_F(ConfigTest, ExpandPathsLeavesAbsolutePathsUnchanged) { + auto config = cortexd::Config::defaults(); + config.socket_path = "/absolute/path.sock"; + + config.expand_paths(); + + EXPECT_EQ(config.socket_path, "/absolute/path.sock"); +} + +// ============================================================================ +// expand_path() function tests +// ============================================================================ + +TEST_F(ConfigTest, ExpandPathFunctionExpandsTilde) { + std::string path = "~/.cortex/test"; + std::string expanded = cortexd::expand_path(path); + + EXPECT_NE(expanded[0], '~'); + EXPECT_TRUE(expanded.find("/.cortex/test") != std::string::npos); +} + +TEST_F(ConfigTest, ExpandPathFunctionHandlesEmptyString) { + std::string path = ""; + std::string expanded = cortexd::expand_path(path); + + EXPECT_TRUE(expanded.empty()); +} + +TEST_F(ConfigTest, ExpandPathFunctionHandlesAbsolutePath) { + std::string path = "/absolute/path"; + std::string expanded = cortexd::expand_path(path); + + EXPECT_EQ(expanded, "/absolute/path"); +} + +// ============================================================================ +// ConfigManager tests +// 
+TEST_F(ConfigTest, ConfigManagerReturnsSameInstance) {
+    auto& instance1 = cortexd::ConfigManager::instance();
+    auto& instance2 = cortexd::ConfigManager::instance();
+
+    EXPECT_EQ(&instance1, &instance2);
+}
+
+TEST_F(ConfigTest, ConfigManagerLoadReturnsDefaultsOnFailure) {
+    auto& manager = cortexd::ConfigManager::instance();
+
+    // Load non-existent file
+    bool result = manager.load("/nonexistent/config.yaml");
+    EXPECT_FALSE(result);
+
+    // Should still have valid defaults
+    auto config = manager.get();
+    EXPECT_EQ(config.socket_path, "/run/cortex/cortex.sock");
+}
+
+TEST_F(ConfigTest, ConfigManagerLoadSucceedsWithValidFile) {
+    write_config("manager_test.yaml", R"(
+socket:
+  path: /tmp/manager.sock
+
+log_level: 0
+)");
+
+    auto& manager = cortexd::ConfigManager::instance();
+    bool result = manager.load((temp_dir_ / "manager_test.yaml").string());
+
+    EXPECT_TRUE(result);
+
+    auto config = manager.get();
+    EXPECT_EQ(config.socket_path, "/tmp/manager.sock");
+    EXPECT_EQ(config.log_level, 0);
+}
+
+TEST_F(ConfigTest, ConfigManagerReloadWorks) {
+    write_config("reload_test.yaml", R"(
+socket:
+  path: /tmp/original.sock
+log_level: 1
+)");
+
+    auto& manager = cortexd::ConfigManager::instance();
+    manager.load((temp_dir_ / "reload_test.yaml").string());
+
+    // Modify the file
+    write_config("reload_test.yaml", R"(
+socket:
+  path: /tmp/modified.sock
+log_level: 2
+)");
+
+    // Reload
+    bool result = manager.reload();
+    EXPECT_TRUE(result);
+
+    auto config = manager.get();
+    EXPECT_EQ(config.socket_path, "/tmp/modified.sock");
+    EXPECT_EQ(config.log_level, 2);
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/daemon/tests/unit/test_logger.cpp b/daemon/tests/unit/test_logger.cpp
new file mode 100644
index 00000000..9df15e5f
--- /dev/null
+++ b/daemon/tests/unit/test_logger.cpp
@@ -0,0 +1,324 @@
+/**
+ * @file test_logger.cpp
+ * @brief Unit tests for Logger class
+ */
+
+#include <gtest/gtest.h>
+#include <atomic>
+#include <chrono>
+#include <string>
+#include <thread>
+#include <vector>
+#include "cortexd/logger.h"
+
+class LoggerTest : public ::testing::Test {
+protected:
+    void SetUp() override {
+        // Each test starts with a fresh logger state
+        cortexd::Logger::shutdown();
+    }
+
+    void TearDown() override {
+        cortexd::Logger::shutdown();
+    }
+};
+
+// ============================================================================
+// Initialization tests
+// ============================================================================
+
+TEST_F(LoggerTest, InitializesWithDefaultLevel) {
+    cortexd::Logger::init(cortexd::LogLevel::INFO, false);
+
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::INFO);
+}
+
+TEST_F(LoggerTest, InitializesWithCustomLevel) {
+    cortexd::Logger::init(cortexd::LogLevel::DEBUG, false);
+
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::DEBUG);
+}
+
+TEST_F(LoggerTest, InitializesWithErrorLevel) {
+    cortexd::Logger::init(cortexd::LogLevel::ERROR, false);
+
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::ERROR);
+}
+
+TEST_F(LoggerTest, InitializesWithCriticalLevel) {
+    cortexd::Logger::init(cortexd::LogLevel::CRITICAL, false);
+
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::CRITICAL);
+}
+
+// ============================================================================
+// Level setting tests
+// ============================================================================
+
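+// set_level() adjusts the runtime threshold after init(); get_level() should
+// observe the change immediately.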
+TEST_F(LoggerTest, SetLevelWorks) {
+    cortexd::Logger::init(cortexd::LogLevel::INFO, false);
+
+    cortexd::Logger::set_level(cortexd::LogLevel::DEBUG);
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::DEBUG);
+
+    cortexd::Logger::set_level(cortexd::LogLevel::WARN);
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::WARN);
+
+    cortexd::Logger::set_level(cortexd::LogLevel::ERROR);
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::ERROR);
+}
+
+TEST_F(LoggerTest, GetLevelReturnsCorrectLevel) {
+    cortexd::Logger::init(cortexd::LogLevel::WARN, false);
+
+    EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::WARN);
+}
+
+// ============================================================================
+// Log level filtering tests
+// ============================================================================
+
+TEST_F(LoggerTest, DebugLevelLogsAllMessages) {
+    cortexd::Logger::init(cortexd::LogLevel::DEBUG, false);
+
+    // These should not throw or crash
+    cortexd::Logger::debug("Test", "debug message");
+    cortexd::Logger::info("Test", "info message");
+    cortexd::Logger::warn("Test", "warn message");
+    cortexd::Logger::error("Test", "error message");
+    cortexd::Logger::critical("Test", "critical message");
+
+    SUCCEED();
+}
+
+TEST_F(LoggerTest, InfoLevelFiltersDebug) {
+    cortexd::Logger::init(cortexd::LogLevel::INFO, false);
+
+    // Debug should be filtered
+    cortexd::Logger::debug("Test", "should be filtered");
+
+    // These should pass through
+    cortexd::Logger::info("Test", "info message");
+    cortexd::Logger::warn("Test", "warn message");
+    cortexd::Logger::error("Test", "error message");
+    cortexd::Logger::critical("Test", "critical message");
+
+    SUCCEED();
+}
+
+TEST_F(LoggerTest, WarnLevelFiltersDebugAndInfo) {
+    cortexd::Logger::init(cortexd::LogLevel::WARN, false);
+
+    // Debug and Info should be filtered
+    cortexd::Logger::debug("Test", "should be filtered");
+    cortexd::Logger::info("Test", "should be filtered");
+
+    // These should pass through
+    cortexd::Logger::warn("Test", "warn message");
+    cortexd::Logger::error("Test", "error message");
+    cortexd::Logger::critical("Test", "critical message");
+
+    SUCCEED();
+}
+
+TEST_F(LoggerTest, ErrorLevelFiltersDebugInfoWarn) {
+    cortexd::Logger::init(cortexd::LogLevel::ERROR, false);
+
+    // Debug, Info, Warn should be filtered
+    cortexd::Logger::debug("Test", "should be filtered");
+    cortexd::Logger::info("Test", "should be filtered");
+    cortexd::Logger::warn("Test", "should be filtered");
+
+    // These should pass through
+    cortexd::Logger::error("Test", "error message");
+    cortexd::Logger::critical("Test", "critical message");
+
+    SUCCEED();
+}
+
+TEST_F(LoggerTest, CriticalLevelFiltersAllButCritical) {
+    cortexd::Logger::init(cortexd::LogLevel::CRITICAL, false);
+
+    // All but critical should be filtered
+    cortexd::Logger::debug("Test", "should be filtered");
+    cortexd::Logger::info("Test", "should be filtered");
+    cortexd::Logger::warn("Test", "should be filtered");
+    cortexd::Logger::error("Test", "should be filtered");
+
+    // Only critical should pass through
+    cortexd::Logger::critical("Test", "critical message");
+
+    SUCCEED();
+}
+
+// ============================================================================
+// Macro tests
+// ============================================================================
+
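+// The LOG_* macros are the convenience front-end over the Logger methods used
+// above; this test only checks that they compile and run at DEBUG level.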
LOG_WARN("MacroTest", "warn via macro"); + LOG_ERROR("MacroTest", "error via macro"); + LOG_CRITICAL("MacroTest", "critical via macro"); + + SUCCEED(); +} + +// ============================================================================ +// Thread safety tests +// ============================================================================ + +TEST_F(LoggerTest, ThreadSafeLogging) { + cortexd::Logger::init(cortexd::LogLevel::INFO, false); + + std::atomic log_count{0}; + std::vector threads; + + // Launch multiple threads all logging + for (int t = 0; t < 10; ++t) { + threads.emplace_back([&, t]() { + for (int i = 0; i < 100; ++i) { + cortexd::Logger::info("Thread" + std::to_string(t), "message " + std::to_string(i)); + log_count++; + } + }); + } + + for (auto& thread : threads) { + thread.join(); + } + + EXPECT_EQ(log_count.load(), 1000); +} + +TEST_F(LoggerTest, ThreadSafeLevelChange) { + cortexd::Logger::init(cortexd::LogLevel::INFO, false); + + std::atomic running{true}; + + // Thread that keeps logging + std::thread logger_thread([&]() { + while (running) { + cortexd::Logger::info("Test", "message"); + std::this_thread::sleep_for(std::chrono::microseconds(10)); + } + }); + + // Thread that keeps changing level + std::thread changer_thread([&]() { + for (int i = 0; i < 100; ++i) { + cortexd::Logger::set_level(cortexd::LogLevel::DEBUG); + cortexd::Logger::set_level(cortexd::LogLevel::INFO); + cortexd::Logger::set_level(cortexd::LogLevel::WARN); + cortexd::Logger::set_level(cortexd::LogLevel::ERROR); + } + }); + + changer_thread.join(); + running = false; + logger_thread.join(); + + // If we got here without crashing, thread safety is working + SUCCEED(); +} + +// ============================================================================ +// Edge cases +// ============================================================================ + +TEST_F(LoggerTest, EmptyMessageWorks) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + + cortexd::Logger::info("Test", ""); + + SUCCEED(); +} + +TEST_F(LoggerTest, EmptyComponentWorks) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + + cortexd::Logger::info("", "message"); + + SUCCEED(); +} + +TEST_F(LoggerTest, LongMessageWorks) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + + std::string long_message(10000, 'a'); + cortexd::Logger::info("Test", long_message); + + SUCCEED(); +} + +TEST_F(LoggerTest, SpecialCharactersInMessage) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + + cortexd::Logger::info("Test", "Special chars: \n\t\"'\\{}[]"); + cortexd::Logger::info("Test", "Unicode: 日本語 中文 한국어"); + + SUCCEED(); +} + +TEST_F(LoggerTest, LoggingWithoutInit) { + // Logger should still work even if not explicitly initialized + // (uses static defaults) + cortexd::Logger::info("Test", "message before init"); + + SUCCEED(); +} + +// ============================================================================ +// Shutdown and reinit tests +// ============================================================================ + +TEST_F(LoggerTest, ShutdownAndReinit) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + cortexd::Logger::info("Test", "before shutdown"); + + cortexd::Logger::shutdown(); + + cortexd::Logger::init(cortexd::LogLevel::INFO, false); + cortexd::Logger::info("Test", "after reinit"); + + EXPECT_EQ(cortexd::Logger::get_level(), cortexd::LogLevel::INFO); +} + +TEST_F(LoggerTest, MultipleShutdownCalls) { + cortexd::Logger::init(cortexd::LogLevel::DEBUG, false); + + 
+TEST_F(LoggerTest, MultipleShutdownCalls) {
+    cortexd::Logger::init(cortexd::LogLevel::DEBUG, false);
+
+    cortexd::Logger::shutdown();
+    cortexd::Logger::shutdown(); // Should not crash
+    cortexd::Logger::shutdown();
+
+    SUCCEED();
+}
+
+// ============================================================================
+// LogLevel enum tests
+// ============================================================================
+
+TEST_F(LoggerTest, LogLevelOrdering) {
+    // Verify log levels have correct ordering
+    EXPECT_LT(static_cast<int>(cortexd::LogLevel::DEBUG), static_cast<int>(cortexd::LogLevel::INFO));
+    EXPECT_LT(static_cast<int>(cortexd::LogLevel::INFO), static_cast<int>(cortexd::LogLevel::WARN));
+    EXPECT_LT(static_cast<int>(cortexd::LogLevel::WARN), static_cast<int>(cortexd::LogLevel::ERROR));
+    EXPECT_LT(static_cast<int>(cortexd::LogLevel::ERROR), static_cast<int>(cortexd::LogLevel::CRITICAL));
+}
+
+TEST_F(LoggerTest, AllLogLevelsHaveValues) {
+    EXPECT_EQ(static_cast<int>(cortexd::LogLevel::DEBUG), 0);
+    EXPECT_EQ(static_cast<int>(cortexd::LogLevel::INFO), 1);
+    EXPECT_EQ(static_cast<int>(cortexd::LogLevel::WARN), 2);
+    EXPECT_EQ(static_cast<int>(cortexd::LogLevel::ERROR), 3);
+    EXPECT_EQ(static_cast<int>(cortexd::LogLevel::CRITICAL), 4);
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/daemon/tests/unit/test_protocol.cpp b/daemon/tests/unit/test_protocol.cpp
new file mode 100644
index 00000000..76802f77
--- /dev/null
+++ b/daemon/tests/unit/test_protocol.cpp
@@ -0,0 +1,350 @@
+/**
+ * @file test_protocol.cpp
+ * @brief Unit tests for IPC protocol (Request/Response)
+ */
+
+#include <gtest/gtest.h>
+#include "cortexd/ipc/protocol.h"
+#include "cortexd/logger.h"
+
+class ProtocolTest : public ::testing::Test {
+protected:
+    void SetUp() override {
+        // Initialize logger in non-journald mode for tests
+        cortexd::Logger::init(cortexd::LogLevel::ERROR, false);
+    }
+
+    void TearDown() override {
+        cortexd::Logger::shutdown();
+    }
+};
+
+// ============================================================================
+// Request::parse() tests
+// ============================================================================
+
+TEST_F(ProtocolTest, ParseValidRequestWithMethod) {
+    std::string json = R"({"method": "ping"})";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(result->method, "ping");
+    EXPECT_TRUE(result->params.empty());
+    EXPECT_FALSE(result->id.has_value());
+}
+
+TEST_F(ProtocolTest, ParseValidRequestWithParams) {
+    std::string json = R"({
+        "method": "config.get",
+        "params": {"key": "socket_path"}
+    })";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(result->method, "config.get");
+    EXPECT_TRUE(result->params.contains("key"));
+    EXPECT_EQ(result->params["key"], "socket_path");
+}
+
+TEST_F(ProtocolTest, ParseValidRequestWithStringId) {
+    std::string json = R"({
+        "method": "version",
+        "id": "request-123"
+    })";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(result->method, "version");
+    ASSERT_TRUE(result->id.has_value());
+    EXPECT_EQ(result->id.value(), "request-123");
+}
+
+TEST_F(ProtocolTest, ParseValidRequestWithNumericId) {
+    std::string json = R"({
+        "method": "version",
+        "id": 42
+    })";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(result->method, "version");
+    ASSERT_TRUE(result->id.has_value());
+    EXPECT_EQ(result->id.value(), "42");
+}
+
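+// Anything without a string "method" field, or that is not valid JSON at all,
+// must be rejected by returning std::nullopt rather than a partially filled Request.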
+TEST_F(ProtocolTest, ParseReturnsNulloptForMissingMethod) {
+    std::string json = R"({"params": {"key": "value"}})";
+
+    auto result = cortexd::Request::parse(json);
+
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST_F(ProtocolTest, ParseReturnsNulloptForNonStringMethod) {
+    std::string json = R"({"method": 123})";
+
+    auto result = cortexd::Request::parse(json);
+
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST_F(ProtocolTest, ParseReturnsNulloptForInvalidJson) {
+    std::string json = "this is not json";
+
+    auto result = cortexd::Request::parse(json);
+
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST_F(ProtocolTest, ParseReturnsNulloptForEmptyString) {
+    std::string json = "";
+
+    auto result = cortexd::Request::parse(json);
+
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST_F(ProtocolTest, ParseReturnsNulloptForMalformedJson) {
+    std::string json = R"({"method": "ping")"; // Missing closing brace
+
+    auto result = cortexd::Request::parse(json);
+
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST_F(ProtocolTest, ParseHandlesEmptyParams) {
+    std::string json = R"({
+        "method": "ping",
+        "params": {}
+    })";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_TRUE(result->params.empty());
+}
+
+TEST_F(ProtocolTest, ParseHandlesComplexParams) {
+    std::string json = R"({
+        "method": "test",
+        "params": {
+            "string": "value",
+            "number": 42,
+            "boolean": true,
+            "array": [1, 2, 3],
+            "nested": {"inner": "data"}
+        }
+    })";
+
+    auto result = cortexd::Request::parse(json);
+
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(result->params["string"], "value");
+    EXPECT_EQ(result->params["number"], 42);
+    EXPECT_EQ(result->params["boolean"], true);
+    EXPECT_EQ(result->params["array"].size(), 3);
+    EXPECT_EQ(result->params["nested"]["inner"], "data");
+}
+
+// ============================================================================
+// Request::to_json() tests
+// ============================================================================
+
+TEST_F(ProtocolTest, RequestToJsonProducesValidJson) {
+    cortexd::Request req;
+    req.method = "ping";
+    req.params = cortexd::json::object();
+
+    std::string json_str = req.to_json();
+
+    // Parse it back
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_EQ(parsed["method"], "ping");
+}
+
+TEST_F(ProtocolTest, RequestToJsonIncludesParams) {
+    cortexd::Request req;
+    req.method = "test";
+    req.params = {{"key", "value"}};
+
+    std::string json_str = req.to_json();
+
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_EQ(parsed["method"], "test");
+    EXPECT_EQ(parsed["params"]["key"], "value");
+}
+
+TEST_F(ProtocolTest, RequestToJsonIncludesId) {
+    cortexd::Request req;
+    req.method = "test";
+    req.params = cortexd::json::object();
+    req.id = "my-id";
+
+    std::string json_str = req.to_json();
+
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_EQ(parsed["id"], "my-id");
+}
+
+// ============================================================================
+// Response::ok() tests
+// ============================================================================
+
+TEST_F(ProtocolTest, ResponseOkCreatesSuccessResponse) {
+    auto resp = cortexd::Response::ok();
+
+    EXPECT_TRUE(resp.success);
+    EXPECT_TRUE(resp.error.empty());
+    EXPECT_EQ(resp.error_code, 0);
+}
+
+TEST_F(ProtocolTest, ResponseOkIncludesResult) {
+    auto resp = cortexd::Response::ok({{"key", "value"}, {"number", 42}});
+
+    EXPECT_TRUE(resp.success);
+    EXPECT_EQ(resp.result["key"], "value");
+    EXPECT_EQ(resp.result["number"], 42);
+}
+
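+// ok() with an explicitly empty result object should still be reported as success.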
+TEST_F(ProtocolTest, ResponseOkWithEmptyResult) {
+    auto resp = cortexd::Response::ok(cortexd::json::object());
+
+    EXPECT_TRUE(resp.success);
+    EXPECT_TRUE(resp.result.empty());
+}
+
+// ============================================================================
+// Response::err() tests
+// ============================================================================
+
+TEST_F(ProtocolTest, ResponseErrCreatesErrorResponse) {
+    auto resp = cortexd::Response::err("Something went wrong");
+
+    EXPECT_FALSE(resp.success);
+    EXPECT_EQ(resp.error, "Something went wrong");
+    EXPECT_EQ(resp.error_code, -1); // Default code
+}
+
+TEST_F(ProtocolTest, ResponseErrWithCustomCode) {
+    auto resp = cortexd::Response::err("Not found", cortexd::ErrorCodes::METHOD_NOT_FOUND);
+
+    EXPECT_FALSE(resp.success);
+    EXPECT_EQ(resp.error, "Not found");
+    EXPECT_EQ(resp.error_code, cortexd::ErrorCodes::METHOD_NOT_FOUND);
+}
+
+TEST_F(ProtocolTest, ResponseErrWithAllErrorCodes) {
+    // Test standard JSON-RPC error codes
+    auto parse_err = cortexd::Response::err("Parse error", cortexd::ErrorCodes::PARSE_ERROR);
+    EXPECT_EQ(parse_err.error_code, -32700);
+
+    auto invalid_req = cortexd::Response::err("Invalid", cortexd::ErrorCodes::INVALID_REQUEST);
+    EXPECT_EQ(invalid_req.error_code, -32600);
+
+    auto method_not_found = cortexd::Response::err("Not found", cortexd::ErrorCodes::METHOD_NOT_FOUND);
+    EXPECT_EQ(method_not_found.error_code, -32601);
+
+    auto invalid_params = cortexd::Response::err("Invalid params", cortexd::ErrorCodes::INVALID_PARAMS);
+    EXPECT_EQ(invalid_params.error_code, -32602);
+
+    auto internal = cortexd::Response::err("Internal", cortexd::ErrorCodes::INTERNAL_ERROR);
+    EXPECT_EQ(internal.error_code, -32603);
+
+    // Test custom error codes
+    auto rate_limited = cortexd::Response::err("Rate limited", cortexd::ErrorCodes::RATE_LIMITED);
+    EXPECT_EQ(rate_limited.error_code, 102);
+
+    auto config_error = cortexd::Response::err("Config error", cortexd::ErrorCodes::CONFIG_ERROR);
+    EXPECT_EQ(config_error.error_code, 104);
+}
+
+// ============================================================================
+// Response::to_json() tests
+// ============================================================================
+
+TEST_F(ProtocolTest, ResponseToJsonProducesValidJson) {
+    auto resp = cortexd::Response::ok({{"pong", true}});
+
+    std::string json_str = resp.to_json();
+
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_TRUE(parsed["success"]);
+    EXPECT_TRUE(parsed.contains("timestamp"));
+    EXPECT_TRUE(parsed.contains("result"));
+    EXPECT_EQ(parsed["result"]["pong"], true);
+}
+
+TEST_F(ProtocolTest, ResponseToJsonErrorFormat) {
+    auto resp = cortexd::Response::err("Test error", 123);
+
+    std::string json_str = resp.to_json();
+
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_FALSE(parsed["success"]);
+    EXPECT_TRUE(parsed.contains("error"));
+    EXPECT_EQ(parsed["error"]["message"], "Test error");
+    EXPECT_EQ(parsed["error"]["code"], 123);
+}
+
+TEST_F(ProtocolTest, ResponseToJsonIncludesTimestamp) {
+    auto resp = cortexd::Response::ok();
+
+    std::string json_str = resp.to_json();
+
+    auto parsed = cortexd::json::parse(json_str);
+    EXPECT_TRUE(parsed.contains("timestamp"));
+    EXPECT_TRUE(parsed["timestamp"].is_number());
+}
+
+// ============================================================================
+// Methods namespace tests (PR1 methods only)
+// ============================================================================
+
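+// The Methods constants pin down the wire-level method strings carried in the
+// "method" field of IPC requests.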
+TEST_F(ProtocolTest, PR1MethodConstantsAreDefined) {
+    // PR1 available methods: ping, version, config.get, config.reload, shutdown
+    EXPECT_STREQ(cortexd::Methods::PING, "ping");
+    EXPECT_STREQ(cortexd::Methods::VERSION, "version");
+    EXPECT_STREQ(cortexd::Methods::CONFIG_GET, "config.get");
+    EXPECT_STREQ(cortexd::Methods::CONFIG_RELOAD, "config.reload");
+    EXPECT_STREQ(cortexd::Methods::SHUTDOWN, "shutdown");
+}
+
+TEST_F(ProtocolTest, PR2MethodConstantsAreDefined) {
+    // PR2 methods are defined in protocol.h but handlers not registered in PR1
+    // These constants exist for forward compatibility
+    EXPECT_STREQ(cortexd::Methods::STATUS, "status");
+    EXPECT_STREQ(cortexd::Methods::HEALTH, "health");
+    EXPECT_STREQ(cortexd::Methods::ALERTS, "alerts");
+}
+
+// ============================================================================
+// Round-trip tests
+// ============================================================================
+
+TEST_F(ProtocolTest, RequestRoundTrip) {
+    cortexd::Request original;
+    original.method = "test.method";
+    original.params = {{"param1", "value1"}, {"param2", 123}};
+    original.id = "test-id-456";
+
+    // Serialize
+    std::string json_str = original.to_json();
+
+    // Parse back
+    auto parsed = cortexd::Request::parse(json_str);
+
+    ASSERT_TRUE(parsed.has_value());
+    EXPECT_EQ(parsed->method, original.method);
+    EXPECT_EQ(parsed->params["param1"], original.params["param1"]);
+    EXPECT_EQ(parsed->params["param2"], original.params["param2"]);
+    EXPECT_EQ(parsed->id, original.id);
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/daemon/tests/unit/test_rate_limiter.cpp b/daemon/tests/unit/test_rate_limiter.cpp
new file mode 100644
index 00000000..2f1ddb2c
--- /dev/null
+++ b/daemon/tests/unit/test_rate_limiter.cpp
@@ -0,0 +1,212 @@
+/**
+ * @file test_rate_limiter.cpp
+ * @brief Unit tests for RateLimiter
+ */
+
+#include <gtest/gtest.h>
+#include <atomic>
+#include <chrono>
+#include <thread>
+#include <vector>
+#include "cortexd/ipc/server.h"
+#include "cortexd/logger.h"
+
+class RateLimiterTest : public ::testing::Test {
+protected:
+    void SetUp() override {
+        cortexd::Logger::init(cortexd::LogLevel::ERROR, false);
+    }
+
+    void TearDown() override {
+        cortexd::Logger::shutdown();
+    }
+};
+
+// ============================================================================
+// Basic functionality tests
+// ============================================================================
+
+TEST_F(RateLimiterTest, AllowsRequestsUnderLimit) {
+    cortexd::RateLimiter limiter(10); // 10 requests per second
+
+    // Should allow 10 requests
+    for (int i = 0; i < 10; ++i) {
+        EXPECT_TRUE(limiter.allow()) << "Request " << i << " should be allowed";
+    }
+}
+
+TEST_F(RateLimiterTest, DeniesRequestsOverLimit) {
+    cortexd::RateLimiter limiter(5); // 5 requests per second
+
+    // Allow 5 requests
+    for (int i = 0; i < 5; ++i) {
+        EXPECT_TRUE(limiter.allow());
+    }
+
+    // 6th request should be denied
+    EXPECT_FALSE(limiter.allow());
+}
+
+TEST_F(RateLimiterTest, ResetsAfterOneSecond) {
+    cortexd::RateLimiter limiter(5);
+
+    // Use up the limit
+    for (int i = 0; i < 5; ++i) {
+        limiter.allow();
+    }
+    EXPECT_FALSE(limiter.allow());
+
+    // Wait for window to reset
+    std::this_thread::sleep_for(std::chrono::milliseconds(1100));
+
+    // Should allow requests again
+    EXPECT_TRUE(limiter.allow());
+}
+
+TEST_F(RateLimiterTest, ResetMethodWorks) {
+    cortexd::RateLimiter limiter(3);
+
+    // Use up the limit
+    for (int i = 0; i < 3; ++i) {
+        limiter.allow();
+    }
+    EXPECT_FALSE(limiter.allow());
+
+    // Reset
+    limiter.reset();
+
+    // Should allow requests again
+    EXPECT_TRUE(limiter.allow());
+}
+
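+// Note: reset() takes effect immediately; the window tests further below cover
+// expiry-based refill of the request budget.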
+// ============================================================================
+// Edge cases
+// ============================================================================
+
+TEST_F(RateLimiterTest, HandlesHighLimit) {
+    cortexd::RateLimiter limiter(1000);
+
+    // Should allow many requests
+    for (int i = 0; i < 1000; ++i) {
+        EXPECT_TRUE(limiter.allow());
+    }
+
+    // 1001st should be denied
+    EXPECT_FALSE(limiter.allow());
+}
+
+TEST_F(RateLimiterTest, HandlesLimitOfOne) {
+    cortexd::RateLimiter limiter(1);
+
+    EXPECT_TRUE(limiter.allow());
+    EXPECT_FALSE(limiter.allow());
+    EXPECT_FALSE(limiter.allow());
+}
+
+// ============================================================================
+// Thread safety tests
+// ============================================================================
+
+TEST_F(RateLimiterTest, ThreadSafetyUnderConcurrentAccess) {
+    cortexd::RateLimiter limiter(100);
+    std::atomic<int> allowed_count{0};
+    std::atomic<int> denied_count{0};
+
+    // Launch multiple threads making requests
+    std::vector<std::thread> threads;
+    for (int t = 0; t < 10; ++t) {
+        threads.emplace_back([&]() {
+            for (int i = 0; i < 20; ++i) {
+                if (limiter.allow()) {
+                    allowed_count++;
+                } else {
+                    denied_count++;
+                }
+            }
+        });
+    }
+
+    for (auto& thread : threads) {
+        thread.join();
+    }
+
+    // Total requests: 10 threads * 20 requests = 200
+    EXPECT_EQ(allowed_count + denied_count, 200);
+
+    // Allowed should not exceed limit
+    EXPECT_LE(allowed_count.load(), 100);
+}
+
+TEST_F(RateLimiterTest, ConcurrentResetIsSafe) {
+    cortexd::RateLimiter limiter(50);
+    std::atomic<bool> running{true};
+
+    // Thread that keeps making requests
+    std::thread requester([&]() {
+        while (running) {
+            limiter.allow();
+        }
+    });
+
+    // Thread that keeps resetting
+    std::thread resetter([&]() {
+        for (int i = 0; i < 100; ++i) {
+            limiter.reset();
+            std::this_thread::sleep_for(std::chrono::microseconds(100));
+        }
+    });
+
+    resetter.join();
+    running = false;
+    requester.join();
+
+    // If we got here without crashing, thread safety is working
+    SUCCEED();
+}
+
+// ============================================================================
+// Window behavior tests
+// ============================================================================
+
+TEST_F(RateLimiterTest, WindowResetsCorrectly) {
+    cortexd::RateLimiter limiter(5);
+
+    // Make 3 requests
+    for (int i = 0; i < 3; ++i) {
+        EXPECT_TRUE(limiter.allow());
+    }
+
+    // Wait half a second (window hasn't reset)
+    std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+    // Should still have only 2 remaining
+    EXPECT_TRUE(limiter.allow());
+    EXPECT_TRUE(limiter.allow());
+    EXPECT_FALSE(limiter.allow());
+
+    // Wait for full window reset
+    std::this_thread::sleep_for(std::chrono::milliseconds(600));
+
+    // Should have full capacity again
+    EXPECT_TRUE(limiter.allow());
+}
+
+TEST_F(RateLimiterTest, MultipleWindowCycles) {
+    cortexd::RateLimiter limiter(3);
+
+    for (int cycle = 0; cycle < 3; ++cycle) {
+        // Use up the limit
+        for (int i = 0; i < 3; ++i) {
+            EXPECT_TRUE(limiter.allow()) << "Cycle " << cycle << ", request " << i;
+        }
+        EXPECT_FALSE(limiter.allow()) << "Cycle " << cycle << " should be exhausted";
+
+        // Wait for reset
+        std::this_thread::sleep_for(std::chrono::milliseconds(1100));
+    }
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}