class="mt-1 px-1.5 block w-full rounded-md border border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:bg-gray-900 dark:placeholder-gray-300 dark:text-gray-300"
/>
+
+
+
+
+
@@ -10989,131 +11123,22 @@ Breakdown by Type:
let currentLogPage = 0;
const logsPerPage = 100;
- async function refreshLogs() {
- const level = document.getElementById("log-level-filter").value;
- const entityType = document.getElementById("log-entity-filter").value;
- const search = document.getElementById("log-search").value;
-
- const params = new URLSearchParams({
- limit: logsPerPage,
- offset: currentLogPage * logsPerPage,
- order: "desc",
- });
-
- if (level) params.append("level", level);
- if (entityType) params.append("entity_type", entityType);
- if (search) params.append("search", search);
-
- try {
- const headers = {};
- const token = localStorage.getItem("token");
- if (token) {
- headers["Authorization"] = `Bearer ${token}`;
- }
-
- const response = await fetch(
- `${window.ROOT_PATH || ""}/admin/logs?${params}`,
- {
- headers: headers,
- credentials: "same-origin",
- },
- );
-
- if (!response.ok) throw new Error(`HTTP ${response.status}`);
-
- const data = await response.json();
- displayLogs(data.logs);
- updateLogStats(data.stats);
- updateLogCount(data.total);
- } catch (error) {
- console.error("Error fetching logs:", error);
- showErrorMessage("Failed to fetch logs");
- }
- }
-
- function displayLogs(logs) {
- const tbody = document.getElementById("logs-tbody");
- tbody.innerHTML = "";
-
- logs.forEach((log) => {
- const row = document.createElement("tr");
- row.className = "hover:bg-gray-50 dark:hover:bg-gray-700";
-
- const timestamp = new Date(log.timestamp).toLocaleString();
- const levelClass = getLevelClass(log.level);
- const entity = log.entity_name || log.entity_id || "-";
-
- row.innerHTML = `
- <td>${timestamp}</td>
- <td><span class="${levelClass}">${log.level}</span></td>
- <td>${log.entity_type ? `${log.entity_type}: ${entity}` : entity}</td>
- <td>${escapeHtml(log.message)}</td>
- `;
-
- tbody.appendChild(row);
- });
- }
-
- function getLevelClass(level) {
- switch (level) {
- case "debug":
- return "bg-gray-100 text-gray-800";
- case "info":
- return "bg-blue-100 text-blue-800";
- case "warning":
- return "bg-yellow-100 text-yellow-800";
- case "error":
- return "bg-red-100 text-red-800";
- case "critical":
- return "bg-red-600 text-white";
- default:
- return "bg-gray-100 text-gray-800";
- }
- }
-
- function updateLogStats(stats) {
- if (!stats) return;
-
- const statsDiv = document.getElementById("log-stats");
- const levelDist = stats.level_distribution || {};
- const entityDist = stats.entity_distribution || {};
-
- let html = `
- Buffer: ${stats.usage_percent || 0}% (${stats.buffer_size_mb || 0}/${stats.max_size_mb || 0} MB)
- Total: ${stats.total_logs || 0} logs
- `;
-
- if (Object.keys(levelDist).length > 0) {
- html += "Levels: ";
- for (const [level, count] of Object.entries(levelDist)) {
- html += `${level}(${count}) `;
- }
- html += "";
- }
-
- html += "";
- statsDiv.innerHTML = html;
- }
-
- function updateLogCount(total) {
- document.getElementById("log-count").textContent = `${total} logs`;
-
- // Update pagination buttons
- document.getElementById("prev-page").disabled = currentLogPage === 0;
- document.getElementById("next-page").disabled =
- (currentLogPage + 1) * logsPerPage >= total;
- }
+ // Note: Structured logging functions are defined in admin.js, which is loaded below:
+ // - searchStructuredLogs() - Search logs with filters
+ // - displayLogResults() - Display the log table
+ // - showCorrelationTrace() - Show the correlation trace modal
+ // - displayCorrelationTrace() - Display trace results
+ // - Helper functions: getLevelClass(), formatDuration(), getDurationClass()
+ //
+ // showSecurityEvents, showAuditTrail, showPerformanceMetrics,
+ // updateLogStats, and updateLogCount are also defined in admin.js, which
+ // loads last and overrides any inline definitions.
+ //
+ // All structured logging UI logic is kept centralized in admin.js to avoid
+ // duplication and maintenance issues; admin.js provides the definitive
+ // implementations.
function toggleLogStream() {
const button = document.getElementById("stream-toggle");
@@ -11207,7 +11232,7 @@ Breakdown by Type:
try {
// Use the same auth approach as other admin endpoints
const headers = {};
- const token = localStorage.getItem("token");
+ const token = getAuthToken();
if (token) {
headers["Authorization"] = `Bearer ${token}`;
}
@@ -11258,7 +11283,7 @@ Breakdown by Type:
async function showLogFiles() {
try {
const headers = {};
- const token = localStorage.getItem("token");
+ const token = getAuthToken();
if (token) {
headers["Authorization"] = `Bearer ${token}`;
}
@@ -11326,7 +11351,7 @@ Available Log Files
async function downloadLogFile(filename) {
try {
const headers = {};
- const token = localStorage.getItem("token");
+ const token = getAuthToken();
if (token) {
headers["Authorization"] = `Bearer ${token}`;
}
@@ -11387,7 +11412,7 @@ Available Log Files
document.addEventListener("DOMContentLoaded", () => {
const logFilters = [
"log-level-filter",
- "log-entity-filter",
+ "log-component-filter",
"log-search",
];
logFilters.forEach((id) => {
diff --git a/mcpgateway/templates/resources_partial.html b/mcpgateway/templates/resources_partial.html
index 209df6880..9306b029e 100644
--- a/mcpgateway/templates/resources_partial.html
+++ b/mcpgateway/templates/resources_partial.html
@@ -30,6 +30,7 @@
{{ 'Active' if resource.enabled else 'Inactive' }} |
+
diff --git a/mcpgateway/toolops/README.md b/mcpgateway/toolops/README.md
index 61bdc31e3..4142525ed 100644
--- a/mcpgateway/toolops/README.md
+++ b/mcpgateway/toolops/README.md
@@ -1,14 +1,15 @@
### Starting MCP context forge from git repo
* Use `make venv` to create virtual environment (tested with python 3.12)
-* Install MCP-CF and toolops dependencies using `make install install-dev install-toolops`. Please check if all the packages are installed in the created virtual environment.
+* Install MCP-CF and dependencies using `make install install-dev`
+* Install toolops and other dependencies using `uv pip install .'[toolops,grpc]'`. Please check that all the packages are installed in the created virtual environment.
* `uvicorn mcpgateway.main:app --host 0.0.0.0 --port 4444 --workers 2 --env-file .env` will start the Context Forge UI and APIs at http://localhost:4444/docs, where the toolops API endpoints will be shown.
### Important NOTE:
* Please provide all configurations, such as the LLM provider and API keys, in the `.env` file, and set `TOOLOPS_ENABLED=true` to enable toolops functionality.
* When selecting an LLM model, please use a model that supports instruction-following (IF) text generation tasks and tool-calling capabilities for executing tools in chat mode, for example `granite4:micro` or `llama-3-3-70b-instruct`.
* Toolops depends on the `agent life cycle toolkit (ALTK)`, which is listed in the required packages in `pyproject.toml`; to install ALTK, set up a GitHub public SSH key if required.
-* For toolops developement (Caution) : Only if required to re-install of latest version of `agent life cycle toolkit(ALTK)` from git repo in case of fixes/updates please use pip install via git ssh url.
+* For toolops development (caution): only if you need to re-install the latest version of the `agent life cycle toolkit (ALTK)` from the git repo for fixes/updates, use pip install via the git SSH URL.
### Testing toolops requires a running MCP server; set up an MCP server from an OpenAPI specification as follows
```
@@ -17,4 +18,4 @@ python3 -m mcpgateway.translate \
--expose-sse \
--expose-streamable-http \
--port 9000
-```
\ No newline at end of file
+```
diff --git a/mcpgateway/tools/builder/__init__.py b/mcpgateway/tools/builder/__init__.py
new file mode 100644
index 000000000..ec309d8bd
--- /dev/null
+++ b/mcpgateway/tools/builder/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/__init__.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Builder Package.
+"""
diff --git a/mcpgateway/tools/builder/cli.py b/mcpgateway/tools/builder/cli.py
new file mode 100644
index 000000000..0fdfaebfd
--- /dev/null
+++ b/mcpgateway/tools/builder/cli.py
@@ -0,0 +1,337 @@
+# -*- coding: utf-8 -*-
+"""
+Location: ./mcpgateway/tools/builder/cli.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+MCP Stack Deployment Tool - Hybrid Dagger/Python Implementation
+
+This script can run in two modes:
+1. Plain Python mode (default) - No external dependencies
+2. Dagger mode (opt-in) - Requires dagger-io package, auto-downloads CLI
+
+Usage:
+ # Local execution (plain Python mode)
+ cforge deploy deploy.yaml
+
+ # Use Dagger mode for optimization (requires dagger-io, auto-downloads CLI)
+ cforge --dagger deploy deploy.yaml
+
+ # Inside container
+ docker run -v $PWD:/workspace mcpgateway/mcp-builder:latest deploy deploy.yaml
+
+Features:
+ - Validates deploy.yaml configuration
+ - Builds plugin containers from git repos
+ - Generates mTLS certificates
+ - Deploys to Kubernetes or Docker Compose
+ - Integrates with CI/CD vault secrets
+
+Examples:
+ >>> # Test that IN_CONTAINER detection works
+ >>> import os
+ >>> isinstance(IN_CONTAINER, bool)
+ True
+
+ >>> # Test that BUILDER_DIR is a Path
+ >>> from pathlib import Path
+ >>> isinstance(BUILDER_DIR, Path)
+ True
+
+ >>> # Test IMPL_MODE is set
+ >>> isinstance(IMPL_MODE, str)
+ True
+"""
+
+# Standard
+import asyncio
+import os
+from pathlib import Path
+import sys
+from typing import Optional
+
+# Third-Party
+from rich.console import Console
+from rich.panel import Panel
+import typer
+from typing_extensions import Annotated
+
+# First-Party
+from mcpgateway.tools.builder.factory import DeployFactory
+
+app = typer.Typer(
+ help="Command line tools for deploying the gateway and plugins via a config file.",
+)
+
+console = Console()
+
+deployer = None
+
+IN_CONTAINER = os.path.exists("/.dockerenv") or os.environ.get("CONTAINER") == "true"
+BUILDER_DIR = Path(__file__).parent / "builder"
+IMPL_MODE = "plain"
+
+
+@app.callback()
+def cli(
+ ctx: typer.Context,
+ dagger: Annotated[bool, typer.Option("--dagger", help="Use Dagger mode (requires dagger-io package)")] = False,
+ verbose: Annotated[bool, typer.Option("--verbose", "-v", help="Verbose output")] = False,
+):
+ """MCP Stack deployment tool
+
+ Deploys MCP Gateway + external plugins from a single YAML configuration.
+
+ By default, uses plain Python mode. Use --dagger to enable Dagger optimization.
+
+ Args:
+ ctx: Typer context object
+ dagger: Enable Dagger mode (requires dagger-io package and auto-downloads CLI)
+ verbose: Enable verbose output
+ """
+ ctx.ensure_object(dict)
+ ctx.obj["verbose"] = verbose
+ ctx.obj["dagger"] = dagger
+
+ if ctx.invoked_subcommand != "version":
+ # Show execution mode - default to Python, opt-in to Dagger
+ mode = "dagger" if dagger else "python"
+ ctx.obj["deployer"], ctx.obj["mode"] = DeployFactory.create_deployer(mode, verbose)
+ mode_color = "green" if ctx.obj["mode"] == "dagger" else "yellow"
+ env_text = "container" if IN_CONTAINER else "local"
+
+ if verbose:
+ console.print(Panel(f"[bold]Mode:[/bold] [{mode_color}]{ctx.obj['mode']}[/{mode_color}]\n" f"[bold]Environment:[/bold] {env_text}\n", title="MCP Deploy", border_style=mode_color))
+
+
+@app.command()
+def validate(ctx: typer.Context, config_file: Annotated[Path, typer.Argument(help="The deployment configuration file.")]):
+ """Validate mcp-stack.yaml configuration
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ impl.validate(config_file)
+ console.print("[green]✓ Configuration valid[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Validation failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def build(
+ ctx: typer.Context,
+ config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")],
+ plugins_only: Annotated[bool, typer.Option("--plugins-only", help="Only build plugin containers")] = False,
+ plugin: Annotated[Optional[list[str]], typer.Option("--plugin", "-p", help="Build specific plugin(s)")] = None,
+ no_cache: Annotated[bool, typer.Option("--no-cache", help="Disable build cache")] = False,
+ copy_env_templates: Annotated[bool, typer.Option("--copy-env-templates", help="Copy .env.template files from plugin repos")] = True,
+):
+ """Build containers
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ plugins_only: Only build plugin containers, skip gateway
+ plugin: List of specific plugin names to build
+ no_cache: Disable build cache
+ copy_env_templates: Copy .env.template files from plugin repos
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ asyncio.run(impl.build(config_file, plugins_only=plugins_only, specific_plugins=list(plugin) if plugin else None, no_cache=no_cache, copy_env_templates=copy_env_templates))
+ console.print("[green]✓ Build complete[/green]")
+
+ if copy_env_templates:
+ console.print("[yellow]⚠ IMPORTANT: Review .env files in deploy/env/ before deploying![/yellow]")
+ console.print("[yellow] Update any required configuration values.[/yellow]")
+ except Exception as e:
+ console.print(f"[red]✗ Build failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def certs(ctx: typer.Context, config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")]):
+ """Generate mTLS certificates
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ asyncio.run(impl.generate_certificates(config_file))
+ console.print("[green]✓ Certificates generated[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Certificate generation failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def deploy(
+ ctx: typer.Context,
+ config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")],
+ output_dir: Annotated[Optional[Path], typer.Option("--output-dir", "-o", help="Output directory for generated manifests")] = None,
+ dry_run: Annotated[bool, typer.Option("--dry-run", help="Generate manifests without deploying")] = False,
+ skip_build: Annotated[bool, typer.Option("--skip-build", help="Skip building containers")] = False,
+ skip_certs: Annotated[bool, typer.Option("--skip-certs", help="Skip certificate generation")] = False,
+):
+ """Deploy MCP stack
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ output_dir: Custom output directory for manifests
+ dry_run: Generate manifests without deploying
+ skip_build: Skip building containers
+ skip_certs: Skip certificate generation
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ asyncio.run(impl.deploy(config_file, dry_run=dry_run, skip_build=skip_build, skip_certs=skip_certs, output_dir=output_dir))
+ if dry_run:
+ console.print("[yellow]✓ Dry-run complete (no changes made)[/yellow]")
+ else:
+ console.print("[green]✓ Deployment complete[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Deployment failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def verify(
+ ctx: typer.Context,
+ config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")],
+ wait: Annotated[bool, typer.Option("--wait", help="Wait for deployment to be ready")] = True,
+ timeout: Annotated[int, typer.Option("--timeout", help="Wait timeout in seconds")] = 300,
+):
+ """Verify deployment health
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ wait: Wait for deployment to be ready
+ timeout: Wait timeout in seconds
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ asyncio.run(impl.verify(config_file, wait=wait, timeout=timeout))
+ console.print("[green]✓ Deployment healthy[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Verification failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def destroy(
+ ctx: typer.Context,
+ config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")],
+ force: Annotated[bool, typer.Option("--force", help="Force destruction without confirmation")] = False,
+):
+ """Destroy deployed MCP stack
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ force: Force destruction without confirmation
+ """
+ impl = ctx.obj["deployer"]
+
+ if not force:
+ if not typer.confirm("Are you sure you want to destroy the deployment?"):
+ console.print("[yellow]Aborted[/yellow]")
+ return
+
+ try:
+ asyncio.run(impl.destroy(config_file))
+ console.print("[green]✓ Deployment destroyed[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Destruction failed: {e}[/red]")
+ sys.exit(1)
+
+
+@app.command()
+def version():
+ """Show version information
+
+ Examples:
+ >>> # Test that version function exists
+ >>> callable(version)
+ True
+
+ >>> # Test that it accesses module constants
+ >>> IMPL_MODE in ['plain', 'dagger']
+ True
+ """
+ console.print(
+ Panel(f"[bold]MCP Deploy[/bold]\n" f"Version: 1.0.0\n" f"Mode: {IMPL_MODE}\n" f"Environment: {'container' if IN_CONTAINER else 'local'}\n", title="Version Info", border_style="blue")
+ )
+
+
+@app.command()
+def generate(
+ ctx: typer.Context,
+ config_file: Annotated[Path, typer.Argument(help="The deployment configuration file")],
+ output: Annotated[Optional[Path], typer.Option("--output", "-o", help="Output directory for manifests")] = None,
+):
+ """Generate deployment manifests (k8s or compose)
+
+ Args:
+ ctx: Typer context object
+ config_file: Path to the deployment configuration file
+ output: Output directory for manifests
+ """
+ impl = ctx.obj["deployer"]
+
+ try:
+ manifests_dir = impl.generate_manifests(config_file, output_dir=output)
+ console.print(f"[green]✓ Manifests generated: {manifests_dir}[/green]")
+ except Exception as e:
+ console.print(f"[red]✗ Manifest generation failed: {e}[/red]")
+ sys.exit(1)
+
+
+def main():
+ """Main entry point
+
+ Raises:
+ Exception: Any unhandled exception from subcommands (re-raised in debug mode)
+
+ Examples:
+ >>> # Test that main function exists and is callable
+ >>> callable(main)
+ True
+
+ >>> # Test that app is a Typer instance
+ >>> import typer
+ >>> isinstance(app, typer.Typer)
+ True
+
+ >>> # Test that console is available
+ >>> from rich.console import Console
+ >>> isinstance(console, Console)
+ True
+ """
+ try:
+ app(obj={})
+ except KeyboardInterrupt:
+ console.print("\n[yellow]Interrupted by user[/yellow]")
+ sys.exit(130)
+ except Exception as e:
+ console.print(f"[red]Fatal error: {e}[/red]")
+ if os.environ.get("MCP_DEBUG"):
+ raise
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
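For a quick smoke test of the Typer wiring above, here is a minimal in-process invocation sketch. It assumes the package is installed so that `mcpgateway.tools.builder.cli` is importable; `version` is used because the CLI callback skips `DeployFactory` for that subcommand.

```
# Sketch: drive the new CLI in-process with Typer's test runner.
from typer.testing import CliRunner

from mcpgateway.tools.builder.cli import app

runner = CliRunner()

# "version" bypasses deployer creation in the callback, so it needs no
# config file, container runtime, or dagger-io install.
result = runner.invoke(app, ["version"])
print(result.output)
assert result.exit_code == 0
```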
diff --git a/mcpgateway/tools/builder/common.py b/mcpgateway/tools/builder/common.py
new file mode 100644
index 000000000..940652d6d
--- /dev/null
+++ b/mcpgateway/tools/builder/common.py
@@ -0,0 +1,1268 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/common.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Common utilities shared between Dagger and plain Python implementations.
+
+This module contains shared functionality to avoid code duplication between
+the Dagger-based (dagger_module.py) and plain Python (plain_deploy.py)
+implementations of the MCP Stack deployment system.
+
+Shared functions:
+- load_config: Load and parse YAML configuration file
+- generate_plugin_config: Generate plugins-config.yaml for gateway from mcp-stack.yaml
+- generate_kubernetes_manifests: Generate Kubernetes deployment manifests
+- generate_compose_manifests: Generate Docker Compose manifest
+- copy_env_template: Copy .env.template from plugin repo to env.d/ directory
+- handle_registry_operations: Tag and push images to container registry
+- get_docker_compose_command: Detect available docker compose command
+- run_compose: Run docker compose with error handling
+- deploy_compose: Deploy using docker compose up -d
+- verify_compose: Verify deployment with docker compose ps
+- destroy_compose: Destroy deployment with docker compose down -v
+- deploy_kubernetes: Deploy to Kubernetes using kubectl
+- verify_kubernetes: Verify Kubernetes deployment health
+- destroy_kubernetes: Destroy Kubernetes deployment with kubectl delete
+"""
+
+# Standard
+import base64
+import os
+from pathlib import Path
+import shutil
+import subprocess # nosec B404
+from typing import List
+
+# Third-Party
+from jinja2 import Environment, FileSystemLoader
+from rich.console import Console
+import yaml
+
+# First-Party
+from mcpgateway.tools.builder.schema import MCPStackConfig
+
+console = Console()
+
+
+def get_deploy_dir() -> Path:
+ """Get deployment directory from environment variable or default.
+
+ Checks MCP_DEPLOY_DIR environment variable, defaults to './deploy'.
+
+ Returns:
+ Path to deployment directory
+
+ Examples:
+ >>> # Test with default value (when MCP_DEPLOY_DIR is not set)
+ >>> import os
+ >>> old_value = os.environ.pop("MCP_DEPLOY_DIR", None)
+ >>> result = get_deploy_dir()
+ >>> isinstance(result, Path)
+ True
+ >>> str(result)
+ 'deploy'
+
+ >>> # Test with custom environment variable
+ >>> os.environ["MCP_DEPLOY_DIR"] = "/custom/deploy"
+ >>> result = get_deploy_dir()
+ >>> str(result)
+ '/custom/deploy'
+
+ >>> # Cleanup: restore original value
+ >>> if old_value is not None:
+ ... os.environ["MCP_DEPLOY_DIR"] = old_value
+ ... else:
+ ... _ = os.environ.pop("MCP_DEPLOY_DIR", None)
+ """
+ deploy_dir = os.environ.get("MCP_DEPLOY_DIR", "./deploy")
+ return Path(deploy_dir)
+
+
+def load_config(config_file: str) -> MCPStackConfig:
+ """Load and parse YAML configuration file into validated Pydantic model.
+
+ Args:
+ config_file: Path to mcp-stack.yaml configuration file
+
+ Returns:
+ Validated MCPStackConfig Pydantic model
+
+ Raises:
+ FileNotFoundError: If configuration file doesn't exist
+ ValidationError: If configuration validation fails
+
+ Examples:
+ >>> # Test with non-existent file
+ >>> try:
+ ... load_config("/nonexistent/path/config.yaml")
+ ... except FileNotFoundError as e:
+ ... "Configuration file not found" in str(e)
+ True
+
+ >>> # Test that function returns MCPStackConfig type
+ >>> from mcpgateway.tools.builder.schema import MCPStackConfig
+ >>> # Actual file loading would require a real file:
+ >>> # config = load_config("mcp-stack.yaml")
+ >>> # assert isinstance(config, MCPStackConfig)
+ """
+ config_path = Path(config_file)
+ if not config_path.exists():
+ raise FileNotFoundError(f"Configuration file not found: {config_file}")
+
+ with open(config_path, encoding="utf-8") as f:
+ config_dict = yaml.safe_load(f)
+
+ # Validate and return Pydantic model
+ return MCPStackConfig.model_validate(config_dict)
+
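As an illustration of `load_config`, a minimal round trip (a sketch; the YAML keys mirror the `MCPStackConfig` fields used in the doctests below, and the image name is a placeholder):

```
# Sketch: write a minimal mcp-stack.yaml and load it into the validated model.
import tempfile
from pathlib import Path

from mcpgateway.tools.builder.common import load_config

YAML_TEXT = """\
deployment:
  type: compose
gateway:
  image: mcpgateway/mcpgateway:latest
plugins: []
"""

with tempfile.TemporaryDirectory() as tmpdir:
    cfg = Path(tmpdir) / "mcp-stack.yaml"
    cfg.write_text(YAML_TEXT)
    config = load_config(str(cfg))   # returns a validated MCPStackConfig
    print(config.deployment.type)    # -> compose
```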
+
+def generate_plugin_config(config: MCPStackConfig, output_dir: Path, verbose: bool = False) -> Path:
+ """Generate plugins-config.yaml for the gateway from mcp-stack.yaml.
+
+ This function is shared between Dagger and plain Python implementations
+ to avoid code duplication.
+
+ Args:
+ config: Validated MCPStackConfig Pydantic model
+ output_dir: Output directory for generated config
+ verbose: Print verbose output
+
+ Returns:
+ Path to generated plugins-config.yaml file
+
+ Raises:
+ FileNotFoundError: If template directory not found
+
+ Examples:
+ >>> from pathlib import Path
+ >>> from mcpgateway.tools.builder.schema import MCPStackConfig, DeploymentConfig, GatewayConfig
+ >>> import tempfile
+ >>> # Test with minimal config
+ >>> with tempfile.TemporaryDirectory() as tmpdir:
+ ... output = Path(tmpdir)
+ ... config = MCPStackConfig(
+ ... deployment=DeploymentConfig(type="compose"),
+ ... gateway=GatewayConfig(image="test:latest"),
+ ... plugins=[]
+ ... )
+ ... result = generate_plugin_config(config, output, verbose=False)
+ ... result.name
+ 'plugins-config.yaml'
+
+ >>> # Test return type
+ >>> # result_path = generate_plugin_config(config, output_dir)
+ >>> # isinstance(result_path, Path)
+ >>> # True
+ """
+
+ deployment_type = config.deployment.type
+ plugins = config.plugins
+
+ # Load template
+ template_dir = Path(__file__).parent / "templates"
+ if not template_dir.exists():
+ raise FileNotFoundError(f"Template directory not found: {template_dir}")
+
+ # YAML files should not use HTML autoescape
+ env = Environment(loader=FileSystemLoader(str(template_dir)), autoescape=False) # nosec B701
+ template = env.get_template("plugins-config.yaml.j2")
+
+ # Prepare plugin data with computed URLs
+ plugin_data = []
+ for plugin in plugins:
+ plugin_name = plugin.name
+ port = plugin.port or 8000
+
+ # Determine URL based on deployment type
+ if deployment_type == "compose":
+ # Use container hostname (lowercase)
+ hostname = plugin_name.lower()
+ # Use HTTPS if mTLS is enabled
+ protocol = "https" if plugin.mtls_enabled else "http"
+ url = f"{protocol}://{hostname}:{port}/mcp"
+ else: # kubernetes
+ # Use Kubernetes service DNS
+ namespace = config.deployment.namespace or "mcp-gateway"
+ service_name = f"mcp-plugin-{plugin_name.lower()}"
+ protocol = "https" if plugin.mtls_enabled else "http"
+ url = f"{protocol}://{service_name}.{namespace}.svc:{port}/mcp"
+
+ # Build plugin entry with computed URL
+ plugin_entry = {
+ "name": plugin_name,
+ "port": port,
+ "url": url,
+ }
+
+ # Merge plugin_overrides (client-side config only, excludes 'config')
+ # Allowed client-side fields that plugin manager uses
+ if plugin.plugin_overrides:
+ overrides = plugin.plugin_overrides
+ allowed_fields = ["priority", "mode", "description", "version", "author", "hooks", "tags", "conditions"]
+ for field in allowed_fields:
+ if field in overrides:
+ plugin_entry[field] = overrides[field]
+
+ plugin_data.append(plugin_entry)
+
+ # Render template
+ rendered = template.render(plugins=plugin_data)
+
+ # Write config file
+ config_path = output_dir / "plugins-config.yaml"
+ config_path.write_text(rendered)
+
+ if verbose:
+ print(f"✓ Plugin config generated: {config_path}")
+
+ return config_path
+
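To make the URL scheme above concrete, a small sketch for a hypothetical plugin named `PIIFilter` on port 8001 with mTLS enabled (the plugin name and port are illustrative; the namespace is the `mcp-gateway` default):

```
# Sketch of the per-deployment-type plugin URLs computed above.
name, port, mtls_enabled = "PIIFilter", 8001, True  # hypothetical values
protocol = "https" if mtls_enabled else "http"

# compose: the container hostname is the lowercased plugin name
print(f"{protocol}://{name.lower()}:{port}/mcp")
# -> https://piifilter:8001/mcp

# kubernetes: service DNS inside the default "mcp-gateway" namespace
print(f"{protocol}://mcp-plugin-{name.lower()}.mcp-gateway.svc:{port}/mcp")
# -> https://mcp-plugin-piifilter.mcp-gateway.svc:8001/mcp
```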
+
+def generate_kubernetes_manifests(config: MCPStackConfig, output_dir: Path, verbose: bool = False) -> None:
+ """Generate Kubernetes manifests from configuration.
+
+ Args:
+ config: Validated MCPStackConfig Pydantic model
+ output_dir: Output directory for manifests
+ verbose: Print verbose output
+
+ Raises:
+ FileNotFoundError: If template directory not found
+
+ Examples:
+ >>> from pathlib import Path
+ >>> import inspect
+ >>> # Test function signature
+ >>> sig = inspect.signature(generate_kubernetes_manifests)
+ >>> list(sig.parameters.keys())
+ ['config', 'output_dir', 'verbose']
+
+ >>> # Test that verbose parameter has default
+ >>> sig.parameters['verbose'].default
+ False
+
+ >>> # Actual usage requires valid config and templates:
+ >>> # from mcpgateway.tools.builder.schema import MCPStackConfig
+ >>> # generate_kubernetes_manifests(config, Path("./output"))
+ """
+
+ # Load templates
+ template_dir = Path(__file__).parent / "templates" / "kubernetes"
+ if not template_dir.exists():
+ raise FileNotFoundError(f"Template directory not found: {template_dir}")
+
+ # Auto-detect and assign env files if not specified
+ _auto_detect_env_files(config, output_dir, verbose=verbose)
+
+ env = Environment(loader=FileSystemLoader(str(template_dir)), autoescape=True) # nosec B701
+
+ # Generate namespace
+ namespace = config.deployment.namespace or "mcp-gateway"
+
+ # Generate mTLS certificate resources if enabled
+ gateway_mtls = config.gateway.mtls_enabled if config.gateway.mtls_enabled is not None else True
+ cert_config = config.certificates
+ use_cert_manager = cert_config.use_cert_manager if cert_config else False
+
+ if gateway_mtls:
+ if use_cert_manager:
+ # Generate cert-manager Certificate CRDs
+ cert_manager_template = env.get_template("cert-manager-certificates.yaml.j2")
+
+ # Calculate duration and renewBefore in hours
+ validity_days = cert_config.validity_days or 825
+ duration_hours = validity_days * 24
+ # Renew at 2/3 of lifetime (cert-manager default)
+ renew_before_hours = int(duration_hours * 2 / 3)
+
+ # Prepare certificate data
+ cert_data = {
+ "namespace": namespace,
+ "gateway_name": "mcpgateway",
+ "issuer_name": cert_config.cert_manager_issuer or "mcp-ca-issuer",
+ "issuer_kind": cert_config.cert_manager_kind or "Issuer",
+ "duration": duration_hours,
+ "renew_before": renew_before_hours,
+ "plugins": [],
+ }
+
+ # Add plugins with mTLS enabled
+ for plugin in config.plugins:
+ if plugin.mtls_enabled if plugin.mtls_enabled is not None else True:
+ cert_data["plugins"].append({"name": f"mcp-plugin-{plugin.name.lower()}"})
+
+ # Generate cert-manager certificates manifest
+ cert_manager_manifest = cert_manager_template.render(**cert_data)
+ (output_dir / "cert-manager-certificates.yaml").write_text(cert_manager_manifest)
+ if verbose:
+ print(" ✓ cert-manager Certificate CRDs manifest generated")
+
+ else:
+ # Generate traditional certificate secrets (backward compatibility)
+ cert_secrets_template = env.get_template("cert-secrets.yaml.j2")
+
+ # Prepare certificate data
+ cert_data = {"namespace": namespace, "gateway_name": "mcpgateway", "plugins": []}
+
+ # Read and encode CA certificate
+ ca_cert_path = Path("certs/mcp/ca/ca.crt")
+ if ca_cert_path.exists():
+ cert_data["ca_cert_b64"] = base64.b64encode(ca_cert_path.read_bytes()).decode("utf-8")
+ else:
+ if verbose:
+ print(f"[yellow]Warning: CA certificate not found at {ca_cert_path}[/yellow]")
+
+ # Read and encode gateway certificates
+ gateway_cert_path = Path("certs/mcp/gateway/client.crt")
+ gateway_key_path = Path("certs/mcp/gateway/client.key")
+ if gateway_cert_path.exists() and gateway_key_path.exists():
+ cert_data["gateway_cert_b64"] = base64.b64encode(gateway_cert_path.read_bytes()).decode("utf-8")
+ cert_data["gateway_key_b64"] = base64.b64encode(gateway_key_path.read_bytes()).decode("utf-8")
+ else:
+ if verbose:
+ print("[yellow]Warning: Gateway certificates not found[/yellow]")
+
+ # Read and encode plugin certificates
+ for plugin in config.plugins:
+ if plugin.mtls_enabled if plugin.mtls_enabled is not None else True:
+ plugin_name = plugin.name
+ plugin_cert_path = Path(f"certs/mcp/plugins/{plugin_name}/server.crt")
+ plugin_key_path = Path(f"certs/mcp/plugins/{plugin_name}/server.key")
+
+ if plugin_cert_path.exists() and plugin_key_path.exists():
+ cert_data["plugins"].append(
+ {
+ "name": f"mcp-plugin-{plugin_name.lower()}",
+ "cert_b64": base64.b64encode(plugin_cert_path.read_bytes()).decode("utf-8"),
+ "key_b64": base64.b64encode(plugin_key_path.read_bytes()).decode("utf-8"),
+ }
+ )
+ else:
+ if verbose:
+ print(f"[yellow]Warning: Plugin {plugin_name} certificates not found[/yellow]")
+
+ # Generate certificate secrets manifest
+ if "ca_cert_b64" in cert_data:
+ cert_secrets_manifest = cert_secrets_template.render(**cert_data)
+ (output_dir / "cert-secrets.yaml").write_text(cert_secrets_manifest)
+ if verbose:
+ print(" ✓ mTLS certificate secrets manifest generated")
+
+ # Generate infrastructure manifests (postgres, redis) if enabled
+ infrastructure = config.infrastructure
+
+ # PostgreSQL
+ if infrastructure and infrastructure.postgres and infrastructure.postgres.enabled:
+ postgres_config = infrastructure.postgres
+ postgres_template = env.get_template("postgres.yaml.j2")
+ postgres_manifest = postgres_template.render(
+ namespace=namespace,
+ image=postgres_config.image or "quay.io/sclorg/postgresql-15-c9s:latest",
+ database=postgres_config.database or "mcp",
+ user=postgres_config.user or "postgres",
+ password=postgres_config.password or "mysecretpassword",
+ storage_size=postgres_config.storage_size or "10Gi",
+ storage_class=postgres_config.storage_class,
+ )
+ (output_dir / "postgres-deployment.yaml").write_text(postgres_manifest)
+ if verbose:
+ print(" ✓ PostgreSQL deployment manifest generated")
+
+ # Redis
+ if infrastructure and infrastructure.redis and infrastructure.redis.enabled:
+ redis_config = infrastructure.redis
+ redis_template = env.get_template("redis.yaml.j2")
+ redis_manifest = redis_template.render(namespace=namespace, image=redis_config.image or "redis:latest")
+ (output_dir / "redis-deployment.yaml").write_text(redis_manifest)
+ if verbose:
+ print(" ✓ Redis deployment manifest generated")
+
+ # Generate plugins ConfigMap if plugins are configured
+ if config.plugins and len(config.plugins) > 0:
+ configmap_template = env.get_template("plugins-configmap.yaml.j2")
+ # Read the generated plugins-config.yaml file
+ plugins_config_path = output_dir / "plugins-config.yaml"
+ if plugins_config_path.exists():
+ plugins_config_content = plugins_config_path.read_text()
+ configmap_manifest = configmap_template.render(namespace=namespace, plugins_config=plugins_config_content)
+ (output_dir / "plugins-configmap.yaml").write_text(configmap_manifest)
+ if verbose:
+ print(" ✓ Plugins ConfigMap manifest generated")
+
+ # Generate gateway deployment
+ gateway_template = env.get_template("deployment.yaml.j2")
+ # Convert Pydantic model to dict for template rendering
+ gateway_dict = config.gateway.model_dump(exclude_none=True)
+ gateway_dict["name"] = "mcpgateway"
+ gateway_dict["namespace"] = namespace
+ gateway_dict["has_plugins"] = config.plugins and len(config.plugins) > 0
+
+ # Update image to use full registry path if registry is enabled
+ if config.gateway.registry and config.gateway.registry.enabled:
+ base_image_name = config.gateway.image.split(":")[0].split("/")[-1]
+ image_version = config.gateway.image.split(":")[-1] if ":" in config.gateway.image else "latest"
+ gateway_dict["image"] = f"{config.gateway.registry.url}/{config.gateway.registry.namespace}/{base_image_name}:{image_version}"
+ # Set imagePullPolicy from registry config
+ if config.gateway.registry.image_pull_policy:
+ gateway_dict["image_pull_policy"] = config.gateway.registry.image_pull_policy
+
+ # Add DATABASE_URL and REDIS_URL to gateway environment if infrastructure is enabled
+ if "env_vars" not in gateway_dict:
+ gateway_dict["env_vars"] = {}
+
+ # Enable plugins if any are configured
+ if config.plugins and len(config.plugins) > 0:
+ gateway_dict["env_vars"]["PLUGINS_ENABLED"] = "true"
+ gateway_dict["env_vars"]["PLUGIN_CONFIG_FILE"] = "/app/config/plugins.yaml"
+
+ # Add init containers to wait for infrastructure services
+ init_containers = []
+
+ if infrastructure and infrastructure.postgres and infrastructure.postgres.enabled:
+ postgres = infrastructure.postgres
+ db_user = postgres.user or "postgres"
+ db_password = postgres.password or "mysecretpassword"
+ db_name = postgres.database or "mcp"
+ gateway_dict["env_vars"]["DATABASE_URL"] = f"postgresql://{db_user}:{db_password}@postgres:5432/{db_name}"
+
+ # Add init container to wait for PostgreSQL
+ init_containers.append({"name": "wait-for-postgres", "image": "busybox:1.36", "command": ["sh", "-c", "until nc -z postgres 5432; do echo waiting for postgres; sleep 2; done"]})
+
+ if infrastructure and infrastructure.redis and infrastructure.redis.enabled:
+ gateway_dict["env_vars"]["REDIS_URL"] = "redis://redis:6379/0"
+
+ # Add init container to wait for Redis
+ init_containers.append({"name": "wait-for-redis", "image": "busybox:1.36", "command": ["sh", "-c", "until nc -z redis 6379; do echo waiting for redis; sleep 2; done"]})
+
+ # Add init containers to wait for plugins to be ready
+ if config.plugins and len(config.plugins) > 0:
+ for plugin in config.plugins:
+ plugin_service_name = f"mcp-plugin-{plugin.name.lower()}"
+ plugin_port = plugin.port or 8000
+ # Wait for plugin service to be available
+ init_containers.append(
+ {
+ "name": f"wait-for-{plugin.name.lower()}",
+ "image": "busybox:1.36",
+ "command": ["sh", "-c", f"until nc -z {plugin_service_name} {plugin_port}; do echo waiting for {plugin_service_name}; sleep 2; done"],
+ }
+ )
+
+ if init_containers:
+ gateway_dict["init_containers"] = init_containers
+
+ gateway_manifest = gateway_template.render(**gateway_dict)
+ (output_dir / "gateway-deployment.yaml").write_text(gateway_manifest)
+
+ # Generate OpenShift Route if configured
+ if config.deployment.openshift and config.deployment.openshift.create_routes:
+ route_template = env.get_template("route.yaml.j2")
+ openshift_config = config.deployment.openshift
+
+ # Auto-detect OpenShift apps domain if not specified
+ openshift_domain = openshift_config.domain
+ if not openshift_domain:
+ try:
+ # Try to get domain from OpenShift cluster info
+ result = subprocess.run(
+ ["kubectl", "get", "ingresses.config.openshift.io", "cluster", "-o", "jsonpath={.spec.domain}"], capture_output=True, text=True, check=False
+ ) # nosec B603, B607
+ if result.returncode == 0 and result.stdout.strip():
+ openshift_domain = result.stdout.strip()
+ if verbose:
+ console.print(f"[dim]Auto-detected OpenShift domain: {openshift_domain}[/dim]")
+ else:
+ # Fallback to common OpenShift Local domain
+ openshift_domain = "apps-crc.testing"
+ if verbose:
+ console.print(f"[yellow]Could not auto-detect OpenShift domain, using default: {openshift_domain}[/yellow]")
+ except Exception:
+ # Fallback to common OpenShift Local domain
+ openshift_domain = "apps-crc.testing"
+ if verbose:
+ console.print(f"[yellow]Could not auto-detect OpenShift domain, using default: {openshift_domain}[/yellow]")
+
+ route_manifest = route_template.render(namespace=namespace, openshift_domain=openshift_domain, tls_termination=openshift_config.tls_termination)
+ (output_dir / "gateway-route.yaml").write_text(route_manifest)
+ if verbose:
+ print(" ✓ OpenShift Route manifest generated")
+
+ # Generate plugin deployments
+ for plugin in config.plugins:
+ # Convert Pydantic model to dict for template rendering
+ plugin_dict = plugin.model_dump(exclude_none=True)
+ plugin_dict["name"] = f"mcp-plugin-{plugin.name.lower()}"
+ plugin_dict["namespace"] = namespace
+
+ # Update image to use full registry path if registry is enabled
+ if plugin.registry and plugin.registry.enabled:
+ base_image_name = plugin.image.split(":")[0].split("/")[-1]
+ image_version = plugin.image.split(":")[-1] if ":" in plugin.image else "latest"
+ plugin_dict["image"] = f"{plugin.registry.url}/{plugin.registry.namespace}/{base_image_name}:{image_version}"
+ # Set imagePullPolicy from registry config
+ if plugin.registry.image_pull_policy:
+ plugin_dict["image_pull_policy"] = plugin.registry.image_pull_policy
+
+ plugin_manifest = gateway_template.render(**plugin_dict)
+ (output_dir / f"plugin-{plugin.name.lower()}-deployment.yaml").write_text(plugin_manifest)
+
+ if verbose:
+ print(f"✓ Kubernetes manifests generated in {output_dir}")
+
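As a quick check of the cert-manager duration/renewBefore arithmetic above, with the 825-day default validity:

```
# Sketch of the renewal-window math from generate_kubernetes_manifests.
validity_days = 825                        # default validity from the config
duration_hours = validity_days * 24        # 19800h certificate lifetime
renew_before_hours = int(duration_hours * 2 / 3)
print(duration_hours, renew_before_hours)  # -> 19800 13200
# renewBefore of 13200h means renewal kicks in 6600h (~275 days) after issuance
```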
+
+def generate_compose_manifests(config: MCPStackConfig, output_dir: Path, verbose: bool = False) -> None:
+ """Generate Docker Compose manifest from configuration.
+
+ Args:
+ config: Validated MCPStackConfig Pydantic model
+ output_dir: Output directory for manifests
+ verbose: Print verbose output
+
+ Raises:
+ FileNotFoundError: If template directory not found
+
+ Examples:
+ >>> from pathlib import Path
+ >>> import inspect
+ >>> # Test function signature
+ >>> sig = inspect.signature(generate_compose_manifests)
+ >>> list(sig.parameters.keys())
+ ['config', 'output_dir', 'verbose']
+
+ >>> # Test default parameters
+ >>> sig.parameters['verbose'].default
+ False
+
+ >>> # Actual execution requires templates and config:
+ >>> # from mcpgateway.tools.builder.schema import MCPStackConfig
+ >>> # generate_compose_manifests(config, Path("./output"))
+ """
+
+ # Load templates
+ template_dir = Path(__file__).parent / "templates" / "compose"
+ if not template_dir.exists():
+ raise FileNotFoundError(f"Template directory not found: {template_dir}")
+
+ # Auto-detect and assign env files if not specified
+ _auto_detect_env_files(config, output_dir, verbose=verbose)
+
+ # Auto-assign host_ports if expose_port is true but host_port not specified
+ next_host_port = 8000
+ for plugin in config.plugins:
+ # Port defaults are handled by Pydantic defaults in schema
+
+ # Auto-assign host_port if expose_port is true
+ if plugin.expose_port and not plugin.host_port:
+ plugin.host_port = next_host_port # type: ignore
+ next_host_port += 1
+
+ # Compute relative certificate paths (from output_dir to project root certs/)
+ # Certificates are at: ./certs/mcp/...
+ # Output dir is at: ./deploy/manifests/
+ # So relative path is: ../../certs/mcp/...
+ certs_base = Path.cwd() / "certs"
+ certs_rel_base = os.path.relpath(certs_base, output_dir)
+
+ # Add computed cert paths to context for template
+ cert_paths = {
+ "certs_base": certs_rel_base,
+ "gateway_cert_dir": os.path.join(certs_rel_base, "mcp/gateway"),
+ "ca_cert_file": os.path.join(certs_rel_base, "mcp/ca/ca.crt"),
+ "plugins_cert_base": os.path.join(certs_rel_base, "mcp/plugins"),
+ }
+
+ env = Environment(loader=FileSystemLoader(str(template_dir)), autoescape=True) # nosec B701
+
+ # Generate compose file
+ compose_template = env.get_template("docker-compose.yaml.j2")
+ # Convert Pydantic model to dict for template rendering
+ config_dict = config.model_dump(exclude_none=True)
+ compose_manifest = compose_template.render(**config_dict, cert_paths=cert_paths)
+ (output_dir / "docker-compose.yaml").write_text(compose_manifest)
+
+ if verbose:
+ print(f"✓ Compose manifest generated in {output_dir}")
+
+
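The relative certificate paths above come straight from `os.path.relpath`; for the default layout described in the comments (a project root holding `certs/` and `deploy/manifests/`; the root path itself is a placeholder):

```
# Sketch of the relative cert path computation used above.
import os
from pathlib import Path

project_root = Path("/work/project")             # hypothetical project root
certs_base = project_root / "certs"
output_dir = project_root / "deploy" / "manifests"

rel = os.path.relpath(certs_base, output_dir)
print(rel)                                       # -> ../../certs
print(os.path.join(rel, "mcp/ca/ca.crt"))        # -> ../../certs/mcp/ca/ca.crt
```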
+def _auto_detect_env_files(config: MCPStackConfig, output_dir: Path, verbose: bool = False) -> None:
+ """Auto-detect and assign env files if not explicitly specified.
+
+ If env_file is not specified in the config, check if {deploy_dir}/env/.env.{name}
+ exists and use it. Warn the user when auto-detection is used.
+
+ Args:
+ config: MCPStackConfig Pydantic model (modified in-place via attribute assignment)
+ output_dir: Output directory where manifests will be generated (for relative paths)
+ verbose: Print verbose output
+
+ Examples:
+ >>> from pathlib import Path
+ >>> from mcpgateway.tools.builder.schema import MCPStackConfig, DeploymentConfig, GatewayConfig
+ >>> import tempfile
+ >>> # Test function modifies config in place
+ >>> with tempfile.TemporaryDirectory() as tmpdir:
+ ... output = Path(tmpdir)
+ ... config = MCPStackConfig(
+ ... deployment=DeploymentConfig(type="compose"),
+ ... gateway=GatewayConfig(image="test:latest"),
+ ... plugins=[]
+ ... )
+ ... # Function modifies config if env files exist
+ ... _auto_detect_env_files(config, output, verbose=False)
+ ... # Config object is modified in place
+ ... isinstance(config, MCPStackConfig)
+ True
+
+ >>> # Test function signature
+ >>> import inspect
+ >>> sig = inspect.signature(_auto_detect_env_files)
+ >>> 'verbose' in sig.parameters
+ True
+ """
+ deploy_dir = get_deploy_dir()
+ env_dir = deploy_dir / "env"
+
+ # Check gateway - since we need to modify the model, we access env_file directly
+ # Note: Pydantic models allow attribute assignment after creation
+ if not hasattr(config.gateway, "env_file") or not config.gateway.env_file:
+ gateway_env = env_dir / ".env.gateway"
+ if gateway_env.exists():
+ # Make path relative to output_dir (where docker-compose.yaml will be)
+ relative_path = os.path.relpath(gateway_env, output_dir)
+ config.gateway.env_file = relative_path # type: ignore
+ print(f"⚠ Auto-detected env file: {gateway_env}")
+ if verbose:
+ print(" (Gateway env_file not specified in config)")
+
+ # Check plugins
+ for plugin in config.plugins:
+ plugin_name = plugin.name
+ if not hasattr(plugin, "env_file") or not plugin.env_file:
+ plugin_env = env_dir / f".env.{plugin_name}"
+ if plugin_env.exists():
+ # Make path relative to output_dir (where docker-compose.yaml will be)
+ relative_path = os.path.relpath(plugin_env, output_dir)
+ plugin.env_file = relative_path # type: ignore
+ print(f"⚠ Auto-detected env file: {plugin_env}")
+ if verbose:
+ print(f" (Plugin {plugin_name} env_file not specified in config)")
+
+
+def copy_env_template(plugin_name: str, plugin_build_dir: Path, verbose: bool = False) -> None:
+ """Copy .env.template from plugin repo to {deploy_dir}/env/ directory.
+
+ Uses MCP_DEPLOY_DIR environment variable if set, defaults to './deploy'.
+ This function is shared between Dagger and plain Python implementations.
+
+ Args:
+ plugin_name: Name of the plugin
+ plugin_build_dir: Path to plugin build directory (contains .env.template)
+ verbose: Print verbose output
+
+ Examples:
+ >>> from pathlib import Path
+ >>> import tempfile
+ >>> import os
+ >>> # Test with non-existent template (should return early)
+ >>> with tempfile.TemporaryDirectory() as tmpdir:
+ ... build_dir = Path(tmpdir)
+ ... # No .env.template exists, function returns early
+ ... copy_env_template("test-plugin", build_dir, verbose=False)
+
+ >>> # Test directory creation
+ >>> with tempfile.TemporaryDirectory() as tmpdir:
+ ... os.environ["MCP_DEPLOY_DIR"] = tmpdir
+ ... build_dir = Path(tmpdir) / "build"
+ ... build_dir.mkdir()
+ ... template = build_dir / ".env.template"
+ ... _ = template.write_text("TEST=value")
+ ... copy_env_template("test", build_dir, verbose=False)
+ ... env_file = Path(tmpdir) / "env" / ".env.test"
+ ... env_file.exists()
+ True
+
+ >>> # Cleanup
+ >>> _ = os.environ.pop("MCP_DEPLOY_DIR", None)
+ """
+ # Create {deploy_dir}/env directory if it doesn't exist
+ deploy_dir = get_deploy_dir()
+ env_dir = deploy_dir / "env"
+ env_dir.mkdir(parents=True, exist_ok=True)
+
+ # Look for .env.template in plugin build directory
+ template_file = plugin_build_dir / ".env.template"
+ if not template_file.exists():
+ if verbose:
+ print(f"No .env.template found in {plugin_name}")
+ return
+
+ # Target file path
+ target_file = env_dir / f".env.{plugin_name}"
+
+ # Only copy if target doesn't exist (don't overwrite user edits)
+ if target_file.exists():
+ if verbose:
+ print(f"⚠ {target_file} already exists, skipping")
+ return
+
+ # Copy template
+ shutil.copy2(template_file, target_file)
+ if verbose:
+ print(f"✓ Copied .env.template -> {target_file}")
+
+
+def handle_registry_operations(component, component_name: str, image_tag: str, container_runtime: str, verbose: bool = False) -> str:
+ """Handle registry tagging and pushing for a built component.
+
+ This function is shared between Dagger and plain Python implementations.
+ It tags the locally built image with the registry path and optionally pushes it.
+
+ Args:
+ component: BuildableConfig component (GatewayConfig or PluginConfig)
+ component_name: Name of the component (gateway or plugin name)
+ image_tag: Current local image tag
+ container_runtime: Container runtime to use ("docker" or "podman")
+ verbose: Print verbose output
+
+ Returns:
+ Final image tag (registry path if registry enabled, otherwise original tag)
+
+ Raises:
+ TypeError: If component is not a BuildableConfig instance
+ ValueError: If registry enabled but missing required configuration
+ subprocess.CalledProcessError: If tag or push command fails
+
+ Examples:
+ >>> from mcpgateway.tools.builder.schema import GatewayConfig, RegistryConfig
+ >>> # Test with registry disabled (returns original tag)
+ >>> gateway = GatewayConfig(image="test:latest")
+ >>> result = handle_registry_operations(gateway, "gateway", "test:latest", "docker")
+ >>> result
+ 'test:latest'
+
+ >>> # Test type checking - wrong type raises TypeError
+ >>> try:
+ ... handle_registry_operations("not a config", "test", "tag:latest", "docker")
+ ... except TypeError as e:
+ ... "BuildableConfig" in str(e)
+ True
+
+ >>> # Test validation error - registry enabled but missing config
+ >>> from mcpgateway.tools.builder.schema import GatewayConfig, RegistryConfig
+ >>> gateway_bad = GatewayConfig(
+ ... image="test:latest",
+ ... registry=RegistryConfig(enabled=True, url="docker.io") # missing namespace
+ ... )
+ >>> try:
+ ... handle_registry_operations(gateway_bad, "gateway", "test:latest", "docker")
+ ... except ValueError as e:
+ ... "missing" in str(e) and "namespace" in str(e)
+ True
+
+ >>> # Test validation error - missing URL
+ >>> gateway_bad2 = GatewayConfig(
+ ... image="test:latest",
+ ... registry=RegistryConfig(enabled=True, namespace="myns") # missing url
+ ... )
+ >>> try:
+ ... handle_registry_operations(gateway_bad2, "gateway", "test:latest", "docker")
+ ... except ValueError as e:
+ ... "missing" in str(e) and "url" in str(e)
+ True
+
+ >>> # Test function signature
+ >>> import inspect
+ >>> sig = inspect.signature(handle_registry_operations)
+ >>> list(sig.parameters.keys())
+ ['component', 'component_name', 'image_tag', 'container_runtime', 'verbose']
+
+ >>> # Test return type
+ >>> sig.return_annotation
+ <class 'str'>
+ """
+ # First-Party
+ from mcpgateway.tools.builder.schema import BuildableConfig
+
+ # Type check for better error messages
+ if not isinstance(component, BuildableConfig):
+ raise TypeError(f"Component must be a BuildableConfig instance, got {type(component)}")
+
+ # Check if registry is enabled
+ if not component.registry or not component.registry.enabled:
+ return image_tag
+
+ registry_config = component.registry
+
+ # Validate registry configuration
+ if not registry_config.url or not registry_config.namespace:
+ raise ValueError(f"Registry enabled for {component_name} but missing 'url' or 'namespace' configuration")
+
+ # Construct registry image path
+ # Format: {registry_url}/{namespace}/{image_name}:{tag}
+ base_image_name = image_tag.split(":")[0].split("/")[-1] # Extract base name (e.g., "mcpgateway-gateway")
+ image_version = image_tag.split(":")[-1] if ":" in image_tag else "latest" # Extract tag
+ registry_image = f"{registry_config.url}/{registry_config.namespace}/{base_image_name}:{image_version}"
+
+ # Tag image for registry
+ if verbose:
+ console.print(f"[dim]Tagging {image_tag} as {registry_image}[/dim]")
+ tag_cmd = [container_runtime, "tag", image_tag, registry_image]
+ result = subprocess.run(tag_cmd, capture_output=True, text=True, check=True) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+
+ # Push to registry if enabled
+ if registry_config.push:
+ if verbose:
+ console.print(f"[blue]Pushing {registry_image} to registry...[/blue]")
+
+ # Build push command with TLS options
+ push_cmd = [container_runtime, "push"]
+
+ # For podman, add --tls-verify=false for registries with self-signed certs
+ # This is common for OpenShift internal registries and local development
+ if container_runtime == "podman":
+ push_cmd.append("--tls-verify=false")
+
+ push_cmd.append(registry_image)
+
+ try:
+ result = subprocess.run(push_cmd, capture_output=True, text=True, check=True) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ console.print(f"[green]✓ Pushed to registry: {registry_image}[/green]")
+ except subprocess.CalledProcessError as e:
+ console.print(f"[red]✗ Failed to push to registry: {e}[/red]")
+ if e.stderr:
+ console.print(f"[red]Error output: {e.stderr}[/red]")
+ console.print("[yellow]Tip: Authenticate to the registry first:[/yellow]")
+ console.print(f" {container_runtime} login {registry_config.url}")
+ raise
+
+ # Update component image reference to use registry path for manifests
+ component.image = registry_image
+
+ return registry_image
+
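The registry path construction above splits a local tag into a base name and a version; a standalone sketch of the same split (the registry URL and namespace are placeholders):

```
# Sketch of the registry image path format used by handle_registry_operations.
def registry_path(image_tag: str, url: str, namespace: str) -> str:
    # Keep only the last path segment as the base name; default tag to "latest".
    base = image_tag.split(":")[0].split("/")[-1]
    version = image_tag.split(":")[-1] if ":" in image_tag else "latest"
    return f"{url}/{namespace}/{base}:{version}"

print(registry_path("mcpgateway-gateway:1.0", "registry.example.com", "myns"))
# -> registry.example.com/myns/mcpgateway-gateway:1.0
```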
+
+# Docker Compose Utilities
+
+
+def get_docker_compose_command() -> List[str]:
+ """Detect and return available docker compose command.
+
+ Tries to detect docker compose plugin first, then falls back to
+ standalone docker-compose command.
+
+ Returns:
+ Command to use: ["docker", "compose"] or ["docker-compose"]
+
+ Raises:
+ RuntimeError: If neither command is available
+
+ Examples:
+ >>> # Test that function returns a list
+ >>> try:
+ ... cmd = get_docker_compose_command()
+ ... isinstance(cmd, list)
+ ... except RuntimeError:
+ ... # Docker compose not installed in test environment
+ ... True
+ True
+
+ >>> # Test that it returns valid command formats
+ >>> try:
+ ... cmd = get_docker_compose_command()
+ ... # Should be either ["docker", "compose"] or ["docker-compose"]
+ ... cmd in [["docker", "compose"], ["docker-compose"]]
+ ... except RuntimeError:
+ ... # Docker compose not installed
+ ... True
+ True
+
+ >>> # Test error case (requires mocking, shown for documentation)
+ >>> # from unittest.mock import patch
+ >>> # with patch('shutil.which', return_value=None):
+ >>> # try:
+ >>> # get_docker_compose_command()
+ >>> # except RuntimeError as e:
+ >>> # "Docker Compose not found" in str(e)
+ >>> # True
+ """
+ # Try docker compose (new plugin) first
+ if shutil.which("docker"):
+ try:
+ subprocess.run(["docker", "compose", "version"], capture_output=True, check=True) # nosec B603, B607
+ return ["docker", "compose"]
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ pass
+
+ # Fall back to standalone docker-compose
+ if shutil.which("docker-compose"):
+ return ["docker-compose"]
+
+ raise RuntimeError("Docker Compose not found. Install docker compose plugin or docker-compose.")
+
+
+def run_compose(compose_file: Path, args: List[str], verbose: bool = False, check: bool = True) -> subprocess.CompletedProcess:
+ """Run docker compose command with given arguments.
+
+ Args:
+ compose_file: Path to docker-compose.yaml
+ args: Arguments to pass to compose (e.g., ["up", "-d"])
+ verbose: Print verbose output
+ check: Raise exception on non-zero exit code
+
+ Returns:
+ CompletedProcess instance
+
+ Raises:
+ FileNotFoundError: If compose_file doesn't exist
+ RuntimeError: If docker compose command fails (when check=True)
+
+ Examples:
+ >>> from pathlib import Path
+ >>> import tempfile
+ >>> # Test with non-existent file
+ >>> try:
+ ... run_compose(Path("/nonexistent/docker-compose.yaml"), ["ps"])
+ ... except FileNotFoundError as e:
+ ... "Compose file not found" in str(e)
+ True
+
+ >>> # Test that args are properly formatted
+ >>> args = ["up", "-d"]
+ >>> isinstance(args, list)
+ True
+ >>> all(isinstance(arg, str) for arg in args)
+ True
+
+ >>> # Real execution would require docker compose installed:
+ >>> # with tempfile.NamedTemporaryFile(suffix=".yaml") as f:
+ >>> # result = run_compose(Path(f.name), ["--version"], check=False)
+ >>> # isinstance(result, subprocess.CompletedProcess)
+ """
+ if not compose_file.exists():
+ raise FileNotFoundError(f"Compose file not found: {compose_file}")
+
+ compose_cmd = get_docker_compose_command()
+ full_cmd = compose_cmd + ["-f", str(compose_file)] + args
+
+ if verbose:
+ console.print(f"[dim]Running: {' '.join(full_cmd)}[/dim]")
+
+ try:
+ result = subprocess.run(full_cmd, capture_output=True, text=True, check=check) # nosec B603, B607
+ return result
+ except subprocess.CalledProcessError as e:
+ console.print("\n[red bold]Docker Compose command failed:[/red bold]")
+ if e.stdout:
+ console.print(f"[yellow]Output:[/yellow]\n{e.stdout}")
+ if e.stderr:
+ console.print(f"[red]Error:[/red]\n{e.stderr}")
+ raise RuntimeError(f"Docker Compose failed with exit code {e.returncode}") from e
+
+
+def deploy_compose(compose_file: Path, verbose: bool = False) -> None:
+ """Deploy using docker compose up -d.
+
+ Args:
+ compose_file: Path to docker-compose.yaml
+ verbose: Print verbose output
+
+ Raises:
+ RuntimeError: If deployment fails
+
+ Examples:
+ >>> from pathlib import Path
+ >>> # Test that function signature is correct
+ >>> import inspect
+ >>> sig = inspect.signature(deploy_compose)
+ >>> 'compose_file' in sig.parameters
+ True
+ >>> 'verbose' in sig.parameters
+ True
+
+ >>> # Test with non-existent file (would fail at run_compose)
+ >>> # deploy_compose(Path("/nonexistent.yaml")) # Raises FileNotFoundError
+ """
+ result = run_compose(compose_file, ["up", "-d"], verbose=verbose)
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ console.print("[green]✓ Deployed with Docker Compose[/green]")
+
+
+def verify_compose(compose_file: Path, verbose: bool = False) -> str:
+ """Verify Docker Compose deployment with ps command.
+
+ Args:
+ compose_file: Path to docker-compose.yaml
+ verbose: Print verbose output
+
+ Returns:
+ Output from docker compose ps command
+
+ Examples:
+ >>> from pathlib import Path
+ >>> # Test return type
+ >>> import inspect
+ >>> sig = inspect.signature(verify_compose)
+ >>> sig.return_annotation
+ <class 'str'>
+
+ >>> # Test parameters
+ >>> list(sig.parameters.keys())
+ ['compose_file', 'verbose']
+
+ >>> # Actual execution requires docker compose:
+ >>> # output = verify_compose(Path("docker-compose.yaml"))
+ >>> # isinstance(output, str)
+ """
+ result = run_compose(compose_file, ["ps"], verbose=verbose, check=False)
+ return result.stdout
+
+
+def destroy_compose(compose_file: Path, verbose: bool = False) -> None:
+ """Destroy Docker Compose deployment with down -v.
+
+ Args:
+ compose_file: Path to docker-compose.yaml
+ verbose: Print verbose output
+
+ Raises:
+ RuntimeError: If destruction fails
+
+ Examples:
+ >>> from pathlib import Path
+ >>> # Test with non-existent file (graceful handling)
+ >>> destroy_compose(Path("/nonexistent/docker-compose.yaml"), verbose=False)
+ Compose file not found: /nonexistent/docker-compose.yaml
+ Nothing to destroy
+
+ >>> # Test function signature
+ >>> import inspect
+ >>> sig = inspect.signature(destroy_compose)
+ >>> 'verbose' in sig.parameters
+ True
+ """
+ if not compose_file.exists():
+ console.print(f"[yellow]Compose file not found: {compose_file}[/yellow]")
+ console.print("[yellow]Nothing to destroy[/yellow]")
+ return
+
+ result = run_compose(compose_file, ["down", "-v"], verbose=verbose)
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ console.print("[green]✓ Destroyed Docker Compose deployment[/green]")
+
+
+# Kubernetes kubectl utilities
+
+
+def deploy_kubernetes(manifests_dir: Path, verbose: bool = False) -> None:
+ """Deploy to Kubernetes using kubectl.
+
+ Applies manifests in correct order:
+ 1. Deployments (creates namespaces)
+ 2. Certificate resources (secrets or cert-manager CRDs)
+ 3. ConfigMaps (plugins configuration)
+ 4. Infrastructure (PostgreSQL, Redis)
+ 5. OpenShift Routes (if configured)
+
+ Excludes plugins-config.yaml (not a Kubernetes resource).
+
+ Args:
+ manifests_dir: Path to directory containing Kubernetes manifests
+ verbose: Print verbose output
+
+ Raises:
+ RuntimeError: If kubectl not found or deployment fails
+
+ Examples:
+ >>> from pathlib import Path
+ >>> import shutil
+ >>> # Test that function checks for kubectl
+        >>> if not shutil.which("kubectl"):  # doctest: +ELLIPSIS
+ ... # Would raise RuntimeError
+ ... print("kubectl not found")
+ ... else:
+ ... print("kubectl available")
+ kubectl...
+
+ >>> # Test function signature
+ >>> import inspect
+ >>> sig = inspect.signature(deploy_kubernetes)
+ >>> list(sig.parameters.keys())
+ ['manifests_dir', 'verbose']
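+
+    Example layout (illustrative file names; the function applies the
+    specific manifests named above and excludes plugins-config.yaml):
+
+        manifests/
+        ├── gateway-deployment.yaml
+        ├── cert-secrets.yaml
+        ├── plugins-configmap.yaml
+        ├── postgres-deployment.yaml
+        ├── redis-deployment.yaml
+        └── gateway-route.yaml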
+ """
+ if not shutil.which("kubectl"):
+ raise RuntimeError("kubectl not found. Cannot deploy to Kubernetes.")
+
+ # Get all manifest files, excluding plugins-config.yaml (not a Kubernetes resource)
+ all_manifests = sorted(manifests_dir.glob("*.yaml"))
+ all_manifests = [m for m in all_manifests if m.name != "plugins-config.yaml"]
+
+ # Identify different types of manifests
+ cert_secrets = manifests_dir / "cert-secrets.yaml"
+ cert_manager_certs = manifests_dir / "cert-manager-certificates.yaml"
+ postgres_deploy = manifests_dir / "postgres-deployment.yaml"
+ redis_deploy = manifests_dir / "redis-deployment.yaml"
+ plugins_configmap = manifests_dir / "plugins-configmap.yaml"
+
+ # 1. Apply all deployments first (creates namespaces)
+ deployment_files = [m for m in all_manifests if m.name.endswith("-deployment.yaml") and m not in [cert_secrets, postgres_deploy, redis_deploy]]
+
+ # Apply deployment files (this creates the namespace)
+ for manifest in deployment_files:
+ result = subprocess.run(["kubectl", "apply", "-f", str(manifest)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl apply failed: {result.stderr}")
+
+ # 2. Apply certificate resources (now namespace exists)
+ # Check for both cert-secrets.yaml (local mode) and cert-manager-certificates.yaml (cert-manager mode)
+ if cert_manager_certs.exists():
+ result = subprocess.run(["kubectl", "apply", "-f", str(cert_manager_certs)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl apply failed: {result.stderr}")
+ elif cert_secrets.exists():
+ result = subprocess.run(["kubectl", "apply", "-f", str(cert_secrets)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl apply failed: {result.stderr}")
+
+ # 3. Apply ConfigMaps (needed by deployments)
+ if plugins_configmap.exists():
+ result = subprocess.run(["kubectl", "apply", "-f", str(plugins_configmap)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl apply failed: {result.stderr}")
+
+ # 4. Apply infrastructure
+ for infra_file in [postgres_deploy, redis_deploy]:
+ if infra_file.exists():
+ result = subprocess.run(["kubectl", "apply", "-f", str(infra_file)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl apply failed: {result.stderr}")
+
+ # 5. Apply OpenShift Routes (if configured)
+ gateway_route = manifests_dir / "gateway-route.yaml"
+ if gateway_route.exists():
+ result = subprocess.run(["kubectl", "apply", "-f", str(gateway_route)], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ # Don't fail on Route errors (may not be on OpenShift)
+ if verbose:
+ console.print(f"[yellow]Warning: Could not apply Route (may not be on OpenShift): {result.stderr}[/yellow]")
+
+ console.print("[green]✓ Deployed to Kubernetes[/green]")
+
+
+def verify_kubernetes(namespace: str, wait: bool = False, timeout: int = 300, verbose: bool = False) -> str:
+ """Verify Kubernetes deployment health.
+
+ Args:
+ namespace: Kubernetes namespace to check
+ wait: Wait for pods to be ready
+ timeout: Wait timeout in seconds
+ verbose: Print verbose output
+
+ Returns:
+ String output from kubectl get pods
+
+ Raises:
+ RuntimeError: If kubectl not found or verification fails
+
+ Examples:
+ >>> # Test function signature and return type
+ >>> import inspect
+ >>> sig = inspect.signature(verify_kubernetes)
+ >>> sig.return_annotation
+        <class 'str'>
+
+ >>> # Test parameters
+ >>> params = list(sig.parameters.keys())
+ >>> 'namespace' in params and 'wait' in params and 'timeout' in params
+ True
+
+ >>> # Test default timeout value
+ >>> sig.parameters['timeout'].default
+ 300
+ """
+ if not shutil.which("kubectl"):
+ raise RuntimeError("kubectl not found. Cannot verify Kubernetes deployment.")
+
+ # Get pod status
+ result = subprocess.run(["kubectl", "get", "pods", "-n", namespace], capture_output=True, text=True, check=False) # nosec B603, B607
+ output = result.stdout if result.stdout else ""
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl get pods failed: {result.stderr}")
+
+ # Wait for pods if requested
+ if wait:
+ result = subprocess.run(["kubectl", "wait", "--for=condition=Ready", "pod", "--all", "-n", namespace, f"--timeout={timeout}s"], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0:
+ raise RuntimeError(f"kubectl wait failed: {result.stderr}")
+
+ return output
+
+
+def destroy_kubernetes(manifests_dir: Path, verbose: bool = False) -> None:
+ """Destroy Kubernetes deployment.
+
+ Args:
+ manifests_dir: Path to directory containing Kubernetes manifests
+ verbose: Print verbose output
+
+ Raises:
+ RuntimeError: If kubectl not found or destruction fails
+
+ Examples:
+ >>> from pathlib import Path
+ >>> # Test with non-existent directory (graceful handling)
+ >>> import shutil
+ >>> if shutil.which("kubectl"):
+ ... destroy_kubernetes(Path("/nonexistent/manifests"), verbose=False)
+ ... else:
+ ... print("kubectl not available")
+ Manifests directory not found: /nonexistent/manifests
+ Nothing to destroy
+
+ >>> # Test function signature
+ >>> import inspect
+ >>> sig = inspect.signature(destroy_kubernetes)
+ >>> list(sig.parameters.keys())
+ ['manifests_dir', 'verbose']
+ """
+ if not shutil.which("kubectl"):
+ raise RuntimeError("kubectl not found. Cannot destroy Kubernetes deployment.")
+
+ if not manifests_dir.exists():
+ console.print(f"[yellow]Manifests directory not found: {manifests_dir}[/yellow]")
+ console.print("[yellow]Nothing to destroy[/yellow]")
+ return
+
+ # Delete all manifests except plugins-config.yaml
+ all_manifests = sorted(manifests_dir.glob("*.yaml"))
+ all_manifests = [m for m in all_manifests if m.name != "plugins-config.yaml"]
+
+ for manifest in all_manifests:
+ result = subprocess.run(["kubectl", "delete", "-f", str(manifest), "--ignore-not-found=true"], capture_output=True, text=True, check=False) # nosec B603, B607
+ if result.stdout and verbose:
+ console.print(result.stdout)
+ if result.returncode != 0 and "NotFound" not in result.stderr:
+ console.print(f"[yellow]Warning: {result.stderr}[/yellow]")
+
+ console.print("[green]✓ Destroyed Kubernetes deployment[/green]")
diff --git a/mcpgateway/tools/builder/dagger_deploy.py b/mcpgateway/tools/builder/dagger_deploy.py
new file mode 100644
index 000000000..81367625d
--- /dev/null
+++ b/mcpgateway/tools/builder/dagger_deploy.py
@@ -0,0 +1,557 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/dagger_deploy.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Dagger-based MCP Stack Deployment Module
+
+This module provides optimized build and deployment using Dagger.
+
+Features:
+- Automatic caching and parallelization
+- Content-addressable storage
+- Efficient multi-stage builds
+- Built-in layer caching
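+
+Example:
+    >>> # Hedged usage sketch (assumes the dagger SDK is installed and an
+    >>> # mcp-stack.yaml exists in the working directory):
+    >>> # import asyncio
+    >>> # deployer = MCPStackDagger(verbose=True)
+    >>> # asyncio.run(deployer.deploy("mcp-stack.yaml", dry_run=True))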
+"""
+
+# Standard
+from pathlib import Path
+from typing import List, Optional
+
+try:
+ # Third-Party
+ import dagger
+ from dagger import dag
+
+ DAGGER_AVAILABLE = True
+except ImportError:
+ DAGGER_AVAILABLE = False
+ dagger = None # type: ignore
+ dag = None # type: ignore
+
+# Third-Party
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+# First-Party
+from mcpgateway.tools.builder.common import (
+ deploy_compose,
+ deploy_kubernetes,
+ destroy_compose,
+ destroy_kubernetes,
+ generate_compose_manifests,
+ generate_kubernetes_manifests,
+ generate_plugin_config,
+ get_deploy_dir,
+ handle_registry_operations,
+ load_config,
+ verify_compose,
+ verify_kubernetes,
+)
+from mcpgateway.tools.builder.common import copy_env_template as copy_template
+from mcpgateway.tools.builder.pipeline import CICDModule
+from mcpgateway.tools.builder.schema import BuildableConfig, MCPStackConfig
+
+console = Console()
+
+
+class MCPStackDagger(CICDModule):
+ """Dagger-based implementation of MCP Stack deployment."""
+
+ def __init__(self, verbose: bool = False):
+ """Initialize MCPStackDagger instance.
+
+ Args:
+ verbose: Enable verbose output
+
+ Raises:
+ ImportError: If dagger is not installed
+ """
+ if not DAGGER_AVAILABLE:
+ raise ImportError("Dagger is not installed. Install with: pip install dagger-io\n" "Alternatively, use the plain Python deployer with --deployer=python")
+ super().__init__(verbose)
+
+ async def build(self, config_file: str, plugins_only: bool = False, specific_plugins: Optional[List[str]] = None, no_cache: bool = False, copy_env_templates: bool = False) -> None:
+ """Build gateway and plugin containers using Dagger.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ plugins_only: Only build plugins, skip gateway
+ specific_plugins: List of specific plugin names to build
+ no_cache: Disable Dagger cache
+ copy_env_templates: Copy .env.template files from cloned repos
+
+ Raises:
+ Exception: If build fails for any component
+ """
+ config = load_config(config_file)
+
+ async with dagger.connection(dagger.Config(workdir=str(Path.cwd()))):
+ # Build gateway (unless plugins_only=True)
+ if not plugins_only:
+ gateway = config.gateway
+ if gateway.repo:
+ with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), console=self.console) as progress:
+ task = progress.add_task("Building gateway...", total=None)
+ try:
+ await self._build_component_with_dagger(gateway, "gateway", no_cache=no_cache)
+ progress.update(task, completed=1, description="[green]✓ Built gateway[/green]")
+ except Exception as e:
+ progress.update(task, completed=1, description="[red]✗ Failed gateway[/red]")
+ # Print full error after progress bar closes
+ self.console.print("\n[red bold]Gateway build failed:[/red bold]")
+ self.console.print(f"[red]{type(e).__name__}: {str(e)}[/red]")
+ if self.verbose:
+ # Standard
+ import traceback
+
+ self.console.print(f"[dim]{traceback.format_exc()}[/dim]")
+ raise
+ elif self.verbose:
+ self.console.print("[dim]Skipping gateway build (using pre-built image)[/dim]")
+
+ # Build plugins
+ plugins = config.plugins
+
+ if specific_plugins:
+ plugins = [p for p in plugins if p.name in specific_plugins]
+
+ if not plugins:
+ self.console.print("[yellow]No plugins to build[/yellow]")
+ return
+
+ with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), console=self.console) as progress:
+
+ for plugin in plugins:
+ plugin_name = plugin.name
+
+ # Skip if pre-built image specified
+ if plugin.image and not plugin.repo:
+ task = progress.add_task(f"Skipping {plugin_name} (using pre-built image)", total=1)
+ progress.update(task, completed=1)
+ continue
+
+ task = progress.add_task(f"Building {plugin_name}...", total=None)
+
+ try:
+ await self._build_component_with_dagger(plugin, plugin_name, no_cache=no_cache, copy_env_templates=copy_env_templates)
+ progress.update(task, completed=1, description=f"[green]✓ Built {plugin_name}[/green]")
+ except Exception as e:
+ progress.update(task, completed=1, description=f"[red]✗ Failed {plugin_name}[/red]")
+ # Print full error after progress bar closes
+ self.console.print(f"\n[red bold]Plugin '{plugin_name}' build failed:[/red bold]")
+ self.console.print(f"[red]{type(e).__name__}: {str(e)}[/red]")
+ if self.verbose:
+ # Standard
+ import traceback
+
+ self.console.print(f"[dim]{traceback.format_exc()}[/dim]")
+ raise
+
+ async def generate_certificates(self, config_file: str) -> None:
+ """Generate mTLS certificates for plugins.
+
+ Supports two modes:
+ 1. Local generation (use_cert_manager=false): Uses Dagger to generate certificates locally
+ 2. cert-manager (use_cert_manager=true): Skips local generation, cert-manager will create certificates
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+
+ Raises:
+ dagger.ExecError: If certificate generation command fails (when using local generation)
+ dagger.QueryError: If Dagger query fails (when using local generation)
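+
+        Example:
+            # mcp-stack.yaml excerpt selecting cert-manager mode (these are
+            # the two fields this method reads from the certificates section):
+            #   certificates:
+            #     use_cert_manager: true
+            #     validity_days: 365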
+ """
+ config = load_config(config_file)
+
+ # Check if using cert-manager
+ cert_config = config.certificates
+ use_cert_manager = cert_config.use_cert_manager if cert_config else False
+ validity_days = cert_config.validity_days if cert_config else 825
+
+ if use_cert_manager:
+ # Skip local generation - cert-manager will handle certificate creation
+ if self.verbose:
+ self.console.print("[blue]Using cert-manager for certificate management[/blue]")
+ self.console.print("[dim]Skipping local certificate generation (cert-manager will create certificates)[/dim]")
+ return
+
+ # Local certificate generation (backward compatibility)
+ if self.verbose:
+ self.console.print("[blue]Generating mTLS certificates locally...[/blue]")
+
+ # Use Dagger container to run certificate generation
+ async with dagger.connection(dagger.Config(workdir=str(Path.cwd()))):
+ # Mount current directory
+ source = dag.host().directory(".")
+ try:
+ # Use Alpine with openssl
+ container = (
+ dag.container()
+ .from_("alpine:latest")
+ .with_exec(["apk", "add", "--no-cache", "openssl", "python3", "py3-pip", "make", "bash"])
+ .with_mounted_directory("/workspace", source)
+ .with_workdir("/workspace")
+ # .with_exec(["python3", "-m", "venv", ".venv"])
+ # .with_exec(["sh", "-c", "source .venv/bin/activate && pip install pyyaml"])
+ # .with_exec(["pip", "install", "pyyaml"])
+ )
+
+ # Generate CA
+ container = container.with_exec(["sh", "-c", f"make certs-mcp-ca MCP_CERT_DAYS={validity_days}"])
+
+ # Generate gateway cert
+ container = container.with_exec(["sh", "-c", f"make certs-mcp-gateway MCP_CERT_DAYS={validity_days}"])
+
+ # Generate plugin certificates
+ plugins = config.plugins
+ for plugin in plugins:
+ plugin_name = plugin.name
+ container = container.with_exec(["sh", "-c", f"make certs-mcp-plugin PLUGIN_NAME={plugin_name} MCP_CERT_DAYS={validity_days}"])
+
+ # Export certificates back to host
+ output = container.directory("/workspace/certs")
+ await output.export("./certs")
+ except dagger.ExecError as e:
+ self.console.print(f"Dagger Exec Error: {e.message}")
+ self.console.print(f"Exit Code: {e.exit_code}")
+ self.console.print(f"Stderr: {e.stderr}")
+ raise
+ except dagger.QueryError as e:
+ self.console.print(f"Dagger Query Error: {e.errors}")
+ self.console.print(f"Debug Query: {e.debug_query()}")
+ raise
+ except Exception as e:
+ self.console.print(f"An unexpected error occurred: {e}")
+ raise
+
+ if self.verbose:
+ self.console.print("[green]✓ Certificates generated locally[/green]")
+
+ async def deploy(self, config_file: str, dry_run: bool = False, skip_build: bool = False, skip_certs: bool = False, output_dir: Optional[str] = None) -> None:
+ """Deploy MCP stack.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ dry_run: Generate manifests without deploying
+ skip_build: Skip building containers
+ skip_certs: Skip certificate generation
+ output_dir: Output directory for manifests (default: ./deploy)
+
+ Raises:
+ ValueError: If unsupported deployment type specified
+ dagger.ExecError: If deployment command fails
+ dagger.QueryError: If Dagger query fails
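+
+        Example:
+            # Dry run: builds, generates certs, and writes manifests without
+            # deploying (manifest path shown is illustrative):
+            # await deployer.deploy("mcp-stack.yaml", dry_run=True)
+            # Dry-run: Manifests generated in deploy/manifests/compose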
+ """
+ config = load_config(config_file)
+
+ # Build containers
+ if not skip_build:
+ await self.build(config_file)
+
+ # Generate certificates (only if mTLS is enabled)
+ gateway_mtls = config.gateway.mtls_enabled if config.gateway.mtls_enabled is not None else True
+ plugin_mtls = any((p.mtls_enabled if p.mtls_enabled is not None else True) for p in config.plugins)
+ mtls_needed = gateway_mtls or plugin_mtls
+
+ if not skip_certs and mtls_needed:
+ await self.generate_certificates(config_file)
+ elif not skip_certs and not mtls_needed:
+ if self.verbose:
+ self.console.print("[dim]Skipping certificate generation (mTLS disabled)[/dim]")
+
+ # Generate manifests
+ manifests_dir = self.generate_manifests(config_file, output_dir=output_dir)
+
+ if dry_run:
+ self.console.print(f"[yellow]Dry-run: Manifests generated in {manifests_dir}[/yellow]")
+ return
+
+ # Apply deployment
+ deployment_type = config.deployment.type
+
+ async with dagger.connection(dagger.Config(workdir=str(Path.cwd()))):
+ try:
+ if deployment_type == "kubernetes":
+ await self._deploy_kubernetes(manifests_dir)
+ elif deployment_type == "compose":
+ await self._deploy_compose(manifests_dir)
+ else:
+ raise ValueError(f"Unsupported deployment type: {deployment_type}")
+ except dagger.ExecError as e:
+ self.console.print(f"Dagger Exec Error: {e.message}")
+ self.console.print(f"Exit Code: {e.exit_code}")
+ self.console.print(f"Stderr: {e.stderr}")
+ raise
+ except dagger.QueryError as e:
+ self.console.print(f"Dagger Query Error: {e.errors}")
+ self.console.print(f"Debug Query: {e.debug_query()}")
+ raise
+ except Exception as e:
+ # Extract detailed error from Dagger exception
+ error_msg = str(e)
+ self.console.print("\n[red bold]Deployment failed:[/red bold]")
+ self.console.print(f"[red]{error_msg}[/red]")
+
+ # Check if it's a compose-specific error and try to provide more context
+ if "compose" in error_msg.lower() and self.verbose:
+ self.console.print("\n[yellow]Hint:[/yellow] Check the generated docker-compose.yaml:")
+ self.console.print(f"[dim] {manifests_dir}/docker-compose.yaml[/dim]")
+ self.console.print("[yellow]Try running manually:[/yellow]")
+ self.console.print(f"[dim] cd {manifests_dir} && docker compose up[/dim]")
+
+ raise
+
+ async def verify(self, config_file: str, wait: bool = False, timeout: int = 300) -> None:
+ """Verify deployment health.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ wait: Wait for deployment to be ready
+ timeout: Wait timeout in seconds
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if self.verbose:
+ self.console.print("[blue]Verifying deployment...[/blue]")
+
+ async with dagger.connection(dagger.Config(workdir=str(Path.cwd()))):
+ if deployment_type == "kubernetes":
+ await self._verify_kubernetes(config, wait=wait, timeout=timeout)
+ elif deployment_type == "compose":
+ await self._verify_compose(config, wait=wait, timeout=timeout)
+
+ async def destroy(self, config_file: str) -> None:
+ """Destroy deployed MCP stack.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if self.verbose:
+ self.console.print("[blue]Destroying deployment...[/blue]")
+
+ async with dagger.connection(dagger.Config(workdir=str(Path.cwd()))):
+ if deployment_type == "kubernetes":
+ await self._destroy_kubernetes(config)
+ elif deployment_type == "compose":
+ await self._destroy_compose(config)
+
+ def generate_manifests(self, config_file: str, output_dir: Optional[str] = None) -> Path:
+ """Generate deployment manifests.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ output_dir: Output directory for manifests
+
+ Returns:
+ Path to generated manifests directory
+
+ Raises:
+ ValueError: If unsupported deployment type specified
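+
+        Example:
+            # Default output layout, keyed by deployment type:
+            # manifests_dir = self.generate_manifests("mcp-stack.yaml")
+            # -> deploy/manifests/kubernetes/ or deploy/manifests/compose/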
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if output_dir is None:
+ deploy_dir = get_deploy_dir()
+ # Separate subdirectories for kubernetes and compose
+ manifests_path = deploy_dir / "manifests" / deployment_type
+ else:
+ manifests_path = Path(output_dir)
+
+ manifests_path.mkdir(parents=True, exist_ok=True)
+
+ # Store output dir for later use
+ self._last_output_dir = manifests_path
+
+ # Generate plugin config.yaml for gateway (shared function)
+ generate_plugin_config(config, manifests_path, verbose=self.verbose)
+
+ if deployment_type == "kubernetes":
+ generate_kubernetes_manifests(config, manifests_path, verbose=self.verbose)
+ elif deployment_type == "compose":
+ generate_compose_manifests(config, manifests_path, verbose=self.verbose)
+ else:
+ raise ValueError(f"Unsupported deployment type: {deployment_type}")
+
+ return manifests_path
+
+ # Private helper methods
+
+ async def _build_component_with_dagger(self, component: BuildableConfig, component_name: str, no_cache: bool = False, copy_env_templates: bool = False) -> None:
+ """Build a component (gateway or plugin) container using Dagger.
+
+ Args:
+ component: Component configuration (GatewayConfig or PluginConfig)
+ component_name: Name of the component (gateway or plugin name)
+ no_cache: Disable cache
+ copy_env_templates: Copy .env.template from repo if it exists
+
+ Raises:
+ ValueError: If component has no repo field
+ Exception: If build or export fails
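+
+        Example:
+            # Illustrative call; the default tag below applies when no
+            # image name is configured for the component:
+            # await self._build_component_with_dagger(plugin, "myplugin")
+            # ✓ Built myplugin -> mcpgateway-myplugin:latest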
+ """
+ repo = component.repo
+
+ if not repo:
+ raise ValueError(f"Component '{component_name}' has no 'repo' field")
+
+ # Clone repository to local directory for env template access
+ git_ref = component.ref or "main"
+ clone_dir = Path(f"./build/{component_name}")
+
+ # For Dagger, we still need local clone if copying env templates
+ if copy_env_templates:
+ # Standard
+ import subprocess # nosec B404
+
+ clone_dir.mkdir(parents=True, exist_ok=True)
+
+ if (clone_dir / ".git").exists():
+ subprocess.run(["git", "fetch", "origin", git_ref], cwd=clone_dir, check=True, capture_output=True) # nosec B603, B607
+ # Checkout what we just fetched (FETCH_HEAD)
+ subprocess.run(["git", "checkout", "FETCH_HEAD"], cwd=clone_dir, check=True, capture_output=True) # nosec B603, B607
+ else:
+ subprocess.run(["git", "clone", "--branch", git_ref, "--depth", "1", repo, str(clone_dir)], check=True, capture_output=True) # nosec B603, B607
+
+ # Determine build context
+ build_context = component.context or "."
+ build_dir = clone_dir / build_context
+
+ # Copy env template using shared function
+ copy_template(component_name, build_dir, verbose=self.verbose)
+
+ # Use Dagger for the actual build
+ source = dag.git(repo).branch(git_ref).tree()
+
+ # If component has context subdirectory, navigate to it
+ build_context = component.context or "."
+ if build_context != ".":
+ source = source.directory(build_context)
+
+ # Detect Containerfile/Dockerfile
+ containerfile = component.containerfile or "Containerfile"
+
+ # Build container - determine image tag
+ if component.image:
+ # Use explicitly specified image name
+ image_tag = component.image
+ else:
+ # Generate default image name based on component type
+ image_tag = f"mcpgateway-{component_name.lower()}:latest"
+
+ # Build with optional target stage for multi-stage builds
+ build_kwargs = {"dockerfile": containerfile}
+ if component.target:
+ build_kwargs["target"] = component.target
+
+ # Use docker_build on the directory
+ container = source.docker_build(**build_kwargs)
+
+ # Export image to Docker daemon (always export, Dagger handles caching)
+ # Workaround for dagger-io 0.19.0 bug: export_image returns None instead of Void
+ # The export actually works, but beartype complains about the return type
+ try:
+ await container.export_image(image_tag)
+ except Exception as e:
+ # Ignore beartype validation error - the export actually succeeds
+ if "BeartypeCallHintReturnViolation" not in str(type(e)):
+ raise
+
+ # Handle registry operations (tag and push if enabled)
+ # Note: Dagger exports to local docker/podman, so we need to detect which runtime to use
+ # Standard
+ import shutil
+
+ container_runtime = "docker" if shutil.which("docker") else "podman"
+ image_tag = handle_registry_operations(component, component_name, image_tag, container_runtime, verbose=self.verbose)
+
+ if self.verbose:
+ self.console.print(f"[green]✓ Built {component_name} -> {image_tag}[/green]")
+
+ async def _deploy_kubernetes(self, manifests_dir: Path) -> None:
+ """Deploy to Kubernetes using kubectl.
+
+ Uses shared deploy_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ manifests_dir: Path to directory containing Kubernetes manifests
+ """
+ deploy_kubernetes(manifests_dir, verbose=self.verbose)
+
+ async def _deploy_compose(self, manifests_dir: Path) -> None:
+ """Deploy using Docker Compose.
+
+ Uses shared deploy_compose() from common.py to avoid code duplication.
+
+ Args:
+ manifests_dir: Path to directory containing compose manifest
+ """
+ compose_file = manifests_dir / "docker-compose.yaml"
+ deploy_compose(compose_file, verbose=self.verbose)
+
+ async def _verify_kubernetes(self, config: MCPStackConfig, wait: bool = False, timeout: int = 300) -> None:
+ """Verify Kubernetes deployment health.
+
+ Uses shared verify_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ wait: Wait for pods to be ready
+ timeout: Wait timeout in seconds
+ """
+ namespace = config.deployment.namespace or "mcp-gateway"
+ output = verify_kubernetes(namespace, wait=wait, timeout=timeout, verbose=self.verbose)
+ self.console.print(output)
+
+ async def _verify_compose(self, config: MCPStackConfig, wait: bool = False, timeout: int = 300) -> None:
+ """Verify Docker Compose deployment health.
+
+ Uses shared verify_compose() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ wait: Wait for containers to be ready
+ timeout: Wait timeout in seconds
+ """
+ _ = config, wait, timeout # Reserved for future use
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ output_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "compose")
+ compose_file = output_dir / "docker-compose.yaml"
+ output = verify_compose(compose_file, verbose=self.verbose)
+ self.console.print(output)
+
+ async def _destroy_kubernetes(self, config: MCPStackConfig) -> None:
+ """Destroy Kubernetes deployment.
+
+ Uses shared destroy_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ """
+ _ = config # Reserved for future use (namespace, labels, etc.)
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ manifests_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "kubernetes")
+ destroy_kubernetes(manifests_dir, verbose=self.verbose)
+
+ async def _destroy_compose(self, config: MCPStackConfig) -> None:
+ """Destroy Docker Compose deployment.
+
+ Uses shared destroy_compose() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ """
+ _ = config # Reserved for future use (project name, networks, etc.)
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ output_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "compose")
+ compose_file = output_dir / "docker-compose.yaml"
+ destroy_compose(compose_file, verbose=self.verbose)
diff --git a/mcpgateway/tools/builder/factory.py b/mcpgateway/tools/builder/factory.py
new file mode 100644
index 000000000..1353bd733
--- /dev/null
+++ b/mcpgateway/tools/builder/factory.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/factory.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Factory for creating MCP Stack deployment implementations.
+
+This module provides a factory pattern for creating the appropriate deployment
+implementation (Dagger or Plain Python) based on availability and user preference.
+
+The factory handles graceful fallback from Dagger to Python if dependencies are
+unavailable, ensuring the deployment system works in various environments.
+
+Example:
+ >>> deployer, mode = DeployFactory.create_deployer("dagger", verbose=False)
+ ⚠ Dagger not installed. Using plain python.
+ >>> # Validate configuration (output varies by config)
+ >>> # deployer.validate("mcp-stack.yaml")
+"""
+
+# Standard
+from enum import Enum
+
+# Third-Party
+from rich.console import Console
+
+# First-Party
+from mcpgateway.tools.builder.pipeline import CICDModule
+
+
+class CICDTypes(str, Enum):
+ """Deployment implementation types.
+
+ Attributes:
+ DAGGER: Dagger-based implementation (optimal performance)
+ PYTHON: Plain Python implementation (fallback, no dependencies)
+
+ Examples:
+ >>> # Test enum values
+ >>> CICDTypes.DAGGER.value
+ 'dagger'
+ >>> CICDTypes.PYTHON.value
+ 'python'
+
+ >>> # Test enum comparison
+ >>> CICDTypes.DAGGER == "dagger"
+ True
+ >>> CICDTypes.PYTHON == "python"
+ True
+
+ >>> # Test enum membership
+ >>> "dagger" in [t.value for t in CICDTypes]
+ True
+ >>> "python" in [t.value for t in CICDTypes]
+ True
+
+ >>> # Test enum iteration
+ >>> types = list(CICDTypes)
+ >>> len(types)
+ 2
+ >>> CICDTypes.DAGGER in types
+ True
+ """
+
+ DAGGER = "dagger"
+ PYTHON = "python"
+
+
+console = Console()
+
+
+class DeployFactory:
+ """Factory for creating MCP Stack deployment implementations.
+
+ This factory implements the Strategy pattern, allowing dynamic selection
+ between Dagger and Python implementations based on availability.
+ """
+
+ @staticmethod
+ def create_deployer(deployer: str, verbose: bool = False) -> tuple[CICDModule, CICDTypes]:
+ """Create a deployment implementation instance.
+
+ Attempts to load the requested deployer type with automatic fallback
+ to Python implementation if dependencies are missing.
+
+ Args:
+ deployer: Deployment type to create ("dagger" or "python")
+ verbose: Enable verbose logging during creation
+
+ Returns:
+ tuple: (deployer_instance, actual_type)
+ - deployer_instance: Instance of MCPStackDagger or MCPStackPython
+ - actual_type: CICDTypes enum indicating which implementation was loaded
+
+ Raises:
+ RuntimeError: If no implementation can be loaded (critical failure)
+
+ Example:
+ >>> # Try to load Dagger, fall back to Python if unavailable
+ >>> deployer, mode = DeployFactory.create_deployer("dagger", verbose=False)
+ ⚠ Dagger not installed. Using plain python.
+ >>> if mode == CICDTypes.DAGGER:
+ ... print("Using optimized Dagger implementation")
+ ... else:
+ ... print("Using fallback Python implementation")
+ Using fallback Python implementation
+ """
+ # Attempt to load Dagger implementation first if requested
+ if deployer == "dagger":
+ try:
+ # First-Party
+ from mcpgateway.tools.builder.dagger_deploy import DAGGER_AVAILABLE, MCPStackDagger
+
+ # Check if dagger is actually available (not just the module)
+ if not DAGGER_AVAILABLE:
+ raise ImportError("Dagger SDK not installed")
+
+ if verbose:
+ console.print("[green]✓ Dagger module loaded[/green]")
+
+ return (MCPStackDagger(verbose), CICDTypes.DAGGER)
+
+ except ImportError:
+ # Dagger dependencies not available, fall back to Python
+ console.print("[yellow]⚠ Dagger not installed. Using plain python.[/yellow]")
+
+ # Load plain Python implementation (fallback or explicitly requested)
+ try:
+ # First-Party
+ from mcpgateway.tools.builder.python_deploy import MCPStackPython
+
+ if verbose and deployer != "dagger":
+ console.print("[blue]Using plain Python implementation[/blue]")
+
+ return (MCPStackPython(verbose), CICDTypes.PYTHON)
+
+ except ImportError as e:
+ # Critical failure - neither implementation can be loaded
+ console.print("[red]✗ ERROR: Cannot import deployment modules[/red]")
+ console.print(f"[red] Details: {e}[/red]")
+ console.print("[yellow] Make sure you're running from the project root[/yellow]")
+ console.print("[yellow] and PYTHONPATH is set correctly[/yellow]")
+
+ # This should never be reached if PYTHONPATH is set correctly
+ raise RuntimeError(f"Unable to load deployer of type '{deployer}'. ")
diff --git a/mcpgateway/tools/builder/pipeline.py b/mcpgateway/tools/builder/pipeline.py
new file mode 100644
index 000000000..e7fcd098c
--- /dev/null
+++ b/mcpgateway/tools/builder/pipeline.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/pipeline.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Abstract base class for MCP Stack deployment implementations.
+
+This module defines the CICDModule interface that all deployment implementations
+must implement. It provides a common API for building, deploying, and managing
+MCP Gateway stacks with external plugin servers.
+
+The base class implements shared functionality (validation) while requiring
+subclasses to implement deployment-specific logic (build, deploy, etc.).
+
+Design Pattern:
+ Strategy Pattern - Different implementations (Dagger vs Python) can be
+ swapped transparently via the DeployFactory.
+
+Example:
+ >>> from mcpgateway.tools.builder.factory import DeployFactory
+ >>> deployer, mode = DeployFactory.create_deployer("dagger", verbose=False)
+ ⚠ Dagger not installed. Using plain python.
+ >>> # Validate configuration (output varies by config)
+ >>> # deployer.validate("mcp-stack.yaml")
+ >>> # Async methods must be called with await (see method examples below)
+"""
+
+# Standard
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Optional
+
+# Third-Party
+from pydantic import ValidationError
+from rich.console import Console
+import yaml
+
+# First-Party
+from mcpgateway.tools.builder.schema import MCPStackConfig
+
+# Shared console instance for consistent output formatting
+console = Console()
+
+
+class CICDModule(ABC):
+ """Abstract base class for MCP Stack deployment implementations.
+
+ This class defines the interface that all deployment implementations must
+ implement. It provides common initialization and validation logic while
+ deferring implementation-specific details to subclasses.
+
+ Attributes:
+ verbose (bool): Enable verbose output during operations
+ console (Console): Rich console for formatted output
+
+ Implementations:
+ - MCPStackDagger: High-performance implementation using Dagger SDK
+ - MCPStackPython: Fallback implementation using plain Python + Docker/Podman
+
+ Examples:
+ >>> # Test that CICDModule is abstract
+ >>> from abc import ABC
+ >>> issubclass(CICDModule, ABC)
+ True
+
+ >>> # Test initialization with defaults
+ >>> class TestDeployer(CICDModule):
+ ... async def build(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def generate_certificates(self, config_file: str) -> None:
+ ... pass
+ ... async def deploy(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def verify(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def destroy(self, config_file: str) -> None:
+ ... pass
+ ... def generate_manifests(self, config_file: str, **kwargs) -> Path:
+ ... return Path(".")
+ >>> deployer = TestDeployer()
+ >>> deployer.verbose
+ False
+
+ >>> # Test initialization with verbose=True
+ >>> verbose_deployer = TestDeployer(verbose=True)
+ >>> verbose_deployer.verbose
+ True
+
+ >>> # Test that console is available
+ >>> hasattr(deployer, 'console')
+ True
+ """
+
+ def __init__(self, verbose: bool = False):
+ """Initialize the deployment module.
+
+ Args:
+ verbose: Enable verbose output during all operations
+
+ Examples:
+ >>> # Cannot instantiate abstract class directly
+ >>> try:
+ ... CICDModule()
+ ... except TypeError as e:
+ ... "abstract" in str(e).lower()
+ True
+ """
+ self.verbose = verbose
+ self.console = console
+
+ def validate(self, config_file: str) -> None:
+ """Validate mcp-stack.yaml configuration using Pydantic schemas.
+
+ This method provides comprehensive validation of the MCP stack configuration
+ using Pydantic models defined in schema.py. It validates:
+ - Required sections (deployment, gateway, plugins)
+ - Deployment type (kubernetes or compose)
+ - Gateway image specification
+ - Plugin configurations (name, repo/image, etc.)
+ - Custom business rules (unique names, valid combinations)
+
+ Args:
+ config_file: Path to mcp-stack.yaml configuration file
+
+ Raises:
+ ValueError: If configuration is invalid, with formatted error details
+ ValidationError: If Pydantic schema validation fails
+ FileNotFoundError: If config_file does not exist
+
+ Examples:
+ >>> import tempfile
+ >>> import yaml
+ >>> from pathlib import Path
+ >>> # Create a test deployer
+ >>> class TestDeployer(CICDModule):
+ ... async def build(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def generate_certificates(self, config_file: str) -> None:
+ ... pass
+ ... async def deploy(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def verify(self, config_file: str, **kwargs) -> None:
+ ... pass
+ ... async def destroy(self, config_file: str) -> None:
+ ... pass
+ ... def generate_manifests(self, config_file: str, **kwargs) -> Path:
+ ... return Path(".")
+ >>> deployer = TestDeployer(verbose=False)
+
+ >>> # Test with valid minimal config
+ >>> with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+ ... config = {
+ ... 'deployment': {'type': 'compose'},
+ ... 'gateway': {'image': 'test:latest'},
+ ... 'plugins': []
+ ... }
+ ... yaml.dump(config, f)
+ ... config_path = f.name
+ >>> deployer.validate(config_path)
+ >>> import os
+ >>> os.unlink(config_path)
+
+ >>> # Test with missing file
+ >>> try:
+ ... deployer.validate("/nonexistent/config.yaml")
+ ... except FileNotFoundError as e:
+ ... "config.yaml" in str(e)
+ True
+
+ >>> # Test with invalid config (missing required fields)
+ >>> with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+ ... bad_config = {'deployment': {'type': 'compose'}}
+ ... yaml.dump(bad_config, f)
+ ... bad_path = f.name
+ >>> try:
+ ... deployer.validate(bad_path)
+ ... except ValueError as e:
+ ... "validation failed" in str(e).lower()
+ True
+ >>> os.unlink(bad_path)
+ """
+ if self.verbose:
+ self.console.print(f"[blue]Validating {config_file}...[/blue]")
+
+ # Load YAML configuration
+ with open(config_file, "r") as f:
+ config_dict = yaml.safe_load(f)
+
+ # Validate using Pydantic schema
+ try:
+ MCPStackConfig(**config_dict)
+ except ValidationError as e:
+ # Format validation errors for better readability
+ error_msg = "Configuration validation failed:\n"
+ for error in e.errors():
+ # Join the error location path (e.g., plugins -> 0 -> name)
+ loc = " -> ".join(str(x) for x in error["loc"])
+ error_msg += f" • {loc}: {error['msg']}\n"
+ raise ValueError(error_msg) from e
+
+ if self.verbose:
+ self.console.print("[green]✓ Configuration valid[/green]")
+
+ @abstractmethod
+ async def build(self, config_file: str, plugins_only: bool = False, specific_plugins: Optional[list[str]] = None, no_cache: bool = False, copy_env_templates: bool = False) -> None:
+ """Build container images for plugins and/or gateway.
+
+ Subclasses must implement this to build Docker/Podman images from
+ Git repositories or use pre-built images.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ plugins_only: Only build plugins, skip gateway
+ specific_plugins: List of specific plugin names to build (optional)
+ no_cache: Disable build cache for fresh builds
+ copy_env_templates: Copy .env.template files from cloned repos
+
+ Raises:
+ RuntimeError: If build fails
+ ValueError: If plugin configuration is invalid
+
+ Example:
+ # await deployer.build("mcp-stack.yaml", plugins_only=True)
+ # ✓ Built OPAPluginFilter
+ # ✓ Built LLMGuardPlugin
+ """
+
+ @abstractmethod
+ async def generate_certificates(self, config_file: str) -> None:
+ """Generate mTLS certificates for gateway and plugins.
+
+ Creates a certificate authority (CA) and issues certificates for:
+ - Gateway (client certificates for connecting to plugins)
+ - Each plugin (server certificates for accepting connections)
+
+ Certificates are stored in the paths defined in the config's
+ certificates section (default: ./certs/mcp/).
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+
+ Raises:
+ RuntimeError: If certificate generation fails
+ FileNotFoundError: If required tools (openssl) are not available
+
+ Example:
+ # await deployer.generate_certificates("mcp-stack.yaml")
+ # ✓ Certificates generated
+ """
+
+ @abstractmethod
+    async def deploy(self, config_file: str, dry_run: bool = False, skip_build: bool = False, skip_certs: bool = False, output_dir: Optional[str] = None) -> None:
+ """Deploy the MCP stack to Kubernetes or Docker Compose.
+
+ This is the main deployment method that orchestrates:
+ 1. Building containers (unless skip_build=True)
+ 2. Generating mTLS certificates (unless skip_certs=True or mTLS disabled)
+ 3. Generating manifests (Kubernetes YAML or docker-compose.yaml)
+ 4. Applying the deployment (unless dry_run=True)
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ dry_run: Generate manifests without actually deploying
+ skip_build: Skip building containers (use existing images)
+            skip_certs: Skip certificate generation (use existing certs)
+            output_dir: Output directory for manifests (default: ./deploy/manifests)
+
+ Raises:
+ RuntimeError: If deployment fails at any stage
+ ValueError: If configuration is invalid
+
+ Example:
+ # Full deployment
+ # await deployer.deploy("mcp-stack.yaml")
+ # ✓ Build complete
+ # ✓ Certificates generated
+ # ✓ Deployment complete
+
+ # Dry run (generate manifests only)
+ # await deployer.deploy("mcp-stack.yaml", dry_run=True)
+ # ✓ Dry-run complete (no changes made)
+ """
+
+ @abstractmethod
+ async def verify(self, config_file: str, wait: bool = False, timeout: int = 300) -> None:
+ """Verify deployment health and readiness.
+
+ Checks that all deployed services are healthy and ready:
+ - Kubernetes: Checks pod status, optionally waits for Ready
+ - Docker Compose: Checks container status
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ wait: Wait for deployment to become ready
+ timeout: Maximum time to wait in seconds (default: 300)
+
+ Raises:
+ RuntimeError: If verification fails or timeout is reached
+ TimeoutError: If wait=True and deployment doesn't become ready
+
+ Example:
+ # Quick health check
+ # await deployer.verify("mcp-stack.yaml")
+ # NAME READY STATUS RESTARTS AGE
+ # mcpgateway-xxx 1/1 Running 0 2m
+ # mcp-plugin-opa-xxx 1/1 Running 0 2m
+
+ # Wait for ready state
+ # await deployer.verify("mcp-stack.yaml", wait=True, timeout=600)
+ # ✓ Deployment healthy
+ """
+
+ @abstractmethod
+ async def destroy(self, config_file: str) -> None:
+ """Destroy the deployed MCP stack.
+
+ Removes all deployed resources:
+ - Kubernetes: Deletes all resources in the namespace
+ - Docker Compose: Stops and removes containers, networks, volumes
+
+ WARNING: This is destructive and cannot be undone!
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+
+ Raises:
+ RuntimeError: If destruction fails
+
+ Example:
+ # await deployer.destroy("mcp-stack.yaml")
+ # ✓ Deployment destroyed
+ """
+
+ @abstractmethod
+ def generate_manifests(self, config_file: str, output_dir: Optional[str] = None) -> Path:
+ """Generate deployment manifests (Kubernetes YAML or docker-compose.yaml).
+
+ Creates deployment manifests based on configuration:
+ - Kubernetes: Generates Deployment, Service, ConfigMap, Secret YAML files
+ - Docker Compose: Generates docker-compose.yaml with all services
+
+ Also generates:
+ - plugins-config.yaml: Plugin manager configuration for gateway
+ - Environment files: .env files for each service
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ output_dir: Output directory for manifests (default: ./deploy/manifests)
+
+ Returns:
+ Path: Directory containing generated manifests
+
+ Raises:
+ ValueError: If configuration is invalid
+ OSError: If output directory cannot be created
+
+ Example:
+ # manifests_path = deployer.generate_manifests("mcp-stack.yaml")
+ # print(f"Manifests generated in: {manifests_path}")
+ # Manifests generated in: /path/to/deploy/manifests
+
+ # Custom output directory
+ # deployer.generate_manifests("mcp-stack.yaml", output_dir="./my-manifests")
+ # ✓ Manifests generated: ./my-manifests
+ """
diff --git a/mcpgateway/tools/builder/python_deploy.py b/mcpgateway/tools/builder/python_deploy.py
new file mode 100644
index 000000000..a07dc938d
--- /dev/null
+++ b/mcpgateway/tools/builder/python_deploy.py
@@ -0,0 +1,603 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/python_deploy.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Plain Python MCP Stack Deployment Module
+
+This module provides deployment functionality using only standard Python
+and system commands (docker/podman, kubectl, docker-compose).
+
+This is the fallback implementation when Dagger is not available.
+"""
+
+# Standard
+from pathlib import Path
+import shutil
+import subprocess # nosec B404
+from typing import List, Optional
+
+# Third-Party
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+# First-Party
+from mcpgateway.tools.builder.common import (
+ deploy_compose,
+ deploy_kubernetes,
+ destroy_compose,
+ destroy_kubernetes,
+ generate_compose_manifests,
+ generate_kubernetes_manifests,
+ generate_plugin_config,
+ get_deploy_dir,
+ handle_registry_operations,
+ load_config,
+ verify_compose,
+ verify_kubernetes,
+)
+from mcpgateway.tools.builder.common import copy_env_template as copy_template
+from mcpgateway.tools.builder.pipeline import CICDModule
+from mcpgateway.tools.builder.schema import BuildableConfig, MCPStackConfig
+
+console = Console()
+
+
+class MCPStackPython(CICDModule):
+ """Plain Python implementation of MCP Stack deployment.
+
+ This implementation uses standard Python and system commands (docker/podman,
+ kubectl, docker-compose) without requiring additional dependencies like Dagger.
+
+ Examples:
+ >>> # Test class instantiation
+ >>> deployer = MCPStackPython(verbose=False)
+ >>> deployer.verbose
+ False
+
+ >>> # Test with verbose mode
+ >>> deployer_verbose = MCPStackPython(verbose=True)
+ >>> deployer_verbose.verbose
+ True
+
+ >>> # Test that console is available
+ >>> hasattr(deployer, 'console')
+ True
+
+ >>> # Test that it's a CICDModule subclass
+ >>> from mcpgateway.tools.builder.pipeline import CICDModule
+ >>> isinstance(deployer, CICDModule)
+ True
+ """
+
+ async def build(self, config_file: str, plugins_only: bool = False, specific_plugins: Optional[List[str]] = None, no_cache: bool = False, copy_env_templates: bool = False) -> None:
+ """Build gateway and plugin containers using docker/podman.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ plugins_only: Only build plugins, skip gateway
+ specific_plugins: List of specific plugin names to build
+ no_cache: Disable build cache
+ copy_env_templates: Copy .env.template files from cloned repos
+
+ Raises:
+ Exception: If build fails for any component
+ """
+ config = load_config(config_file)
+
+ # Build gateway (unless plugins_only=True)
+ if not plugins_only:
+ gateway = config.gateway
+ if gateway.repo:
+ with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), console=self.console) as progress:
+ task = progress.add_task("Building gateway...", total=None)
+ try:
+ self._build_component(gateway, config, "gateway", no_cache=no_cache)
+ progress.update(task, completed=1, description="[green]✓ Built gateway[/green]")
+ except Exception as e:
+ progress.update(task, completed=1, description="[red]✗ Failed gateway[/red]")
+ # Print full error after progress bar closes
+ self.console.print("\n[red bold]Gateway build failed:[/red bold]")
+ self.console.print(f"[red]{type(e).__name__}: {str(e)}[/red]")
+ if self.verbose:
+ # Standard
+ import traceback
+
+ self.console.print(f"[dim]{traceback.format_exc()}[/dim]")
+ raise
+ elif self.verbose:
+ self.console.print("[dim]Skipping gateway build (using pre-built image)[/dim]")
+
+ # Build plugins
+ plugins = config.plugins
+
+ if specific_plugins:
+ plugins = [p for p in plugins if p.name in specific_plugins]
+
+ if not plugins:
+ self.console.print("[yellow]No plugins to build[/yellow]")
+ return
+
+ with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), console=self.console) as progress:
+
+ for plugin in plugins:
+ plugin_name = plugin.name
+
+ # Skip if pre-built image specified
+ if plugin.image and not plugin.repo:
+ task = progress.add_task(f"Skipping {plugin_name} (using pre-built image)", total=1)
+ progress.update(task, completed=1)
+ continue
+
+ task = progress.add_task(f"Building {plugin_name}...", total=None)
+
+ try:
+ self._build_component(plugin, config, plugin_name, no_cache=no_cache, copy_env_templates=copy_env_templates)
+ progress.update(task, completed=1, description=f"[green]✓ Built {plugin_name}[/green]")
+ except Exception as e:
+ progress.update(task, completed=1, description=f"[red]✗ Failed {plugin_name}[/red]")
+ # Print full error after progress bar closes
+ self.console.print(f"\n[red bold]Plugin '{plugin_name}' build failed:[/red bold]")
+ self.console.print(f"[red]{type(e).__name__}: {str(e)}[/red]")
+ if self.verbose:
+ # Standard
+ import traceback
+
+ self.console.print(f"[dim]{traceback.format_exc()}[/dim]")
+ raise
+
+ async def generate_certificates(self, config_file: str) -> None:
+ """Generate mTLS certificates for plugins.
+
+ Supports two modes:
+ 1. Local generation (use_cert_manager=false): Uses Makefile to generate certificates locally
+ 2. cert-manager (use_cert_manager=true): Skips local generation, cert-manager will create certificates
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+
+ Raises:
+ RuntimeError: If make command not found (when using local generation)
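+
+        Example:
+            # Local mode shells out to these make targets (MCP_CERT_DAYS
+            # comes from certificates.validity_days, default 825):
+            # make certs-mcp-ca MCP_CERT_DAYS=825
+            # make certs-mcp-gateway MCP_CERT_DAYS=825
+            # make certs-mcp-plugin PLUGIN_NAME=<plugin> MCP_CERT_DAYS=825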
+ """
+ config = load_config(config_file)
+
+ # Check if using cert-manager
+ cert_config = config.certificates
+ use_cert_manager = cert_config.use_cert_manager if cert_config else False
+ validity_days = cert_config.validity_days if cert_config else 825
+
+ if use_cert_manager:
+ # Skip local generation - cert-manager will handle certificate creation
+ if self.verbose:
+ self.console.print("[blue]Using cert-manager for certificate management[/blue]")
+ self.console.print("[dim]Skipping local certificate generation (cert-manager will create certificates)[/dim]")
+ return
+
+ # Local certificate generation (backward compatibility)
+ if self.verbose:
+ self.console.print("[blue]Generating mTLS certificates locally...[/blue]")
+
+ # Check if make is available
+ if not shutil.which("make"):
+ raise RuntimeError("'make' command not found. Cannot generate certificates.")
+
+ # Generate CA
+ self._run_command(["make", "certs-mcp-ca", f"MCP_CERT_DAYS={validity_days}"])
+
+ # Generate gateway cert
+ self._run_command(["make", "certs-mcp-gateway", f"MCP_CERT_DAYS={validity_days}"])
+
+ # Generate plugin certificates
+ plugins = config.plugins
+ for plugin in plugins:
+ plugin_name = plugin.name
+ self._run_command(["make", "certs-mcp-plugin", f"PLUGIN_NAME={plugin_name}", f"MCP_CERT_DAYS={validity_days}"])
+
+ if self.verbose:
+ self.console.print("[green]✓ Certificates generated locally[/green]")
+
+ async def deploy(self, config_file: str, dry_run: bool = False, skip_build: bool = False, skip_certs: bool = False, output_dir: Optional[str] = None) -> None:
+ """Deploy MCP stack.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ dry_run: Generate manifests without deploying
+ skip_build: Skip building containers
+ skip_certs: Skip certificate generation
+ output_dir: Output directory for manifests (default: ./deploy)
+
+ Raises:
+ ValueError: If unsupported deployment type specified
+ """
+ config = load_config(config_file)
+
+ # Build containers
+ if not skip_build:
+ await self.build(config_file)
+
+ # Generate certificates (only if mTLS is enabled)
+ gateway_mtls = config.gateway.mtls_enabled if config.gateway.mtls_enabled is not None else True
+ plugin_mtls = any((p.mtls_enabled if p.mtls_enabled is not None else True) for p in config.plugins)
+ mtls_needed = gateway_mtls or plugin_mtls
+
+ if not skip_certs and mtls_needed:
+ await self.generate_certificates(config_file)
+ elif not skip_certs and not mtls_needed:
+ if self.verbose:
+ self.console.print("[dim]Skipping certificate generation (mTLS disabled)[/dim]")
+
+ # Generate manifests
+ manifests_dir = self.generate_manifests(config_file, output_dir=output_dir)
+
+ if dry_run:
+ self.console.print(f"[yellow]Dry-run: Manifests generated in {manifests_dir}[/yellow]")
+ return
+
+ # Apply deployment
+ deployment_type = config.deployment.type
+
+ if deployment_type == "kubernetes":
+ self._deploy_kubernetes(manifests_dir)
+ elif deployment_type == "compose":
+ self._deploy_compose(manifests_dir)
+ else:
+ raise ValueError(f"Unsupported deployment type: {deployment_type}")
+
+ async def verify(self, config_file: str, wait: bool = False, timeout: int = 300) -> None:
+ """Verify deployment health.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ wait: Wait for deployment to be ready
+ timeout: Wait timeout in seconds
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if self.verbose:
+ self.console.print("[blue]Verifying deployment...[/blue]")
+
+ if deployment_type == "kubernetes":
+ self._verify_kubernetes(config, wait=wait, timeout=timeout)
+ elif deployment_type == "compose":
+ self._verify_compose(config, wait=wait, timeout=timeout)
+
+ async def destroy(self, config_file: str) -> None:
+ """Destroy deployed MCP stack.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if self.verbose:
+ self.console.print("[blue]Destroying deployment...[/blue]")
+
+ if deployment_type == "kubernetes":
+ self._destroy_kubernetes(config)
+ elif deployment_type == "compose":
+ self._destroy_compose(config)
+
+ def generate_manifests(self, config_file: str, output_dir: Optional[str] = None) -> Path:
+ """Generate deployment manifests.
+
+ Args:
+ config_file: Path to mcp-stack.yaml
+ output_dir: Output directory for manifests
+
+ Returns:
+ Path to generated manifests directory
+
+ Raises:
+ ValueError: If unsupported deployment type specified
+
+ Examples:
+ >>> import tempfile
+ >>> import yaml
+ >>> from pathlib import Path
+ >>> deployer = MCPStackPython(verbose=False)
+
+ >>> # Test method signature and return type
+ >>> import inspect
+ >>> sig = inspect.signature(deployer.generate_manifests)
+ >>> 'config_file' in sig.parameters
+ True
+ >>> 'output_dir' in sig.parameters
+ True
+ >>> sig.return_annotation
+            <class 'pathlib.Path'>
+
+ >>> # Test that method exists and is callable
+ >>> callable(deployer.generate_manifests)
+ True
+ """
+ config = load_config(config_file)
+ deployment_type = config.deployment.type
+
+ if output_dir is None:
+ deploy_dir = get_deploy_dir()
+ # Separate subdirectories for kubernetes and compose
+ output_dir = deploy_dir / "manifests" / deployment_type
+ else:
+ output_dir = Path(output_dir)
+
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Store output dir for later use
+ self._last_output_dir = output_dir
+
+ # Generate plugin config.yaml for gateway (shared function)
+ generate_plugin_config(config, output_dir, verbose=self.verbose)
+
+ if deployment_type == "kubernetes":
+ generate_kubernetes_manifests(config, output_dir, verbose=self.verbose)
+ elif deployment_type == "compose":
+ generate_compose_manifests(config, output_dir, verbose=self.verbose)
+ else:
+ raise ValueError(f"Unsupported deployment type: {deployment_type}")
+
+ return output_dir
+
+ # Private helper methods
+
+ def _detect_container_engine(self, config: MCPStackConfig) -> str:
+ """Detect available container engine (docker or podman).
+
+ Supports both engine names ("docker", "podman") and full paths ("/opt/podman/bin/podman").
+
+ Args:
+ config: MCP Stack configuration containing deployment settings
+
+ Returns:
+ Name or full path to available engine
+
+ Raises:
+ RuntimeError: If no container engine found
+
+ Examples:
+ >>> from mcpgateway.tools.builder.schema import MCPStackConfig, DeploymentConfig, GatewayConfig
+ >>> deployer = MCPStackPython(verbose=False)
+
+ >>> # Test with docker specified
+ >>> config = MCPStackConfig(
+ ... deployment=DeploymentConfig(type="compose", container_engine="docker"),
+ ... gateway=GatewayConfig(image="test:latest"),
+ ... plugins=[]
+ ... )
+ >>> result = deployer._detect_container_engine(config)
+ >>> result in ["docker", "podman"] # Returns available engine
+ True
+
+ >>> # Test that method returns a string
+ >>> import shutil
+ >>> if shutil.which("docker") or shutil.which("podman"):
+ ... config = MCPStackConfig(
+ ... deployment=DeploymentConfig(type="compose"),
+ ... gateway=GatewayConfig(image="test:latest"),
+ ... plugins=[]
+ ... )
+ ... engine = deployer._detect_container_engine(config)
+ ... isinstance(engine, str)
+ ... else:
+ ... True # Skip test if no container engine available
+ True
+ """
+ if config.deployment.container_engine:
+ engine = config.deployment.container_engine
+
+ # Check if it's a full path
+ if "/" in engine:
+ if Path(engine).exists() and Path(engine).is_file():
+ return engine
+ else:
+ raise RuntimeError(f"Specified container engine path does not exist: {engine}")
+
+ # Otherwise treat as command name and check PATH
+ if shutil.which(engine):
+ return engine
+ else:
+ raise RuntimeError(f"Unable to find specified container engine: {engine}")
+
+ # Auto-detect
+ if shutil.which("docker"):
+ return "docker"
+ elif shutil.which("podman"):
+ return "podman"
+ else:
+ raise RuntimeError("No container engine found. Install docker or podman.")
+
+ def _run_command(self, cmd: List[str], cwd: Optional[Path] = None, capture_output: bool = False) -> subprocess.CompletedProcess:
+ """Run a shell command.
+
+ Args:
+ cmd: Command and arguments
+ cwd: Working directory
+ capture_output: Capture stdout/stderr
+
+ Returns:
+ CompletedProcess instance
+
+ Raises:
+ subprocess.CalledProcessError: If command fails
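+
+        Examples:
+            >>> # Hedged sketch: spawns a real subprocess, so skipped in doctest runs
+            >>> deployer = MCPStackPython(verbose=False)
+            >>> result = deployer._run_command(["git", "--version"], capture_output=True)  # doctest: +SKIP
+            >>> result.returncode  # doctest: +SKIP
+            0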
+ """
+ if self.verbose:
+ self.console.print(f"[dim]Running: {' '.join(cmd)}[/dim]")
+
+ result = subprocess.run(cmd, cwd=cwd, capture_output=capture_output, text=True, check=True) # nosec B603, B607
+
+ return result
+
+ def _build_component(self, component: BuildableConfig, config: MCPStackConfig, component_name: str, no_cache: bool = False, copy_env_templates: bool = False) -> None:
+ """Build a component (gateway or plugin) container using docker/podman.
+
+ Args:
+ component: Component configuration (GatewayConfig or PluginConfig)
+ config: Overall stack configuration
+ component_name: Name of the component (gateway or plugin name)
+ no_cache: Disable cache
+ copy_env_templates: Copy .env.template from repo if it exists
+
+ Raises:
+ ValueError: If component has no repo field
+ FileNotFoundError: If build context or containerfile not found
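+
+        Examples:
+            >>> # Signature sketch only; a real build needs git plus docker/podman
+            >>> import inspect
+            >>> deployer = MCPStackPython(verbose=False)
+            >>> 'no_cache' in inspect.signature(deployer._build_component).parameters
+            True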
+ """
+ repo = component.repo
+
+ container_engine = self._detect_container_engine(config)
+
+ if not repo:
+ raise ValueError(f"Component '{component_name}' has no 'repo' field")
+
+ # Clone repository
+ git_ref = component.ref or "main"
+ clone_dir = Path(f"./build/{component_name}")
+ clone_dir.mkdir(parents=True, exist_ok=True)
+
+ # Clone or update repo
+ if (clone_dir / ".git").exists():
+ if self.verbose:
+ self.console.print(f"[dim]Updating {component_name} repository...[/dim]")
+ self._run_command(["git", "fetch", "origin", git_ref], cwd=clone_dir)
+ # Checkout what we just fetched (FETCH_HEAD)
+ self._run_command(["git", "checkout", "FETCH_HEAD"], cwd=clone_dir)
+ else:
+ if self.verbose:
+ self.console.print(f"[dim]Cloning {component_name} repository...[/dim]")
+ self._run_command(["git", "clone", "--branch", git_ref, "--depth", "1", repo, str(clone_dir)])
+
+ # Determine build context (subdirectory within repo)
+ build_context = component.context or "."
+ build_dir = clone_dir / build_context
+
+ if not build_dir.exists():
+ raise FileNotFoundError(f"Build context not found: {build_dir}")
+
+ # Detect Containerfile/Dockerfile
+ containerfile = component.containerfile or "Containerfile"
+ containerfile_path = build_dir / containerfile
+
+ if not containerfile_path.exists():
+ containerfile = "Dockerfile"
+ containerfile_path = build_dir / containerfile
+ if not containerfile_path.exists():
+ raise FileNotFoundError(f"No Containerfile or Dockerfile found in {build_dir}")
+
+ # Build container - determine image tag
+ if component.image:
+ # Use explicitly specified image name
+ image_tag = component.image
+ else:
+ # Generate default image name based on component type
+ image_tag = f"mcpgateway-{component_name.lower()}:latest"
+
+ build_cmd = [container_engine, "build", "-f", containerfile, "-t", image_tag]
+
+ if no_cache:
+ build_cmd.append("--no-cache")
+
+ # Add target stage if specified (for multi-stage builds)
+ if component.target:
+ build_cmd.extend(["--target", component.target])
+
+ # For Docker, add --load to ensure image is loaded into daemon
+ # (needed for buildx/docker-container driver)
+ if container_engine == "docker":
+ build_cmd.append("--load")
+
+ build_cmd.append(".")
+
+ self._run_command(build_cmd, cwd=build_dir)
+
+ # Handle registry operations (tag and push if enabled)
+ image_tag = handle_registry_operations(component, component_name, image_tag, container_engine, verbose=self.verbose)
+
+ # Copy .env.template if requested and exists
+ if copy_env_templates:
+ copy_template(component_name, build_dir, verbose=self.verbose)
+
+ if self.verbose:
+ self.console.print(f"[green]✓ Built {component_name} -> {image_tag}[/green]")
+
+ def _deploy_kubernetes(self, manifests_dir: Path) -> None:
+ """Deploy to Kubernetes using kubectl.
+
+ Uses shared deploy_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ manifests_dir: Path to directory containing Kubernetes manifests
+ """
+ deploy_kubernetes(manifests_dir, verbose=self.verbose)
+
+ def _deploy_compose(self, manifests_dir: Path) -> None:
+ """Deploy using Docker Compose.
+
+ Uses shared deploy_compose() from common.py to avoid code duplication.
+
+ Args:
+ manifests_dir: Path to directory containing compose manifest
+ """
+ compose_file = manifests_dir / "docker-compose.yaml"
+ deploy_compose(compose_file, verbose=self.verbose)
+
+ def _verify_kubernetes(self, config: MCPStackConfig, wait: bool = False, timeout: int = 300) -> None:
+ """Verify Kubernetes deployment health.
+
+ Uses shared verify_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ wait: Wait for pods to be ready
+ timeout: Wait timeout in seconds
+ """
+ namespace = config.deployment.namespace or "mcp-gateway"
+ output = verify_kubernetes(namespace, wait=wait, timeout=timeout, verbose=self.verbose)
+ self.console.print(output)
+
+ def _verify_compose(self, config: MCPStackConfig, wait: bool = False, timeout: int = 300) -> None:
+ """Verify Docker Compose deployment health.
+
+ Uses shared verify_compose() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ wait: Wait for containers to be ready
+ timeout: Wait timeout in seconds
+ """
+ _ = config, wait, timeout # Reserved for future use
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ output_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "compose")
+ compose_file = output_dir / "docker-compose.yaml"
+ output = verify_compose(compose_file, verbose=self.verbose)
+ self.console.print(output)
+
+ def _destroy_kubernetes(self, config: MCPStackConfig) -> None:
+ """Destroy Kubernetes deployment.
+
+ Uses shared destroy_kubernetes() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ """
+ _ = config # Reserved for future use (namespace, labels, etc.)
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ manifests_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "kubernetes")
+ destroy_kubernetes(manifests_dir, verbose=self.verbose)
+
+ def _destroy_compose(self, config: MCPStackConfig) -> None:
+ """Destroy Docker Compose deployment.
+
+ Uses shared destroy_compose() from common.py to avoid code duplication.
+
+ Args:
+ config: Parsed configuration Pydantic model
+ """
+ _ = config # Reserved for future use (project name, networks, etc.)
+ # Use the same manifests directory as generate_manifests
+ deploy_dir = get_deploy_dir()
+ output_dir = getattr(self, "_last_output_dir", deploy_dir / "manifests" / "compose")
+ compose_file = output_dir / "docker-compose.yaml"
+ destroy_compose(compose_file, verbose=self.verbose)
diff --git a/mcpgateway/tools/builder/schema.py b/mcpgateway/tools/builder/schema.py
new file mode 100644
index 000000000..657398a74
--- /dev/null
+++ b/mcpgateway/tools/builder/schema.py
@@ -0,0 +1,475 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/builder/schema.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Pydantic schemas for MCP Stack configuration validation"""
+
+# Standard
+from typing import Any, Dict, List, Literal, Optional
+
+# Third-Party
+from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+
+class OpenShiftConfig(BaseModel):
+ """OpenShift-specific configuration.
+
+ Routes are OpenShift's native way of exposing services externally (predates Kubernetes Ingress).
+ They provide built-in TLS termination and are integrated with OpenShift's router/HAProxy infrastructure.
+
+ Attributes:
+ create_routes: Create OpenShift Route resources for external access (default: False)
+ domain: OpenShift apps domain for route hostnames (default: auto-detected from cluster)
+ tls_termination: TLS termination mode - edge, passthrough, or reencrypt (default: edge)
+
+ Examples:
+ >>> # Test with default values
+ >>> config = OpenShiftConfig()
+ >>> config.create_routes
+ False
+ >>> config.tls_termination
+ 'edge'
+
+ >>> # Test with custom values
+ >>> config = OpenShiftConfig(
+ ... create_routes=True,
+ ... domain="apps.example.com",
+ ... tls_termination="passthrough"
+ ... )
+ >>> config.create_routes
+ True
+ >>> config.domain
+ 'apps.example.com'
+ >>> config.tls_termination
+ 'passthrough'
+
+ >>> # Test valid TLS termination modes
+ >>> for mode in ["edge", "passthrough", "reencrypt"]:
+ ... cfg = OpenShiftConfig(tls_termination=mode)
+ ... cfg.tls_termination == mode
+ True
+ True
+ True
+ """
+
+ create_routes: bool = Field(False, description="Create OpenShift Route resources")
+ domain: Optional[str] = Field(None, description="OpenShift apps domain (e.g., apps-crc.testing)")
+ tls_termination: Literal["edge", "passthrough", "reencrypt"] = Field("edge", description="TLS termination mode")
+
+
+class DeploymentConfig(BaseModel):
+ """Deployment configuration
+
+ Examples:
+ >>> # Test compose deployment
+ >>> config = DeploymentConfig(type="compose", project_name="test-project")
+ >>> config.type
+ 'compose'
+ >>> config.project_name
+ 'test-project'
+
+ >>> # Test kubernetes deployment
+ >>> config = DeploymentConfig(type="kubernetes", namespace="mcp-test")
+ >>> config.type
+ 'kubernetes'
+ >>> config.namespace
+ 'mcp-test'
+
+ >>> # Test container engine options
+ >>> config = DeploymentConfig(type="compose", container_engine="podman")
+ >>> config.container_engine
+ 'podman'
+
+ >>> # Test with OpenShift config
+ >>> config = DeploymentConfig(
+ ... type="kubernetes",
+ ... namespace="test",
+ ... openshift=OpenShiftConfig(create_routes=True)
+ ... )
+ >>> config.openshift.create_routes
+ True
+ """
+
+ type: Literal["kubernetes", "compose"] = Field(..., description="Deployment type")
+ container_engine: Optional[str] = Field(default=None, description="Container engine: 'podman', 'docker', or full path (e.g., '/opt/podman/bin/podman')")
+ project_name: Optional[str] = Field(None, description="Project name for compose")
+ namespace: Optional[str] = Field(None, description="Namespace for Kubernetes")
+ openshift: Optional[OpenShiftConfig] = Field(None, description="OpenShift-specific configuration")
+
+
+class RegistryConfig(BaseModel):
+ """Container registry configuration.
+
+ Optional configuration for pushing built images to a container registry.
+ When enabled, images will be tagged with the full registry path and optionally pushed.
+
+ Authentication:
+ Users must authenticate to the registry before running the build:
+ - Docker Hub: `docker login`
+ - Quay.io: `podman login quay.io`
+ - OpenShift internal: `podman login $(oc registry info) -u $(oc whoami) -p $(oc whoami -t)`
+ - Private registry: `podman login your-registry.com -u username`
+
+ Attributes:
+ enabled: Enable registry integration (default: False)
+ url: Registry URL (e.g., "docker.io", "quay.io", "default-route-openshift-image-registry.apps-crc.testing")
+ namespace: Registry namespace/organization/project (e.g., "myorg", "mcp-gateway-test")
+ push: Push image after build (default: True)
+ image_pull_policy: Kubernetes imagePullPolicy (default: "IfNotPresent")
+
+ Examples:
+ >>> # Test with defaults (registry disabled)
+ >>> config = RegistryConfig()
+ >>> config.enabled
+ False
+ >>> config.push
+ True
+ >>> config.image_pull_policy
+ 'IfNotPresent'
+
+ >>> # Test Docker Hub configuration
+ >>> config = RegistryConfig(
+ ... enabled=True,
+ ... url="docker.io",
+ ... namespace="myusername"
+ ... )
+ >>> config.enabled
+ True
+ >>> config.url
+ 'docker.io'
+ >>> config.namespace
+ 'myusername'
+
+ >>> # Test with custom pull policy
+ >>> config = RegistryConfig(
+ ... enabled=True,
+ ... url="quay.io",
+ ... namespace="myorg",
+ ... image_pull_policy="Always"
+ ... )
+ >>> config.image_pull_policy
+ 'Always'
+
+ >>> # Test tag-only mode (no push)
+ >>> config = RegistryConfig(
+ ... enabled=True,
+ ... url="registry.local",
+ ... namespace="test",
+ ... push=False
+ ... )
+ >>> config.push
+ False
+ """
+
+ enabled: bool = Field(False, description="Enable registry push")
+ url: Optional[str] = Field(None, description="Registry URL (e.g., docker.io, quay.io, or internal registry)")
+ namespace: Optional[str] = Field(None, description="Registry namespace/organization/project")
+ push: bool = Field(True, description="Push image after build")
+ image_pull_policy: Optional[str] = Field("IfNotPresent", description="Kubernetes imagePullPolicy (IfNotPresent, Always, Never)")
+
+
+class BuildableConfig(BaseModel):
+ """Base class for components that can be built from source or use pre-built images.
+
+ This base class provides common configuration for both gateway and plugins,
+ supporting two build modes:
+ 1. Pre-built image: Specify only 'image' field
+ 2. Build from source: Specify 'repo' and optionally 'ref', 'context', 'containerfile', 'target'
+
+ Attributes:
+ image: Pre-built Docker image name (e.g., "mcpgateway/mcpgateway:latest")
+ repo: Git repository URL to build from
+ ref: Git branch/tag/commit to checkout (default: "main")
+ context: Build context subdirectory within repo (default: ".")
+ containerfile: Path to Containerfile/Dockerfile (default: "Containerfile")
+ target: Target stage for multi-stage builds (optional)
+ host_port: Host port mapping for direct access (optional)
+ env_vars: Environment variables for container
+ env_file: Path to environment file (.env)
+ mtls_enabled: Enable mutual TLS authentication (default: True)
+ """
+
+ # Allow attribute assignment after model creation (needed for auto-detection of env_file)
+ model_config = ConfigDict(validate_assignment=True)
+
+ # Build configuration
+ image: Optional[str] = Field(None, description="Pre-built Docker image")
+ repo: Optional[str] = Field(None, description="Git repository URL")
+ ref: Optional[str] = Field("main", description="Git branch/tag/commit")
+ context: Optional[str] = Field(".", description="Build context subdirectory")
+ containerfile: Optional[str] = Field("Containerfile", description="Containerfile path")
+ target: Optional[str] = Field(None, description="Multi-stage build target")
+
+ # Runtime configuration
+ host_port: Optional[int] = Field(None, description="Host port mapping")
+ env_vars: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Environment variables")
+ env_file: Optional[str] = Field(None, description="Path to environment file (.env)")
+ mtls_enabled: Optional[bool] = Field(True, description="Enable mTLS")
+
+ # Registry configuration
+ registry: Optional[RegistryConfig] = Field(None, description="Container registry configuration")
+
+ def model_post_init(self, _: Any) -> None:
+ """Validate that either image or repo is specified
+
+ Raises:
+ ValueError: If neither image nor repo is specified
+
+ Examples:
+ >>> # Test that error is raised when neither image nor repo specified
+ >>> try:
+ ... # BuildableConfig can't be instantiated directly, use GatewayConfig
+ ... from mcpgateway.tools.builder.schema import GatewayConfig
+ ... GatewayConfig()
+ ... except ValueError as e:
+ ... "must specify either 'image' or 'repo'" in str(e)
+ True
+
+ >>> # Test valid config with image
+ >>> from mcpgateway.tools.builder.schema import GatewayConfig
+ >>> config = GatewayConfig(image="mcpgateway:latest")
+ >>> config.image
+ 'mcpgateway:latest'
+
+ >>> # Test valid config with repo
+ >>> from mcpgateway.tools.builder.schema import GatewayConfig
+ >>> config = GatewayConfig(repo="https://github.com/example/repo")
+ >>> config.repo
+ 'https://github.com/example/repo'
+ """
+ if not self.image and not self.repo:
+ component_type = self.__class__.__name__.replace("Config", "")
+ raise ValueError(f"{component_type} must specify either 'image' or 'repo'")
+
+
+class GatewayConfig(BuildableConfig):
+ """Gateway configuration.
+
+ Extends BuildableConfig to support either pre-built gateway images or
+ building the gateway from source repository.
+
+ Attributes:
+ port: Gateway internal port (default: 4444)
+
+ Examples:
+ >>> # Test with pre-built image
+ >>> config = GatewayConfig(image="mcpgateway:latest")
+ >>> config.image
+ 'mcpgateway:latest'
+ >>> config.port
+ 4444
+
+ >>> # Test with custom port
+ >>> config = GatewayConfig(image="mcpgateway:latest", port=8080)
+ >>> config.port
+ 8080
+
+ >>> # Test with source repository
+ >>> config = GatewayConfig(
+ ... repo="https://github.com/example/gateway",
+ ... ref="v1.0.0"
+ ... )
+ >>> config.repo
+ 'https://github.com/example/gateway'
+ >>> config.ref
+ 'v1.0.0'
+
+ >>> # Test with environment variables
+ >>> config = GatewayConfig(
+ ... image="mcpgateway:latest",
+ ... env_vars={"LOG_LEVEL": "DEBUG", "PORT": "4444"}
+ ... )
+ >>> config.env_vars['LOG_LEVEL']
+ 'DEBUG'
+
+ >>> # Test with mTLS enabled
+ >>> config = GatewayConfig(image="mcpgateway:latest", mtls_enabled=True)
+ >>> config.mtls_enabled
+ True
+ """
+
+ port: Optional[int] = Field(4444, description="Gateway port")
+
+
+class PluginConfig(BuildableConfig):
+ """Plugin configuration.
+
+ Extends BuildableConfig to support plugin-specific configuration while
+ inheriting common build and runtime capabilities.
+
+ Attributes:
+ name: Unique plugin identifier
+ port: Plugin internal port (default: 8000)
+ expose_port: Whether to expose plugin port on host (default: False)
+ plugin_overrides: Plugin-specific override configuration
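+
+    Examples:
+        >>> # Minimal plugin backed by a pre-built image (name/image are illustrative)
+        >>> config = PluginConfig(name="my-plugin", image="myplugin:latest")
+        >>> config.port
+        8000
+        >>> config.expose_port
+        False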
+ """
+
+ name: str = Field(..., description="Plugin name")
+ port: Optional[int] = Field(8000, description="Plugin port")
+ expose_port: Optional[bool] = Field(False, description="Expose port on host")
+ plugin_overrides: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Plugin overrides")
+
+ @field_validator("name")
+ @classmethod
+ def validate_name(cls, v: str) -> str:
+ """Validate plugin name is non-empty
+
+ Args:
+ v: Plugin name value to validate
+
+ Returns:
+ Validated plugin name
+
+ Raises:
+ ValueError: If plugin name is empty or whitespace only
+
+ Examples:
+ >>> # Test valid plugin names
+ >>> PluginConfig.validate_name("my-plugin")
+ 'my-plugin'
+ >>> PluginConfig.validate_name("plugin_123")
+ 'plugin_123'
+ >>> PluginConfig.validate_name("TestPlugin")
+ 'TestPlugin'
+
+ >>> # Test empty name raises error
+ >>> try:
+ ... PluginConfig.validate_name("")
+ ... except ValueError as e:
+ ... "cannot be empty" in str(e)
+ True
+
+ >>> # Test whitespace-only name raises error
+ >>> try:
+ ... PluginConfig.validate_name(" ")
+ ... except ValueError as e:
+ ... "cannot be empty" in str(e)
+ True
+ """
+ if not v or not v.strip():
+ raise ValueError("Plugin name cannot be empty")
+ return v
+
+
+class CertificatesConfig(BaseModel):
+ """Certificate configuration.
+
+ Supports two modes:
+ 1. Local certificate generation (use_cert_manager=false, default):
+ - Certificates generated locally using OpenSSL (via Makefile)
+ - Deployed to Kubernetes as secrets via kubectl
+ - Manual rotation required before expiry
+
+ 2. cert-manager integration (use_cert_manager=true, Kubernetes only):
+ - Certificates managed by cert-manager controller
+ - Automatic renewal before expiry (default: at 2/3 of lifetime)
+ - Native Kubernetes Certificate resources
+ - Requires cert-manager to be installed in cluster
+
+ Attributes:
+ validity_days: Certificate validity period in days (default: 825 ≈ 2.25 years)
+ auto_generate: Auto-generate certificates locally (default: True)
+ use_cert_manager: Use cert-manager for certificate management (default: False, Kubernetes only)
+ cert_manager_issuer: Name of cert-manager Issuer/ClusterIssuer (default: "mcp-ca-issuer")
+ cert_manager_kind: Type of issuer - Issuer or ClusterIssuer (default: "Issuer")
+ ca_path: Path to CA certificates for local generation (default: "./certs/mcp/ca")
+ gateway_path: Path to gateway certificates for local generation (default: "./certs/mcp/gateway")
+ plugins_path: Path to plugin certificates for local generation (default: "./certs/mcp/plugins")
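+
+    Examples:
+        >>> # Defaults: local OpenSSL generation, no cert-manager
+        >>> config = CertificatesConfig()
+        >>> config.validity_days
+        825
+        >>> config.use_cert_manager
+        False
+
+        >>> # cert-manager mode (Kubernetes only)
+        >>> config = CertificatesConfig(use_cert_manager=True, cert_manager_kind="ClusterIssuer")
+        >>> config.cert_manager_kind
+        'ClusterIssuer'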
+ """
+
+ validity_days: Optional[int] = Field(825, description="Certificate validity in days")
+ auto_generate: Optional[bool] = Field(True, description="Auto-generate certificates locally")
+
+ # cert-manager integration (Kubernetes only)
+ use_cert_manager: Optional[bool] = Field(False, description="Use cert-manager for certificate management (Kubernetes only)")
+ cert_manager_issuer: Optional[str] = Field("mcp-ca-issuer", description="cert-manager Issuer/ClusterIssuer name")
+ cert_manager_kind: Optional[Literal["Issuer", "ClusterIssuer"]] = Field("Issuer", description="cert-manager issuer kind")
+
+ ca_path: Optional[str] = Field("./certs/mcp/ca", description="CA certificate path")
+ gateway_path: Optional[str] = Field("./certs/mcp/gateway", description="Gateway cert path")
+ plugins_path: Optional[str] = Field("./certs/mcp/plugins", description="Plugins cert path")
+
+
+class PostgresConfig(BaseModel):
+ """PostgreSQL database configuration"""
+
+ enabled: Optional[bool] = Field(True, description="Enable PostgreSQL deployment")
+ image: Optional[str] = Field("quay.io/sclorg/postgresql-15-c9s:latest", description="PostgreSQL image (default is OpenShift-compatible)")
+ database: Optional[str] = Field("mcp", description="Database name")
+ user: Optional[str] = Field("postgres", description="Database user")
+ password: Optional[str] = Field("mysecretpassword", description="Database password")
+ storage_size: Optional[str] = Field("10Gi", description="Persistent volume size (Kubernetes only)")
+ storage_class: Optional[str] = Field(None, description="Storage class name (Kubernetes only)")
+
+
+class RedisConfig(BaseModel):
+ """Redis cache configuration"""
+
+ enabled: Optional[bool] = Field(True, description="Enable Redis deployment")
+ image: Optional[str] = Field("redis:latest", description="Redis image")
+
+
+class InfrastructureConfig(BaseModel):
+ """Infrastructure services configuration"""
+
+ postgres: Optional[PostgresConfig] = Field(default_factory=PostgresConfig)
+ redis: Optional[RedisConfig] = Field(default_factory=RedisConfig)
+
+
+class MCPStackConfig(BaseModel):
+ """Complete MCP Stack configuration"""
+
+ deployment: DeploymentConfig
+ gateway: GatewayConfig
+ plugins: List[PluginConfig] = Field(default_factory=list)
+ certificates: Optional[CertificatesConfig] = Field(default_factory=CertificatesConfig)
+ infrastructure: Optional[InfrastructureConfig] = Field(default_factory=InfrastructureConfig)
+
+ @field_validator("plugins")
+ @classmethod
+ def validate_plugin_names_unique(cls, v: List[PluginConfig]) -> List[PluginConfig]:
+ """Ensure plugin names are unique
+
+ Args:
+ v: List of plugin configurations to validate
+
+ Returns:
+ Validated list of plugin configurations
+
+ Raises:
+ ValueError: If duplicate plugin names are found
+
+ Examples:
+ >>> from mcpgateway.tools.builder.schema import PluginConfig
+ >>> # Test with unique names (valid)
+ >>> plugins = [
+ ... PluginConfig(name="plugin1", image="img1:latest"),
+ ... PluginConfig(name="plugin2", image="img2:latest")
+ ... ]
+ >>> result = MCPStackConfig.validate_plugin_names_unique(plugins)
+ >>> len(result) == 2
+ True
+
+ >>> # Test with duplicate names (invalid)
+ >>> try:
+ ... duplicates = [
+ ... PluginConfig(name="duplicate", image="img1:latest"),
+ ... PluginConfig(name="duplicate", image="img2:latest")
+ ... ]
+ ... MCPStackConfig.validate_plugin_names_unique(duplicates)
+ ... except ValueError as e:
+ ... "Duplicate plugin names found" in str(e)
+ True
+
+ >>> # Test with empty list (valid)
+ >>> empty = MCPStackConfig.validate_plugin_names_unique([])
+ >>> len(empty) == 0
+ True
+ """
+ names = [p.name for p in v]
+ if len(names) != len(set(names)):
+ duplicates = [name for name in names if names.count(name) > 1]
+ raise ValueError(f"Duplicate plugin names found: {duplicates}")
+ return v
diff --git a/mcpgateway/tools/builder/templates/compose/docker-compose.yaml.j2 b/mcpgateway/tools/builder/templates/compose/docker-compose.yaml.j2
new file mode 100644
index 000000000..aaf2fc04e
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/compose/docker-compose.yaml.j2
@@ -0,0 +1,198 @@
+# Location: ./mcpgateway/tools/builder/templates/compose/docker-compose.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# Docker Compose manifest for MCP Stack
+# Generated from mcp-stack.yaml
+
+version: '3.8'
+
+networks:
+ mcp-network:
+ driver: bridge
+
+volumes:
+ gateway-data:
+ driver: local
+ pgdata:
+ driver: local
+{% for plugin in plugins %}
+ {{ plugin.name | lower }}-data:
+ driver: local
+{% endfor %}
+
+services:
+ # MCP Gateway
+ mcpgateway:
+ image: {{ gateway.image }}
+ container_name: mcpgateway
+ hostname: mcpgateway
+
+ {% if gateway.env_file is defined %}
+ env_file:
+ - {{ gateway.env_file }}
+ {% endif %}
+
+ environment:
+ {% if gateway.env_vars is defined and gateway.env_vars %}
+ # User-defined environment variables
+ {% for key, value in gateway.env_vars.items() %}
+ - {{ key }}={{ value }}
+ {% endfor %}
+ {% endif %}
+ # Database configuration
+      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD:-mysecretpassword}@postgres:5432/mcp
+ - REDIS_URL=redis://redis:6379/0
+ {% if gateway.mtls_enabled | default(true) %}
+ # mTLS client configuration (gateway connects to external plugins)
+ - PLUGINS_CLIENT_MTLS_CA_BUNDLE=/app/certs/mcp/ca/ca.crt
+ - PLUGINS_CLIENT_MTLS_CERTFILE=/app/certs/mcp/gateway/client.crt
+ - PLUGINS_CLIENT_MTLS_KEYFILE=/app/certs/mcp/gateway/client.key
+ - PLUGINS_CLIENT_MTLS_VERIFY={{ gateway.mtls_verify | default('true') }}
+ - PLUGINS_CLIENT_MTLS_CHECK_HOSTNAME={{ gateway.mtls_check_hostname | default('false') }}
+ {% endif %}
+
+ ports:
+ - "{{ gateway.host_port | default(4444) }}:{{ gateway.port | default(4444) }}"
+
+ volumes:
+ - gateway-data:/app/data
+ {% if gateway.mtls_enabled | default(true) %}
+ - {{ cert_paths.gateway_cert_dir }}:/app/certs/mcp/gateway:ro
+ - {{ cert_paths.ca_cert_file }}:/app/certs/mcp/ca/ca.crt:ro
+ {% endif %}
+ # Auto-generated plugin configuration
+ - ./plugins-config.yaml:/app/config/plugins.yaml:ro
+
+ networks:
+ - mcp-network
+
+ restart: unless-stopped
+
+ healthcheck:
+ test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:{{ gateway.port | default(4444) }}/health').read()"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 40s
+
+ depends_on:
+ postgres:
+ condition: service_healthy
+ redis:
+ condition: service_started
+{% for plugin in plugins %} {{ plugin.name | lower }}:
+ condition: service_started
+{% endfor %}
+
+{% for plugin in plugins %}
+ # Plugin: {{ plugin.name }}
+ {{ plugin.name | lower }}:
+ image: {{ plugin.image | default('mcpgateway-' + plugin.name | lower + ':latest') }}
+ container_name: mcp-plugin-{{ plugin.name | lower }}
+ hostname: {{ plugin.name | lower }}
+
+ {% if plugin.env_file is defined %}
+ env_file:
+ - {{ plugin.env_file }}
+ {% endif %}
+
+ environment:
+ {% if plugin.env_vars is defined and plugin.env_vars %}
+ # User-defined environment variables
+ {% for key, value in plugin.env_vars.items() %}
+ - {{ key }}={{ value }}
+ {% endfor %}
+ {% endif %}
+ {% if plugin.mtls_enabled | default(true) %}
+ # mTLS server configuration (plugin accepts gateway connections)
+ - PLUGINS_TRANSPORT=http
+ - PLUGINS_SERVER_HOST=0.0.0.0
+ - PLUGINS_SERVER_PORT={{ plugin.port | default(8000) }}
+ - PLUGINS_SERVER_SSL_ENABLED=true
+ - PLUGINS_SERVER_SSL_KEYFILE=/app/certs/mcp/server.key
+ - PLUGINS_SERVER_SSL_CERTFILE=/app/certs/mcp/server.crt
+ - PLUGINS_SERVER_SSL_CA_CERTS=/app/certs/mcp/ca.crt
+ - PLUGINS_SERVER_SSL_CERT_REQS=2 # CERT_REQUIRED - enforce client certificates
+ {% endif %}
+
+ {% if plugin.expose_port | default(false) %}
+ ports:
+ - "{{ plugin.host_port }}:{{ plugin.port | default(8000) }}"
+ {% endif %}
+
+ volumes:
+ - {{ plugin.name | lower }}-data:/app/data
+ {% if plugin.mtls_enabled | default(true) %}
+ - {{ cert_paths.plugins_cert_base }}/{{ plugin.name }}:/app/certs/mcp:ro
+ {% endif %}
+
+ networks:
+ - mcp-network
+
+ restart: unless-stopped
+
+ healthcheck:
+ {% if plugin.mtls_enabled | default(true) %}
+ # When mTLS is enabled, health check uses separate HTTP server on port+1000
+ test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:{{ (plugin.port | default(8000)) + 1000 }}/health').read()"]
+ {% else %}
+ # When mTLS is disabled, health check uses main server
+ test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:{{ plugin.port | default(8000) }}/health').read()"]
+ {% endif %}
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 40s
+
+ {% if plugin.depends_on is defined %}
+ depends_on:
+ {% for dep in plugin.depends_on %}
+ - {{ dep }}
+ {% endfor %}
+ {% endif %}
+
+{% endfor %}
+ # PostgreSQL Database
+ postgres:
+ image: postgres:17
+ container_name: mcp-postgres
+ hostname: postgres
+
+ environment:
+ - POSTGRES_USER=postgres
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-mysecretpassword}
+ - POSTGRES_DB=mcp
+
+ ports:
+ - "5432:5432"
+
+ volumes:
+ - pgdata:/var/lib/postgresql/data
+
+ networks:
+ - mcp-network
+
+ restart: unless-stopped
+
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U postgres"]
+ interval: 30s
+ timeout: 5s
+ retries: 5
+ start_period: 20s
+
+ # Redis Cache
+ redis:
+ image: redis:latest
+ container_name: mcp-redis
+ hostname: redis
+
+ ports:
+ - "6379:6379"
+
+ networks:
+ - mcp-network
+
+ restart: unless-stopped
+
diff --git a/mcpgateway/tools/builder/templates/kubernetes/cert-manager-certificates.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/cert-manager-certificates.yaml.j2
new file mode 100644
index 000000000..e11963573
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/cert-manager-certificates.yaml.j2
@@ -0,0 +1,62 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/cert-manager-certificates.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# cert-manager Certificate Resources
+# Gateway Certificate
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: mcp-{{ gateway_name }}-cert
+ namespace: {{ namespace }}
+spec:
+ secretName: mcp-{{ gateway_name }}-server-cert
+ duration: {{ duration }}h
+ renewBefore: {{ renew_before }}h
+ isCA: false
+ privateKey:
+ algorithm: RSA
+ size: 2048
+ usages:
+ - digital signature
+ - key encipherment
+ - server auth
+ - client auth
+ dnsNames:
+ - {{ gateway_name }}
+ - {{ gateway_name }}.{{ namespace }}
+ - {{ gateway_name }}.{{ namespace }}.svc
+ - {{ gateway_name }}.{{ namespace }}.svc.cluster.local
+ issuerRef:
+ name: {{ issuer_name }}
+ kind: {{ issuer_kind }}
+{% for plugin in plugins %}
+---
+# Plugin {{ plugin.name }} Certificate
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: mcp-{{ plugin.name }}-cert
+ namespace: {{ namespace }}
+spec:
+ secretName: mcp-{{ plugin.name }}-server-cert
+ duration: {{ duration }}h
+ renewBefore: {{ renew_before }}h
+ isCA: false
+ privateKey:
+ algorithm: RSA
+ size: 2048
+ usages:
+ - digital signature
+ - key encipherment
+ - server auth
+ - client auth
+ dnsNames:
+ - {{ plugin.name }}
+ - {{ plugin.name }}.{{ namespace }}
+ - {{ plugin.name }}.{{ namespace }}.svc
+ - {{ plugin.name }}.{{ namespace }}.svc.cluster.local
+ issuerRef:
+ name: {{ issuer_name }}
+ kind: {{ issuer_kind }}
+{% endfor %}
diff --git a/mcpgateway/tools/builder/templates/kubernetes/cert-secrets.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/cert-secrets.yaml.j2
new file mode 100644
index 000000000..67e5a1e87
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/cert-secrets.yaml.j2
@@ -0,0 +1,38 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/cert-secrets.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# mTLS Certificate Secrets
+# CA Certificate (shared by all components)
+apiVersion: v1
+kind: Secret
+metadata:
+ name: mcp-ca-secret
+ namespace: {{ namespace }}
+type: Opaque
+data:
+ ca.crt: {{ ca_cert_b64 }}
+---
+# Gateway Client Certificate
+apiVersion: v1
+kind: Secret
+metadata:
+ name: mcp-{{ gateway_name }}-server-cert
+ namespace: {{ namespace }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ gateway_cert_b64 }}
+ tls.key: {{ gateway_key_b64 }}
+{% for plugin in plugins %}
+---
+# Plugin {{ plugin.name }} Server Certificate
+apiVersion: v1
+kind: Secret
+metadata:
+ name: mcp-{{ plugin.name }}-server-cert
+ namespace: {{ namespace }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ plugin.cert_b64 }}
+ tls.key: {{ plugin.key_b64 }}
+{% endfor %}
diff --git a/mcpgateway/tools/builder/templates/kubernetes/deployment.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/deployment.yaml.j2
new file mode 100644
index 000000000..843bb5fd4
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/deployment.yaml.j2
@@ -0,0 +1,248 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/deployment.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# Kubernetes Deployment for {{ name }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ namespace }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ name }}-env
+ namespace: {{ namespace }}
+type: Opaque
+stringData:
+{% if env_vars is defined and env_vars %}
+ # Environment variables
+ # NOTE: In production, these should come from CI/CD vault secrets
+{% for key, value in env_vars.items() %}
+ {{ key }}: "{{ value }}"
+{% endfor %}
+{% endif %}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ name }}
+ namespace: {{ namespace }}
+ labels:
+ app: {{ name }}
+ component: {% if name == 'mcpgateway' %}gateway{% else %}plugin{% endif %}
+spec:
+ replicas: {{ replicas | default(1) }}
+ selector:
+ matchLabels:
+ app: {{ name }}
+ template:
+ metadata:
+ labels:
+ app: {{ name }}
+ component: {% if name == 'mcpgateway' %}gateway{% else %}plugin{% endif %}
+ spec:
+ {% if image_pull_secret is defined %}
+ imagePullSecrets:
+ - name: {{ image_pull_secret }}
+ {% endif %}
+
+ {% if init_containers is defined %}
+ initContainers:
+ {% for init_container in init_containers %}
+ - name: {{ init_container.name }}
+ image: {{ init_container.image }}
+ command: {{ init_container.command | tojson }}
+ {% endfor %}
+ {% endif %}
+
+ containers:
+ - name: {{ name }}
+ image: {{ image }}
+ imagePullPolicy: {{ image_pull_policy | default('IfNotPresent') }}
+
+ ports:
+ - name: http
+ containerPort: {{ port | default(8000) }}
+ protocol: TCP
+ {% if mtls_enabled | default(true) and name != 'mcpgateway' %}
+ - name: health
+ containerPort: 9000
+ protocol: TCP
+ {% endif %}
+
+ env:
+ {% if mtls_enabled | default(true) %}
+ {% if name == 'mcpgateway' %}
+ # mTLS client configuration (gateway connects to plugins)
+ - name: PLUGINS_CLIENT_MTLS_CA_BUNDLE
+ value: "/app/certs/ca/ca.crt"
+ - name: PLUGINS_CLIENT_MTLS_CERTFILE
+ value: "/app/certs/mcp/tls.crt"
+ - name: PLUGINS_CLIENT_MTLS_KEYFILE
+ value: "/app/certs/mcp/tls.key"
+ - name: PLUGINS_CLIENT_MTLS_VERIFY
+ value: "{{ mtls_verify | default('true') }}"
+ - name: PLUGINS_CLIENT_MTLS_CHECK_HOSTNAME
+ value: "{{ mtls_check_hostname | default('false') }}"
+ {% else %}
+ # mTLS server configuration (plugin accepts gateway connections)
+ - name: PLUGINS_TRANSPORT
+ value: "http"
+ - name: PLUGINS_SERVER_HOST
+ value: "0.0.0.0"
+ - name: PLUGINS_SERVER_PORT
+ value: "{{ port | default(8000) }}"
+ - name: PLUGINS_SERVER_SSL_ENABLED
+ value: "true"
+ - name: PLUGINS_SERVER_SSL_KEYFILE
+ value: "/app/certs/mcp/tls.key"
+ - name: PLUGINS_SERVER_SSL_CERTFILE
+ value: "/app/certs/mcp/tls.crt"
+ - name: PLUGINS_SERVER_SSL_CA_CERTS
+ value: "/app/certs/ca/ca.crt"
+ - name: PLUGINS_SERVER_SSL_CERT_REQS
+ value: "2" # CERT_REQUIRED
+ {% endif %}
+ {% endif %}
+
+ envFrom:
+ - secretRef:
+ name: {{ name }}-env
+
+ {% if health_check | default(true) %}
+ livenessProbe:
+ httpGet:
+ path: /health
+ {% if mtls_enabled | default(true) and name != 'mcpgateway' %}
+ # Plugin with mTLS: use separate health check server on port 9000
+ port: health
+ scheme: HTTP
+ {% else %}
+ # Gateway or non-mTLS: health check on main HTTP port
+ port: http
+ scheme: HTTP
+ {% endif %}
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /health
+ {% if mtls_enabled | default(true) and name != 'mcpgateway' %}
+ # Plugin with mTLS: use separate health check server on port 9000
+ port: health
+ scheme: HTTP
+ {% else %}
+ # Gateway or non-mTLS: health check on main HTTP port
+ port: http
+ scheme: HTTP
+ {% endif %}
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 3
+ {% endif %}
+
+ resources:
+ requests:
+ memory: "{{ memory_request | default('256Mi') }}"
+ cpu: "{{ cpu_request | default('100m') }}"
+ limits:
+ memory: "{{ memory_limit | default('512Mi') }}"
+ cpu: "{{ cpu_limit | default('500m') }}"
+
+ volumeMounts:
+ {% if mtls_enabled | default(true) %}
+ - name: server-cert
+ mountPath: /app/certs/mcp
+ readOnly: true
+ - name: ca-cert
+ mountPath: /app/certs/ca
+ readOnly: true
+ {% endif %}
+ {% if name == 'mcpgateway' and has_plugins | default(false) %}
+ - name: plugins-config
+ mountPath: /app/config
+ readOnly: true
+ {% endif %}
+
+ {% if volume_mounts is defined %}
+ {% for mount in volume_mounts %}
+ - name: {{ mount.name }}
+ mountPath: {{ mount.path }}
+ {% if mount.readonly | default(false) %}
+ readOnly: true
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+
+ securityContext:
+ runAsNonRoot: true
+ {% if run_as_user is defined %}
+ runAsUser: {{ run_as_user }}
+ {% endif %}
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: false
+
+ volumes:
+ {% if mtls_enabled | default(true) %}
+ - name: server-cert
+ secret:
+ secretName: mcp-{{ name }}-server-cert
+ defaultMode: 0444
+ - name: ca-cert
+ secret:
+ secretName: mcp-ca-secret
+ defaultMode: 0444
+ {% endif %}
+ {% if name == 'mcpgateway' and has_plugins | default(false) %}
+ - name: plugins-config
+ configMap:
+ name: plugins-config
+ defaultMode: 0444
+ {% endif %}
+
+ {% if volumes is defined %}
+ {% for volume in volumes %}
+ - name: {{ volume.name }}
+ {% if volume.type == 'secret' %}
+ secret:
+ secretName: {{ volume.secret_name }}
+ {% if volume.default_mode is defined %}
+ defaultMode: {{ volume.default_mode }}
+ {% endif %}
+ {% elif volume.type == 'configmap' %}
+ configMap:
+ name: {{ volume.configmap_name }}
+ {% elif volume.type == 'persistentVolumeClaim' %}
+ persistentVolumeClaim:
+ claimName: {{ volume.claim_name }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ name }}
+ namespace: {{ namespace }}
+ labels:
+ app: {{ name }}
+spec:
+ type: {{ service_type | default('ClusterIP') }}
+ ports:
+ - name: http
+ port: {{ port | default(8000) }}
+ targetPort: http
+ protocol: TCP
+ {% if service_type == 'NodePort' and node_port is defined %}
+ nodePort: {{ node_port }}
+ {% endif %}
+ selector:
+ app: {{ name }}
diff --git a/mcpgateway/tools/builder/templates/kubernetes/plugins-configmap.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/plugins-configmap.yaml.j2
new file mode 100644
index 000000000..d517d8459
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/plugins-configmap.yaml.j2
@@ -0,0 +1,13 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/plugins-configmap.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# ConfigMap for plugins configuration
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: plugins-config
+ namespace: {{ namespace }}
+data:
+ plugins.yaml: |
+{{ plugins_config | safe | indent(4, first=True) }}
diff --git a/mcpgateway/tools/builder/templates/kubernetes/postgres.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/postgres.yaml.j2
new file mode 100644
index 000000000..de58a288e
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/postgres.yaml.j2
@@ -0,0 +1,125 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/postgres.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# PostgreSQL Database for MCP Gateway
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: postgres-pvc
+ namespace: {{ namespace }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ storage_size }}
+ {% if storage_class %}
+ storageClassName: {{ storage_class }}
+ {% endif %}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: postgres-secret
+ namespace: {{ namespace }}
+type: Opaque
+stringData:
+ # Official PostgreSQL image variables
+ POSTGRES_USER: {{ user }}
+ POSTGRES_PASSWORD: {{ password }}
+ POSTGRES_DB: {{ database }}
+ # Red Hat/SCL PostgreSQL image variables (OpenShift-compatible)
+ POSTGRESQL_USER: {{ user }}
+ POSTGRESQL_PASSWORD: {{ password }}
+ POSTGRESQL_DATABASE: {{ database }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: postgres
+ namespace: {{ namespace }}
+ labels:
+ app: postgres
+ component: database
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: postgres
+ template:
+ metadata:
+ labels:
+ app: postgres
+ component: database
+ spec:
+ containers:
+ - name: postgres
+ image: {{ image }}
+ imagePullPolicy: IfNotPresent
+
+ ports:
+ - name: postgres
+ containerPort: 5432
+ protocol: TCP
+
+ envFrom:
+ - secretRef:
+ name: postgres-secret
+
+ volumeMounts:
+ - name: postgres-data
+ mountPath: /var/lib/postgresql/data
+ subPath: postgres
+
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U {{ user }}
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U {{ user }}
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 3
+
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "500m"
+
+ volumes:
+ - name: postgres-data
+ persistentVolumeClaim:
+ claimName: postgres-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: postgres
+ namespace: {{ namespace }}
+ labels:
+ app: postgres
+spec:
+ type: ClusterIP
+ ports:
+ - name: postgres
+ port: 5432
+ targetPort: postgres
+ protocol: TCP
+ selector:
+ app: postgres
diff --git a/mcpgateway/tools/builder/templates/kubernetes/redis.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/redis.yaml.j2
new file mode 100644
index 000000000..340e2c71a
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/redis.yaml.j2
@@ -0,0 +1,76 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/redis.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# Redis Cache for MCP Gateway
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: redis
+ namespace: {{ namespace }}
+ labels:
+ app: redis
+ component: cache
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: redis
+ template:
+ metadata:
+ labels:
+ app: redis
+ component: cache
+ spec:
+ containers:
+ - name: redis
+ image: {{ image }}
+ imagePullPolicy: IfNotPresent
+
+ ports:
+ - name: redis
+ containerPort: 6379
+ protocol: TCP
+
+ livenessProbe:
+ tcpSocket:
+ port: redis
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ readinessProbe:
+ exec:
+ command:
+ - redis-cli
+ - ping
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 3
+
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "50m"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: redis
+ namespace: {{ namespace }}
+ labels:
+ app: redis
+spec:
+ type: ClusterIP
+ ports:
+ - name: redis
+ port: 6379
+ targetPort: redis
+ protocol: TCP
+ selector:
+ app: redis
diff --git a/mcpgateway/tools/builder/templates/kubernetes/route.yaml.j2 b/mcpgateway/tools/builder/templates/kubernetes/route.yaml.j2
new file mode 100644
index 000000000..815ace26d
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/kubernetes/route.yaml.j2
@@ -0,0 +1,25 @@
+# Location: ./mcpgateway/tools/builder/templates/kubernetes/route.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# OpenShift Route for external access to MCP Gateway
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+ name: mcpgateway-admin
+ namespace: {{ namespace }}
+ labels:
+ app: mcpgateway
+ component: gateway
+spec:
+ host: mcpgateway-admin-{{ namespace }}.{{ openshift_domain }}
+ path: /
+ to:
+ kind: Service
+ name: mcpgateway
+ weight: 100
+ port:
+ targetPort: http
+ tls:
+ termination: {{ tls_termination }}
+ wildcardPolicy: None
diff --git a/mcpgateway/tools/builder/templates/plugins-config.yaml.j2 b/mcpgateway/tools/builder/templates/plugins-config.yaml.j2
new file mode 100644
index 000000000..a8221873a
--- /dev/null
+++ b/mcpgateway/tools/builder/templates/plugins-config.yaml.j2
@@ -0,0 +1,49 @@
+# Location: ./mcpgateway/tools/builder/templates/plugins-config.yaml.j2
+# Copyright 2025
+# SPDX-License-Identifier: Apache-2.0
+# Authors: Teryl Taylor
+# Plugin configuration for MCP Gateway
+# Auto-generated from mcp-stack.yaml
+
+# Global plugin settings
+plugin_settings:
+ parallel_execution_within_band: true
+ plugin_timeout: 120
+ fail_on_plugin_error: false
+ enable_plugin_api: true
+ plugin_health_check_interval: 60
+
+# External plugin connections
+plugins:
+{% for plugin in plugins -%}
+- name: {{ plugin.name }}
+ kind: external
+{%- if plugin.description %}
+ description: "{{ plugin.description }}"
+{%- endif %}
+{%- if plugin.version %}
+ version: "{{ plugin.version }}"
+{%- endif %}
+{%- if plugin.author %}
+ author: "{{ plugin.author }}"
+{%- endif %}
+{%- if plugin.hooks %}
+ hooks: {{ plugin.hooks }}
+{%- endif %}
+{%- if plugin.tags %}
+ tags: {{ plugin.tags }}
+{%- endif %}
+{%- if plugin.mode %}
+ mode: "{{ plugin.mode }}"
+{%- endif %}
+{%- if plugin.priority %}
+ priority: {{ plugin.priority }}
+{%- endif %}
+{%- if plugin.conditions %}
+ conditions: {{ plugin.conditions }}
+{%- endif %}
+ mcp:
+ proto: STREAMABLEHTTP
+ url: {{ plugin.url }}
+
+{% endfor %}
diff --git a/mcpgateway/tools/cli.py b/mcpgateway/tools/cli.py
new file mode 100644
index 000000000..d7a869c77
--- /dev/null
+++ b/mcpgateway/tools/cli.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/tools/cli.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+cforge CLI ─ command line tools for building and deploying the
+MCP Gateway and its plugins.
+
+This module is exposed as a **console-script** via:
+
+ [project.scripts]
+ cforge = "mcpgateway.tools.cli:main"
+
+so that a user can simply type `cforge ...` to use the CLI.
+
+Features
+─────────
+* plugin:
+    - bootstrap: Creates a new plugin project from template
+    - install: Installs plugins into a Python environment
+ - package: Builds an MCP server to serve plugins as tools
+* gateway:
+    - Validates mcp-stack.yaml configuration
+ - Builds plugin containers from git repos
+ - Generates mTLS certificates
+ - Deploys to Kubernetes or Docker Compose
+ - Integrates with CI/CD vault secrets
+
+
+Typical usage
+─────────────
+```console
+$ cforge --help
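+$ cforge plugin --help
+$ cforge gateway --help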
+```
+"""
+
+# Third-Party
+import typer
+
+# First-Party
+import mcpgateway.plugins.tools.cli as plugins
+import mcpgateway.tools.builder.cli as builder
+
+app = typer.Typer(help="Command line tools for building, deploying, and interacting with the ContextForge MCP Gateway")
+
+app.add_typer(plugins.app, name="plugin", help="Manage the plugin lifecycle")
+app.add_typer(builder.app, name="gateway", help="Manage the building and deployment of the gateway")
+
+
+def main() -> None: # noqa: D401 - imperative mood is fine here
+ """Entry point for the *cforge* console script."""
+ app(obj={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/mcpgateway/translate_grpc.py b/mcpgateway/translate_grpc.py
index f1aa9a4ba..58b80ab8f 100644
--- a/mcpgateway/translate_grpc.py
+++ b/mcpgateway/translate_grpc.py
@@ -173,7 +173,7 @@ async def _discover_service_details(self, stub, service_name: str) -> None:
# Add to pool (ignore if already exists)
try:
self._pool.Add(file_desc_proto)
- except Exception as e: # noqa: B110
+ except Exception as e: # pylint: disable=broad-except
# Descriptor already in pool, safe to skip
logger.debug(f"Descriptor already in pool: {e}")
diff --git a/mcpgateway/utils/correlation_id.py b/mcpgateway/utils/correlation_id.py
new file mode 100644
index 000000000..6701405e3
--- /dev/null
+++ b/mcpgateway/utils/correlation_id.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/utils/correlation_id.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: MCP Gateway Contributors
+
+Correlation ID (Request ID) Utilities.
+
+This module provides async-safe utilities for managing correlation IDs (also known as
+request IDs) throughout the request lifecycle using Python's contextvars.
+
+The correlation ID is a unique identifier that tracks a single request as it flows
+through all components of the system (HTTP → Middleware → Services → Plugins → Logs).
+
+Key concepts:
+- ContextVar provides per-request isolation in async environments
+- Correlation IDs can be client-provided (X-Correlation-ID header) or auto-generated
+- The same ID is used as request_id throughout logs, services, and plugin contexts
+- Thread-safe and async-safe (no cross-contamination between concurrent requests)
+"""
+
+# Standard
+from contextvars import ContextVar
+import logging
+from typing import Dict, Optional
+import uuid
+
+logger = logging.getLogger(__name__)
+
+# Context variable for storing correlation ID (request ID) per-request
+# This is async-safe and provides automatic isolation between concurrent requests
+_correlation_id_context: ContextVar[Optional[str]] = ContextVar("correlation_id", default=None)
+
+
+def get_correlation_id() -> Optional[str]:
+ """Get the current correlation ID (request ID) from context.
+
+ Returns the correlation ID for the current async task/request. Each request
+ has its own isolated context, so concurrent requests won't interfere.
+
+ Returns:
+ Optional[str]: The correlation ID if set, None otherwise
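+
+    Example:
+        >>> set_correlation_id("req-42")
+        >>> get_correlation_id()
+        'req-42'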
+ """
+ return _correlation_id_context.get()
+
+
+def set_correlation_id(correlation_id: str) -> None:
+ """Set the correlation ID (request ID) for the current context.
+
+ Stores the correlation ID in a context variable that's automatically isolated
+ per async task. This ID will be used as request_id throughout the system.
+
+ Args:
+ correlation_id: The correlation ID to set (typically a UUID or client-provided ID)
+ """
+ _correlation_id_context.set(correlation_id)
+
+
+def clear_correlation_id() -> None:
+ """Clear the correlation ID (request ID) from the current context.
+
+ Should be called at the end of request processing to clean up context.
+ In practice, FastAPI middleware automatically handles context cleanup.
+
+ Note: This is optional as ContextVar automatically cleans up when the
+ async task completes.
+ """
+ _correlation_id_context.set(None)
+
+
+def generate_correlation_id() -> str:
+ """Generate a new correlation ID (UUID4 hex format).
+
+ Creates a new random UUID suitable for use as a correlation ID.
+ Uses UUID4 which provides 122 bits of randomness.
+
+ Returns:
+ str: A new UUID in hex format (32 characters, no hyphens)
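+
+    Example:
+        >>> cid = generate_correlation_id()
+        >>> len(cid)
+        32
+        >>> "-" in cid
+        False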
+ """
+ return uuid.uuid4().hex
+
+
+def extract_correlation_id_from_headers(headers: Dict[str, str], header_name: str = "X-Correlation-ID") -> Optional[str]:
+ """Extract correlation ID from HTTP headers.
+
+ Searches for the correlation ID header (case-insensitive) and returns its value.
+ Validates that the value is non-empty after stripping whitespace.
+
+ Args:
+ headers: Dictionary of HTTP headers
+ header_name: Name of the correlation ID header (default: X-Correlation-ID)
+
+ Returns:
+ Optional[str]: The correlation ID if found and valid, None otherwise
+
+ Example:
+ >>> headers = {"X-Correlation-ID": "abc-123"}
+ >>> extract_correlation_id_from_headers(headers)
+ 'abc-123'
+
+ >>> headers = {"x-correlation-id": "def-456"} # Case insensitive
+ >>> extract_correlation_id_from_headers(headers)
+ 'def-456'
+ """
+ # Headers can be accessed case-insensitively in FastAPI/Starlette
+ for key, value in headers.items():
+ if key.lower() == header_name.lower():
+ correlation_id = value.strip()
+ if correlation_id:
+ return correlation_id
+ return None
+
+
+def get_or_generate_correlation_id() -> str:
+ """Get the current correlation ID or generate a new one if not set.
+
+ This is a convenience function that ensures you always have a correlation ID.
+ If the current context doesn't have a correlation ID, it generates and sets
+ a new one.
+
+ Returns:
+ str: The correlation ID (either existing or newly generated)
+
+ Example:
+ >>> # First call generates new ID
+ >>> id1 = get_or_generate_correlation_id()
+ >>> # Second call returns same ID
+ >>> id2 = get_or_generate_correlation_id()
+ >>> assert id1 == id2
+ """
+ correlation_id = get_correlation_id()
+ if not correlation_id:
+ correlation_id = generate_correlation_id()
+ set_correlation_id(correlation_id)
+ return correlation_id
+
+
+def validate_correlation_id(correlation_id: Optional[str], max_length: int = 255) -> bool:
+ """Validate a correlation ID for safety and length.
+
+ Checks that the correlation ID is:
+ - Non-empty after stripping whitespace
+ - Within the maximum length limit
+ - Contains only safe characters (alphanumeric, hyphens, underscores)
+
+ Args:
+ correlation_id: The correlation ID to validate
+ max_length: Maximum allowed length (default: 255)
+
+ Returns:
+ bool: True if valid, False otherwise
+
+ Example:
+ >>> validate_correlation_id("abc-123")
+ True
+ >>> validate_correlation_id("abc 123") # Spaces not allowed
+ False
+ >>> validate_correlation_id("a" * 300) # Too long
+ False
+ """
+ if not correlation_id or not correlation_id.strip():
+ return False
+
+ correlation_id = correlation_id.strip()
+
+ if len(correlation_id) > max_length:
+ logger.warning(f"Correlation ID too long: {len(correlation_id)} > {max_length}")
+ return False
+
+ # Allow alphanumeric, hyphens, and underscores only
+ if not all(c.isalnum() or c in ("-", "_") for c in correlation_id):
+ logger.warning(f"Correlation ID contains invalid characters: {correlation_id}")
+ return False
+
+ return True
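+
+
+# Illustrative sketch (not part of this module's API): a FastAPI/Starlette HTTP
+# middleware would typically combine these helpers roughly as follows; the
+# middleware name and wiring are assumptions for illustration only.
+#
+# async def correlation_middleware(request, call_next):
+#     incoming = extract_correlation_id_from_headers(dict(request.headers))
+#     if incoming and validate_correlation_id(incoming):
+#         set_correlation_id(incoming)
+#     else:
+#         set_correlation_id(generate_correlation_id())
+#     try:
+#         return await call_next(request)
+#     finally:
+#         clear_correlation_id()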
diff --git a/mcpgateway/utils/retry_manager.py b/mcpgateway/utils/retry_manager.py
index c8cb8283f..3e721167e 100644
--- a/mcpgateway/utils/retry_manager.py
+++ b/mcpgateway/utils/retry_manager.py
@@ -301,7 +301,7 @@ async def _sleep_with_jitter(self, base: float, jitter_range: float):
True
"""
# random.uniform() is safe here as jitter is only used for retry timing, not security
- delay = base + random.uniform(0, jitter_range) # noqa: DUO102 # nosec B311
+ delay = base + random.uniform(0, jitter_range) # nosec B311 # noqa: DUO102
# Ensure delay doesn't exceed the max allowed
delay = min(delay, self.max_delay)
await asyncio.sleep(delay)
diff --git a/mcpgateway/utils/ssl_key_manager.py b/mcpgateway/utils/ssl_key_manager.py
new file mode 100644
index 000000000..8c4fa4533
--- /dev/null
+++ b/mcpgateway/utils/ssl_key_manager.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/utils/ssl_key_manager.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Keval Mahajan
+
+SSL key management utilities for handling passphrase-protected keys.
+
+This module provides utilities for managing SSL private keys, including support
+for passphrase-protected keys. It handles decryption and secure temporary file
+management for use with Gunicorn and other servers that don't natively support
+passphrase-protected keys.
+"""
+
+# Standard
+import atexit
+import logging
+import os
+from pathlib import Path
+import tempfile
+from typing import Optional
+
+# Third-Party
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+logger = logging.getLogger(__name__)
+
+
+class SSLKeyManager:
+ """Manages SSL private keys, including passphrase-protected keys.
+
+ This class handles the decryption of passphrase-protected private keys
+ and creates temporary unencrypted key files for use with servers that
+ don't support passphrase-protected keys directly (like Gunicorn).
+
+ The temporary files are created with secure permissions (0o600) and are
+ automatically cleaned up on process exit.
+
+ Examples:
+ >>> manager = SSLKeyManager()
+ >>> key_path = manager.prepare_key_file("certs/key.pem") # doctest: +SKIP
+ >>> # Use key_path with Gunicorn
+ >>> manager.cleanup() # doctest: +SKIP
+ """
+
+ def __init__(self):
+ """Initialize the SSL key manager."""
+ self._temp_key_file: Optional[Path] = None
+
+ def prepare_key_file(
+ self,
+ key_file: str | Path,
+ password: Optional[str] = None,
+ ) -> str:
+ """Prepare a key file for use with Gunicorn.
+
+ If the key is passphrase-protected, decrypt it and write to a
+ temporary file with secure permissions. Otherwise, return the
+ original path.
+
+ Args:
+ key_file: Path to the private key file
+ password: Optional passphrase for encrypted key
+
+ Returns:
+ Path to the usable key file (original or temporary)
+
+ Raises:
+ FileNotFoundError: If the key file doesn't exist
+ ValueError: If decryption fails (wrong passphrase, invalid key, etc.)
+
+ Examples:
+ >>> manager = SSLKeyManager()
+ >>> # Unencrypted key - returns original path
+ >>> path = manager.prepare_key_file("certs/key.pem") # doctest: +SKIP
+ >>> # Encrypted key - returns temporary decrypted path
+ >>> path = manager.prepare_key_file("certs/key-enc.pem", "secret") # doctest: +SKIP
+ """
+ key_path = Path(key_file)
+
+ if not key_path.exists():
+ raise FileNotFoundError(f"Key file not found: {key_file}")
+
+ # If no password, use the key as-is
+ if not password:
+ logger.info(f"Using unencrypted key file: {key_file}")
+ return str(key_path)
+
+ # Decrypt the key and write to temporary file
+ logger.info("Decrypting passphrase-protected key...")
+
+ try:
+ # Read and decrypt the key
+ with open(key_path, "rb") as f:
+ key_data = f.read()
+
+ private_key = load_pem_private_key(
+ key_data,
+ password=password.encode() if password else None,
+ )
+
+ # Serialize to unencrypted PEM
+ unencrypted_pem = private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption(),
+ )
+
+ # Write to temporary file with secure permissions
+ fd, temp_path = tempfile.mkstemp(suffix=".pem", prefix="ssl_key_")
+ self._temp_key_file = Path(temp_path)
+
+ # Set restrictive permissions (owner read/write only)
+ os.chmod(temp_path, 0o600)
+
+ # Write the decrypted key
+ with os.fdopen(fd, "wb") as f:
+ f.write(unencrypted_pem)
+
+ logger.info(f"Decrypted key written to temporary file: {temp_path}")
+
+ # Register cleanup on exit
+ atexit.register(self.cleanup)
+
+ return temp_path
+
+ except Exception as e:
+ logger.error(f"Failed to decrypt key: {e}")
+ self.cleanup()
+ raise ValueError("Failed to decrypt private key. Check that the passphrase is correct.") from e
+
+ def cleanup(self):
+ """Remove temporary key file if it exists.
+
+ This method is automatically called on process exit via atexit,
+ but can also be called manually for explicit cleanup.
+ """
+ if self._temp_key_file and self._temp_key_file.exists():
+ try:
+ self._temp_key_file.unlink()
+ logger.info(f"Cleaned up temporary key file: {self._temp_key_file}")
+ except Exception as e:
+ logger.warning(f"Failed to clean up temporary key file: {e}")
+ finally:
+ self._temp_key_file = None
+
+
+# Global instance for convenience
+_key_manager = SSLKeyManager()
+
+
+def prepare_ssl_key(key_file: str, password: Optional[str] = None) -> str:
+ """Prepare an SSL key file for use with Gunicorn.
+
+ This is a convenience function that uses the global key manager instance.
+
+ Args:
+ key_file: Path to the private key file
+ password: Optional passphrase for encrypted key
+
+ Returns:
+ Path to the usable key file (original or temporary)
+
+ Raises:
+ FileNotFoundError: If the key file doesn't exist
+ ValueError: If decryption fails
+
+ Examples:
+ >>> from mcpgateway.utils.ssl_key_manager import prepare_ssl_key
+ >>> key_path = prepare_ssl_key("certs/key.pem") # doctest: +SKIP
+ >>> key_path = prepare_ssl_key("certs/key-enc.pem", "secret") # doctest: +SKIP
+ """
+ return _key_manager.prepare_key_file(key_file, password)
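+
+
+# Illustrative usage sketch: in a gunicorn.conf.py you might decrypt the key at
+# startup and hand Gunicorn the temporary path. The SSL_KEYFILE_PASSWORD env
+# var name below is an assumption for illustration only.
+#
+# import os
+# from mcpgateway.utils.ssl_key_manager import prepare_ssl_key
+#
+# keyfile = prepare_ssl_key("certs/key.pem", os.environ.get("SSL_KEYFILE_PASSWORD"))
+# certfile = "certs/cert.pem"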
diff --git a/mcpgateway/utils/validate_signature.py b/mcpgateway/utils/validate_signature.py
index 35216d5c4..647b37fb7 100755
--- a/mcpgateway/utils/validate_signature.py
+++ b/mcpgateway/utils/validate_signature.py
@@ -115,6 +115,14 @@ def validate_signature(data: bytes, signature: bytes | str, public_key_pem: str)
>>> # Test invalid signature
>>> validate_signature(b"wrong data", signature, public_pem)
False
+ >>>
+ >>> # Test with string data (gets encoded)
+ >>> validate_signature("test message", signature, public_pem)
+ True
+ >>>
+ >>> # Test invalid hex signature format
+ >>> validate_signature(data, "not-valid-hex", public_pem)
+ False
"""
if isinstance(data, str):
data = data.encode()
@@ -182,6 +190,20 @@ def resign_data(
>>> new_sig = resign_data(data, old_public_pem, "", new_private_pem)
>>> isinstance(new_sig, str)
True
+ >>>
+ >>> # Test re-signing with valid old signature
+ >>> old_sig = old_private.sign(data)
+ >>> new_sig2 = resign_data(data, old_public_pem, old_sig, new_private_pem)
+ >>> isinstance(new_sig2, str)
+ True
+ >>> new_sig2 != old_sig.hex() # New signature should be different
+ True
+ >>>
+ >>> # Test with invalid old signature (should return None)
+ >>> bad_sig = b"invalid signature bytes"
+ >>> result = resign_data(data, old_public_pem, bad_sig, new_private_pem)
+ >>> result is None
+ True
"""
# Handle first-time signing (no old signature)
if not old_signature:
diff --git a/plugins/config.yaml b/plugins/config.yaml
index 7c821daf6..e36d45fe1 100644
--- a/plugins/config.yaml
+++ b/plugins/config.yaml
@@ -755,7 +755,7 @@ plugins:
hooks: ["tool_pre_invoke"]
tags: ["security", "vault", "OAUTH2"]
# mode: "permissive"
- mode: "disabled"
+ mode: "permissive"
priority: 10
conditions:
- prompts: []
diff --git a/plugins/external/cedar/.dockerignore b/plugins/external/cedar/.dockerignore
new file mode 100644
index 000000000..e9a71f900
--- /dev/null
+++ b/plugins/external/cedar/.dockerignore
@@ -0,0 +1,363 @@
+#----------------------------------------------------------------------
+# Docker Build Context Optimization
+#
+# This .dockerignore file excludes unnecessary files from the Docker
+# build context to improve build performance and security.
+#----------------------------------------------------------------------
+
+#----------------------------------------------------------------------
+# 1. Development and source directories (not needed in production)
+#----------------------------------------------------------------------
+agent_runtimes/
+charts/
+deployment/
+docs/
+deployment/k8s/
+mcp-servers/
+tests/
+test/
+attic/
+*.md
+.benchmarks/
+
+# Development environment directories
+.devcontainer/
+.github/
+.vscode/
+.idea/
+
+#----------------------------------------------------------------------
+# 2. Version control
+#----------------------------------------------------------------------
+.git/
+.gitignore
+.gitattributes
+.gitmodules
+
+#----------------------------------------------------------------------
+# 3. Python build artifacts and caches
+#----------------------------------------------------------------------
+# Byte-compiled files
+__pycache__/
+*.py[cod]
+*.pyc
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+.wily/
+
+# PyInstaller
+*.manifest
+*.spec
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+#----------------------------------------------------------------------
+# 4. Virtual environments
+#----------------------------------------------------------------------
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+.python37/
+.python39/
+.python-version
+
+# PDM
+pdm.lock
+.pdm.toml
+.pdm-python
+
+#----------------------------------------------------------------------
+# 5. Package managers and dependencies
+#----------------------------------------------------------------------
+# Node.js
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.npm
+.yarn
+
+# pip
+pip-log.txt
+pip-delete-this-directory.txt
+
+#----------------------------------------------------------------------
+# 6. Docker and container files (avoid recursive copies)
+#----------------------------------------------------------------------
+Dockerfile
+Dockerfile.*
+Containerfile
+Containerfile.*
+docker-compose.yml
+docker-compose.*.yml
+podman-compose*.yaml
+.dockerignore
+
+#----------------------------------------------------------------------
+# 7. IDE and editor files
+#----------------------------------------------------------------------
+# JetBrains
+.idea/
+*.iml
+*.iws
+*.ipr
+
+# VSCode
+.vscode/
+*.code-workspace
+
+# Vim
+*.swp
+*.swo
+*~
+
+# Emacs
+*~
+\#*\#
+.\#*
+
+# macOS
+.DS_Store
+.AppleDouble
+.LSOverride
+
+#----------------------------------------------------------------------
+# 8. Build tools and CI/CD configurations
+#----------------------------------------------------------------------
+# Testing configurations
+.coveragerc
+.pylintrc
+.flake8
+pytest.ini
+tox.ini
+.pytest.ini
+
+# Linting and formatting
+.hadolint.yaml
+.pre-commit-config.yaml
+.pycodestyle
+.pyre_configuration
+.pyspelling.yaml
+.ruff.toml
+.shellcheckrc
+
+# Build configurations
+Makefile
+setup.cfg
+pyproject.toml.bak
+MANIFEST.in
+
+# CI/CD
+.travis.*
+.gitlab-ci.yml
+.circleci/
+.github/
+azure-pipelines.yml
+Jenkinsfile
+
+# Code quality
+sonar-code.properties
+sonar-project.properties
+.scannerwork/
+whitesource.config
+.whitesource
+
+# Other tools
+.bumpversion.cfg
+.editorconfig
+mypy.ini
+
+#----------------------------------------------------------------------
+# 9. Application runtime files (should not be in image)
+#----------------------------------------------------------------------
+# Databases
+*.db
+*.sqlite
+*.sqlite3
+mcp.db
+db.sqlite3
+
+# Logs
+*.log
+logs/
+log/
+
+# Certificates and secrets
+certs/
+*.pem
+*.key
+*.crt
+*.csr
+.env
+.env.*
+
+# Generated files
+public/
+static/
+media/
+
+# Application instances
+instance/
+local_settings.py
+
+#----------------------------------------------------------------------
+# 10. Framework-specific files
+#----------------------------------------------------------------------
+# Django
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+media/
+
+# Flask
+instance/
+.webassets-cache
+
+# Scrapy
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+docs/build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+*.ipynb
+
+# IPython
+profile_default/
+ipython_config.py
+
+# celery
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+#----------------------------------------------------------------------
+# 11. Backup and temporary files
+#----------------------------------------------------------------------
+*.bak
+*.backup
+*.tmp
+*.temp
+*.orig
+*.rej
+.backup/
+backup/
+tmp/
+temp/
+
+#----------------------------------------------------------------------
+# 12. Documentation and miscellaneous
+#----------------------------------------------------------------------
+*.md
+!README.md
+LICENSE
+CHANGELOG
+AUTHORS
+CONTRIBUTORS
+TODO
+TODO.md
+DEVELOPING.md
+CONTRIBUTING.md
+
+# Spelling
+.spellcheck-en.txt
+*.dic
+
+# Shell scripts (if not needed in container)
+test.sh
+scripts/test/
+scripts/dev/
+
+#----------------------------------------------------------------------
+# 13. OS-specific files
+#----------------------------------------------------------------------
+# Windows
+Thumbs.db
+ehthumbs.db
+Desktop.ini
+$RECYCLE.BIN/
+
+# Linux
+*~
+.fuse_hidden*
+.directory
+.Trash-*
+.nfs*
+
+#----------------------------------------------------------------------
+# End of .dockerignore
+#----------------------------------------------------------------------
diff --git a/plugins/external/cedar/.env.template b/plugins/external/cedar/.env.template
new file mode 100644
index 000000000..5dbc57403
--- /dev/null
+++ b/plugins/external/cedar/.env.template
@@ -0,0 +1,63 @@
+#####################################
+# Plugins Settings
+#####################################
+
+# Enable the plugin framework
+PLUGINS_ENABLED=false
+
+# Enable auto-completion for plugins CLI
+PLUGINS_CLI_COMPLETION=false
+
+# default host port to listen on
+PLUGINS_SERVER_HOST=0.0.0.0
+
+# Set markup mode for plugins CLI
+# Valid options:
+# rich: use rich markup
+# markdown: allow markdown in help strings
+# disabled: disable markup
+# If unset (commented out), uses "rich" if rich is detected, otherwise disables it.
+PLUGINS_CLI_MARKUP_MODE=rich
+
+# Configuration path for plugin loader
+PLUGINS_CONFIG=./resources/plugins/config.yaml
+
+# Configuration path for chuck mcp runtime
+CHUK_MCP_CONFIG_PATH=./resources/runtime/config.yaml
+
+# Configuration for plugins transport
+PLUGINS_TRANSPORT=streamablehttp
+
+#####################################
+# MCP External Plugin Server - mTLS Configuration
+#####################################
+
+# Enable SSL/TLS for external plugin MCP server
+# Options: true, false (default)
+# When true: Enables HTTPS and optionally mTLS for the plugin MCP server
+MCP_SSL_ENABLED=false
+
+# SSL/TLS Certificate Files
+# Path to server private key (required when MCP_SSL_ENABLED=true)
+# Generate with: openssl genrsa -out certs/mcp/server.key 2048
+# MCP_SSL_KEYFILE=certs/mcp/server.key
+
+# Path to server certificate (required when MCP_SSL_ENABLED=true)
+# Generate with: openssl req -new -x509 -key certs/mcp/server.key -out certs/mcp/server.crt -days 365
+# MCP_SSL_CERTFILE=certs/mcp/server.crt
+
+# Optional password for encrypted private key
+# MCP_SSL_KEYFILE_PASSWORD=
+
+# mTLS (Mutual TLS) Configuration
+# Client certificate verification mode:
+# 0 (CERT_NONE): No client certificate required - standard TLS (default)
+# 1 (CERT_OPTIONAL): Client certificate optional - validate if provided
+# 2 (CERT_REQUIRED): Client certificate required - full mTLS
+# Default: 0 (standard TLS without client verification)
+MCP_SSL_CERT_REQS=0
+
+# CA certificate bundle for verifying client certificates
+# Required when MCP_SSL_CERT_REQS=1 or MCP_SSL_CERT_REQS=2
+# Can be a single CA file or a bundle containing multiple CAs
+# MCP_SSL_CA_CERTS=certs/mcp/ca.crt
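+
+# Example (illustrative only): generate a test CA and client certificate for mTLS
+# openssl req -new -x509 -days 365 -nodes -keyout certs/mcp/ca.key -out certs/mcp/ca.crt -subj "/CN=test-ca"
+# openssl req -new -nodes -keyout certs/mcp/client.key -out certs/mcp/client.csr -subj "/CN=test-client"
+# openssl x509 -req -in certs/mcp/client.csr -CA certs/mcp/ca.crt -CAkey certs/mcp/ca.key -CAcreateserial -out certs/mcp/client.crt -days 365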
diff --git a/plugins/external/cedar/.ruff.toml b/plugins/external/cedar/.ruff.toml
new file mode 100644
index 000000000..443a275df
--- /dev/null
+++ b/plugins/external/cedar/.ruff.toml
@@ -0,0 +1,63 @@
+# Exclude a variety of commonly ignored directories.
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".ipynb_checkpoints",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pyenv",
+ ".pytest_cache",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ ".vscode",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "node_modules",
+ "site-packages",
+ "venv",
+ "docs",
+ "test"
+]
+
+# 200 line length
+line-length = 200
+indent-width = 4
+
+# Assume Python 3.11
+target-version = "py311"
+
+[lint]
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+select = ["E4", "E7", "E9", "F"]
+ignore = []
+
+# Allow fix for all enabled rules (when `--fix`) is provided.
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
diff --git a/plugins/external/cedar/Containerfile b/plugins/external/cedar/Containerfile
new file mode 100644
index 000000000..d2d5f6748
--- /dev/null
+++ b/plugins/external/cedar/Containerfile
@@ -0,0 +1,47 @@
+# syntax=docker/dockerfile:1.7
+ARG UBI=python-312-minimal
+
+FROM registry.access.redhat.com/ubi9/${UBI} AS builder
+
+ARG PYTHON_VERSION=3.12
+
+ARG VERSION
+ARG COMMIT_ID
+ARG SKILLS_SDK_COMMIT_ID
+ARG SKILLS_SDK_VERSION
+ARG BUILD_TIME_SKILLS_INSTALL
+
+ENV APP_HOME=/app
+
+USER 0
+
+# Image pre-requisites
+RUN INSTALL_PKGS="git make gcc gcc-c++ python${PYTHON_VERSION}-devel" && \
+ microdnf -y --setopt=tsflags=nodocs --setopt=install_weak_deps=0 install $INSTALL_PKGS && \
+ microdnf -y clean all --enablerepo='*'
+
+# Setup alias from HOME to APP_HOME
+RUN mkdir -p ${APP_HOME} && \
+ chown -R 1001:0 ${APP_HOME} && \
+ ln -s ${HOME} ${APP_HOME} && \
+ mkdir -p ${HOME}/resources/config && \
+ chown -R 1001:0 ${HOME}/resources/config
+
+USER 1001
+
+# Install plugin package
+COPY . .
+RUN pip install --no-cache-dir uv && python -m uv pip install .
+
+# Make default cache directory writable
+RUN mkdir -p -m 0776 ${HOME}/.cache
+
+# Update labels
+LABEL maintainer="Context Forge MCP Gateway Team" \
+ name="mcp/mcppluginserver" \
+ version="${VERSION}" \
+ url="https://github.com/IBM/mcp-context-forge" \
+ description="MCP Plugin Server for the Context Forge MCP Gateway"
+
+# App entrypoint
+ENTRYPOINT ["sh", "-c", "${HOME}/run-server.sh"]
diff --git a/plugins/external/cedar/MANIFEST.in b/plugins/external/cedar/MANIFEST.in
new file mode 100644
index 000000000..1fb92c60a
--- /dev/null
+++ b/plugins/external/cedar/MANIFEST.in
@@ -0,0 +1,67 @@
+# ──────────────────────────────────────────────────────────────
+# MANIFEST.in - source-distribution contents for cedarpolicyplugin
+# ──────────────────────────────────────────────────────────────
+
+# 1️⃣ Core project files that SDists/Wheels should always carry
+include LICENSE
+include README.md
+include pyproject.toml
+include Containerfile
+
+# 2️⃣ Top-level config, examples and helper scripts
+include *.py
+include *.md
+include *.example
+include *.lock
+include *.properties
+include *.toml
+include *.yaml
+include *.yml
+include *.json
+include *.sh
+include *.txt
+recursive-include tests/async *.py
+recursive-include tests/async *.yaml
+
+# 3️⃣ Tooling/lint configuration dot-files (explicit so they're not lost)
+include .env.make
+include .interrogaterc
+include .jshintrc
+include whitesource.config
+include .darglint
+include .dockerignore
+include .flake8
+include .htmlhintrc
+include .pycodestyle
+include .pylintrc
+include .whitesource
+include .coveragerc
+# include .gitignore # purely optional but many projects ship it
+include .bumpversion.cfg
+include .yamllint
+include .editorconfig
+include .snyk
+
+# 4️⃣ Runtime data that lives *inside* the package at import time
+recursive-include resources/plugins *.yaml
+recursive-include cedarpolicyplugin *.yaml
+
+# 5️⃣ (Optional) include MKDocs-based docs in the sdist
+# graft docs
+
+# 6️⃣ Never publish caches, compiled or build outputs, deployment, agent_runtimes, etc.
+global-exclude __pycache__ *.py[cod] *.so *.dylib
+prune build
+prune dist
+prune .eggs
+prune *.egg-info
+prune charts
+prune k8s
+prune .devcontainer
+exclude CLAUDE.*
+exclude llms-full.txt
+
+# Exclude deployment, mcp-servers and agent_runtimes
+prune deployment
+prune mcp-servers
+prune agent_runtimes
diff --git a/plugins/external/cedar/Makefile b/plugins/external/cedar/Makefile
new file mode 100644
index 000000000..a6855e6e3
--- /dev/null
+++ b/plugins/external/cedar/Makefile
@@ -0,0 +1,449 @@
+
+REQUIRED_BUILD_BINS := uv
+
+SHELL := /bin/bash
+.SHELLFLAGS := -eu -o pipefail -c
+
+# Project variables
+PACKAGE_NAME = cedarpolicyplugin
+PROJECT_NAME = cedarpolicyplugin
+TARGET ?= cedarpolicyplugin
+
+# Virtual-environment variables
+VENVS_DIR ?= $(HOME)/.venv
+VENV_DIR ?= $(VENVS_DIR)/$(PROJECT_NAME)
+
+# =============================================================================
+# Linters
+# =============================================================================
+
+black:
+ @echo "🎨 black $(TARGET)..." && $(VENV_DIR)/bin/black -l 200 $(TARGET)
+
+black-check:
+ @echo "🎨 black --check $(TARGET)..." && $(VENV_DIR)/bin/black -l 200 --check --diff $(TARGET)
+
+ruff:
+ @echo "⚡ ruff $(TARGET)..." && $(VENV_DIR)/bin/ruff check $(TARGET) && $(VENV_DIR)/bin/ruff format $(TARGET)
+
+ruff-check:
+ @echo "⚡ ruff check $(TARGET)..." && $(VENV_DIR)/bin/ruff check $(TARGET)
+
+ruff-fix:
+ @echo "⚡ ruff check --fix $(TARGET)..." && $(VENV_DIR)/bin/ruff check --fix $(TARGET)
+
+ruff-format:
+ @echo "⚡ ruff format $(TARGET)..." && $(VENV_DIR)/bin/ruff format $(TARGET)
+
+# =============================================================================
+# Container runtime configuration and operations
+# =============================================================================
+
+# Container resource limits
+CONTAINER_MEMORY = 2048m
+CONTAINER_CPUS = 2
+
+# Auto-detect container runtime if not specified - DEFAULT TO DOCKER
+CONTAINER_RUNTIME ?= $(shell command -v docker >/dev/null 2>&1 && echo docker || echo podman)
+
+# Alternative: Always default to docker unless explicitly overridden
+# CONTAINER_RUNTIME ?= docker
+
+# Container port
+CONTAINER_PORT ?= 8000
+CONTAINER_INTERNAL_PORT ?= 8000
+
+print-runtime:
+ @echo Using container runtime: $(CONTAINER_RUNTIME)
+
+# Base image name (without any prefix)
+IMAGE_BASE ?= mcpgateway/$(PROJECT_NAME)
+IMAGE_TAG ?= latest
+
+# Handle runtime-specific image naming
+ifeq ($(CONTAINER_RUNTIME),podman)
+ # Podman adds localhost/ prefix for local builds
+ IMAGE_LOCAL := localhost/$(IMAGE_BASE):$(IMAGE_TAG)
+ IMAGE_LOCAL_DEV := localhost/$(IMAGE_BASE)-dev:$(IMAGE_TAG)
+ IMAGE_PUSH := $(IMAGE_BASE):$(IMAGE_TAG)
+else
+ # Docker doesn't add prefix
+ IMAGE_LOCAL := $(IMAGE_BASE):$(IMAGE_TAG)
+ IMAGE_LOCAL_DEV := $(IMAGE_BASE)-dev:$(IMAGE_TAG)
+ IMAGE_PUSH := $(IMAGE_BASE):$(IMAGE_TAG)
+endif
+
+print-image:
+ @echo "🐳 Container Runtime: $(CONTAINER_RUNTIME)"
+ @echo "Using image: $(IMAGE_LOCAL)"
+ @echo "Development image: $(IMAGE_LOCAL_DEV)"
+ @echo "Push image: $(IMAGE_PUSH)"
+
+
+
+# Function to get the actual image name as it appears in image list
+define get_image_name
+$(shell $(CONTAINER_RUNTIME) images --format "{{.Repository}}:{{.Tag}}" | grep -E "(localhost/)?$(IMAGE_BASE):$(IMAGE_TAG)" | head -1)
+endef
+
+# Function to normalize image name for operations
+define normalize_image
+$(if $(findstring localhost/,$(1)),$(1),$(if $(filter podman,$(CONTAINER_RUNTIME)),localhost/$(1),$(1)))
+endef
+
+# Containerfile to use (can be overridden)
+#CONTAINER_FILE ?= Containerfile
+CONTAINER_FILE ?= $(shell [ -f "Containerfile" ] && echo "Containerfile" || echo "Dockerfile")
+
+# Define COMMA for the conditional Z flag
+COMMA := ,
+
+container-info:
+ @echo "🐳 Container Runtime Configuration"
+ @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+ @echo "Runtime: $(CONTAINER_RUNTIME)"
+ @echo "Base Image: $(IMAGE_BASE)"
+ @echo "Tag: $(IMAGE_TAG)"
+ @echo "Local Image: $(IMAGE_LOCAL)"
+ @echo "Push Image: $(IMAGE_PUSH)"
+ @echo "Actual Image: $(call get_image_name)"
+ @echo "Container File: $(CONTAINER_FILE)"
+ @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+
+# Auto-detect platform based on uname
+PLATFORM ?= linux/$(shell uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/')
+
+container-build:
+ @echo "🔨 Building with $(CONTAINER_RUNTIME) for platform $(PLATFORM)..."
+ $(CONTAINER_RUNTIME) build \
+ --platform=$(PLATFORM) \
+ -f $(CONTAINER_FILE) \
+ --tag $(IMAGE_BASE):$(IMAGE_TAG) \
+ .
+ @echo "✅ Built image: $(call get_image_name)"
+ $(CONTAINER_RUNTIME) images $(IMAGE_BASE):$(IMAGE_TAG)
+
+container-run: container-check-image
+ @echo "🚀 Running with $(CONTAINER_RUNTIME)..."
+ -$(CONTAINER_RUNTIME) stop $(PROJECT_NAME) 2>/dev/null || true
+ -$(CONTAINER_RUNTIME) rm $(PROJECT_NAME) 2>/dev/null || true
+ $(CONTAINER_RUNTIME) run --name $(PROJECT_NAME) \
+ --env-file=.env \
+ -p $(CONTAINER_PORT):$(CONTAINER_INTERNAL_PORT) \
+ --restart=always \
+ --memory=$(CONTAINER_MEMORY) --cpus=$(CONTAINER_CPUS) \
+ --health-cmd="curl --fail http://localhost:$(CONTAINER_INTERNAL_PORT)/health || exit 1" \
+ --health-interval=1m --health-retries=3 \
+ --health-start-period=30s --health-timeout=10s \
+ -d $(call get_image_name)
+ @sleep 2
+ @echo "✅ Container started"
+ @echo "🔍 Health check status:"
+ @$(CONTAINER_RUNTIME) inspect $(PROJECT_NAME) --format='{{.State.Health.Status}}' 2>/dev/null || echo "No health check configured"
+
+container-run-host: container-check-image
+ @echo "🚀 Running with $(CONTAINER_RUNTIME)..."
+ -$(CONTAINER_RUNTIME) stop $(PROJECT_NAME) 2>/dev/null || true
+ -$(CONTAINER_RUNTIME) rm $(PROJECT_NAME) 2>/dev/null || true
+ $(CONTAINER_RUNTIME) run --name $(PROJECT_NAME) \
+ --env-file=.env \
+ --network=host \
+ -p $(CONTAINER_PORT):$(CONTAINER_INTERNAL_PORT) \
+ --restart=always \
+ --memory=$(CONTAINER_MEMORY) --cpus=$(CONTAINER_CPUS) \
+ --health-cmd="curl --fail http://localhost:$(CONTAINER_INTERNAL_PORT)/health || exit 1" \
+ --health-interval=1m --health-retries=3 \
+ --health-start-period=30s --health-timeout=10s \
+ -d $(call get_image_name)
+ @sleep 2
+ @echo "✅ Container started"
+ @echo "🔍 Health check status:"
+ @$(CONTAINER_RUNTIME) inspect $(PROJECT_NAME) --format='{{.State.Health.Status}}' 2>/dev/null || echo "No health check configured"
+
+container-push: container-check-image
+ @echo "📤 Preparing to push image..."
+ @# For Podman, we need to remove localhost/ prefix for push
+ @if [ "$(CONTAINER_RUNTIME)" = "podman" ]; then \
+ actual_image=$$($(CONTAINER_RUNTIME) images --format "{{.Repository}}:{{.Tag}}" | grep -E "$(IMAGE_BASE):$(IMAGE_TAG)" | head -1); \
+ if echo "$$actual_image" | grep -q "^localhost/"; then \
+ echo "🏷️ Tagging for push (removing localhost/ prefix)..."; \
+ $(CONTAINER_RUNTIME) tag "$$actual_image" $(IMAGE_PUSH); \
+ fi; \
+ fi
+ $(CONTAINER_RUNTIME) push $(IMAGE_PUSH)
+ @echo "✅ Pushed: $(IMAGE_PUSH)"
+
+container-check-image:
+ @echo "🔍 Checking for image..."
+ @if [ "$(CONTAINER_RUNTIME)" = "podman" ]; then \
+ if ! $(CONTAINER_RUNTIME) image exists $(IMAGE_LOCAL) 2>/dev/null && \
+ ! $(CONTAINER_RUNTIME) image exists $(IMAGE_BASE):$(IMAGE_TAG) 2>/dev/null; then \
+ echo "❌ Image not found: $(IMAGE_LOCAL)"; \
+ echo "💡 Run 'make container-build' first"; \
+ exit 1; \
+ fi; \
+ else \
+ if ! $(CONTAINER_RUNTIME) images -q $(IMAGE_LOCAL) 2>/dev/null | grep -q . && \
+ ! $(CONTAINER_RUNTIME) images -q $(IMAGE_BASE):$(IMAGE_TAG) 2>/dev/null | grep -q .; then \
+ echo "❌ Image not found: $(IMAGE_LOCAL)"; \
+ echo "💡 Run 'make container-build' first"; \
+ exit 1; \
+ fi; \
+ fi
+ @echo "✅ Image found"
+
+container-stop:
+ @echo "🛑 Stopping container..."
+ -$(CONTAINER_RUNTIME) stop $(PROJECT_NAME) 2>/dev/null || true
+ -$(CONTAINER_RUNTIME) rm $(PROJECT_NAME) 2>/dev/null || true
+ @echo "✅ Container stopped and removed"
+
+container-logs:
+ @echo "📜 Streaming logs (Ctrl+C to exit)..."
+ $(CONTAINER_RUNTIME) logs -f $(PROJECT_NAME)
+
+container-shell:
+ @echo "🔧 Opening shell in container..."
+ @if ! $(CONTAINER_RUNTIME) ps -q -f name=$(PROJECT_NAME) | grep -q .; then \
+ echo "❌ Container $(PROJECT_NAME) is not running"; \
+ echo "💡 Run 'make container-run' first"; \
+ exit 1; \
+ fi
+ @$(CONTAINER_RUNTIME) exec -it $(PROJECT_NAME) /bin/bash 2>/dev/null || \
+ $(CONTAINER_RUNTIME) exec -it $(PROJECT_NAME) /bin/sh
+
+container-health:
+ @echo "🏥 Checking container health..."
+ @if ! $(CONTAINER_RUNTIME) ps -q -f name=$(PROJECT_NAME) | grep -q .; then \
+ echo "❌ Container $(PROJECT_NAME) is not running"; \
+ exit 1; \
+ fi
+ @echo "Status: $$($(CONTAINER_RUNTIME) inspect $(PROJECT_NAME) --format='{{.State.Health.Status}}' 2>/dev/null || echo 'No health check')"
+ @echo "Logs:"
+ @$(CONTAINER_RUNTIME) inspect $(PROJECT_NAME) --format='{{range .State.Health.Log}}{{.Output}}{{end}}' 2>/dev/null || true
+
+container-build-multi:
+ @echo "🔨 Building multi-architecture image..."
+ @if [ "$(CONTAINER_RUNTIME)" = "docker" ]; then \
+ if ! docker buildx inspect $(PROJECT_NAME)-builder >/dev/null 2>&1; then \
+ echo "📦 Creating buildx builder..."; \
+ docker buildx create --name $(PROJECT_NAME)-builder; \
+ fi; \
+ docker buildx use $(PROJECT_NAME)-builder; \
+ docker buildx build \
+ --platform=linux/amd64,linux/arm64 \
+ -f $(CONTAINER_FILE) \
+ --tag $(IMAGE_BASE):$(IMAGE_TAG) \
+ --push \
+ .; \
+ elif [ "$(CONTAINER_RUNTIME)" = "podman" ]; then \
+ echo "📦 Building manifest with Podman..."; \
+ $(CONTAINER_RUNTIME) build --platform=linux/amd64,linux/arm64 \
+ -f $(CONTAINER_FILE) \
+ --manifest $(IMAGE_BASE):$(IMAGE_TAG) \
+ .; \
+ echo "💡 To push: podman manifest push $(IMAGE_BASE):$(IMAGE_TAG)"; \
+ else \
+ echo "❌ Multi-arch builds require Docker buildx or Podman"; \
+ exit 1; \
+ fi
+
+# Helper targets for debugging image issues
+image-list:
+ @echo "📋 Images matching $(IMAGE_BASE):"
+ @$(CONTAINER_RUNTIME) images --format "table {{.Repository}}:{{.Tag}}\t{{.ID}}\t{{.Created}}\t{{.Size}}" | \
+ grep -E "(IMAGE|$(IMAGE_BASE))" || echo "No matching images found"
+
+image-clean:
+ @echo "🧹 Removing all $(IMAGE_BASE) images..."
+ @$(CONTAINER_RUNTIME) images --format "{{.Repository}}:{{.Tag}}" | \
+ grep -E "(localhost/)?$(IMAGE_BASE)" | \
+ xargs $(XARGS_FLAGS) $(CONTAINER_RUNTIME) rmi -f 2>/dev/null
+ @echo "✅ Images cleaned"
+
+# Fix image naming issues
+image-retag:
+ @echo "🏷️ Retagging images for consistency..."
+ @if [ "$(CONTAINER_RUNTIME)" = "podman" ]; then \
+ if $(CONTAINER_RUNTIME) image exists $(IMAGE_BASE):$(IMAGE_TAG) 2>/dev/null; then \
+ $(CONTAINER_RUNTIME) tag $(IMAGE_BASE):$(IMAGE_TAG) $(IMAGE_LOCAL) 2>/dev/null || true; \
+ fi; \
+ else \
+ if $(CONTAINER_RUNTIME) images -q $(IMAGE_LOCAL) 2>/dev/null | grep -q .; then \
+ $(CONTAINER_RUNTIME) tag $(IMAGE_LOCAL) $(IMAGE_BASE):$(IMAGE_TAG) 2>/dev/null || true; \
+ fi; \
+ fi
+ @echo "✅ Images retagged" # This always shows success
+
+# Runtime switching helpers
+use-docker:
+ @echo "export CONTAINER_RUNTIME=docker"
+ @echo "💡 Run: export CONTAINER_RUNTIME=docker"
+
+use-podman:
+ @echo "export CONTAINER_RUNTIME=podman"
+ @echo "💡 Run: export CONTAINER_RUNTIME=podman"
+
+show-runtime:
+ @echo "Current runtime: $(CONTAINER_RUNTIME)"
+ @echo "Detected from: $$(command -v $(CONTAINER_RUNTIME) || echo 'not found')" # Added
+ @echo "To switch: make use-docker or make use-podman"
+
+
+
+# =============================================================================
+# Targets
+# =============================================================================
+
+.PHONY: venv
+venv:
+ @rm -Rf "$(VENV_DIR)"
+ @test -d "$(VENVS_DIR)" || mkdir -p "$(VENVS_DIR)"
+ @python3 -m venv "$(VENV_DIR)"
+ @/bin/bash -c "source $(VENV_DIR)/bin/activate && python3 -m pip install --upgrade pip setuptools pdm uv"
+ @echo -e "✅ Virtual env created.\n💡 Enter it with:\n . $(VENV_DIR)/bin/activate\n"
+
+.PHONY: install
+install: venv
+ $(foreach bin,$(REQUIRED_BUILD_BINS), $(if $(shell command -v $(bin) 2> /dev/null),,$(error Couldn't find `$(bin)`)))
+ @/bin/bash -c "source $(VENV_DIR)/bin/activate && python3 -m uv pip install ."
+
+.PHONY: install-dev
+install-dev: venv
+ $(foreach bin,$(REQUIRED_BUILD_BINS), $(if $(shell command -v $(bin) 2> /dev/null),,$(error Couldn't find `$(bin)`)))
+ @/bin/bash -c "source $(VENV_DIR)/bin/activate && python3 -m uv pip install -e .[dev]"
+
+.PHONY: install-editable
+install-editable: venv
+ $(foreach bin,$(REQUIRED_BUILD_BINS), $(if $(shell command -v $(bin) 2> /dev/null),,$(error Couldn't find `$(bin)`)))
+ @/bin/bash -c "source $(VENV_DIR)/bin/activate && python3 -m uv pip install -e .[dev]"
+
+.PHONY: uninstall
+uninstall:
+ pip uninstall $(PACKAGE_NAME)
+
+.PHONY: dist
+dist: clean ## Build wheel + sdist into ./dist
+ @test -d "$(VENV_DIR)" || $(MAKE) --no-print-directory venv
+ @/bin/bash -eu -c "\
+ source $(VENV_DIR)/bin/activate && \
+ python3 -m pip install --quiet --upgrade pip build && \
+ python3 -m build"
+ @echo '🛠 Wheel & sdist written to ./dist'
+
+.PHONY: wheel
+wheel: ## Build wheel only
+ @test -d "$(VENV_DIR)" || $(MAKE) --no-print-directory venv
+ @/bin/bash -eu -c "\
+ source $(VENV_DIR)/bin/activate && \
+ python3 -m pip install --quiet --upgrade pip build && \
+ python3 -m build -w"
+ @echo '🛠 Wheel written to ./dist'
+
+.PHONY: sdist
+sdist: ## Build source distribution only
+ @test -d "$(VENV_DIR)" || $(MAKE) --no-print-directory venv
+ @/bin/bash -eu -c "\
+ source $(VENV_DIR)/bin/activate && \
+ python3 -m pip install --quiet --upgrade pip build && \
+ python3 -m build -s"
+ @echo '🛠 Source distribution written to ./dist'
+
+.PHONY: verify
+verify: dist ## Build, run metadata & manifest checks
+ @/bin/bash -c "source $(VENV_DIR)/bin/activate && \
+ twine check dist/* && \
+ check-manifest && \
+ pyroma -d ."
+ @echo "✅ Package verified - ready to publish."
+
+.PHONY: lint-fix
+lint-fix:
+ @# Handle file arguments
+ @target_file="$(word 2,$(MAKECMDGOALS))"; \
+ if [ -n "$$target_file" ] && [ "$$target_file" != "" ]; then \
+ actual_target="$$target_file"; \
+ else \
+ actual_target="$(TARGET)"; \
+ fi; \
+ for target in $$(echo $$actual_target); do \
+ if [ ! -e "$$target" ]; then \
+ echo "❌ File/directory not found: $$target"; \
+ exit 1; \
+ fi; \
+ done; \
+ echo "🔧 Fixing lint issues in $$actual_target..."; \
+ $(MAKE) --no-print-directory black TARGET="$$actual_target"; \
+ $(MAKE) --no-print-directory ruff-fix TARGET="$$actual_target"
+
+.PHONY: lint-check
+lint-check:
+ @# Handle file arguments
+ @target_file="$(word 2,$(MAKECMDGOALS))"; \
+ if [ -n "$$target_file" ] && [ "$$target_file" != "" ]; then \
+ actual_target="$$target_file"; \
+ else \
+ actual_target="$(TARGET)"; \
+ fi; \
+ for target in $$(echo $$actual_target); do \
+ if [ ! -e "$$target" ]; then \
+ echo "❌ File/directory not found: $$target"; \
+ exit 1; \
+ fi; \
+ done; \
+ echo "🔧 Fixing lint issues in $$actual_target..."; \
+ $(MAKE) --no-print-directory black-check TARGET="$$actual_target"; \
+ $(MAKE) --no-print-directory ruff-check TARGET="$$actual_target"
+
+.PHONY: lock
+lock:
+ $(foreach bin,$(REQUIRED_BUILD_BINS), $(if $(shell command -v $(bin) 2> /dev/null),,$(error Couldn't find `$(bin)`. Please run `make init`)))
+ uv lock
+
+.PHONY: test
+test:
+ pytest tests
+
+.PHONY: serve
+serve:
+ @echo "Implement me."
+
+.PHONY: build
+build:
+ @$(MAKE) container-build
+
+.PHONY: start
+start:
+ @$(MAKE) container-run
+
+.PHONY: stop
+stop:
+ @$(MAKE) container-stop
+
+.PHONY: clean
+clean:
+ find . -type f -name '*.py[co]' -delete -o -type d -name __pycache__ -delete
+ rm -rf *.egg-info .pytest_cache tests/.pytest_cache build dist .ruff_cache .coverage
+
+.PHONY: help
+help:
+ @echo "This Makefile is offered for convenience."
+ @echo ""
+ @echo "The following are the valid targets for this Makefile:"
+ @echo "...install Install package from sources"
+ @echo "...install-dev Install package from sources with dev packages"
+ @echo "...install-editable Install package from sources in editabled mode"
+ @echo "...uninstall Uninstall package"
+ @echo "...dist Clean-build wheel *and* sdist into ./dist"
+ @echo "...wheel Build wheel only"
+ @echo "...sdist Build source distribution only"
+ @echo "...verify Build + twine + check-manifest + pyroma (no upload)"
+ @echo "...serve Start API server locally"
+ @echo "...build Build API server container image"
+ @echo "...start Start the API server container"
+ @echo "...start Stop the API server container"
+ @echo "...lock Lock dependencies"
+ @echo "...lint-fix Check and fix lint errors"
+ @echo "...lint-check Check for lint errors"
+ @echo "...test Run all tests"
+ @echo "...clean Remove all artifacts and builds"
diff --git a/plugins/external/cedar/README.md b/plugins/external/cedar/README.md
new file mode 100644
index 000000000..4d99b6e09
--- /dev/null
+++ b/plugins/external/cedar/README.md
@@ -0,0 +1,332 @@
+# Cedar RBAC Plugin for MCP Gateway
+
+> Author: Shriti Priya
+> Version: 0.1.0
+
+A plugin that evaluates Cedar policies and user-friendly custom-DSL policies on incoming requests, allowing or denying them with RBAC-based decisions expressed in the Cedar language and enforced via the `cedarpy` library.
+
+## Cedar Language
+
+Cedar is an open-source language and specification for defining and evaluating permission policies. It allows you to specify who is authorized to perform which actions within your application.
+For more details: https://www.cedarpolicy.com/en
+
+## RBAC
+
+Role-based access control (RBAC) is an authorization model where permissions are attached to roles (like admin, manager, viewer), and users are assigned to those roles instead of getting permissions directly. This makes access control easier to manage and reason about in larger systems.
+
+## CedarPolicyPlugin
+
+This plugin supports two ways of defining policies in the configuration file, controlled by the `policy_lang` parameter.
+
+### Cedar Mode
+
+`plugins/external/cedar/resources/config.yaml`
+
+When `policy_lang` is set to `cedar`, policies are written in the Cedar language under the `policy` key, using the following structure:
+
+```yaml
+ - id: allow-employee-basic-access
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"get_leave_balance" #tool name
+ - Action::"request_certificate"
+ resource:
+ - Server::"askHR" # mcp-server name
+ - Agent::"employee_agent" # agent name
+```
+1. **id** is a unique string identifier for the policy.
+2. **effect** can be either Permit or Forbid and determines whether matching requests are allowed or denied.
+3. **principal** specifies who the policy applies to; here it targets the employee role.
+4. **action** lists one or more tools that the principal is attempting to invoke. It can also include actions that control output visibility, allowing a role to see either the full output or a redacted version.
+5. **resource** lists the servers, agents, prompts and resources that the actions can target.
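+
+Internally, the plugin compiles each YAML policy into Cedar policy text, emitting one `permit` statement per listed resource. For the example above, the generated text looks roughly like this (an illustrative sketch of the output of `_yamlpolicy2text` in `cedarpolicyplugin/plugin.py`):
+
+```
+permit(
+    principal == Role::"employee",
+    action in [Action::"get_leave_balance", Action::"request_certificate"],
+    resource == Server::"askHR"
+);
+
+permit(
+    principal == Role::"employee",
+    action in [Action::"get_leave_balance", Action::"request_certificate"],
+    resource == Agent::"employee_agent"
+);
+```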
+
+### Custom DSL mode
+
+`plugins/external/cedar/examples/config-dsl.yaml`
+
+When `policy_lang` is set to `custom_dsl`, policies are written in a compact, human-readable mini-language as a YAML multiline string. This allows non-experts to define role, resource, and action in a single, easy-to-scan block, using the syntax below.
+
+## Syntax
+
+Policies use the following basic pattern (angle brackets denote placeholders):
+
+```
+[role:<role>:<server|agent|prompt|resource>/<resource-name>]
+<action>
+<action>
+```
+
+For example:
+
+```yaml
+  [role:hr:server/hr_tool]
+  update_payroll
+```
+
+In this example, the role is `hr`, the resource category is `server`, and the resource name is `hr_tool`. The line `update_payroll` is the action being authorized for that role-resource pair.
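+
+A single `policy` string may contain several such blocks, one per role/resource pair, each followed by the actions it permits. An illustrative sketch (matching the block syntax parsed by `cedarpolicyplugin/plugin.py`):
+
+```yaml
+policy: |
+  [role:hr:server/hr_tool]
+  update_payroll
+  view_performance
+
+  [role:employee:server/askHR]
+  get_leave_balance
+  request_certificate
+```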
+
+
+## Configuration
+
+1. **policy_lang**: Specifies the policy language used, `cedar` or `custom_dsl`.
+2. **policy_output_keywords**: Defines keywords for output views such as `view_full_output` and `view_redacted_output` which can be used in policies or applications to control the output visibility.
+3. **policy_redaction_spec**: Contains a regex pattern for redaction; in this case the pattern matches quoted currency-like strings (e.g., `"$123,456"`), protecting sensitive information in policy output (see the sketch after this list).
+4. **policy**: Defines the RBAC policies themselves.
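+
+For instance, the default redaction pattern above matches quoted currency-like strings. A quick, illustrative check in plain Python (the `"[REDACTED]"` replacement text is an assumption; the plugin's actual redaction marker may differ):
+
+```python
+import re
+
+# Redaction pattern from policy_redaction_spec, as a raw string
+pattern = r'"\$\d{1,}(,\d{1,})*"'
+
+text = 'Payroll response: "$123,456" for Q3.'
+# Replace any matched currency string with a placeholder
+print(re.sub(pattern, '"[REDACTED]"', text))
+# Output: Payroll response: "[REDACTED]" for Q3.
+```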
+
+## Installation
+
+1. In the folder `plugins/external/cedar`, copy `.env.template` to a `.env` file.
+2. If you set `policy_lang` to `cedar`, add the plugin configuration to `plugins/external/cedar/resources/plugins/config.yaml`:
+
+```yaml
+plugins:
+ - name: "CedarPolicyPlugin"
+ kind: "cedarpolicyplugin.plugin.CedarPolicyPlugin"
+ description: "A plugin that does policy decision and enforcement using cedar"
+ version: "0.1.0"
+ author: "Shriti Priya"
+ hooks: ["prompt_pre_fetch", "prompt_post_fetch", "tool_pre_invoke", "tool_post_invoke", "resource_pre_fetch", "resource_post_fetch"]
+ tags: ["plugin"]
+ mode: "enforce" # enforce | permissive | disabled
+ priority: 150
+ conditions:
+ # Apply to specific tools/servers
+ - server_ids: [] # Apply to all servers
+ tenant_ids: [] # Apply to all tenants
+ config:
+ policy_lang: cedar
+ policy_output_keywords:
+ view_full: "view_full_output"
+ view_redacted: "view_redacted_output"
+ policy_redaction_spec:
+ pattern: '"\$\d{1,}(,\d{1,})*"' # provide regex, if none, then replace all
+ policy:
+ ### Tool invocation policies ###
+ - id: allow-employee-basic-access
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"get_leave_balance" #tool name
+ - Action::"request_certificate"
+ resource:
+ - Server::"askHR" # mcp-server name
+ - Agent::"employee_agent" # agent name
+
+ - id: allow-manager-full-access
+ effect: Permit
+ principal: Role::"manager"
+ action:
+ - Action::"get_leave_balance"
+ - Action::"approve_leave"
+ - Action::"promote_employee"
+ - Action::"view_performance"
+ - Action::"view_full_output"
+ resource:
+ - Agent::"manager_agent"
+ - Server::"payroll_tool"
+
+ - id: allow-hr-hr_tool
+ effect: Permit
+ principal: Role::"hr"
+ action:
+ - Action::"update_payroll"
+ - Action::"view_performance"
+ - Action::"view_full_output"
+ resource: Server::"hr_tool"
+
+ - id: redact-non-manager-views
+ effect: Permit
+ principal: Role::"employee"
+ action: Action::"view_redacted_output"
+ resource:
+ - Server::"payroll_tool"
+ - Agent::"manager_agent"
+ - Server::"askHR"
+
+ ### Resource invocation policies ###
+ - id: allow-admin-resources # policy for resources
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Resource::""https://example.com/data"" #Resource::
+
+ - id: allow-employee-redacted-resources # policy for resources
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Resource::""https://example.com/data"" #Resource::
+
+ ### Prompt invocation policies ###
+ - id: allow-admin-prompts # policy for resources
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Prompt::"judge_prompts" #Prompt::
+
+
+ - id: allow-employee-redacted-prompts # policy for resources
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Prompt::"judge_prompts" #Prompt::
+
+```
+
+#### Tool Invocation Policies
+
+RBAC policies for the `tool_pre_invoke` and `tool_post_invoke` hooks use the following structure.
+Example:
+```yaml
+ - id: allow-employee-basic-access
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"get_leave_balance" #tool name
+ - Action::"request_certificate"
+ resource:
+ - Server::"askHR" # mcp-server name
+ - Agent::"employee_agent" # agent name
+```
+
+Here, a user with role `employee` (**Role**) is only allowed to invoke the tools `get_leave_balance` and `request_certificate` (**Action**) on the MCP server `askHR` (**Server**) or the agent `employee_agent` (**Agent**).
+
+Two more policies defined for tools:
+
+```yaml
+
+ - id: allow-hr-hr_tool
+ effect: Permit
+ principal: Role::"hr"
+ action:
+ - Action::"update_payroll"
+ - Action::"view_performance"
+ - Action::"view_full_output"
+ resource: Server::"hr_tool"
+
+ - id: redact-non-manager-views
+ effect: Permit
+ principal: Role::"employee"
+ action: Action::"view_redacted_output"
+ resource:
+ - Server::"payroll_tool"
+ - Agent::"manager_agent"
+ - Server::"askHR"
+```
+
+
+Actions such as `view_full_output` and `view_redacted_output` control the
+level of output visible to the user. In the first policy above, a user with role `hr` is allowed to view the full output of tools such as `update_payroll`. Similarly, in the second policy, a user with role `employee` is only allowed to view the redacted output of the listed tools.
+
+
+#### Prompt Invocation Policies
+
+
+```yaml
+
+ ### Prompt invocation policies ###
+ - id: allow-admin-prompts # policy for resources
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Prompt::"judge_prompts" #Prompt::
+
+
+ - id: allow-employee-redacted-prompts # policy for resources
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Prompt::"judge_prompts" #Prompt::
+```
+
+Here, given the prompt template `judge_prompts`, a user with role `admin` is allowed to view the full prompt, while a user with role `employee` can only see a redacted version of it.
+
+
+#### Resource Invocation Policies
+
+**NOTE:** Don't confuse the word *resource* in Cedar with a *resource* in MCP ContextForge.
+
+```yaml
+
+ - id: allow-admin-resources # policy for resources
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Resource::"https://example.com/data" #Resource::
+
+ - id: allow-employee-redacted-resources # policy for resources
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Resource::"https://example.com/data" #Resource::
+```
+
+Here, the `Resource` entity type is used when the resource hooks are invoked. In the policy above,
+a user with role `admin` is allowed to view the full output of the URI `https://example.com/data`, while a user with role `employee` can only see a redacted version of the resource output.
+
+
+#### policy_output_keywords
+
+```
+ view_full: "view_full_output"
+ view_redacted: "view_redacted_output"
+```
+
+This mapping declares the keywords a policy should use whenever it wants to control the output visibility of
+any tool, prompt, resource, or agent in the MCP gateway. CedarPolicyPlugin uses the mapping internally to redact or fully display the tool, prompt, or resource response in the post hooks.
+
+
+
+
+3. Now that the policy and plugin configuration are defined in the `resources/config.yaml` file, the next step is to build and run the plugin as an external MCP server:
+
+* `make venv`: This will create a virtual environment to develop or build your plugin.
+* `make install && make install-dev`: To install all the required libraries in the environment.
+* `make build`: This will build a Docker image named `mcpgateway/cedarpolicyplugin`.
+* `make start`: This will start the cedarpolicyplugin container.
+
+Log output like the following confirms that the container is running:
+```
+WARNING:mcpgateway.observability:OpenTelemetry not installed. Proceeding with graceful fallbacks.
+INFO: Started server process [9]
+INFO: Waiting for application startup.
+INFO: Application startup complete.
+INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
+INFO: 127.0.0.1:55196 - "GET /health HTTP/1.1" 200 OK
+
+```
+
+4. Finally, enable the plugin framework by setting `PLUGINS_ENABLED=true` and add the following block to `plugins/config.yaml`. This registers CedarPolicyPlugin as an external MCP server:
+
+ ```yaml
+ - name: "CedarPolicyPlugin"
+ kind: "external"
+ priority: 10 # adjust the priority
+ mcp:
+ proto: STREAMABLEHTTP
+ url: http://127.0.0.1:8000/mcp
+ ```
+
+## Testing
+
+There is a set of test cases in the `cedar/tests` folder. The file `test_cedarpolicyplugin.py` contains detailed test cases for the RBAC policies enforced on tools, prompts, and resources.
+Run `make test` to execute them all.
+
+
+
+## Difference from OPAPlugin
+
+The OPA plugin runs an OPA server to enforce policies, whereas the Cedar plugin uses the `cedarpy` library and performs policy enforcement locally without requiring an external service.
+The OPA plugin requires users to know `rego` to define policies, while Cedar plugin policies can be written either in `cedar` or in the user-friendly `custom_dsl` language.
+Currently, the Cedar plugin enforces RBAC policies; it could be extended to enforce ABAC policies as well.
diff --git a/plugins/external/cedar/cedarpolicyplugin/__init__.py b/plugins/external/cedar/cedarpolicyplugin/__init__.py
new file mode 100644
index 000000000..52cdda086
--- /dev/null
+++ b/plugins/external/cedar/cedarpolicyplugin/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""MCP Gateway CedarPolicyPlugin Plugin - A plugin that does policy decision and enforcement using cedar.
+
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Shriti Priya
+
+"""
+
+import importlib.metadata
+
+# Package version
+try:
+ __version__ = importlib.metadata.version("cedarpolicyplugin")
+except Exception:
+ __version__ = "0.1.0"
+
+__author__ = "Shriti Priya"
+__copyright__ = "Copyright 2025"
+__license__ = "Apache 2.0"
+__description__ = "A plugin that does policy decision and enforcement using cedar"
+__url__ = "https://ibm.github.io/mcp-context-forge/"
+__download_url__ = "https://github.com/IBM/mcp-context-forge"
+__packages__ = ["cedarpolicyplugin"]
diff --git a/plugins/external/cedar/cedarpolicyplugin/plugin-manifest.yaml b/plugins/external/cedar/cedarpolicyplugin/plugin-manifest.yaml
new file mode 100644
index 000000000..38ec3ceea
--- /dev/null
+++ b/plugins/external/cedar/cedarpolicyplugin/plugin-manifest.yaml
@@ -0,0 +1,9 @@
+description: "A plugin that does policy decision and enforcement using cedar"
+author: "Shriti Priya"
+version: "0.1.0"
+available_hooks:
+ - "prompt_pre_hook"
+ - "prompt_post_hook"
+ - "tool_pre_hook"
+ - "tool_post_hook"
+default_configs:
diff --git a/plugins/external/cedar/cedarpolicyplugin/plugin.py b/plugins/external/cedar/cedarpolicyplugin/plugin.py
new file mode 100644
index 000000000..dd98f5f1b
--- /dev/null
+++ b/plugins/external/cedar/cedarpolicyplugin/plugin.py
@@ -0,0 +1,678 @@
+# -*- coding: utf-8 -*-
+"""A plugin that does policy decision and enforcement using cedar.
+
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Shriti Priya
+
+This module loads configurations for plugins.
+"""
+
+# Standard
+from enum import Enum
+import re
+from typing import Any
+from urllib.parse import urlparse
+
+# Third-Party
+from cedarpolicyplugin.schema import CedarConfig, CedarInput
+from cedarpy import AuthzResult, Decision, is_authorized
+
+# First-Party
+from mcpgateway.plugins.framework import (
+ Plugin,
+ PluginConfig,
+ PluginContext,
+ PluginError,
+ PluginErrorModel,
+ PluginViolation,
+ PromptPosthookPayload,
+ PromptPosthookResult,
+ PromptPrehookPayload,
+ PromptPrehookResult,
+ ToolPostInvokePayload,
+ ToolPostInvokeResult,
+ ToolPreInvokePayload,
+ ToolPreInvokeResult,
+)
+from mcpgateway.plugins.framework.hooks.resources import ResourcePostFetchPayload, ResourcePostFetchResult, ResourcePreFetchPayload, ResourcePreFetchResult
+from mcpgateway.services.logging_service import LoggingService
+
+# Initialize logging service first
+logging_service = LoggingService()
+logger = logging_service.get_logger(__name__)
+
+
+class CedarCodes(str, Enum):
+ """CedarCodes implementation."""
+
+ ALLOW_CODE = "ALLOW"
+ DENIAL_CODE = "DENY"
+ AUDIT_CODE = "AUDIT"
+ REQUIRES_HUMAN_APPROVAL_CODE = "REQUIRES_APPROVAL"
+
+
+class CedarResponseTemplates(str, Enum):
+ """CedarResponseTemplates implementation."""
+
+ CEDAR_REASON = "Cedar policy denied for {hook_type}"
+ CEDAR_DESC = "{hook_type} not allowed"
+
+
+class CedarResourceTemplates(str, Enum):
+ """CedarResourceTemplates implementation."""
+
+ SERVER = 'Server::"{resource_type}"'
+ AGENT = 'Agent::"{resource_type}"'
+ PROMPT = 'Prompt::"{resource_type}"'
+ RESOURCE = 'Resource::"{resource_type}"'
+
+
+class CedarErrorCodes(str, Enum):
+ """CedarPolicyPlugin errors"""
+
+ UNSUPPORTED_RESOURCE_TYPE = "Unspecified resource types, accepted resources server, prompt, agent and resource"
+ UNSPECIFIED_USER_ROLE = "User role is not defined"
+ UNSPECIFIED_POLICY = "No policy has been provided"
+ UNSPECIFIED_OUTPUT_ACTION = "Unspecified output action in policy configuration"
+ UNSPECIFIED_SERVER = "Unspecified server for tool request"
+ UNSUPPORTED_CONTENT_TYPE = "Unsupported content type"
+
+
+class CedarPolicyPlugin(Plugin):
+ """A plugin that does policy decision and enforcement using cedar."""
+
+ def __init__(self, config: PluginConfig):
+ """Entry init block for plugin.
+
+ Args:
+ logger: logger that the skill can make use of
+ config: the skill configuration
+ """
+ super().__init__(config)
+ self.cedar_config = CedarConfig.model_validate(self._config.config)
+ self.cedar_context_key = "cedar_policy_context"
+ self.jwt_info = {}
+ logger.info(f"CedarPolicyPlugin initialised with configuration {self.cedar_config}")
+
+ def _set_jwt_info(self, user_role_mapping: dict) -> None:
+ """Sets user role mapping information from jwt tokens
+
+ Args:
+ info(dict): with user mappings
+ """
+ self.jwt_info["users"] = user_role_mapping
+
+ def _extract_payload_key(self, content: Any = None, key: str = None, result: dict[str, list] = None) -> None:
+ """Function to extract values of passed in key in the payload recursively based on if the content is of type list, dict
+ str or pydantic structure. The value is inplace updated in result.
+
+ Args:
+ content: The content of post hook results.
+ key: The key for which value needs to be extracted for.
+ result: A list of all the values for a key.
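+
+ Example (illustrative):
+ >>> result = {"text": []}
+ >>> plugin._extract_payload_key([{"text": "hello"}], "text", result) # doctest: +SKIP
+ >>> result # doctest: +SKIP
+ {'text': ['hello']}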
+ """
+ if isinstance(content, list):
+ for element in content:
+ if isinstance(element, dict) and key in element:
+ self._extract_payload_key(element, key, result)
+ elif isinstance(content, dict):
+ if key in content or hasattr(content, key):
+ result[key].append(content[key])
+ elif isinstance(content, str):
+ result[key].append(content)
+ elif hasattr(content, key):
+ result[key].append(getattr(content, key))
+ else:
+ logger.error(f"{CedarErrorCodes.UNSUPPORTED_CONTENT_TYPE.value}: {type(content)}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSUPPORTED_CONTENT_TYPE.value, plugin_name="CedarPolicyPlugin"))
+
+ def _evaluate_policy(self, request: dict, policy_expr: str) -> str:
+ """Function that evaluates and enforce cedar policy using is_authorized function in cedarpy library
+ Args:
+ request(dict): The request dict consisting of principal, action, resource or context keys.
+ policy_exp(str): The policy expression to evaluate the request on
+
+ Returns:
+ decision(str): "Allow" or "Deny"
+ """
+ result: AuthzResult = is_authorized(request, policy_expr, [])
+ decision = "Allow" if result.decision == Decision.Allow else "Deny"
+ return decision
+
+ def _yamlpolicy2text(self, policies: list) -> str:
+ """Function to convert yaml representation of policies to text
+ Args:
+ policies(list): A list of cedar policies with dict values consisting of individual policies
+
+ Returns:
+ cedar_policy_text(str): string representation of policy
+ """
+ cedar_policy_text = ""
+ for policy in policies:
+ actions = policy["action"] if isinstance(policy["action"], list) else [policy["action"]]
+ resources = policy["resource"] if isinstance(policy["resource"], list) else [policy["resource"]]
+
+ for res in resources:
+ actions_str = ", ".join(actions)
+ cedar_policy_text += "permit(\n"
+ cedar_policy_text += f' principal == {policy["principal"]},\n'
+ cedar_policy_text += f" action in [{actions_str}],\n"
+ cedar_policy_text += f" resource == {res}\n"
+ cedar_policy_text += ");\n\n"
+
+ return cedar_policy_text
+
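+ # Illustrative example (not executed): _yamlpolicy2text renders the entry
+ # {"principal": 'Role::"hr"', "action": ['Action::"update_payroll"'],
+ # "resource": 'Server::"hr_tool"'} as
+ # permit(principal == Role::"hr", action in [Action::"update_payroll"],
+ # resource == Server::"hr_tool");
+ # Note that the effect field is not consulted: every policy is emitted as a
+ # permit statement.
+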
+ def _dsl2cedar(self, policy_string: str) -> str:
+ """Function to convert custom dsl representation of policies to cedar
+ Args:
+ policy_string: string representation of policies
+
+ Returns:
+ cedar_policy_text(str): string representation of policy
+ """
+ lines = [line.strip() for line in policy_string.splitlines() if line.strip()]
+ policies = []
+ current_role = None
+ current_actions = []
+ resource_category = None
+ resource_name = None
+
+ pattern = r"\[role:([A-Za-z0-9_]+):(resource|prompt|server|agent)/([^\]]+)\]"
+ for line in lines:
+ match = re.match(pattern, line)
+ if match:
+ if current_role and resource_category and resource_name and current_actions:
+ resource_category = resource_category.capitalize()
+ policies.append(
+ {
+ "id": f"allow-{current_role}-{resource_category}",
+ "effect": "Permit",
+ "principal": f'Role::"{current_role}"',
+ "action": [f'Action::"{a}"' for a in current_actions],
+ "resource": f'{resource_category}::"{resource_name}"',
+ }
+ )
+ current_role, resource_category, resource_name = match.groups()
+ current_actions = []
+ else:
+ current_actions.append(line)
+ if current_role and resource_category and resource_name and current_actions:
+ resource_category = resource_category.capitalize()
+ policies.append(
+ {
+ "id": f"allow-{current_role}-{resource_category}",
+ "effect": "Permit",
+ "principal": f'Role::"{current_role}"',
+ "action": [f'Action::"{a}"' for a in current_actions],
+ "resource": f'{resource_category}::"{resource_name}"',
+ }
+ )
+
+ cedar_policy_text = self._yamlpolicy2text(policies)
+ return cedar_policy_text
+
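+ # Illustrative example (not executed): _dsl2cedar parses the DSL block
+ # [role:hr:server/hr_tool]
+ # update_payroll
+ # into principal Role::"hr", action Action::"update_payroll" and resource
+ # Server::"hr_tool", then renders it through _yamlpolicy2text.
+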
+ def _preprocess_request(self, user: str, action: str, resource: str, hook_type: str) -> dict:
+ """Preprocess a request into the format that cedar accepts.
+
+ Args:
+ user(str): name of the user
+ action(str): action requested by the user
+ resource(str): resource requested by the user
+ hook_type(str): the hook type on which the invocation is made
+
+ Returns:
+ request(dict): dict representation of the CedarInput request as expected by cedar
+ """
+ user_role = ""
+ if hook_type in ["tool_post_invoke", "tool_pre_invoke"]:
+ resource_expr = CedarResourceTemplates.SERVER.format(resource_type=resource)
+ elif hook_type in ["agent_post_invoke", "agent_pre_invoke"]:
+ resource_expr = CedarResourceTemplates.AGENT.format(resource_type=resource)
+ elif hook_type in ["resource_post_fetch", "resource_pre_fetch"]:
+ resource_expr = CedarResourceTemplates.RESOURCE.format(resource_type=resource)
+ elif hook_type in ["prompt_post_fetch", "prompt_pre_fetch"]:
+ resource_expr = CedarResourceTemplates.PROMPT.format(resource_type=resource)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSUPPORTED_RESOURCE_TYPE.value}: {hook_type}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSUPPORTED_RESOURCE_TYPE.value, plugin_name="CedarPolicyPlugin"))
+
+ if len(self.jwt_info) > 0 and "users" in self.jwt_info:
+ user_role = self.jwt_info["users"].get(user)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_USER_ROLE.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_USER_ROLE.value, plugin_name="CedarPolicyPlugin"))
+
+ principal_expr = f'Role::"{user_role}"'
+ action_expr = f'Action::"{action}"'
+ request = CedarInput(principal=principal_expr, action=action_expr, resource=resource_expr, context={}).model_dump()
+ return request
+
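+ # Illustrative example (not executed): with a JWT mapping of {"carol": "hr"},
+ # _preprocess_request("carol", "update_payroll", "hr_tool", "tool_pre_invoke")
+ # yields {"principal": 'Role::"hr"', "action": 'Action::"update_payroll"',
+ # "resource": 'Server::"hr_tool"', "context": {}}.
+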
+ def _redact_output(self, payload: str, pattern: str) -> str:
+ """Function that redacts the output of prompt, tool or resource
+ NOTE: It's an extremely simple logic for redaction, could be replaced with more advanced
+ as per need.
+ Args:
+ payload(str): payload or output
+ pattern(str): regex expression to replace
+ Returns:
+ redacted_text(str): redacted representation of payload string
+ """
+ redacted_text = ""
+ if not pattern:
+ redacted_text = payload
+ elif pattern == "all":
+ redacted_text = "[REDACTED]"
+ else:
+ redacted_text = re.sub(pattern, "[REDACTED]", payload)
+ return redacted_text
+
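+ # Illustrative example (not executed):
+ # _redact_output("salary is $250,000", r"\$\d{1,}(,\d{1,})*") returns
+ # "salary is [REDACTED]"; pattern "all" replaces the whole payload and an
+ # empty pattern leaves it unchanged.
+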
+ async def prompt_pre_fetch(self, payload: PromptPrehookPayload, context: PluginContext) -> PromptPrehookResult:
+ """The plugin hook run before a prompt is retrieved and rendered.
+
+ Args:
+ payload: The prompt payload to be analyzed.
+ context: contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the prompt can proceed.
+ """
+ hook_type = "prompt_pre_fetch"
+ logger.info(f"Processing {hook_type} for '{payload.args}' with {len(payload.args) if payload.args else 0}")
+ logger.info(f"Processing context {context}")
+
+ if not payload.args:
+ return PromptPrehookResult()
+
+ policy = None
+ user = ""
+ result_full = None
+ result_redacted = None
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+
+ if self.cedar_config.policy_output_keywords:
+ view_full = self.cedar_config.policy_output_keywords.get("view_full", None)
+ view_redacted = self.cedar_config.policy_output_keywords.get("view_redacted", None)
+ if not view_full and not view_redacted:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value, plugin_name="CedarPolicyPlugin"))
+ if view_full and policy:
+ request = self._preprocess_request(user, view_full, payload.prompt_id, hook_type)
+ result_full = self._evaluate_policy(request, policy)
+ if view_redacted and policy:
+ request = self._preprocess_request(user, view_redacted, payload.prompt_id, hook_type)
+ result_redacted = self._evaluate_policy(request, policy)
+
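+ # Deny only when the policy permits neither the full nor the redacted view
+ # of this prompt for the requesting principal.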
+ if result_full == Decision.Deny.value and result_redacted == Decision.Deny.value:
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return PromptPrehookResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return PromptPrehookResult(continue_processing=True)
+
+ async def prompt_post_fetch(self, payload: PromptPosthookPayload, context: PluginContext) -> PromptPosthookResult:
+ """Plugin hook run after a prompt is rendered.
+
+ Args:
+ payload: The prompt payload to be analyzed.
+ context: Contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the prompt can proceed.
+ """
+ hook_type = "prompt_post_fetch"
+ logger.info(f"Processing {hook_type} for '{payload.result}'")
+ logger.info(f"Processing context {context}")
+
+ if not payload.result:
+ return PromptPosthookResult()
+
+ policy = None
+ user = ""
+ result_full = None
+ result_redacted = None
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+
+ if self.cedar_config.policy_output_keywords:
+ view_full = self.cedar_config.policy_output_keywords.get("view_full", None)
+ view_redacted = self.cedar_config.policy_output_keywords.get("view_redacted", None)
+ if not view_full and not view_redacted:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value, plugin_name="CedarPolicyPlugin"))
+ if view_full and policy:
+ request = self._preprocess_request(user, view_full, payload.prompt_id, hook_type)
+ result_full = self._evaluate_policy(request, policy)
+ if view_redacted and policy:
+ request = self._preprocess_request(user, view_redacted, payload.prompt_id, hook_type)
+ result_redacted = self._evaluate_policy(request, policy)
+
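+ # Precedence: a permitted full view passes the result through unmodified; a
+ # permitted redacted view rewrites each message via _redact_output;
+ # otherwise the request is denied.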
+ if result_full == Decision.Allow.value:
+ return PromptPosthookResult(continue_processing=True)
+
+ if result_redacted == Decision.Allow.value:
+ if payload.result.messages:
+ for index, message in enumerate(payload.result.messages):
+ value = self._redact_output(message.content.text, self.cedar_config.policy_redaction_spec.pattern)
+ payload.result.messages[index].content.text = value
+ return PromptPosthookResult(modified_payload=payload, continue_processing=True)
+
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return PromptPosthookResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return PromptPosthookResult(continue_processing=True)
+
+ async def tool_pre_invoke(self, payload: ToolPreInvokePayload, context: PluginContext) -> ToolPreInvokeResult:
+ """Plugin hook run before a tool is invoked.
+
+ Args:
+ payload: The tool payload to be analyzed.
+ context: Contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the tool can proceed.
+ """
+ hook_type = "tool_pre_invoke"
+ logger.info(f"Processing {hook_type} for '{payload.args}' with {len(payload.args) if payload.args else 0}")
+ logger.info(f"Processing context {context}")
+
+ if not payload.args:
+ return ToolPreInvokeResult()
+
+ policy = None
+ user = ""
+ server_id = ""
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+ server_id = context.global_context.server_id
+
+ if server_id:
+ request = self._preprocess_request(user, payload.name, server_id, hook_type)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_SERVER.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_SERVER.value, plugin_name="CedarPolicyPlugin"))
+
+ if policy:
+ decision = self._evaluate_policy(request, policy)
+ if decision == Decision.Deny.value:
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return ToolPreInvokeResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return ToolPreInvokeResult(continue_processing=True)
+
+ async def tool_post_invoke(self, payload: ToolPostInvokePayload, context: PluginContext) -> ToolPostInvokeResult:
+ """Plugin hook run after a tool is invoked.
+
+ Args:
+ payload: The tool result payload to be analyzed.
+ context: Contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the tool result should proceed.
+ """
+
+ hook_type = "tool_post_invoke"
+ logger.info(f"Processing {hook_type} for '{payload.result}' with {len(payload.result) if payload.result else 0}")
+ logger.info(f"Processing context {context}")
+
+ if not payload.result:
+ return ToolPostInvokeResult()
+
+ policy = None
+ user = ""
+ server_id = ""
+ result_full = None
+ result_redacted = None
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+ server_id = context.global_context.server_id
+
+ if self.cedar_config.policy_output_keywords:
+ view_full = self.cedar_config.policy_output_keywords.get("view_full", None)
+ view_redacted = self.cedar_config.policy_output_keywords.get("view_redacted", None)
+ if not view_full and not view_redacted:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value, plugin_name="CedarPolicyPlugin"))
+ if view_full and policy:
+ request = self._preprocess_request(user, view_full, server_id, hook_type)
+ result_full = self._evaluate_policy(request, policy)
+ if view_redacted and policy:
+ request = self._preprocess_request(user, view_redacted, server_id, hook_type)
+ result_redacted = self._evaluate_policy(request, policy)
+
+ # Evaluate Policy and based on that redact output
+ if policy:
+ request = self._preprocess_request(user, payload.name, server_id, hook_type)
+ result_action = self._evaluate_policy(request, policy)
+ # Check if full output view is allowed by policy
+ if result_action == Decision.Allow.value:
+ if result_full == Decision.Allow.value:
+ return ToolPostInvokeResult(continue_processing=True)
+ if result_redacted == Decision.Allow.value:
+ if payload.result and isinstance(payload.result, dict):
+ for key in payload.result:
+ if isinstance(payload.result[key], str):
+ value = self._redact_output(payload.result[key], self.cedar_config.policy_redaction_spec.pattern)
+ payload.result[key] = value
+ elif payload.result and isinstance(payload.result, str):
+ payload.result = self._redact_output(payload.result, self.cedar_config.policy_redaction_spec.pattern)
+ return ToolPostInvokeResult(continue_processing=True, modified_payload=payload)
+ # If the tool action itself is denied by the policy then deny the request
+ else:
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return ToolPostInvokeResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return ToolPostInvokeResult(continue_processing=True)
+
+ async def resource_pre_fetch(self, payload: ResourcePreFetchPayload, context: PluginContext) -> ResourcePreFetchResult:
+ """OPA Plugin hook that runs after resource pre fetch. This hook takes in payload and context and further evaluates rego
+ policies on the input by sending the request to opa server.
+
+ Args:
+ payload: The resource pre fetch input or payload to be analyzed.
+ context: Contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the resource input can be passed further.
+ """
+
+ hook_type = "resource_pre_fetch"
+ logger.info(f"Processing {hook_type} for '{payload.uri}'")
+ logger.info(f"Processing context {context}")
+
+ if not payload.uri:
+ return ResourcePreFetchResult()
+
+ try:
+ parsed = urlparse(payload.uri)
+ except Exception as e:
+ violation = PluginViolation(reason="Invalid URI", description=f"Could not parse resource URI: {e}", code="INVALID_URI", details={"uri": payload.uri, "error": str(e)})
+ return ResourcePreFetchResult(continue_processing=False, violation=violation)
+
+ # Check if URI has a scheme
+ if not parsed.scheme:
+ violation = PluginViolation(reason="Invalid URI format", description="URI must have a valid scheme (protocol)", code="INVALID_URI", details={"uri": payload.uri})
+ return ResourcePreFetchResult(continue_processing=False, violation=violation)
+
+ policy = None
+ user = ""
+ result_full = None
+ result_redacted = None
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+
+ if self.cedar_config.policy_output_keywords:
+ view_full = self.cedar_config.policy_output_keywords.get("view_full", None)
+ view_redacted = self.cedar_config.policy_output_keywords.get("view_redacted", None)
+ if not view_full and not view_redacted:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value, plugin_name="CedarPolicyPlugin"))
+ if view_full and policy:
+ request = self._preprocess_request(user, view_full, payload.uri, hook_type)
+ result_full = self._evaluate_policy(request, policy)
+ if view_redacted and policy:
+ request = self._preprocess_request(user, view_redacted, payload.uri, hook_type)
+ result_redacted = self._evaluate_policy(request, policy)
+
+ if result_full == Decision.Deny.value and result_redacted == Decision.Deny.value:
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return ResourcePreFetchResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return ResourcePreFetchResult(continue_processing=True)
+
+ async def resource_post_fetch(self, payload: ResourcePostFetchPayload, context: PluginContext) -> ResourcePostFetchResult:
+ """OPA Plugin hook that runs after resource post fetch. This hook takes in payload and context and further evaluates rego
+ policies on the output by sending the request to opa server.
+
+ Args:
+ payload: The resource post fetch output or payload to be analyzed.
+ context: Contextual information about the hook call.
+
+ Returns:
+ The result of the plugin's analysis, including whether the resource output can be passed further.
+ """
+ hook_type = "resource_post_fetch"
+ logger.info(f"Processing {hook_type} for '{payload.uri}'")
+ logger.info(f"Processing context {context}")
+
+ policy = None
+ user = ""
+ result_full = None
+ result_redacted = None
+
+ if self.cedar_config.policy_lang == "cedar":
+ if self.cedar_config.policy:
+ policy = self._yamlpolicy2text(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+ if self.cedar_config.policy_lang == "custom_dsl":
+ if self.cedar_config.policy:
+ policy = self._dsl2cedar(self.cedar_config.policy)
+ else:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_POLICY.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_POLICY.value, plugin_name="CedarPolicyPlugin"))
+
+ if context.global_context.user:
+ user = context.global_context.user
+
+ if self.cedar_config.policy_output_keywords:
+ view_full = self.cedar_config.policy_output_keywords.get("view_full", None)
+ view_redacted = self.cedar_config.policy_output_keywords.get("view_redacted", None)
+ if not view_full and not view_redacted:
+ logger.error(f"{CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value}")
+ raise PluginError(PluginErrorModel(message=CedarErrorCodes.UNSPECIFIED_OUTPUT_ACTION.value, plugin_name="CedarPolicyPlugin"))
+ if view_full and policy:
+ request = self._preprocess_request(user, view_full, payload.uri, hook_type)
+ result_full = self._evaluate_policy(request, policy)
+ if view_redacted and policy:
+ request = self._preprocess_request(user, view_redacted, payload.uri, hook_type)
+ result_redacted = self._evaluate_policy(request, policy)
+
+ if result_full == Decision.Allow.value:
+ return ResourcePostFetchResult(continue_processing=True)
+
+ if result_redacted == Decision.Allow.value:
+ if payload.content:
+ if hasattr(payload.content, "text"):
+ value = self._redact_output(payload.content.text, self.cedar_config.policy_redaction_spec.pattern)
+ payload.content.text = value
+ return ResourcePostFetchResult(modified_payload=payload, continue_processing=True)
+
+ violation = PluginViolation(
+ reason=CedarResponseTemplates.CEDAR_REASON.format(hook_type=hook_type),
+ description=CedarResponseTemplates.CEDAR_DESC.format(hook_type=hook_type),
+ code=CedarCodes.DENIAL_CODE,
+ details={},
+ )
+ return ResourcePostFetchResult(modified_payload=payload, violation=violation, continue_processing=False)
+ return ResourcePostFetchResult(continue_processing=True)
diff --git a/plugins/external/cedar/cedarpolicyplugin/schema.py b/plugins/external/cedar/cedarpolicyplugin/schema.py
new file mode 100644
index 000000000..9274e7674
--- /dev/null
+++ b/plugins/external/cedar/cedarpolicyplugin/schema.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+"""A schema file for OPA plugin.
+
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Shriti Priya
+
+This module defines the schema for the Cedar plugin.
+"""
+
+# Standard
+from typing import Any, Optional, Union
+
+# Third-Party
+from pydantic import BaseModel
+
+
+class CedarInput(BaseModel):
+ """BaseOPAInputKeys
+
+ Attributes:
+ user (str) : specifying the user
+ action (str): specifies the action
+ resource (str): specifies the resource
+ context (Optional[dict[str, Any]]) : context provided for policy evaluation.
+ """
+
+ principal: str = ""
+ action: str = ""
+ resource: str = ""
+ context: Optional[dict[Any, Any]] = None
+
+
+class Redaction(BaseModel):
+ """Configuration for Redaction
+
+ Attributes:
+ pattern (str) : regex pattern whose matches in the output are redacted; "all" redacts the entire output
+ """
+
+ pattern: str = ""
+
+
+class CedarConfig(BaseModel):
+ """Configuration for the Cedar plugin.
+
+ Attributes:
+ policy_lang (str) : "cedar" or "custom_dsl", depending on whether the policy is written in cedar or in the custom DSL
+ policy (Union[list, str]): RBAC policy defined
+ policy_output_keywords (dict): keywords used internally to check which output views (full or redacted) are allowed
+ policy_redaction_spec (Redaction) : pattern or other parameters provided to redact the output
+ """
+
+ policy_lang: str = "None"
+ policy: Optional[Union[list, str]] = None
+ policy_output_keywords: Optional[dict] = None
+ policy_redaction_spec: Optional[Redaction] = None
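+
+
+# Illustrative example (an assumption, not shipped configuration): constructing
+# a CedarConfig for the custom DSL mode.
+#
+# CedarConfig(
+#     policy_lang="custom_dsl",
+#     policy='[role:hr:server/hr_tool]\nupdate_payroll',
+#     policy_output_keywords={"view_full": "view_full_output", "view_redacted": "view_redacted_output"},
+#     policy_redaction_spec=Redaction(pattern="all"),
+# )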
diff --git a/plugins/external/cedar/examples/config-dsl.yaml b/plugins/external/cedar/examples/config-dsl.yaml
new file mode 100644
index 000000000..e1584c8af
--- /dev/null
+++ b/plugins/external/cedar/examples/config-dsl.yaml
@@ -0,0 +1,43 @@
+plugins:
+ - name: "CedarPolicyPlugin"
+ kind: "cedarpolicyplugin.plugin.CedarPolicyPlugin"
+ description: "A plugin that does policy decision and enforcement using cedar"
+ version: "0.1.0"
+ author: "Shriti Priya"
+ hooks: ["prompt_pre_fetch", "prompt_post_fetch", "tool_pre_invoke", "tool_post_invoke"]
+ tags: ["plugin"]
+ mode: "enforce" # enforce | permissive | disabled
+ priority: 150
+ conditions:
+ # Apply to specific tools/servers
+ - server_ids: [] # Apply to all servers
+ tenant_ids: [] # Apply to all tenants
+ config:
+ policy_lang: custom_dsl
+ policy_output_keywords:
+ view_full: "view_full_output"
+ view_redacted: "view_redacted_output"
+ policy_redaction_spec:
+ pattern: '"\$\d{1,}(,\d{1,})*"' # provide regex, if none, then replace all
+ policy: |
+ [role:hr:server/hr_tool]
+ update_payroll
+
+ [role:admin:resource/example.com/data]
+ view_full_output
+
+ [role:admin:prompt/judge_prompts]
+ view_full_output
+
+
+# Plugin directories to scan
+plugin_dirs:
+ - "cedarpolicyplugin"
+
+# Global plugin settings
+plugin_settings:
+ parallel_execution_within_band: true
+ plugin_timeout: 30
+ fail_on_plugin_error: false
+ enable_plugin_api: true
+ plugin_health_check_interval: 60
diff --git a/plugins/external/cedar/pyproject.toml b/plugins/external/cedar/pyproject.toml
new file mode 100644
index 000000000..334583a7a
--- /dev/null
+++ b/plugins/external/cedar/pyproject.toml
@@ -0,0 +1,99 @@
+# ----------------------------------------------------------------
+# 💡 Build system (PEP 517)
+# - setuptools ≥ 77 gives SPDX licence support (PEP 639)
+# - wheel is needed by most build front-ends
+# ----------------------------------------------------------------
+[build-system]
+requires = ["setuptools>=77", "wheel"]
+build-backend = "setuptools.build_meta"
+
+# ----------------------------------------------------------------
+# 📦 Core project metadata (PEP 621)
+# ----------------------------------------------------------------
+[project]
+name = "cedarpolicyplugin"
+version = "0.1.0"
+description = "A plugin that does policy decision and enforcement using cedar"
+keywords = ["MCP","API","gateway","tools",
+ "agents","agentic ai","model context protocol","multi-agent","fastapi",
+ "json-rpc","sse","websocket","federation","security","authentication"
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Framework :: FastAPI",
+ "Framework :: AsyncIO",
+ "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
+ "Topic :: Software Development :: Libraries :: Application Frameworks"
+]
+readme = "README.md"
+requires-python = ">=3.11,<3.14"
+license = "Apache-2.0"
+license-files = ["LICENSE"]
+
+maintainers = [
+ {name = "Shriti Priya", email = "shritip@ibm.com"}
+]
+
+authors = [
+ {name = "Shriti Priya", email = "shritip@ibm.com"}
+]
+
+dependencies = [
+ "mcp>=1.16.0",
+ "mcp-contextforge-gateway",
+ "cedarpy>=4.1.0"
+]
+
+# URLs
+[project.urls]
+Homepage = "https://ibm.github.io/mcp-context-forge/"
+Documentation = "https://ibm.github.io/mcp-context-forge/"
+Repository = "https://github.com/IBM/mcp-context-forge"
+"Bug Tracker" = "https://github.com/IBM/mcp-context-forge/issues"
+Changelog = "https://github.com/IBM/mcp-context-forge/blob/main/CHANGELOG.md"
+
+[tool.uv.sources]
+mcp-contextforge-gateway = { git = "https://github.com/IBM/mcp-context-forge.git", rev = "main" }
+
+# ----------------------------------------------------------------
+# Optional dependency groups (extras)
+# ----------------------------------------------------------------
+[project.optional-dependencies]
+dev = [
+ "black>=25.1.0",
+ "pytest>=8.4.1",
+ "pytest-asyncio>=1.1.0",
+ "pytest-cov>=6.2.1",
+ "pytest-dotenv>=0.5.2",
+ "pytest-env>=1.1.5",
+ "pytest-examples>=0.0.18",
+ "pytest-md-report>=0.7.0",
+ "pytest-rerunfailures>=15.1",
+ "pytest-trio>=0.8.0",
+ "pytest-xdist>=3.8.0",
+ "ruff>=0.12.9",
+ "unimport>=1.2.1",
+ "uv>=0.8.11",
+]
+
+# --------------------------------------------------------------------
+# 🔧 setuptools-specific configuration
+# --------------------------------------------------------------------
+[tool.setuptools]
+include-package-data = true # ensure wheels include the data files
+
+# Automatic discovery: keep every package that starts with "cedarpolicyplugin"
+[tool.setuptools.packages.find]
+include = ["cedarpolicyplugin*"]
+exclude = ["tests*"]
+
+## Runtime data files ------------------------------------------------
+[tool.setuptools.package-data]
+cedarpolicyplugin = [
+ "resources/plugins/config.yaml",
+]
diff --git a/plugins/external/cedar/resources/plugins/config.yaml b/plugins/external/cedar/resources/plugins/config.yaml
new file mode 100644
index 000000000..23d048311
--- /dev/null
+++ b/plugins/external/cedar/resources/plugins/config.yaml
@@ -0,0 +1,102 @@
+plugins:
+ - name: "CedarPolicyPlugin"
+ kind: "cedarpolicyplugin.plugin.CedarPolicyPlugin"
+ description: "A plugin that does policy decision and enforcement using cedar"
+ version: "0.1.0"
+ author: "Shriti Priya"
+ hooks: ["prompt_pre_fetch", "prompt_post_fetch", "tool_pre_invoke", "tool_post_invoke"]
+ tags: ["plugin"]
+ mode: "enforce" # enforce | permissive | disabled
+ priority: 150
+ conditions:
+ # Apply to specific tools/servers
+ - server_ids: [] # Apply to all servers
+ tenant_ids: [] # Apply to all tenants
+ config:
+ policy_lang: cedar
+ policy_output_keywords:
+ view_full: "view_full_output"
+ view_redacted: "view_redacted_output"
+ policy_redaction_spec:
+ pattern: '"\$\d{1,}(,\d{1,})*"' # provide regex, if none, then replace all
+ policy:
+ - id: allow-employee-basic-access
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"get_leave_balance" #tool name
+ - Action::"request_certificate"
+ resource:
+ - Server::"askHR" # mcp-server name
+ - Agent::"employee_agent" # agent name
+
+ - id: allow-manager-full-access
+ effect: Permit
+ principal: Role::"manager"
+ action:
+ - Action::"get_leave_balance"
+ - Action::"approve_leave"
+ - Action::"promote_employee"
+ - Action::"view_performance"
+ - Action::"view_full_output"
+ resource:
+ - Agent::"manager_agent"
+ - Server::"payroll_tool"
+
+ - id: allow-hr-hr_tool
+ effect: Permit
+ principal: Role::"hr"
+ action:
+ - Action::"update_payroll"
+ - Action::"view_performance"
+ - Action::"view_full_output"
+ resource: Server::"hr_tool"
+
+ - id: redact-non-manager-views
+ effect: Permit
+ principal: Role::"employee"
+ action: Action::"view_redacted_output"
+ resource:
+ - Server::"payroll_tool"
+ - Agent::"manager_agent"
+ - Server::"askHR"
+
+ - id: allow-admin-resources # policy for resources
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Resource::""https://example.com/data"" #Resource::
+
+ - id: allow-employee-redacted-resources # policy for resources
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Resource::""https://example.com/data"" #Resource::
+
+ - id: allow-admin-prompts # policy for prompts
+ effect: Permit
+ principal: Role::"admin"
+ action:
+ - Action::"view_full_output"
+ resource: Prompts::"judge_prompts" #Prompt::
+
+ - id: allow-employee-redacted-prompts # policy for prompts
+ effect: Permit
+ principal: Role::"employee"
+ action:
+ - Action::"view_redacted_output"
+ resource: Prompts::"judge_prompts" #Prompt::
+
+# Plugin directories to scan
+plugin_dirs:
+ - "cedarpolicyplugin"
+
+# Global plugin settings
+plugin_settings:
+ parallel_execution_within_band: true
+ plugin_timeout: 30
+ fail_on_plugin_error: false
+ enable_plugin_api: true
+ plugin_health_check_interval: 60
diff --git a/plugins/external/cedar/resources/runtime/config.yaml b/plugins/external/cedar/resources/runtime/config.yaml
new file mode 100644
index 000000000..5b26791f5
--- /dev/null
+++ b/plugins/external/cedar/resources/runtime/config.yaml
@@ -0,0 +1,71 @@
+# config.yaml
+host:
+ name: "cedarpolicyplugin"
+ log_level: "INFO"
+
+server:
+ type: "streamable-http" # "stdio" or "sse" or "streamable-http"
+ #auth: "bearer" # this line is needed to enable bearer auth
+
+# Logging configuration - controls all logging behavior
+logging:
+ level: "WARNING" # Changed from INFO to WARNING for quieter default
+ format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ reset_handlers: true
+ quiet_libraries: true
+
+ # Specific logger overrides to silence noisy components
+ loggers:
+ # Your existing overrides
+ "chuk_mcp_runtime.proxy": "WARNING"
+ "chuk_mcp_runtime.proxy.manager": "WARNING"
+ "chuk_mcp_runtime.proxy.tool_wrapper": "WARNING"
+ "chuk_tool_processor.mcp.stream_manager": "WARNING"
+ "chuk_tool_processor.mcp.register": "WARNING"
+ "chuk_tool_processor.mcp.setup_stdio": "WARNING"
+ "chuk_mcp_runtime.common.tool_naming": "WARNING"
+ "chuk_mcp_runtime.common.openai_compatibility": "WARNING"
+
+ # NEW: Add the noisy loggers you're seeing
+ "chuk_sessions.session_manager": "ERROR"
+ "chuk_mcp_runtime.session.native": "ERROR"
+ "chuk_mcp_runtime.tools.artifacts": "ERROR"
+ "chuk_mcp_runtime.tools.session": "ERROR"
+ "chuk_artifacts.store": "ERROR"
+ "chuk_mcp_runtime.entry": "WARNING" # Keep some info but less chatty
+ "chuk_mcp_runtime.server": "WARNING" # Server start/stop messages
+
+# optional overrides
+sse:
+ host: "0.0.0.0"
+ port: 8000
+ sse_path: "/sse"
+ message_path: "/messages/"
+ health_path: "/health"
+ log_level: "info"
+ access_log: true
+
+streamable-http:
+ host: "0.0.0.0"
+ port: 8000
+ mcp_path: "/mcp"
+ stateless: true
+ json_response: true
+ health_path: "/health"
+ log_level: "info"
+ access_log: true
+
+proxy:
+ enabled: false
+ namespace: "proxy"
+ openai_compatible: false # ← set to true if you want underscores
+
+# Session tools (disabled by default - must enable explicitly)
+session_tools:
+ enabled: false # Must explicitly enable
+
+# Artifact storage (disabled by default - must enable explicitly)
+artifacts:
+ enabled: false # Must explicitly enable
+ storage_provider: "filesystem"
+ session_provider: "memory"
diff --git a/plugins/external/cedar/run-server.sh b/plugins/external/cedar/run-server.sh
new file mode 100755
index 000000000..d73f57de5
--- /dev/null
+++ b/plugins/external/cedar/run-server.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+#───────────────────────────────────────────────────────────────────────────────
+# Script : run-server.sh
+# Purpose: Launch the MCP Gateway's Plugin API
+#
+# Description:
+# This script launches an API server using
+# the chuk-mcp-runtime.
+#
+# Environment Variables:
+# API_SERVER_SCRIPT : Path to the server script (optional, auto-detected)
+# PLUGINS_CONFIG_PATH : Path to the plugin config (optional, default: ./resources/plugins/config.yaml)
+# CHUK_MCP_CONFIG_PATH : Path to the chuk-mcp-runtime config (optional, default: ./resources/runtime/config.yaml)
+#
+# Usage:
+# ./run-server.sh # Run server
+#───────────────────────────────────────────────────────────────────────────────
+
+# Exit immediately on error, undefined variable, or pipe failure
+set -euo pipefail
+
+#────────────────────────────────────────────────────────────────────────────────
+# SECTION 1: Script Location Detection
+# Determine the absolute path of the API server script
+#────────────────────────────────────────────────────────────────────────────────
+if [[ -z "${API_SERVER_SCRIPT:-}" ]]; then
+ API_SERVER_SCRIPT="$(python -c 'import mcpgateway.plugins.framework.external.mcp.server.runtime as server; print(server.__file__)')"
+ echo "✓ API server script path auto-detected: ${API_SERVER_SCRIPT}"
+else
+ echo "✓ Using provided API server script path: ${API_SERVER_SCRIPT}"
+fi
+
+#────────────────────────────────────────────────────────────────────────────────
+# SECTION 2: Run the API server
+# Run the API server from configuration
+#────────────────────────────────────────────────────────────────────────────────
+
+PLUGINS_CONFIG_PATH=${PLUGINS_CONFIG_PATH:-./resources/plugins/config.yaml}
+CHUK_MCP_CONFIG_PATH=${CHUK_MCP_CONFIG_PATH:-./resources/runtime/config.yaml}
+
+echo "✓ Using plugin config from: ${PLUGINS_CONFIG_PATH}"
+echo "✓ Running API server with config from: ${CHUK_MCP_CONFIG_PATH}"
+python "${API_SERVER_SCRIPT}"
diff --git a/plugins/external/cedar/tests/__init__.py b/plugins/external/cedar/tests/__init__.py
new file mode 100644
index 000000000..2e033f69b
--- /dev/null
+++ b/plugins/external/cedar/tests/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/__init__.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Shriti Priya
+"""
diff --git a/plugins/external/cedar/tests/pytest.ini b/plugins/external/cedar/tests/pytest.ini
new file mode 100644
index 000000000..ff60648e6
--- /dev/null
+++ b/plugins/external/cedar/tests/pytest.ini
@@ -0,0 +1,13 @@
+[pytest]
+log_cli = false
+log_cli_level = INFO
+log_cli_format = %(asctime)s [%(module)s] [%(levelname)s] %(message)s
+log_cli_date_format = %Y-%m-%d %H:%M:%S
+log_level = INFO
+log_format = %(asctime)s [%(module)s] [%(levelname)s] %(message)s
+log_date_format = %Y-%m-%d %H:%M:%S
+addopts = --cov --cov-report term-missing
+env_files = .env
+pythonpath = . src
+filterwarnings =
+ ignore::DeprecationWarning:pydantic.*
diff --git a/plugins/external/cedar/tests/test_cedarpolicyplugin.py b/plugins/external/cedar/tests/test_cedarpolicyplugin.py
new file mode 100644
index 000000000..52e897408
--- /dev/null
+++ b/plugins/external/cedar/tests/test_cedarpolicyplugin.py
@@ -0,0 +1,642 @@
+# -*- coding: utf-8 -*-
+"""Tests for plugin."""
+
+# Third-Party
+from cedarpolicyplugin.plugin import CedarPolicyPlugin
+import pytest
+
+# First-Party
+from mcpgateway.common.models import Message, PromptResult, ResourceContent, Role, TextContent
+from mcpgateway.plugins.framework.hooks.prompts import PromptPosthookPayload, PromptPrehookPayload
+from mcpgateway.plugins.framework.hooks.resources import ResourcePostFetchPayload, ResourcePreFetchPayload
+from mcpgateway.plugins.framework.hooks.tools import ToolPostInvokePayload, ToolPreInvokePayload
+from mcpgateway.plugins.framework.models import (
+ GlobalContext,
+ PluginConfig,
+ PluginContext,
+)
+
+
+# This test case is responsible for verifying cedarplugin functionality for post tool hooks in cedar native mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_post_tool_invoke_rbac():
+ """Test plugin for post tool invocation"""
+ policy_config = [
+ {
+ "id": "allow-employee-basic-access",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"get_leave_balance"', 'Action::"request_certificate"'],
+ "resource": ['Server::"askHR"', 'Agent::"employee_agent"'],
+ },
+ {
+ "id": "allow-manager-full-access",
+ "effect": "Permit",
+ "principal": 'Role::"manager"',
+ "action": ['Action::"get_leave_balance"', 'Action::"approve_leave"', 'Action::"promote_employee"', 'Action::"view_performance"', 'Action::"view_full_output"'],
+ "resource": ['Agent::"manager_agent"', 'Server::"payroll_tool"'],
+ },
+ {
+ "id": "allow-hr-hr_tool",
+ "effect": "Permit",
+ "principal": 'Role::"hr"',
+ "action": ['Action::"update_payroll"', 'Action::"view_performance"', 'Action::"view_full_output"'],
+ "resource": ['Server::"hr_tool"'],
+ },
+ {
+ "id": "redact-non-manager-views",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"view_redacted_output"'],
+ "resource": ['Server::"payroll_tool"', 'Agent::"manager_agent"', 'Server::"askHR"'],
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": r"\$\d{1,}(,\d{1,})*"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "action": "get_leave_balance", "resource": "askHR"},
+ {"user": "bob", "action": "view_performance", "resource": "payroll_tool"},
+ {"user": "carol", "action": "update_payroll", "resource": "hr_tool"},
+ {"user": "alice", "action": "update_payroll", "resource": "hr_tool"},
+ ]
+
+ redact_count = 0
+ allow_count = 0
+ deny_count = 0
+ for req in requests:
+ payload = ToolPostInvokePayload(name=req["action"], result={"text": "Alice has a salary of $250,000"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id=req["resource"], user=req["user"]))
+ result = await plugin.tool_post_invoke(payload, context)
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.result["text"]:
+ redact_count += 1
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert redact_count == 1
+ assert allow_count == 3
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for post tool invocation with policy in custom dsl mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_post_tool_invoke_custom_dsl_rbac():
+ """Test plugin for post tool invocation"""
+ policy_config = "[role:employee:server/askHR]\nget_leave_balance\nrequest_certificate\n\n\
+ [role:employee:agent/employee_agent]\nget_leave_balance\nrequest_certificate\n\n[role:manager:agent/manager_agent]\nget_leave_balance\napprove_leave\npromote_employee\nview_performance\nview_full_output\n\n[role:manager:server/payroll_tool]\
+ \nget_leave_balance\napprove_leave\npromote_employee\nview_performance\nview_full_output\n\n[role:hr:server/hr_tool]\nupdate_payroll\nview_performance\nview_full_output\n\n[role:employee:server/payroll_tool]\nview_redacted_output\n\n[role:employee:agent/manager_agent]\nview_redacted_output\n\n\
+ [role:employee:server/askHR]\nview_redacted_output"
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": r"\$\d{1,}(,\d{1,})*"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "action": "get_leave_balance", "resource": "askHR"},
+ {"user": "bob", "action": "view_performance", "resource": "payroll_tool"},
+ {"user": "carol", "action": "update_payroll", "resource": "hr_tool"},
+ {"user": "alice", "action": "update_payroll", "resource": "hr_tool"},
+ ]
+
+ redact_count = 0
+ allow_count = 0
+ deny_count = 0
+ for req in requests:
+ payload = ToolPostInvokePayload(name=req["action"], result={"text": "Alice has a salary of $250,000"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id=req["resource"], user=req["user"]))
+ result = await plugin.tool_post_invoke(payload, context)
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.result["text"]:
+ redact_count += 1
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert redact_count == 1
+ assert allow_count == 3
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for tool pre invoke in cedar native mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_pre_tool_invoke_cedar_rbac():
+ """Test plugin tool pre invoke hook."""
+ policy_config = [
+ {
+ "id": "allow-employee-basic-access",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"get_leave_balance"', 'Action::"request_certificate"'],
+ "resource": ['Server::"askHR"', 'Agent::"employee_agent"'],
+ },
+ {
+ "id": "allow-manager-full-access",
+ "effect": "Permit",
+ "principal": 'Role::"manager"',
+ "action": ['Action::"get_leave_balance"', 'Action::"approve_leave"', 'Action::"promote_employee"', 'Action::"view_performance"', 'Action::"view_full_output"'],
+ "resource": ['Agent::"manager_agent"', 'Server::"payroll_tool"'],
+ },
+ {
+ "id": "allow-hr-hr_tool",
+ "effect": "Permit",
+ "principal": 'Role::"hr"',
+ "action": ['Action::"update_payroll"', 'Action::"view_performance"', 'Action::"view_full_output"'],
+ "resource": ['Server::"hr_tool"'],
+ },
+ {
+ "id": "redact-non-manager-views",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"view_redacted_output"'],
+ "resource": ['Server::"payroll_tool"', 'Agent::"manager_agent"', 'Server::"askHR"'],
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": r"\$\d{1,}(,\d{1,})*"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "action": "get_leave_balance", "resource": "askHR"},
+ {"user": "bob", "action": "view_performance", "resource": "payroll_tool"},
+ {"user": "carol", "action": "update_payroll", "resource": "hr_tool"},
+ {"user": "alice", "action": "update_payroll", "resource": "hr_tool"},
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ for req in requests:
+ payload = ToolPreInvokePayload(name=req["action"], args={"arg1": "sample arg"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id=req["resource"], user=req["user"]))
+ result = await plugin.tool_pre_invoke(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert allow_count == 3
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for tool pre invoke in custom dsl mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_pre_tool_invoke_custom_dsl_rbac():
+ """Test plugin tool pre invoke."""
+ policy_config = "[role:employee:server/askHR]\nget_leave_balance\nrequest_certificate\n\n[role:employee:agent/employee_agent]\n\
+ get_leave_balance\nrequest_certificate\n\n[role:manager:agent/manager_agent]\nget_leave_balance\napprove_leave\npromote_employee\n\
+ view_performance\nview_full_output\n\n[role:manager:server/payroll_tool]\nget_leave_balance\napprove_leave\npromote_employee\nview_performance\n\
+ view_full_output\n\n[role:hr:server/hr_tool]\nupdate_payroll\nview_performance\nview_full_output\n\n[role:employee:server/payroll_tool]\n\
+ view_redacted_output\n\n[role:employee:agent/manager_agent]\nview_redacted_output\n\n[role:employee:server/askHR]\nview_redacted_output"
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": r"\$\d{1,}(,\d{1,})*"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "action": "get_leave_balance", "resource": "askHR"},
+ {"user": "bob", "action": "view_performance", "resource": "payroll_tool"},
+ {"user": "carol", "action": "update_payroll", "resource": "hr_tool"},
+ {"user": "alice", "action": "update_payroll", "resource": "hr_tool"},
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ for req in requests:
+ payload = ToolPreInvokePayload(name=req["action"], args={"arg1": "sample arg"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id=req["resource"], user=req["user"]))
+ result = await plugin.tool_pre_invoke(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert allow_count == 3
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for prompt pre fetch in cedar mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_prompt_pre_fetch_rbac():
+ """Test plugin prompt prefetch hook."""
+ policy_config = [
+ {"id": "redact-non-admin-views", "effect": "Permit", "principal": 'Role::"employee"', "action": ['Action::"view_redacted_output"'], "resource": 'Prompt::"judge_prompts"'},
+ {
+ "id": "allow-admin-prompts", # policy for resources
+ "effect": "Permit",
+ "principal": 'Role::"admin"',
+ "action": ['Action::"view_full_output"'],
+ "resource": 'Prompt::"judge_prompts"', # Prompt::
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "all"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "judge_prompts"}, # allow
+ {"user": "robert", "resource": "judge_prompts"}, # allow
+ {"user": "carol", "resource": "judge_prompts"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+
+ for req in requests:
+
+ # Prompt pre hook input
+ payload = PromptPrehookPayload(prompt_id=req["resource"], args={"text": "You are curseword"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.prompt_pre_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for prompt pre fetch in custom dsl mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_prompt_pre_fetch_custom_dsl_rbac():
+ """Test plugin prompt prefetch hook."""
+ policy_config = "[role:employee:prompt/judge_prompts]\nview_redacted_output\n\n[role:admin:prompt/judge_prompts]\nview_full_output"
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "all"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "judge_prompts"}, # allow
+ {"user": "robert", "resource": "judge_prompts"}, # allow
+ {"user": "carol", "resource": "judge_prompts"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+
+ for req in requests:
+
+ # Prompt pre hook input
+ payload = PromptPrehookPayload(prompt_id=req["resource"], args={"text": "You are curseword"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.prompt_pre_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for prompt post fetch in cedar native mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_prompt_post_fetch_cedar_rbac():
+ """Test plugin prompt postfetch hook."""
+ policy_config = [
+ {"id": "redact-non-admin-views", "effect": "Permit", "principal": 'Role::"employee"', "action": ['Action::"view_redacted_output"'], "resource": 'Prompt::"judge_prompts"'},
+ {
+ "id": "allow-admin-prompts", # policy for resources
+ "effect": "Permit",
+ "principal": 'Role::"admin"',
+ "action": ['Action::"view_full_output"'],
+ "resource": 'Prompt::"judge_prompts"', # Prompt::
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "all"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "judge_prompts"}, # allow
+ {"user": "robert", "resource": "judge_prompts"}, # allow
+ {"user": "carol", "resource": "judge_prompts"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ redact_count = 0
+
+ for req in requests:
+
+ # Prompt post hook output
+ message = Message(content=TextContent(type="text", text="abc"), role=Role.USER)
+ prompt_result = PromptResult(messages=[message])
+ payload = PromptPosthookPayload(prompt_id=req["resource"], result=prompt_result)
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.prompt_post_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.result.messages[0].content.text:
+ redact_count += 1
+ if not result.continue_processing:
+ deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+ assert redact_count == 1
+
+
+# This test case is responsible for verifying cedarplugin functionality for prompt post fetch in custom dsl mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_prompt_post_fetch_custom_dsl_rbac():
+ """Test plugin prompt postfetch hook."""
+ policy_config = "[role:employee:prompt/judge_prompts]\nview_redacted_output\n\n[role:admin:prompt/judge_prompts]\nview_full_output"
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "all"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "judge_prompts"}, # allow
+ {"user": "robert", "resource": "judge_prompts"}, # allow
+ {"user": "carol", "resource": "judge_prompts"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ redact_count = 0
+
+ for req in requests:
+
+ # Prompt post hook output
+ message = Message(content=TextContent(type="text", text="abc"), role=Role.USER)
+ prompt_result = PromptResult(messages=[message])
+ payload = PromptPosthookPayload(prompt_id=req["resource"], result=prompt_result)
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.prompt_post_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.result.messages[0].content.text:
+ redact_count += 1
+        else:
+            deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+ assert redact_count == 1
+
+
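+# Illustrative parser sketch for the custom DSL string used above. The plugin's
+# real parser may differ; this only documents the assumed shape: a
+# "[role:<role>:<kind>/<resource>]" header followed by one permitted action per
+# line, with blank lines separating blocks.
+def _parse_custom_dsl(policy: str) -> dict:
+    rules = {}
+    for block in policy.strip().split("\n\n"):
+        lines = block.splitlines()
+        header = lines[0].strip("[]")  # e.g. "role:employee:prompt/judge_prompts"
+        rules[header] = [line.strip() for line in lines[1:]]
+    return rules
+
+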
+# Verifies CedarPolicyPlugin resource pre-fetch handling in Cedar native mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_resource_pre_fetch_cedar_rbac():
+ """Test plugin resource prefetch hook."""
+ policy_config = [
+ {
+ "id": "redact-non-admin-resource-views",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"view_redacted_output"'],
+ "resource": 'Resource::"https://example.com/data"',
+ },
+ {
+ "id": "allow-admin-resources", # policy for resources
+ "effect": "Permit",
+ "principal": 'Role::"admin"',
+ "action": ['Action::"view_full_output"'],
+ "resource": 'Resource::"https://example.com/data"',
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "https://example.com/data"}, # allow
+ {"user": "robert", "resource": "https://example.com/data"}, # allow
+ {"user": "carol", "resource": "https://example.com/data"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+
+ for req in requests:
+        # Invoke the resource pre-fetch hook and classify the outcome
+ payload = ResourcePreFetchPayload(uri="https://example.com/data", metadata={})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.resource_pre_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+        else:
+            deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+
+
+# Verifies CedarPolicyPlugin resource pre-fetch handling in custom DSL mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_resource_pre_fetch_custom_dsl_rbac():
+ """Test plugin resource prefetch hook."""
+ policy_config = "[role:employee:resource/https://example.com/data]\nview_redacted_output\n\n[role:admin:resource/https://example.com/data]\nview_full_output"
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "https://example.com/data"}, # allow
+ {"user": "robert", "resource": "https://example.com/data"}, # allow
+ {"user": "carol", "resource": "https://example.com/data"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+
+ for req in requests:
+        # Invoke the resource pre-fetch hook and classify the outcome
+ payload = ResourcePreFetchPayload(uri="https://example.com/data", metadata={})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.resource_pre_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+        else:
+            deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+
+
+# Verifies CedarPolicyPlugin resource post-fetch handling in Cedar native mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_resource_post_fetch_cedar_rbac():
+ """Test plugin resource post fetch."""
+ policy_config = [
+ {
+ "id": "redact-non-admin-resource-views",
+ "effect": "Permit",
+ "principal": 'Role::"employee"',
+ "action": ['Action::"view_redacted_output"'],
+ "resource": 'Resource::"https://example.com/data"',
+ },
+ {
+ "id": "allow-admin-resources", # policy for resources
+ "effect": "Permit",
+ "principal": 'Role::"admin"',
+ "action": ['Action::"view_full_output"'],
+ "resource": 'Resource::"https://example.com/data"',
+ },
+ ]
+
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "cedar", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "https://example.com/data"}, # allow
+ {"user": "robert", "resource": "https://example.com/data"}, # allow
+ {"user": "carol", "resource": "https://example.com/data"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ redact_count = 0
+
+ for req in requests:
+        # Invoke the resource post-fetch hook and classify the outcome
+ content = ResourceContent(type="resource", uri="test://large", text="test://abc@example.com", id="1")
+ payload = ResourcePostFetchPayload(uri="https://example.com/data", content=content)
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.resource_post_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.content.text:
+ redact_count += 1
+        else:
+            deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+ assert redact_count == 1
+
+
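+# Illustrative sanity check (not part of the plugin): applying the redaction
+# regex configured above with a plain re.sub turns email addresses into
+# "[REDACTED]" (the replacement literal is assumed from these assertions).
+def test_redaction_pattern_matches_emails():
+    # Standard
+    import re
+
+    pattern = r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"
+    assert re.sub(pattern, "[REDACTED]", "test://abc@example.com") == "test://[REDACTED]"
+
+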
+# Verifies CedarPolicyPlugin resource post-fetch handling in custom DSL mode
+@pytest.mark.asyncio
+async def test_cedarpolicyplugin_resource_post_fetch_custom_dsl_rbac():
+ """Test plugin resource postfetch hook."""
+ policy_config = "[role:employee:resource/https://example.com/data]\nview_redacted_output\n\n[role:admin:resource/https://example.com/data]\nview_full_output"
+ policy_output_keywords = {"view_full": "view_full_output", "view_redacted": "view_redacted_output"}
+ policy_redaction_spec = {"pattern": "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"}
+ config = PluginConfig(
+ name="test",
+ kind="cedarpolicyplugin.CedarPolicyPlugin",
+ hooks=["tool_pre_invoke"],
+ config={"policy_lang": "custom_dsl", "policy": policy_config, "policy_output_keywords": policy_output_keywords, "policy_redaction_spec": policy_redaction_spec},
+ )
+ plugin = CedarPolicyPlugin(config)
+ info = {"alice": "employee", "bob": "manager", "carol": "hr", "robert": "admin"}
+ plugin._set_jwt_info(info)
+ requests = [
+ {"user": "alice", "resource": "https://example.com/data"}, # allow
+ {"user": "robert", "resource": "https://example.com/data"}, # allow
+ {"user": "carol", "resource": "https://example.com/data"}, # deny
+ ]
+
+ allow_count = 0
+ deny_count = 0
+ redact_count = 0
+
+ for req in requests:
+        # Invoke the resource post-fetch hook and classify the outcome
+ content = ResourceContent(type="resource", uri="test://large", text="test://abc@example.com", id="1")
+ payload = ResourcePostFetchPayload(uri="https://example.com/data", content=content)
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2", user=req["user"]))
+ result = await plugin.resource_post_fetch(payload, context)
+ if result.continue_processing:
+ allow_count += 1
+ if result.modified_payload and "[REDACTED]" in result.modified_payload.content.text:
+ redact_count += 1
+        else:
+            deny_count += 1
+
+ assert allow_count == 2
+ assert deny_count == 1
+ assert redact_count == 1
diff --git a/plugins/vault/README.md b/plugins/vault/README.md
index 7f61bb01b..15f3440a2 100644
--- a/plugins/vault/README.md
+++ b/plugins/vault/README.md
@@ -222,4 +222,3 @@ curl -s -X POST -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" \
}' \
http://localhost:4444/tools/invoke
```
-
diff --git a/pyproject.toml b/pyproject.toml
index c8afa56f6..aa8c35355 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -282,6 +282,7 @@ Changelog = "https://github.com/IBM/mcp-context-forge/blob/main/CHANGELOG.md"
[project.scripts]
mcpgateway = "mcpgateway.cli:main"
mcpplugins = "mcpgateway.plugins.tools.cli:main"
+cforge = "mcpgateway.tools.cli:main"
# --------------------------------------------------------------------
# 🔧 setuptools-specific configuration
@@ -300,6 +301,9 @@ exclude = ["tests*"]
# - templates -> Jinja2 templates shipped at runtime
[tool.setuptools.package-data]
mcpgateway = [
+ "tools/builder/templates/*.yaml.j2",
+ "tools/builder/templates/compose/*.yaml.j2",
+ "tools/builder/templates/kubernetes/*.yaml.j2",
"py.typed",
"static/*.css",
"static/*.js",
@@ -674,7 +678,11 @@ omit = [
"*/test_*.py",
"*/__init__.py",
"*/alembic/*",
- "*/version.py"
+ "*/version.py",
+ # Builder deployment files - require external tools (docker, kubectl, templates)
+ "mcpgateway/tools/builder/common.py",
+ "mcpgateway/tools/builder/dagger_deploy.py",
+ "mcpgateway/tools/builder/python_deploy.py"
]
# --------------------------------------------------------------------
diff --git a/run-gunicorn.sh b/run-gunicorn.sh
index 61addbbb1..e20e3ea3b 100755
--- a/run-gunicorn.sh
+++ b/run-gunicorn.sh
@@ -278,6 +278,13 @@ echo " Developer Mode: ${GUNICORN_DEV_MODE}"
SSL=${SSL:-false} # Enable/disable SSL (default: false)
CERT_FILE=${CERT_FILE:-certs/cert.pem} # Path to SSL certificate file
KEY_FILE=${KEY_FILE:-certs/key.pem} # Path to SSL private key file
+KEY_FILE_PASSWORD=${KEY_FILE_PASSWORD:-} # Optional passphrase for encrypted key
+CERT_PASSPHRASE=${CERT_PASSPHRASE:-} # Alternative name for passphrase
+
+# Use CERT_PASSPHRASE if KEY_FILE_PASSWORD is not set (for compatibility)
+if [[ -z "${KEY_FILE_PASSWORD}" && -n "${CERT_PASSPHRASE}" ]]; then
+ KEY_FILE_PASSWORD="${CERT_PASSPHRASE}"
+fi
# Verify SSL settings if enabled
if [[ "${SSL}" == "true" ]]; then
@@ -305,9 +312,22 @@ if [[ "${SSL}" == "true" ]]; then
exit 1
fi
+ # Check if passphrase is provided
+ if [[ -n "${KEY_FILE_PASSWORD}" ]]; then
+ echo "🔑 Passphrase-protected key detected"
+ echo " Note: Key will be decrypted by Python SSL key manager"
+ # Export for Python to access
+ export SSL_KEY_PASSWORD="${KEY_FILE_PASSWORD}"
+ fi
+
echo "✓ TLS enabled - using:"
echo " Certificate: ${CERT_FILE}"
echo " Private Key: ${KEY_FILE}"
+ if [[ -n "${KEY_FILE_PASSWORD}" ]]; then
+ echo " Passphrase: ******** (protected)"
+ else
+ echo " Passphrase: (none)"
+ fi
else
echo "🔓 Running without TLS (HTTP only)"
fi
@@ -381,6 +401,7 @@ fi
# Add SSL arguments if enabled
if [[ "${SSL}" == "true" ]]; then
cmd+=( --certfile "${CERT_FILE}" --keyfile "${KEY_FILE}" )
+ # If passphrase is set, it will be available to Python via SSL_KEY_PASSWORD env var
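+    # Python side (illustrative sketch; assumes the key manager uses the stdlib):
+    #   ssl.SSLContext.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE,
+    #                                  password=os.environ.get("SSL_KEY_PASSWORD"))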
fi
# Add the application module
diff --git a/tests/conftest.py b/tests/conftest.py
index 5c813749f..69b3a0e31 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -121,8 +121,20 @@ def app():
import mcpgateway.main as main_mod
mp.setattr(main_mod, "SessionLocal", TestSessionLocal, raising=False)
- # (patch engine too if your code references it)
- mp.setattr(main_mod, "engine", engine, raising=False)
+
+ # Also patch security_logger and auth_middleware's SessionLocal
+ # First-Party
+ import mcpgateway.middleware.auth_middleware as auth_middleware_mod
+ import mcpgateway.services.security_logger as sec_logger_mod
+ import mcpgateway.services.structured_logger as struct_logger_mod
+ import mcpgateway.services.audit_trail_service as audit_trail_mod
+ import mcpgateway.services.log_aggregator as log_aggregator_mod
+
+ mp.setattr(auth_middleware_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(sec_logger_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(struct_logger_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(audit_trail_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(log_aggregator_mod, "SessionLocal", TestSessionLocal, raising=False)
# 4) create schema
db_mod.Base.metadata.create_all(bind=engine)
@@ -186,8 +198,20 @@ def app_with_temp_db():
import mcpgateway.main as main_mod
mp.setattr(main_mod, "SessionLocal", TestSessionLocal, raising=False)
- # (patch engine too if your code references it)
- mp.setattr(main_mod, "engine", engine, raising=False)
+
+ # Also patch security_logger and auth_middleware's SessionLocal
+ # First-Party
+ import mcpgateway.middleware.auth_middleware as auth_middleware_mod
+ import mcpgateway.services.security_logger as sec_logger_mod
+ import mcpgateway.services.structured_logger as struct_logger_mod
+ import mcpgateway.services.audit_trail_service as audit_trail_mod
+ import mcpgateway.services.log_aggregator as log_aggregator_mod
+
+ mp.setattr(auth_middleware_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(sec_logger_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(struct_logger_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(audit_trail_mod, "SessionLocal", TestSessionLocal, raising=False)
+ mp.setattr(log_aggregator_mod, "SessionLocal", TestSessionLocal, raising=False)
# 4) create schema
db_mod.Base.metadata.create_all(bind=engine)
diff --git a/tests/e2e/test_main_apis.py b/tests/e2e/test_main_apis.py
index bb8c6e29c..ab22b1126 100644
--- a/tests/e2e/test_main_apis.py
+++ b/tests/e2e/test_main_apis.py
@@ -218,9 +218,18 @@ def mock_get_permission_service(*args, **kwargs):
app.dependency_overrides[get_permission_service] = mock_get_permission_service
app.dependency_overrides[get_db] = override_get_db
+ # Mock security_logger to prevent database access issues
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
+ # Patch at the middleware level where security_logger is used
+ sec_patcher = patch("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger)
+ sec_patcher.start()
+
yield engine
# Cleanup
+ sec_patcher.stop()
app.dependency_overrides.clear()
os.close(db_fd)
os.unlink(db_path)
diff --git a/tests/fuzz/conftest.py b/tests/fuzz/conftest.py
index 6b9326b4b..a92cd87ca 100644
--- a/tests/fuzz/conftest.py
+++ b/tests/fuzz/conftest.py
@@ -7,13 +7,68 @@
Fuzzing test configuration.
"""
+# Standard
+import os
+import tempfile
+
# Third-Party
from hypothesis import HealthCheck, settings, Verbosity
import pytest
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import StaticPool
# Mark all tests in this directory as fuzz tests
pytestmark = pytest.mark.fuzz
+
+@pytest.fixture(autouse=True)
+def mock_logging_services(monkeypatch):
+ """Mock logging services to prevent database access during fuzz tests.
+
+ This fixture patches SessionLocal in the db module and all modules that
+ import it, ensuring they use a test database with all tables created.
+ """
+ # Create a temp database for the fuzz tests
+ fd, path = tempfile.mkstemp(suffix=".db")
+ url = f"sqlite:///{path}"
+
+ engine = create_engine(url, connect_args={"check_same_thread": False}, poolclass=StaticPool)
+ TestSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
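+    # StaticPool with check_same_thread=False shares one SQLite connection
+    # across threads, which requests driven through TestClient require.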
+
+ # First-Party
+ import mcpgateway.db as db_mod
+ from mcpgateway.db import Base
+ import mcpgateway.main as main_mod
+ import mcpgateway.middleware.auth_middleware as auth_middleware_mod
+ import mcpgateway.services.security_logger as sec_logger_mod
+ import mcpgateway.services.structured_logger as struct_logger_mod
+
+ # Patch the core db module
+ monkeypatch.setattr(db_mod, "engine", engine)
+ monkeypatch.setattr(db_mod, "SessionLocal", TestSessionLocal)
+
+ # Patch main module's SessionLocal (it imports SessionLocal from db)
+ monkeypatch.setattr(main_mod, "SessionLocal", TestSessionLocal)
+
+ # Patch auth_middleware's SessionLocal
+ monkeypatch.setattr(auth_middleware_mod, "SessionLocal", TestSessionLocal)
+
+ # Patch security_logger and structured_logger SessionLocal
+ monkeypatch.setattr(sec_logger_mod, "SessionLocal", TestSessionLocal)
+ monkeypatch.setattr(struct_logger_mod, "SessionLocal", TestSessionLocal)
+
+ # Create all tables
+ Base.metadata.create_all(bind=engine)
+
+ yield
+
+ # Cleanup
+ engine.dispose()
+ os.close(fd)
+ os.unlink(path)
+
# Configure Hypothesis profiles for different environments
settings.register_profile("dev", max_examples=100, verbosity=Verbosity.normal, suppress_health_check=[HealthCheck.too_slow])
diff --git a/tests/fuzz/test_schema_validation_fuzz.py b/tests/fuzz/test_schema_validation_fuzz.py
index fcd6ea479..4c75f2a62 100644
--- a/tests/fuzz/test_schema_validation_fuzz.py
+++ b/tests/fuzz/test_schema_validation_fuzz.py
@@ -135,9 +135,9 @@ def test_tool_create_tags_field(self, tags):
"""Test tags field with various lists."""
try:
tool = ToolCreate(name="test", url="http://example.com", tags=tags)
- # If validation succeeds, tags should be list of strings
+ # If validation succeeds, tags should be list of dicts with id/label keys
assert isinstance(tool.tags, list)
- assert all(isinstance(tag, str) for tag in tool.tags)
+ assert all(isinstance(tag, dict) and "id" in tag and "label" in tag for tag in tool.tags)
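+            # e.g. a normalized tag looks like {"id": "db", "label": "db"}
+            # (shape inferred from the assertion above)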
except ValidationError:
# Expected for invalid tag structures
pass
diff --git a/tests/fuzz/test_security_fuzz.py b/tests/fuzz/test_security_fuzz.py
index 7da56e4c5..b4494d5d9 100644
--- a/tests/fuzz/test_security_fuzz.py
+++ b/tests/fuzz/test_security_fuzz.py
@@ -99,7 +99,7 @@ def test_integer_overflow_handling(self, large_int):
response = client.post("/admin/tools", json=payload, headers={"Authorization": "Basic YWRtaW46Y2hhbmdlbWU="})
- assert response.status_code in [200, 201, 400, 422]
+ assert response.status_code in [200, 201, 400, 401, 422]
def test_path_traversal_resistance(self):
"""Test resistance to path traversal attacks."""
@@ -330,4 +330,4 @@ def test_rate_limiting_behavior(self):
# Should either accept all or start rate limiting
# Rate limiting typically returns 429
for status in responses:
- assert status in [200, 201, 400, 422, 429, 409]
+        assert status in [200, 201, 400, 401, 409, 422, 429]
diff --git a/tests/security/test_rpc_endpoint_validation.py b/tests/security/test_rpc_endpoint_validation.py
index 2ec390eee..71af40285 100644
--- a/tests/security/test_rpc_endpoint_validation.py
+++ b/tests/security/test_rpc_endpoint_validation.py
@@ -14,6 +14,7 @@
# Standard
import logging
+from unittest.mock import MagicMock, patch
# Third-Party
from fastapi.testclient import TestClient
@@ -37,9 +38,14 @@ class TestRPCEndpointValidation:
"""
@pytest.fixture
- def client(self):
- """Create a test client for the FastAPI app."""
- return TestClient(app)
+ def client(self, app):
+ """Create a test client for the FastAPI app with mocked security_logger."""
+ # Mock security_logger to prevent database access
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
+ with patch("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger):
+ yield TestClient(app)
@pytest.fixture
def auth_headers(self):
@@ -269,8 +275,14 @@ class TestRPCValidationBypass:
"""Test various techniques to bypass RPC validation."""
@pytest.fixture
- def client(self):
- return TestClient(app)
+ def client(self, app):
+ """Create a test client for the FastAPI app with mocked security_logger."""
+ # Mock security_logger to prevent database access
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
+ with patch("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger):
+ yield TestClient(app)
def test_bypass_techniques(self, client):
"""Test various bypass techniques."""
diff --git a/tests/unit/mcpgateway/middleware/test_auth_middleware.py b/tests/unit/mcpgateway/middleware/test_auth_middleware.py
index cf8b85aa3..5882e20af 100644
--- a/tests/unit/mcpgateway/middleware/test_auth_middleware.py
+++ b/tests/unit/mcpgateway/middleware/test_auth_middleware.py
@@ -103,10 +103,18 @@ async def test_authentication_failure(monkeypatch):
request.url.path = "/api/data"
request.cookies = {"jwt_token": "bad_token"}
request.headers = {}
+ # Mock request.client for security_logger
+ request.client = MagicMock()
+ request.client.host = "127.0.0.1"
+
+ # Mock security_logger to prevent database operations
+ mock_security_logger = MagicMock()
+ mock_security_logger.log_authentication_attempt = MagicMock(return_value=None)
with patch("mcpgateway.middleware.auth_middleware.SessionLocal", return_value=MagicMock()) as mock_session, \
patch("mcpgateway.middleware.auth_middleware.get_current_user", AsyncMock(side_effect=Exception("Invalid token"))), \
- patch("mcpgateway.middleware.auth_middleware.logger") as mock_logger:
+ patch("mcpgateway.middleware.auth_middleware.logger") as mock_logger, \
+ patch("mcpgateway.middleware.auth_middleware.security_logger", mock_security_logger):
response = await middleware.dispatch(request, call_next)
call_next.assert_awaited_once_with(request)
diff --git a/tests/unit/mcpgateway/middleware/test_correlation_id.py b/tests/unit/mcpgateway/middleware/test_correlation_id.py
new file mode 100644
index 000000000..029d482fc
--- /dev/null
+++ b/tests/unit/mcpgateway/middleware/test_correlation_id.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+"""Tests for correlation ID middleware."""
+
+# Standard
+from unittest.mock import Mock, patch
+
+# Third-Party
+from fastapi import FastAPI, Request
+from fastapi.testclient import TestClient
+import pytest
+
+# First-Party
+from mcpgateway.middleware.correlation_id import CorrelationIDMiddleware
+from mcpgateway.utils.correlation_id import get_correlation_id
+
+
+@pytest.fixture
+def app():
+ """Create a test FastAPI app with correlation ID middleware."""
+ test_app = FastAPI()
+
+ # Add the correlation ID middleware
+ test_app.add_middleware(CorrelationIDMiddleware)
+
+ @test_app.get("/test")
+ async def test_endpoint(request: Request):
+ # Get correlation ID from context
+ correlation_id = get_correlation_id()
+ return {"correlation_id": correlation_id}
+
+ return test_app
+
+
+@pytest.fixture
+def client(app):
+ """Create a test client."""
+ return TestClient(app)
+
+
+def test_middleware_generates_correlation_id_when_not_provided(client):
+ """Test that middleware generates a correlation ID when not provided by client."""
+ response = client.get("/test")
+
+ assert response.status_code == 200
+ data = response.json()
+
+ # Should have a correlation ID in response body
+ assert "correlation_id" in data
+ assert data["correlation_id"] is not None
+ assert len(data["correlation_id"]) == 32 # UUID hex format
+
+ # Should have correlation ID in response headers
+ assert "X-Correlation-ID" in response.headers
+ assert response.headers["X-Correlation-ID"] == data["correlation_id"]
+
+
+def test_middleware_preserves_client_correlation_id(client):
+ """Test that middleware preserves correlation ID from client."""
+ client_id = "client-provided-id-123"
+
+ response = client.get("/test", headers={"X-Correlation-ID": client_id})
+
+ assert response.status_code == 200
+ data = response.json()
+
+ # Should use the client-provided ID
+ assert data["correlation_id"] == client_id
+
+ # Should echo it back in response headers
+ assert response.headers["X-Correlation-ID"] == client_id
+
+
+def test_middleware_case_insensitive_header(client):
+ """Test that middleware handles case-insensitive headers."""
+ client_id = "lowercase-header-id"
+
+ response = client.get("/test", headers={"x-correlation-id": client_id})
+
+ assert response.status_code == 200
+ data = response.json()
+
+ # Should use the client-provided ID regardless of case
+ assert data["correlation_id"] == client_id
+
+
+def test_middleware_strips_whitespace_from_header(client):
+ """Test that middleware strips whitespace from correlation ID header."""
+ client_id = " whitespace-id "
+
+ response = client.get("/test", headers={"X-Correlation-ID": client_id})
+
+ assert response.status_code == 200
+ data = response.json()
+
+ # Should strip whitespace
+ assert data["correlation_id"] == "whitespace-id"
+
+
+def test_middleware_clears_correlation_id_after_request(app):
+ """Test that middleware clears correlation ID after request completes."""
+ client = TestClient(app)
+
+ # Make a request
+ response = client.get("/test")
+ assert response.status_code == 200
+
+ # After request completes, correlation ID should be cleared
+ # (Note: This happens in a different context, so we can't directly test it here,
+ # but we verify that multiple requests get different IDs)
+ response2 = client.get("/test")
+ assert response2.status_code == 200
+
+ # Two requests without client-provided IDs should have different correlation IDs
+ assert response.json()["correlation_id"] != response2.json()["correlation_id"]
+
+
+def test_middleware_handles_empty_header(client):
+ """Test that middleware generates new ID when header is empty."""
+ response = client.get("/test", headers={"X-Correlation-ID": ""})
+
+ assert response.status_code == 200
+ data = response.json()
+
+ # Should generate a new ID when header is empty
+ assert data["correlation_id"] is not None
+ assert len(data["correlation_id"]) == 32
+
+
+def test_middleware_with_custom_settings(monkeypatch):
+ """Test middleware with custom configuration settings."""
+ # Create a mock settings object
+ mock_settings = Mock()
+ mock_settings.correlation_id_header = "X-Request-ID"
+ mock_settings.correlation_id_preserve = False
+ mock_settings.correlation_id_response_header = False
+
+ # Create app with custom settings
+ app = FastAPI()
+
+ # Patch settings at module level
+ with patch("mcpgateway.middleware.correlation_id.settings", mock_settings):
+ app.add_middleware(CorrelationIDMiddleware)
+
+ @app.get("/test")
+ async def test_endpoint():
+ return {"correlation_id": get_correlation_id()}
+
+ client = TestClient(app)
+
+ # Test with custom header name
+ response = client.get("/test", headers={"X-Request-ID": "custom-id"})
+
+ assert response.status_code == 200
+
+ # When preserve=False, should always generate new ID (not use client's)
+ # When response_header=False, should not include in response headers
+ assert "X-Request-ID" not in response.headers
+
+
+def test_middleware_integration_with_multiple_requests(client):
+ """Test middleware properly isolates correlation IDs across multiple requests."""
+ ids = []
+
+ for i in range(5):
+ response = client.get("/test", headers={"X-Correlation-ID": f"request-{i}"})
+ assert response.status_code == 200
+ ids.append(response.json()["correlation_id"])
+
+ # Each request should have its unique correlation ID
+ assert len(ids) == 5
+ assert len(set(ids)) == 5 # All unique
+ for i, correlation_id in enumerate(ids):
+ assert correlation_id == f"request-{i}"
+
+
+def test_middleware_context_isolation():
+ """Test that correlation ID is properly isolated per request context."""
+ app = FastAPI()
+ app.add_middleware(CorrelationIDMiddleware)
+
+ correlation_ids_seen = []
+
+ @app.get("/capture")
+ async def capture_endpoint():
+ # Capture the correlation ID during request handling
+ correlation_id = get_correlation_id()
+ correlation_ids_seen.append(correlation_id)
+ return {"captured": correlation_id}
+
+ client = TestClient(app)
+
+    # Make several sequential requests, each with its own correlation ID
+ for i in range(3):
+ response = client.get("/capture", headers={"X-Correlation-ID": f"id-{i}"})
+ assert response.status_code == 200
+
+ # Each request should have captured its own unique ID
+ assert len(correlation_ids_seen) == 3
+ assert correlation_ids_seen[0] == "id-0"
+ assert correlation_ids_seen[1] == "id-1"
+ assert correlation_ids_seen[2] == "id-2"
+
+
+def test_middleware_preserves_correlation_id_through_request_lifecycle():
+ """Test that correlation ID remains consistent throughout entire request."""
+ captured_ids = []
+
+ app = FastAPI()
+
+ @app.middleware("http")
+ async def capture_middleware(request: Request, call_next):
+ # Capture ID at middleware level (after CorrelationIDMiddleware sets it)
+ captured_ids.append(("middleware", get_correlation_id()))
+ response = await call_next(request)
+ return response
+
+ # Add CorrelationIDMiddleware last so it executes first (LIFO)
+ app.add_middleware(CorrelationIDMiddleware)
+
+ @app.get("/test")
+ async def test_endpoint():
+ # Capture ID at endpoint level
+ captured_ids.append(("endpoint", get_correlation_id()))
+ return {"ok": True}
+
+ client = TestClient(app)
+ response = client.get("/test", headers={"X-Correlation-ID": "consistent-id"})
+
+ assert response.status_code == 200
+
+ # Both captures should have the same correlation ID
+ assert len(captured_ids) == 2
+ assert captured_ids[0][1] == "consistent-id" # Middleware capture
+ assert captured_ids[1][1] == "consistent-id" # Endpoint capture
diff --git a/tests/unit/mcpgateway/middleware/test_request_logging_middleware.py b/tests/unit/mcpgateway/middleware/test_request_logging_middleware.py
index 30a2a3c26..e905d9716 100644
--- a/tests/unit/mcpgateway/middleware/test_request_logging_middleware.py
+++ b/tests/unit/mcpgateway/middleware/test_request_logging_middleware.py
@@ -7,6 +7,7 @@
"""
import json
import pytest
+from unittest.mock import MagicMock
from fastapi import Request, Response
from starlette.datastructures import Headers
from starlette.types import Scope
@@ -28,7 +29,7 @@ def __init__(self):
def isEnabledFor(self, level):
return self.enabled
- def log(self, level, msg):
+ def log(self, level, msg, extra=None):
self.logged.append((level, msg))
def warning(self, msg):
@@ -40,6 +41,15 @@ def dummy_logger(monkeypatch):
monkeypatch.setattr("mcpgateway.middleware.request_logging_middleware.logger", logger)
return logger
+
+@pytest.fixture
+def mock_structured_logger(monkeypatch):
+ """Mock the structured_logger to prevent database writes."""
+ mock_logger = MagicMock()
+ mock_logger.log = MagicMock()
+ monkeypatch.setattr("mcpgateway.middleware.request_logging_middleware.structured_logger", mock_logger)
+ return mock_logger
+
@pytest.fixture
def dummy_call_next():
async def _call_next(request):
@@ -112,8 +122,8 @@ def test_mask_sensitive_headers_non_sensitive():
# --- RequestLoggingMiddleware tests ---
@pytest.mark.asyncio
-async def test_dispatch_logs_json_body(dummy_logger, dummy_call_next):
- middleware = RequestLoggingMiddleware(app=None)
+async def test_dispatch_logs_json_body(dummy_logger, mock_structured_logger, dummy_call_next):
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=True)
body = json.dumps({"password": "123", "data": "ok"}).encode()
request = make_request(body=body, headers={"Authorization": "Bearer abc"})
response = await middleware.dispatch(request, dummy_call_next)
@@ -122,8 +132,8 @@ async def test_dispatch_logs_json_body(dummy_logger, dummy_call_next):
assert "******" in dummy_logger.logged[0][1]
@pytest.mark.asyncio
-async def test_dispatch_logs_non_json_body(dummy_logger, dummy_call_next):
- middleware = RequestLoggingMiddleware(app=None)
+async def test_dispatch_logs_non_json_body(dummy_logger, mock_structured_logger, dummy_call_next):
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=True)
body = b"token=abc"
request = make_request(body=body)
response = await middleware.dispatch(request, dummy_call_next)
@@ -131,8 +141,8 @@ async def test_dispatch_logs_non_json_body(dummy_logger, dummy_call_next):
assert any("" in msg for _, msg in dummy_logger.logged)
@pytest.mark.asyncio
-async def test_dispatch_large_body_truncated(dummy_logger, dummy_call_next):
- middleware = RequestLoggingMiddleware(app=None, max_body_size=10)
+async def test_dispatch_large_body_truncated(dummy_logger, mock_structured_logger, dummy_call_next):
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=True, max_body_size=10)
body = b"{" + b"a" * 100 + b"}"
request = make_request(body=body)
response = await middleware.dispatch(request, dummy_call_next)
@@ -140,8 +150,8 @@ async def test_dispatch_large_body_truncated(dummy_logger, dummy_call_next):
assert any("[truncated]" in msg for _, msg in dummy_logger.logged)
@pytest.mark.asyncio
-async def test_dispatch_logging_disabled(dummy_logger, dummy_call_next):
- middleware = RequestLoggingMiddleware(app=None, log_requests=False)
+async def test_dispatch_logging_disabled(dummy_logger, mock_structured_logger, dummy_call_next):
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=False)
body = b"{}"
request = make_request(body=body)
response = await middleware.dispatch(request, dummy_call_next)
@@ -149,9 +159,9 @@ async def test_dispatch_logging_disabled(dummy_logger, dummy_call_next):
assert dummy_logger.logged == []
@pytest.mark.asyncio
-async def test_dispatch_logger_disabled(dummy_logger, dummy_call_next):
+async def test_dispatch_logger_disabled(dummy_logger, mock_structured_logger, dummy_call_next):
dummy_logger.enabled = False
- middleware = RequestLoggingMiddleware(app=None)
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=True)
body = b"{}"
request = make_request(body=body)
response = await middleware.dispatch(request, dummy_call_next)
@@ -159,12 +169,12 @@ async def test_dispatch_logger_disabled(dummy_logger, dummy_call_next):
assert dummy_logger.logged == []
@pytest.mark.asyncio
-async def test_dispatch_exception_handling(dummy_logger, dummy_call_next, monkeypatch):
+async def test_dispatch_exception_handling(dummy_logger, mock_structured_logger, dummy_call_next, monkeypatch):
async def bad_body():
raise ValueError("fail")
request = make_request()
monkeypatch.setattr(request, "body", bad_body)
- middleware = RequestLoggingMiddleware(app=None)
+ middleware = RequestLoggingMiddleware(app=None, enable_gateway_logging=False, log_detailed_requests=True)
response = await middleware.dispatch(request, dummy_call_next)
assert response.status_code == 200
assert any("Failed to log request body" in msg for msg in dummy_logger.warnings)
diff --git a/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_runtime.py b/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_runtime.py
index 1d675a70f..b7c25724b 100644
--- a/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_runtime.py
+++ b/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_runtime.py
@@ -9,6 +9,7 @@
# Standard
import asyncio
+import json
# Third-Party
import pytest
diff --git a/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_server.py b/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_server.py
new file mode 100644
index 000000000..0c171d7cc
--- /dev/null
+++ b/tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_server.py
@@ -0,0 +1,421 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/plugins/framework/external/mcp/server/test_server.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Fred Araujo
+
+Comprehensive unit tests for ExternalPluginServer.
+"""
+
+# Standard
+import os
+from unittest.mock import Mock, patch
+
+# Third-Party
+import pytest
+
+# First-Party
+from mcpgateway.common.models import Message, PromptResult, Role, TextContent
+from mcpgateway.plugins.framework import (
+ GlobalContext,
+ PluginContext,
+ PromptHookType,
+ PromptPosthookPayload,
+ PromptPrehookPayload,
+ ToolHookType,
+ ToolPreInvokePayload,
+)
+from mcpgateway.plugins.framework.errors import PluginError
+from mcpgateway.plugins.framework.external.mcp.server.server import ExternalPluginServer
+from mcpgateway.plugins.framework.models import MCPServerConfig, PluginErrorModel
+
+
+@pytest.fixture
+def server_with_plugins():
+ """Create a server with valid plugin configuration."""
+ return ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_multiple_plugins_filter.yaml")
+
+
+@pytest.fixture
+async def initialized_server(server_with_plugins):
+ """Create and initialize a server."""
+ await server_with_plugins.initialize()
+ yield server_with_plugins
+ await server_with_plugins.shutdown()
+
+
+class TestExternalPluginServerInit:
+ """Tests for ExternalPluginServer initialization."""
+
+ def test_init_with_config_path(self):
+ """Test initialization with explicit config path."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ assert server._config_path == "./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml"
+ assert server._config is not None
+ assert server._plugin_manager is not None
+
+ def test_init_with_env_var(self):
+ """Test initialization using PLUGINS_CONFIG_PATH environment variable."""
+ os.environ["PLUGINS_CONFIG_PATH"] = "./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml"
+ try:
+ server = ExternalPluginServer()
+ assert server._config_path == "./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml"
+ assert server._config is not None
+ finally:
+ if "PLUGINS_CONFIG_PATH" in os.environ:
+ del os.environ["PLUGINS_CONFIG_PATH"]
+
+ def test_init_with_default_path(self):
+ """Test initialization with default config path."""
+ # Temporarily remove env var if it exists
+ env_backup = os.environ.pop("PLUGINS_CONFIG_PATH", None)
+ try:
+ with patch("os.path.join", return_value="./resources/plugins/config.yaml"):
+ with patch("mcpgateway.plugins.framework.loader.config.ConfigLoader.load_config") as mock_load:
+ mock_load.return_value = Mock(plugins=[], server_settings=None)
+ server = ExternalPluginServer()
+ assert "./resources/plugins/config.yaml" in server._config_path
+ finally:
+ if env_backup:
+ os.environ["PLUGINS_CONFIG_PATH"] = env_backup
+
+ def test_init_with_invalid_config(self):
+ """Test initialization with invalid config path uses defaults or raises error."""
+ # ConfigLoader may handle missing files by returning empty config
+ # This test verifies the server can be instantiated (or raises if validation fails)
+ try:
+ server = ExternalPluginServer(config_path="./nonexistent/path/config.yaml")
+ # If it succeeds, just verify server was created
+ assert server is not None
+ except Exception:
+ # If it raises, that's also acceptable behavior
+ pass
+
+
+class TestGetPluginConfigs:
+ """Tests for get_plugin_configs method."""
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_configs_multiple(self, server_with_plugins):
+ """Test getting multiple plugin configurations."""
+ configs = await server_with_plugins.get_plugin_configs()
+ assert isinstance(configs, list)
+ assert len(configs) > 0
+ # Verify each config is a dict with expected keys
+ for config in configs:
+ assert isinstance(config, dict)
+ assert "name" in config
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_configs_single(self):
+ """Test getting plugin configs with single plugin."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ configs = await server.get_plugin_configs()
+ assert len(configs) == 1
+ assert configs[0]["name"] == "ReplaceBadWordsPlugin"
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_configs_empty(self):
+ """Test getting plugin configs when no plugins configured."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ # Mock empty plugins list
+ server._config.plugins = None
+ configs = await server.get_plugin_configs()
+ assert configs == []
+
+
+class TestGetPluginConfig:
+ """Tests for get_plugin_config method."""
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_config_found(self, server_with_plugins):
+ """Test getting a specific plugin config by name."""
+ config = await server_with_plugins.get_plugin_config(name="DenyListPlugin")
+ assert config is not None
+ assert config["name"] == "DenyListPlugin"
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_config_case_insensitive(self, server_with_plugins):
+ """Test that plugin name lookup is case-insensitive."""
+ config = await server_with_plugins.get_plugin_config(name="denylistplugin")
+ assert config is not None
+ assert config["name"] == "DenyListPlugin"
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_config_not_found(self, server_with_plugins):
+ """Test getting a non-existent plugin config returns None."""
+ config = await server_with_plugins.get_plugin_config(name="NonExistentPlugin")
+ assert config is None
+
+ @pytest.mark.asyncio
+ async def test_get_plugin_config_empty_plugins(self):
+ """Test getting plugin config when no plugins configured."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ server._config.plugins = None
+ config = await server.get_plugin_config(name="AnyPlugin")
+ assert config is None
+
+
+class TestInvokeHook:
+ """Tests for invoke_hook method."""
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_success(self, initialized_server):
+ """Test successful hook invocation."""
+ payload = PromptPrehookPayload(prompt_id="123", name="test_prompt", args={"user": "This is so innovative"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
+ assert result["plugin_name"] == "DenyListPlugin"
+ assert "result" in result
+ assert result["result"]["continue_processing"] is False
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_with_context_update(self, initialized_server):
+ """Test that hook invocation includes updated context in response."""
+ payload = PromptPrehookPayload(prompt_id="123", name="test_prompt", args={"user": "normal text"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
+ # Context may or may not be included depending on whether it was modified
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_plugin_error(self, initialized_server):
+ """Test hook invocation when plugin raises PluginError."""
+ with patch("mcpgateway.plugins.framework.manager.PluginManager.invoke_hook_for_plugin") as mock_invoke:
+ # Simulate a PluginError
+ error = PluginErrorModel(message="Test error", plugin_name="TestPlugin", code="TEST_ERROR")
+ mock_invoke.side_effect = PluginError(error=error)
+
+ payload = PromptPrehookPayload(prompt_id="123", args={})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "error" in result
+ # error is a PluginErrorModel object, not a dict
+ error_obj = result["error"]
+ assert isinstance(error_obj, PluginErrorModel)
+ assert error_obj.message == "Test error"
+ assert error_obj.plugin_name == "TestPlugin"
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_generic_exception(self, initialized_server):
+ """Test hook invocation when plugin raises generic exception."""
+ with patch("mcpgateway.plugins.framework.manager.PluginManager.invoke_hook_for_plugin") as mock_invoke:
+ # Simulate a generic exception
+ mock_invoke.side_effect = ValueError("Unexpected error")
+
+ payload = PromptPrehookPayload(prompt_id="123", args={})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "error" in result
+ assert "Unexpected error" in result["error"]["message"]
+ assert result["error"]["plugin_name"] == "DenyListPlugin"
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_invalid_context(self, initialized_server):
+ """Test hook invocation with invalid context data returns error."""
+ payload = PromptPrehookPayload(prompt_id="123", args={})
+ # Invalid context dict
+ invalid_context = {"invalid": "data"}
+
+ # The method catches exceptions and returns them in the result
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), invalid_context)
+
+ # Should return an error result instead of raising
+ assert result is not None
+ assert "error" in result
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_tool_hooks(self, initialized_server):
+ """Test invoking tool pre/post hooks."""
+ # Test tool pre-invoke
+ payload = ToolPreInvokePayload(name="test_tool", args={"arg": "value"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(ToolHookType.TOOL_PRE_INVOKE, "ReplaceBadWordsPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
+ assert result["plugin_name"] == "ReplaceBadWordsPlugin"
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_prompt_post_fetch(self, initialized_server):
+ """Test invoking prompt post-fetch hook."""
+ message = Message(content=TextContent(type="text", text="test content"), role=Role.USER)
+ prompt_result = PromptResult(messages=[message])
+ payload = PromptPosthookPayload(prompt_id="123", result=prompt_result)
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_POST_FETCH, "ReplaceBadWordsPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
+ assert result["plugin_name"] == "ReplaceBadWordsPlugin"
+
+
+class TestInitializeShutdown:
+ """Tests for initialize and shutdown methods."""
+
+ @pytest.mark.asyncio
+ async def test_initialize_success(self, server_with_plugins):
+ """Test successful initialization."""
+ result = await server_with_plugins.initialize()
+ assert result is True
+ assert server_with_plugins._plugin_manager.initialized is True
+ await server_with_plugins.shutdown()
+
+ @pytest.mark.asyncio
+ async def test_initialize_idempotent(self, server_with_plugins):
+ """Test that multiple initializations are safe."""
+ await server_with_plugins.initialize()
+ await server_with_plugins.initialize()
+ # Should still return True
+ assert server_with_plugins._plugin_manager.initialized is True
+ await server_with_plugins.shutdown()
+
+ @pytest.mark.asyncio
+ async def test_shutdown_when_initialized(self, initialized_server):
+ """Test shutdown on initialized server."""
+ assert initialized_server._plugin_manager.initialized is True
+ await initialized_server.shutdown()
+ assert initialized_server._plugin_manager.initialized is False
+
+ @pytest.mark.asyncio
+ async def test_shutdown_when_not_initialized(self, server_with_plugins):
+ """Test shutdown on non-initialized server (should be safe)."""
+ assert server_with_plugins._plugin_manager.initialized is False
+ # Should not raise an error
+ await server_with_plugins.shutdown()
+ assert server_with_plugins._plugin_manager.initialized is False
+
+ @pytest.mark.asyncio
+ async def test_shutdown_idempotent(self, initialized_server):
+ """Test that multiple shutdowns are safe."""
+ await initialized_server.shutdown()
+ # Second shutdown should be safe
+ await initialized_server.shutdown()
+
+
+class TestGetServerConfig:
+ """Tests for get_server_config method."""
+
+ def test_get_server_config_with_settings(self):
+ """Test getting server config when server_settings is configured."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+
+ # Mock server settings
+ expected_config = MCPServerConfig(host="0.0.0.0", port=8080, tls_enabled=False)
+ server._config.server_settings = expected_config
+
+ config = server.get_server_config()
+ assert config == expected_config
+ assert config.host == "0.0.0.0"
+ assert config.port == 8080
+
+ def test_get_server_config_from_env(self):
+ """Test getting server config from environment variables."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ server._config.server_settings = None
+
+ # Set environment variables
+ os.environ["MCP_SERVER_HOST"] = "127.0.0.1"
+ os.environ["MCP_SERVER_PORT"] = "9090"
+
+ try:
+ config = server.get_server_config()
+ assert config is not None
+ # Should have loaded from env or defaults
+ finally:
+ # Cleanup
+ os.environ.pop("MCP_SERVER_HOST", None)
+ os.environ.pop("MCP_SERVER_PORT", None)
+
+ def test_get_server_config_defaults(self):
+ """Test getting server config with defaults."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+ server._config.server_settings = None
+
+ config = server.get_server_config()
+ assert config is not None
+ assert isinstance(config, MCPServerConfig)
+
+ def test_get_server_config_with_tls(self, tmp_path):
+ """Test getting server config with TLS enabled."""
+ # First-Party
+ from mcpgateway.plugins.framework.models import MCPServerTLSConfig
+
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_single_plugin.yaml")
+
+ # Create dummy cert files for validation
+ cert_file = tmp_path / "cert.pem"
+ key_file = tmp_path / "key.pem"
+ cert_file.write_text("cert")
+ key_file.write_text("key")
+
+ tls_settings = MCPServerTLSConfig(certfile=str(cert_file), keyfile=str(key_file))
+ tls_config = MCPServerConfig(host="0.0.0.0", port=8443, tls=tls_settings)
+ server._config.server_settings = tls_config
+
+ config = server.get_server_config()
+ assert config.tls is not None
+ assert config.tls.certfile == str(cert_file)
+ assert config.tls.keyfile == str(key_file)
+
+
+class TestEdgeCases:
+ """Tests for edge cases and error conditions."""
+
+ def test_doctest_example(self):
+ """Test the doctest example from __init__."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_multiple_plugins_filter.yaml")
+ assert server is not None
+
+ @pytest.mark.asyncio
+ async def test_doctest_get_plugin_configs(self):
+ """Test the doctest example from get_plugin_configs."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_multiple_plugins_filter.yaml")
+ plugins = await server.get_plugin_configs()
+ assert len(plugins) > 0
+
+ @pytest.mark.asyncio
+ async def test_doctest_get_plugin_config(self):
+ """Test the doctest example from get_plugin_config."""
+ server = ExternalPluginServer(config_path="./tests/unit/mcpgateway/plugins/fixtures/configs/valid_multiple_plugins_filter.yaml")
+ config = await server.get_plugin_config(name="DenyListPlugin")
+ assert config is not None
+ assert config["name"] == "DenyListPlugin"
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_with_empty_payload(self, initialized_server):
+ """Test hook invocation with minimal/empty payload."""
+ payload = PromptPrehookPayload(prompt_id="123", args={})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
+
+ @pytest.mark.asyncio
+ async def test_invoke_hook_with_complex_payload(self, initialized_server):
+ """Test hook invocation with multiple arguments."""
+ # PromptPrehookPayload args values must be strings
+ payload = PromptPrehookPayload(prompt_id="123", args={"user": "test message", "system": "system prompt", "context": "additional context"})
+ context = PluginContext(global_context=GlobalContext(request_id="1", server_id="2"))
+
+ result = await initialized_server.invoke_hook(PromptHookType.PROMPT_PRE_FETCH, "DenyListPlugin", payload.model_dump(), context.model_dump())
+
+ assert result is not None
+ assert "plugin_name" in result
diff --git a/tests/unit/mcpgateway/plugins/framework/external/mcp/test_tls_utils.py b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_tls_utils.py
new file mode 100644
index 000000000..751045d33
--- /dev/null
+++ b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_tls_utils.py
@@ -0,0 +1,369 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/plugins/framework/external/mcp/test_tls_utils.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Fred Araujo
+
+Additional unit tests for TLS utilities to improve code coverage.
+"""
+
+# Standard
+import ssl
+from unittest.mock import patch
+
+# Third-Party
+import pytest
+
+# First-Party
+from mcpgateway.plugins.framework.errors import PluginError
+from mcpgateway.plugins.framework.external.mcp.tls_utils import create_ssl_context
+from mcpgateway.plugins.framework.models import MCPClientTLSConfig
+
+
+class TestCreateSSLContextBasicConfig:
+ """Tests for basic SSL context configuration."""
+
+ def test_create_ssl_context_minimal_config(self):
+ """Test creating SSL context with minimal configuration."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "MinimalPlugin")
+
+ assert ssl_context is not None
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ assert ssl_context.check_hostname is True
+ assert ssl_context.minimum_version == ssl.TLSVersion.TLSv1_2
+
+ def test_create_ssl_context_verify_disabled(self):
+ """Test creating SSL context with verification disabled."""
+ tls_config = MCPClientTLSConfig(verify=False, check_hostname=False)
+
+ ssl_context = create_ssl_context(tls_config, "InsecurePlugin")
+
+ assert ssl_context is not None
+ assert ssl_context.verify_mode == ssl.CERT_NONE
+ assert ssl_context.check_hostname is False
+
+ def test_create_ssl_context_with_ca_bundle(self, tmp_path):
+ """Test creating SSL context with CA bundle."""
+ # Create a temporary CA file
+ ca_file = tmp_path / "ca.pem"
+ ca_file.write_text("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----")
+
+ tls_config = MCPClientTLSConfig(ca_bundle=str(ca_file), verify=True)
+
+ # Will fail to load the invalid cert but we're testing the path is used
+ with pytest.raises(PluginError):
+ create_ssl_context(tls_config, "TestPlugin")
+
+ def test_create_ssl_context_hostname_check_disabled(self):
+ """Test creating SSL context with hostname checking disabled but verify enabled."""
+ tls_config = MCPClientTLSConfig(verify=True, check_hostname=False)
+
+ ssl_context = create_ssl_context(tls_config, "NoHostnameCheckPlugin")
+
+ assert ssl_context is not None
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ assert ssl_context.check_hostname is False
+
+
+class TestCreateSSLContextClientCertificates:
+ """Tests for SSL context with client certificates (mTLS)."""
+
+ def test_create_ssl_context_with_client_cert(self, tmp_path):
+ """Test creating SSL context with client certificate."""
+ cert_file = tmp_path / "client.crt"
+ key_file = tmp_path / "client.key"
+ cert_file.write_text("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----")
+ key_file.write_text("-----BEGIN PRIVATE KEY-----\ntest\n-----END PRIVATE KEY-----")
+
+ tls_config = MCPClientTLSConfig(certfile=str(cert_file), keyfile=str(key_file), verify=False)
+
+        # Loading the invalid cert will fail, but it confirms the configured cert path is used
+ with pytest.raises(PluginError):
+ create_ssl_context(tls_config, "mTLSPlugin")
+
+ def test_create_ssl_context_with_cert_no_key(self, tmp_path):
+ """Test creating SSL context with cert but no key (should use same file)."""
+ cert_file = tmp_path / "combined.pem"
+ cert_file.write_text("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----")
+
+ tls_config = MCPClientTLSConfig(certfile=str(cert_file), keyfile=None, verify=False)
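+        # With keyfile=None, ssl.SSLContext.load_cert_chain() takes the private key from the cert file itself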
+
+ # Will fail to load the invalid cert
+ with pytest.raises(PluginError):
+ create_ssl_context(tls_config, "CombinedPEMPlugin")
+
+ def test_create_ssl_context_with_encrypted_key(self, tmp_path):
+ """Test creating SSL context with encrypted private key."""
+ cert_file = tmp_path / "client.crt"
+ key_file = tmp_path / "client.key"
+ cert_file.write_text("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----")
+ key_file.write_text("-----BEGIN ENCRYPTED PRIVATE KEY-----\ntest\n-----END ENCRYPTED PRIVATE KEY-----")
+
+ tls_config = MCPClientTLSConfig(certfile=str(cert_file), keyfile=str(key_file), keyfile_password="secret123", verify=False)
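+        # keyfile_password should be forwarded to load_cert_chain()'s password argument for the encrypted key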
+
+ # Will fail to load the invalid cert
+ with pytest.raises(PluginError):
+ create_ssl_context(tls_config, "EncryptedKeyPlugin")
+
+
+class TestCreateSSLContextSecuritySettings:
+ """Tests for SSL context security settings."""
+
+ def test_ssl_context_enforces_tls_1_2_minimum(self):
+ """Test that SSL context enforces TLS 1.2 as minimum version."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "SecurePlugin")
+
+ assert ssl_context.minimum_version == ssl.TLSVersion.TLSv1_2
+ # Ensure weak protocols are not allowed
+ assert ssl_context.minimum_version > ssl.TLSVersion.TLSv1_1
+
+ def test_ssl_context_uses_default_context_security(self):
+ """Test that ssl.create_default_context() security settings are preserved."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "DefaultSecurityPlugin")
+
+ # create_default_context() sets secure defaults
+ # Verify CERT_REQUIRED is set (from create_default_context)
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+
+
+class TestCreateSSLContextErrorHandling:
+ """Tests for error handling in create_ssl_context."""
+
+ def test_create_ssl_context_invalid_ca_bundle(self, tmp_path):
+ """Test that invalid CA bundle content raises PluginError."""
+ # Create a file with invalid certificate content
+ ca_file = tmp_path / "invalid_ca.pem"
+ ca_file.write_text("INVALID CERTIFICATE CONTENT")
+
+ tls_config = MCPClientTLSConfig(ca_bundle=str(ca_file), verify=True)
+
+ with pytest.raises(PluginError) as exc_info:
+ create_ssl_context(tls_config, "InvalidCAPlugin")
+
+ assert "InvalidCAPlugin" in str(exc_info.value)
+ assert "Failed to configure SSL context" in str(exc_info.value)
+
+ def test_create_ssl_context_invalid_client_cert(self, tmp_path):
+ """Test that invalid client certificate content raises PluginError."""
+ # Create files with invalid certificate/key content
+ cert_file = tmp_path / "invalid_cert.pem"
+ key_file = tmp_path / "invalid_key.pem"
+ cert_file.write_text("INVALID CERT")
+ key_file.write_text("INVALID KEY")
+
+ tls_config = MCPClientTLSConfig(certfile=str(cert_file), keyfile=str(key_file), verify=False)
+
+ with pytest.raises(PluginError) as exc_info:
+ create_ssl_context(tls_config, "InvalidCertPlugin")
+
+ assert "InvalidCertPlugin" in str(exc_info.value)
+ assert "Failed to configure SSL context" in str(exc_info.value)
+
+ def test_create_ssl_context_exception_includes_plugin_name(self, tmp_path):
+ """Test that PluginError includes the plugin name in error details."""
+ # Create a file with invalid content
+ ca_file = tmp_path / "bad_ca.pem"
+ ca_file.write_text("BAD CONTENT")
+
+ tls_config = MCPClientTLSConfig(ca_bundle=str(ca_file), verify=True)
+
+ with pytest.raises(PluginError) as exc_info:
+ create_ssl_context(tls_config, "MyTestPlugin")
+
+ error = exc_info.value
+ assert error.error.plugin_name == "MyTestPlugin"
+ assert "MyTestPlugin" in error.error.message
+
+ def test_create_ssl_context_generic_exception_handling(self):
+ """Test that any exception during SSL context creation is caught and wrapped."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ with patch("ssl.create_default_context") as mock_create:
+ mock_create.side_effect = RuntimeError("SSL initialization failed")
+
+ with pytest.raises(PluginError) as exc_info:
+ create_ssl_context(tls_config, "FailingPlugin")
+
+ assert "Failed to configure SSL context" in str(exc_info.value)
+ assert "FailingPlugin" in str(exc_info.value)
+
+
+class TestCreateSSLContextLogging:
+ """Tests for logging in create_ssl_context."""
+
+ def test_create_ssl_context_logs_verification_disabled(self):
+ """Test that disabling verification logs a warning."""
+ tls_config = MCPClientTLSConfig(verify=False)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger") as mock_logger:
+ create_ssl_context(tls_config, "InsecurePlugin")
+
+ # Should log warning about disabled verification
+ assert mock_logger.warning.called
+        warning_calls = mock_logger.warning.call_args_list
+ assert any("verification disabled" in str(call).lower() for call in warning_calls)
+
+ def test_create_ssl_context_logs_hostname_check_disabled(self):
+ """Test that disabling hostname checking logs a warning."""
+ tls_config = MCPClientTLSConfig(verify=True, check_hostname=False)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger") as mock_logger:
+ create_ssl_context(tls_config, "NoHostnamePlugin")
+
+ # Should log warning about disabled hostname verification
+ assert mock_logger.warning.called
+        warning_calls = mock_logger.warning.call_args_list
+ assert any("hostname" in str(call).lower() for call in warning_calls)
+
+ def test_create_ssl_context_logs_mtls_enabled(self, tmp_path):
+ """Test that mTLS configuration is logged."""
+ cert_file = tmp_path / "client.crt"
+ key_file = tmp_path / "client.key"
+ # Create minimal valid-looking PEM files
+ cert_file.write_text("-----BEGIN CERTIFICATE-----\ntest\n-----END CERTIFICATE-----")
+ key_file.write_text("-----BEGIN PRIVATE KEY-----\ntest\n-----END PRIVATE KEY-----")
+
+ tls_config = MCPClientTLSConfig(certfile=str(cert_file), keyfile=str(key_file), verify=False)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger"):
+        # Will fail, but we can check whether debug logging was attempted
+ try:
+ create_ssl_context(tls_config, "mTLSPlugin")
+ except PluginError:
+ pass # Expected to fail with invalid cert
+
+        # Should have attempted to log a debug message about mTLS
+ # (even though it failed)
+
+ def test_create_ssl_context_logs_debug_info(self):
+ """Test that SSL context configuration is logged at debug level."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger") as mock_logger:
+ create_ssl_context(tls_config, "DebugPlugin")
+
+ # Should log debug message with context details
+ assert mock_logger.debug.called
+
+ def test_create_ssl_context_logs_error_on_failure(self, tmp_path):
+ """Test that errors are logged."""
+ # Create a file with invalid content
+ ca_file = tmp_path / "bad.pem"
+ ca_file.write_text("INVALID")
+
+ tls_config = MCPClientTLSConfig(ca_bundle=str(ca_file), verify=True)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger") as mock_logger:
+ with pytest.raises(PluginError):
+ create_ssl_context(tls_config, "ErrorPlugin")
+
+ # Should log error
+ assert mock_logger.error.called
+
+
+class TestCreateSSLContextIntegration:
+ """Integration tests for create_ssl_context."""
+
+ def test_create_ssl_context_production_like_config(self):
+ """Test creating SSL context with production-like configuration."""
+ tls_config = MCPClientTLSConfig(verify=True, check_hostname=True)
+
+ ssl_context = create_ssl_context(tls_config, "ProductionPlugin")
+
+ # Verify all security features are enabled
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ assert ssl_context.check_hostname is True
+ assert ssl_context.minimum_version == ssl.TLSVersion.TLSv1_2
+
+ def test_create_ssl_context_development_config(self):
+ """Test creating SSL context with development/testing configuration."""
+ tls_config = MCPClientTLSConfig(verify=False, check_hostname=False)
+
+ ssl_context = create_ssl_context(tls_config, "DevPlugin")
+
+ # Verify security is relaxed
+ assert ssl_context.verify_mode == ssl.CERT_NONE
+ assert ssl_context.check_hostname is False
+
+ def test_create_ssl_context_mixed_security_config(self):
+ """Test creating SSL context with mixed security settings."""
+ # Verify enabled but hostname check disabled
+ tls_config = MCPClientTLSConfig(verify=True, check_hostname=False)
+
+ ssl_context = create_ssl_context(tls_config, "MixedPlugin")
+
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ assert ssl_context.check_hostname is False
+
+
+class TestCreateSSLContextCompliance:
+ """Tests for SSL context compliance with security standards."""
+
+ def test_ssl_context_meets_tls_requirements(self):
+ """Test that SSL context meets modern TLS requirements."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "CompliancePlugin")
+
+ # Modern security requirements
+ assert ssl_context.minimum_version >= ssl.TLSVersion.TLSv1_2
+ assert ssl_context.verify_mode in [ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL]
+
+ def test_ssl_context_default_is_secure(self):
+ """Test that default SSL context configuration is secure."""
+ tls_config = MCPClientTLSConfig() # All defaults
+
+ ssl_context = create_ssl_context(tls_config, "DefaultPlugin")
+
+ # Defaults should be secure
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ assert ssl_context.check_hostname is True
+ assert ssl_context.minimum_version == ssl.TLSVersion.TLSv1_2
+
+
+class TestCreateSSLContextEdgeCases:
+ """Tests for edge cases in create_ssl_context."""
+
+ def test_create_ssl_context_empty_plugin_name(self):
+ """Test creating SSL context with empty plugin name."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "")
+
+ assert ssl_context is not None
+
+ def test_create_ssl_context_special_chars_in_plugin_name(self):
+ """Test creating SSL context with special characters in plugin name."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "Plugin-Name_123!@#")
+
+ assert ssl_context is not None
+
+ def test_create_ssl_context_unicode_plugin_name(self):
+ """Test creating SSL context with unicode characters in plugin name."""
+ tls_config = MCPClientTLSConfig(verify=True)
+
+ ssl_context = create_ssl_context(tls_config, "プラグイン")
+
+ assert ssl_context is not None
+
+ def test_create_ssl_context_verify_true_hostname_false(self):
+ """Test the combination of verify=True with check_hostname=False."""
+ tls_config = MCPClientTLSConfig(verify=True, check_hostname=False)
+
+ with patch("mcpgateway.plugins.framework.external.mcp.tls_utils.logger") as mock_logger:
+ ssl_context = create_ssl_context(tls_config, "PartialSecurityPlugin")
+
+ # Should warn about hostname verification being disabled
+ assert mock_logger.warning.called
+ # Should still have CERT_REQUIRED
+ assert ssl_context.verify_mode == ssl.CERT_REQUIRED
+ # But hostname check should be disabled
+ assert ssl_context.check_hostname is False
diff --git a/tests/unit/mcpgateway/services/test_a2a_service.py b/tests/unit/mcpgateway/services/test_a2a_service.py
index 34a2e34b2..0b45fe87c 100644
--- a/tests/unit/mcpgateway/services/test_a2a_service.py
+++ b/tests/unit/mcpgateway/services/test_a2a_service.py
@@ -21,6 +21,21 @@
from mcpgateway.schemas import A2AAgentCreate, A2AAgentUpdate
from mcpgateway.services.a2a_service import A2AAgentError, A2AAgentNameConflictError, A2AAgentNotFoundError, A2AAgentService
+
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock structured_logger and audit_trail to prevent database writes during tests."""
+ with patch("mcpgateway.services.a2a_service.structured_logger") as mock_a2a_logger, \
+ patch("mcpgateway.services.tool_service.structured_logger") as mock_tool_logger, \
+ patch("mcpgateway.services.tool_service.audit_trail") as mock_tool_audit:
+ mock_a2a_logger.log = MagicMock(return_value=None)
+ mock_a2a_logger.info = MagicMock(return_value=None)
+ mock_tool_logger.log = MagicMock(return_value=None)
+ mock_tool_logger.info = MagicMock(return_value=None)
+ mock_tool_audit.log_action = MagicMock(return_value=None)
+ yield {"structured_logger": mock_a2a_logger, "tool_logger": mock_tool_logger, "tool_audit": mock_tool_audit}
+
+
class TestA2AAgentService:
"""Test suite for A2A Agent Service."""
diff --git a/tests/unit/mcpgateway/services/test_correlation_id_json_formatter.py b/tests/unit/mcpgateway/services/test_correlation_id_json_formatter.py
new file mode 100644
index 000000000..337e23f27
--- /dev/null
+++ b/tests/unit/mcpgateway/services/test_correlation_id_json_formatter.py
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+"""Tests for correlation ID JSON formatter."""
+
+import json
+import logging
+from datetime import datetime
+from io import StringIO
+from unittest.mock import Mock, patch
+
+import pytest
+
+from mcpgateway.services.logging_service import CorrelationIdJsonFormatter
+from mcpgateway.utils.correlation_id import set_correlation_id, clear_correlation_id
+
+
+@pytest.fixture
+def formatter():
+ """Create a test JSON formatter."""
+ return CorrelationIdJsonFormatter()
+
+
+@pytest.fixture
+def logger_with_formatter(formatter):
+ """Create a test logger with JSON formatter."""
+ logger = logging.getLogger("test_correlation_logger")
+ logger.setLevel(logging.DEBUG)
+ logger.handlers.clear()
+
+ # Add string stream handler
+ stream = StringIO()
+ handler = logging.StreamHandler(stream)
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ return logger, stream
+
+
+def test_formatter_includes_correlation_id(logger_with_formatter):
+ """Test that formatter includes correlation ID in log records."""
+ logger, stream = logger_with_formatter
+
+ # Set correlation ID
+ test_id = "test-correlation-123"
+ set_correlation_id(test_id)
+
+ # Log a message
+ logger.info("Test message")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should include correlation ID
+ assert "request_id" in log_record
+ assert log_record["request_id"] == test_id
+
+ clear_correlation_id()
+
+
+def test_formatter_without_correlation_id(logger_with_formatter):
+ """Test formatter when correlation ID is not set."""
+ logger, stream = logger_with_formatter
+
+ # Clear any existing correlation ID
+ clear_correlation_id()
+
+ # Log a message
+ logger.info("Test message without correlation ID")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # request_id should not be present
+ assert "request_id" not in log_record or log_record.get("request_id") is None
+
+
+def test_formatter_includes_standard_fields(logger_with_formatter):
+ """Test that formatter includes standard log fields."""
+ logger, stream = logger_with_formatter
+
+ # Log a message
+ logger.info("Standard fields test")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Check for standard fields
+ assert "message" in log_record
+ assert log_record["message"] == "Standard fields test"
+ assert "@timestamp" in log_record
+ assert "hostname" in log_record
+ assert "process_id" in log_record
+ # Note: levelname is included by the JsonFormatter format string if specified
+
+
+def test_formatter_includes_opentelemetry_trace_context(logger_with_formatter):
+ """Test that formatter includes OpenTelemetry trace context when available."""
+ logger, stream = logger_with_formatter
+
+ # Mock OpenTelemetry span
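+    # W3C trace context sizes: a 128-bit trace ID and a 64-bit span ID, asserted below as 32/16 lowercase hex digits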
+ mock_span_context = Mock()
+ mock_span_context.trace_id = 0x1234567890abcdef1234567890abcdef
+ mock_span_context.span_id = 0x1234567890abcdef
+ mock_span_context.trace_flags = 0x01
+ mock_span_context.is_valid = True
+
+ mock_span = Mock()
+ mock_span.is_recording.return_value = True
+ mock_span.get_span_context.return_value = mock_span_context
+
+ with patch("mcpgateway.services.logging_service.trace") as mock_trace:
+ mock_trace.get_current_span.return_value = mock_span
+
+ # Log a message
+ logger.info("Test with trace context")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should include trace context
+ assert "trace_id" in log_record
+ assert "span_id" in log_record
+ assert "trace_flags" in log_record
+
+ # Verify hex formatting
+ assert log_record["trace_id"] == "1234567890abcdef1234567890abcdef"
+ assert log_record["span_id"] == "1234567890abcdef"
+ assert log_record["trace_flags"] == "01"
+
+
+def test_formatter_handles_missing_opentelemetry(logger_with_formatter):
+ """Test that formatter gracefully handles missing OpenTelemetry."""
+ logger, stream = logger_with_formatter
+
+ # Simulate ImportError for opentelemetry
+ import sys
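+    # A None value in sys.modules makes "import opentelemetry.trace" raise ImportError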
+ with patch.dict(sys.modules, {"opentelemetry.trace": None}):
+ # Log a message
+ logger.info("Test without OpenTelemetry")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should not fail, just exclude trace fields
+ assert "trace_id" not in log_record
+ assert "span_id" not in log_record
+ assert "message" in log_record
+
+
+def test_formatter_timestamp_format(logger_with_formatter):
+ """Test that timestamp is in ISO 8601 format with 'Z' suffix."""
+ logger, stream = logger_with_formatter
+
+ # Log a message
+ logger.info("Timestamp test")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Check timestamp format
+ assert "@timestamp" in log_record
+ timestamp = log_record["@timestamp"]
+
+ # Should end with 'Z' (Zulu/UTC time)
+ assert timestamp.endswith("Z")
+
+    # Should be parseable as ISO 8601; datetime.fromisoformat() rejects the
+    # "Z" suffix before Python 3.11, so normalize it to an explicit offset
+ datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
+
+
+def test_formatter_with_extra_fields(logger_with_formatter):
+ """Test that formatter includes extra fields from log record."""
+ logger, stream = logger_with_formatter
+
+ # Log with extra fields
+ logger.info("Extra fields test", extra={"user_id": "user-123", "action": "login"})
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should include extra fields
+ assert log_record.get("user_id") == "user-123"
+ assert log_record.get("action") == "login"
+
+
+def test_formatter_correlation_id_with_trace_context(logger_with_formatter):
+ """Test that both correlation ID and trace context coexist."""
+ logger, stream = logger_with_formatter
+
+ # Set correlation ID
+ set_correlation_id("both-test-id")
+
+ # Mock OpenTelemetry span
+ mock_span_context = Mock()
+ mock_span_context.trace_id = 0xabcdef
+ mock_span_context.span_id = 0x123456
+ mock_span_context.trace_flags = 0x01
+ mock_span_context.is_valid = True
+
+ mock_span = Mock()
+ mock_span.is_recording.return_value = True
+ mock_span.get_span_context.return_value = mock_span_context
+
+ with patch("mcpgateway.services.logging_service.trace") as mock_trace:
+ mock_trace.get_current_span.return_value = mock_span
+
+ # Log a message
+ logger.info("Test with both IDs")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should include both correlation ID and trace context
+ assert log_record.get("request_id") == "both-test-id"
+ assert "trace_id" in log_record
+ assert "span_id" in log_record
+
+ clear_correlation_id()
+
+
+def test_formatter_multiple_log_entries(logger_with_formatter):
+ """Test that formatter handles multiple log entries correctly."""
+ logger, stream = logger_with_formatter
+
+ # Log multiple messages with different correlation IDs
+ set_correlation_id("first-id")
+ logger.info("First message")
+
+ set_correlation_id("second-id")
+ logger.info("Second message")
+
+ clear_correlation_id()
+ logger.info("Third message")
+
+ # Get all logged output
+ output = stream.getvalue()
+ log_lines = output.strip().split("\n")
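+    # The StreamHandler emits one JSON object per line, so each line parses independently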
+
+ assert len(log_lines) == 3
+
+ # Parse each line
+ first_record = json.loads(log_lines[0])
+ second_record = json.loads(log_lines[1])
+ third_record = json.loads(log_lines[2])
+
+ # Verify correlation IDs
+ assert first_record.get("request_id") == "first-id"
+ assert second_record.get("request_id") == "second-id"
+ assert "request_id" not in third_record or third_record.get("request_id") is None
+
+
+def test_formatter_process_id_and_hostname(logger_with_formatter):
+ """Test that formatter includes process ID and hostname."""
+ logger, stream = logger_with_formatter
+
+ # Log a message
+ logger.info("Process info test")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Check process_id and hostname
+ assert "process_id" in log_record
+ assert isinstance(log_record["process_id"], int)
+ assert log_record["process_id"] > 0
+
+ assert "hostname" in log_record
+ assert isinstance(log_record["hostname"], str)
+ assert len(log_record["hostname"]) > 0
+
+
+def test_formatter_handles_invalid_span_context(logger_with_formatter):
+ """Test that formatter handles invalid span context gracefully."""
+ logger, stream = logger_with_formatter
+
+ # Mock span with invalid context
+ mock_span_context = Mock()
+ mock_span_context.is_valid = False
+
+ mock_span = Mock()
+ mock_span.is_recording.return_value = True
+ mock_span.get_span_context.return_value = mock_span_context
+
+ with patch("mcpgateway.services.logging_service.trace") as mock_trace:
+ mock_trace.get_current_span.return_value = mock_span
+
+ # Log a message
+ logger.info("Test with invalid span")
+
+ # Get the logged output
+ output = stream.getvalue()
+ log_record = json.loads(output.strip())
+
+ # Should not include trace context when invalid
+ assert "trace_id" not in log_record
+ assert "span_id" not in log_record
+ # But message should still be logged
+ assert log_record["message"] == "Test with invalid span"
diff --git a/tests/unit/mcpgateway/services/test_export_service.py b/tests/unit/mcpgateway/services/test_export_service.py
index 0c60f803b..4f921a140 100644
--- a/tests/unit/mcpgateway/services/test_export_service.py
+++ b/tests/unit/mcpgateway/services/test_export_service.py
@@ -726,7 +726,7 @@ async def test_export_servers_with_data(export_service, mock_db):
mock_server.name = "test_server"
mock_server.description = "Test server"
mock_server.associated_tools = ["tool1", "tool2"]
- mock_server.is_active = True
+ mock_server.enabled = True
mock_server.tags = ["test", "api"]
export_service.server_service.list_servers.return_value = [mock_server]
@@ -803,7 +803,7 @@ async def test_export_resources_with_data(export_service, mock_db):
mock_resource.uri = "file:///workspace/test.txt"
mock_resource.description = "Test resource file"
mock_resource.mime_type = "text/plain"
- mock_resource.is_active = True
+ mock_resource.enabled = True
mock_resource.tags = ["file", "text"]
mock_resource.updated_at = datetime.now(timezone.utc)
diff --git a/tests/unit/mcpgateway/services/test_gateway_service.py b/tests/unit/mcpgateway/services/test_gateway_service.py
index 0a6d4e06f..7e443279b 100644
--- a/tests/unit/mcpgateway/services/test_gateway_service.py
+++ b/tests/unit/mcpgateway/services/test_gateway_service.py
@@ -67,6 +67,16 @@ def _make_execute_result(*, scalar: _R | None = None, scalars_list: list[_R] | N
return result
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock audit_trail and structured_logger to prevent database writes during tests."""
+ with patch("mcpgateway.services.gateway_service.audit_trail") as mock_audit, \
+ patch("mcpgateway.services.gateway_service.structured_logger") as mock_logger:
+ mock_audit.log_action = MagicMock(return_value=None)
+ mock_logger.log = MagicMock(return_value=None)
+ yield {"audit_trail": mock_audit, "structured_logger": mock_logger}
+
+
@pytest.fixture(autouse=True)
def _bypass_gatewayread_validation(monkeypatch):
"""
diff --git a/tests/unit/mcpgateway/services/test_gateway_service_extended.py b/tests/unit/mcpgateway/services/test_gateway_service_extended.py
index 55a2cba62..b1196ad8d 100644
--- a/tests/unit/mcpgateway/services/test_gateway_service_extended.py
+++ b/tests/unit/mcpgateway/services/test_gateway_service_extended.py
@@ -1363,4 +1363,4 @@ async def test_helper_methods_complete_removal_scenario(self):
assert len(prompts_to_remove) == 1
assert tools_to_remove[0].original_name == "old_tool"
assert resources_to_remove[0].uri == "file:///old.txt"
- assert prompts_to_remove[0].name == "old_prompt"
\ No newline at end of file
+ assert prompts_to_remove[0].name == "old_prompt"
diff --git a/tests/unit/mcpgateway/services/test_prompt_service.py b/tests/unit/mcpgateway/services/test_prompt_service.py
index 8282f5c19..dbbcb806a 100644
--- a/tests/unit/mcpgateway/services/test_prompt_service.py
+++ b/tests/unit/mcpgateway/services/test_prompt_service.py
@@ -44,6 +44,16 @@
# ---------------------------------------------------------------------------
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock audit_trail and structured_logger to prevent database writes during tests."""
+ with patch("mcpgateway.services.prompt_service.audit_trail") as mock_audit, \
+ patch("mcpgateway.services.prompt_service.structured_logger") as mock_logger:
+ mock_audit.log_action = MagicMock(return_value=None)
+ mock_logger.log = MagicMock(return_value=None)
+ yield {"audit_trail": mock_audit, "structured_logger": mock_logger}
+
+
@pytest.fixture
def mock_prompt():
"""Create a mock prompt model."""
@@ -225,7 +235,7 @@ async def test_get_prompt_rendered(self, prompt_service, test_db):
db_prompt = _build_db_prompt(template="Hello, {{ name }}!")
test_db.execute = Mock(return_value=_make_execute_result(scalar=db_prompt))
- pr: PromptResult = await prompt_service.get_prompt(test_db, 1, {"name": "Alice"})
+ pr: PromptResult = await prompt_service.get_prompt(test_db, "1", {"name": "Alice"})
assert isinstance(pr, PromptResult)
assert len(pr.messages) == 1
@@ -239,7 +249,7 @@ async def test_get_prompt_not_found(self, prompt_service, test_db):
test_db.execute = Mock(return_value=_make_execute_result(scalar=None))
with pytest.raises(PromptNotFoundError):
- await prompt_service.get_prompt(test_db, 999)
+ await prompt_service.get_prompt(test_db, "999")
@pytest.mark.asyncio
async def test_get_prompt_inactive(self, prompt_service, test_db):
@@ -251,7 +261,7 @@ async def test_get_prompt_inactive(self, prompt_service, test_db):
]
)
with pytest.raises(PromptNotFoundError) as exc_info:
- await prompt_service.get_prompt(test_db, 1)
+ await prompt_service.get_prompt(test_db, "1")
assert "inactive" in str(exc_info.value)
@pytest.mark.asyncio
@@ -260,7 +270,7 @@ async def test_get_prompt_render_error(self, prompt_service, test_db):
test_db.execute = Mock(return_value=_make_execute_result(scalar=db_prompt))
db_prompt.validate_arguments.side_effect = Exception("bad args")
with pytest.raises(PromptError) as exc_info:
- await prompt_service.get_prompt(test_db, 1, {"name": "Alice"})
+ await prompt_service.get_prompt(test_db, "1", {"name": "Alice"})
assert "Failed to process prompt" in str(exc_info.value)
@pytest.mark.asyncio
diff --git a/tests/unit/mcpgateway/services/test_resource_ownership.py b/tests/unit/mcpgateway/services/test_resource_ownership.py
index 6c70cb399..c3e4f82d6 100644
--- a/tests/unit/mcpgateway/services/test_resource_ownership.py
+++ b/tests/unit/mcpgateway/services/test_resource_ownership.py
@@ -26,6 +26,26 @@
from mcpgateway.services.a2a_service import A2AAgentService
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock audit_trail and structured_logger to prevent database writes during tests."""
+ with patch("mcpgateway.services.gateway_service.audit_trail") as mock_gw_audit, \
+ patch("mcpgateway.services.gateway_service.structured_logger") as mock_gw_logger, \
+ patch("mcpgateway.services.tool_service.audit_trail") as mock_tool_audit, \
+ patch("mcpgateway.services.tool_service.structured_logger") as mock_tool_logger, \
+ patch("mcpgateway.services.resource_service.audit_trail") as mock_res_audit, \
+ patch("mcpgateway.services.resource_service.structured_logger") as mock_res_logger, \
+ patch("mcpgateway.services.prompt_service.audit_trail") as mock_prompt_audit, \
+ patch("mcpgateway.services.prompt_service.structured_logger") as mock_prompt_logger, \
+ patch("mcpgateway.services.a2a_service.structured_logger") as mock_a2a_logger:
+ for mock in [mock_gw_audit, mock_tool_audit, mock_res_audit, mock_prompt_audit]:
+ mock.log_action = MagicMock(return_value=None)
+ for mock in [mock_gw_logger, mock_tool_logger, mock_res_logger, mock_prompt_logger, mock_a2a_logger]:
+ mock.log = MagicMock(return_value=None)
+ mock.info = MagicMock(return_value=None)
+ yield
+
+
@pytest.fixture
def mock_db_session():
"""Create a mock database session."""
diff --git a/tests/unit/mcpgateway/services/test_resource_service.py b/tests/unit/mcpgateway/services/test_resource_service.py
index 632b35cc1..2c9cb1517 100644
--- a/tests/unit/mcpgateway/services/test_resource_service.py
+++ b/tests/unit/mcpgateway/services/test_resource_service.py
@@ -37,6 +37,16 @@
# --------------------------------------------------------------------------- #
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock audit_trail and structured_logger to prevent database writes during tests."""
+ with patch("mcpgateway.services.resource_service.audit_trail") as mock_audit, \
+ patch("mcpgateway.services.resource_service.structured_logger") as mock_logger:
+ mock_audit.log_action = MagicMock(return_value=None)
+ mock_logger.log = MagicMock(return_value=None)
+ yield {"audit_trail": mock_audit, "structured_logger": mock_logger}
+
+
@pytest.fixture
def resource_service(monkeypatch):
"""Create a ResourceService instance."""
@@ -1588,7 +1598,7 @@ async def test_read_template_resource_not_found(self):
# One template in cache — but it does NOT match URI
template_obj = ResourceTemplate(
- id=1,
+ id="1",
uriTemplate="file://search/{query}",
name="search_template",
description="Template for performing a file search",
diff --git a/tests/unit/mcpgateway/services/test_resource_service_plugins.py b/tests/unit/mcpgateway/services/test_resource_service_plugins.py
index 33856187f..d609c6a15 100644
--- a/tests/unit/mcpgateway/services/test_resource_service_plugins.py
+++ b/tests/unit/mcpgateway/services/test_resource_service_plugins.py
@@ -177,7 +177,7 @@ async def test_read_resource_with_pre_fetch_hook(self, mock_ssl, resource_servic
mock_ctx = MagicMock()
mock_ssl.return_value = mock_ctx
-
+
# Mock DB row returned by scalar_one_or_none
mock_db_row = MagicMock()
mock_db_row.content = fake_resource_content
@@ -288,7 +288,7 @@ async def test_read_resource_uri_modified_by_plugin(self, mock_ssl, mock_db, res
mock_db_row.content = fake_resource_content
mock_db_row.uri = fake_resource_content.uri
mock_db_row.uri_template = None
-
+
mock_ctx = MagicMock()
mock_ssl.return_value = mock_ctx
@@ -616,7 +616,7 @@ async def test_read_resource_no_request_id(self, mock_ssl,resource_service_with_
mock_ctx = MagicMock()
mock_ssl.return_value = mock_ctx
-
+
# Setup mock resource
mock_resource = MagicMock()
mock_resource.content = ResourceContent(type="resource", id="test://resource", uri="test://resource", text="Test")
diff --git a/tests/unit/mcpgateway/services/test_server_service.py b/tests/unit/mcpgateway/services/test_server_service.py
index 333cae283..27ff9ef0f 100644
--- a/tests/unit/mcpgateway/services/test_server_service.py
+++ b/tests/unit/mcpgateway/services/test_server_service.py
@@ -516,16 +516,16 @@ async def test_update_server(self, server_service, mock_server, test_db, mock_to
side_effect=lambda cls, _id: (
mock_server
if (cls, _id) == (DbServer, 1)
- else None
+ else None
)
)
# FIX: Configure db.execute to handle both the conflict check and the bulk item fetches
mock_db_result = MagicMock()
-
+
# 1. Handle name conflict check: scalar_one_or_none() -> None
mock_db_result.scalar_one_or_none.return_value = None
-
+
# 2. Handle bulk fetches: scalars().all() -> lists of items
# The code executes bulk queries in this order: Tools -> Resources -> Prompts
mock_db_result.scalars.return_value.all.side_effect = [
@@ -533,9 +533,9 @@ async def test_update_server(self, server_service, mock_server, test_db, mock_to
[new_resource], # Second call: select(DbResource)...
[new_prompt] # Third call: select(DbPrompt)...
]
-
+
test_db.execute = Mock(return_value=mock_db_result)
-
+
test_db.commit = Mock()
test_db.refresh = Mock()
@@ -553,7 +553,7 @@ async def test_update_server(self, server_service, mock_server, test_db, mock_to
mock_tools.__iter__ = Mock(return_value=iter(tool_items))
mock_resources.__iter__ = Mock(return_value=iter(resource_items))
mock_prompts.__iter__ = Mock(return_value=iter(prompt_items))
-
+
# Capture assignment to the lists (since the new code does server.tools = list(...))
mock_server.tools = tool_items
mock_server.resources = resource_items
diff --git a/tests/unit/mcpgateway/services/test_tool_service.py b/tests/unit/mcpgateway/services/test_tool_service.py
index 5beeeab27..6f6fe4e90 100644
--- a/tests/unit/mcpgateway/services/test_tool_service.py
+++ b/tests/unit/mcpgateway/services/test_tool_service.py
@@ -36,6 +36,16 @@
from mcpgateway.utils.services_auth import encode_auth
+@pytest.fixture(autouse=True)
+def mock_logging_services():
+ """Mock audit_trail and structured_logger to prevent database writes during tests."""
+ with patch("mcpgateway.services.tool_service.audit_trail") as mock_audit, \
+ patch("mcpgateway.services.tool_service.structured_logger") as mock_logger:
+ mock_audit.log_action = MagicMock(return_value=None)
+ mock_logger.log = MagicMock(return_value=None)
+ yield {"audit_trail": mock_audit, "structured_logger": mock_logger}
+
+
@pytest.fixture
def tool_service():
"""Create a tool service instance."""
@@ -290,7 +300,8 @@ async def test_register_tool(self, tool_service, mock_tool, test_db):
# Verify DB operations
test_db.add.assert_called_once()
test_db.commit.assert_called_once()
- test_db.refresh.assert_called_once()
+        # refresh is called twice: once after the initial commit and once after the logging commit
+ assert test_db.refresh.call_count == 2
# Verify result
assert result.name == "test-gateway-test-tool"
@@ -1875,7 +1886,7 @@ async def test_aggregate_metrics_no_data(self, tool_service):
"avg_response_time": None,
"last_execution_time": None,
}
-
+
# Verify optimization
assert mock_db.execute.call_count == 1
@@ -1987,7 +1998,7 @@ async def test_get_top_tools(self, tool_service, test_db):
with patch("mcpgateway.services.tool_service.build_top_performers") as mock_build:
mock_build.return_value = ["top_performer1", "top_performer2"]
-
+
# Run the method
result = await tool_service.get_top_tools(test_db, limit=5)
@@ -1996,7 +2007,7 @@ async def test_get_top_tools(self, tool_service, test_db):
# Assert build_top_performers was called with the mock results
mock_build.assert_called_once_with(mock_results)
-
+
# Verify that the execute method was called once
test_db.execute.assert_called_once()
diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py
index 21b5791f1..5eb16a540 100644
--- a/tests/unit/mcpgateway/test_admin.py
+++ b/tests/unit/mcpgateway/test_admin.py
@@ -804,17 +804,19 @@ async def test_admin_list_resources_with_complex_data(self, mock_list_resources,
@patch.object(ResourceService, "get_resource_by_id")
@patch.object(ResourceService, "read_resource")
async def test_admin_get_resource_with_read_error(self, mock_read_resource, mock_get_resource, mock_db):
- """Test getting resource when content read fails."""
- # Resource exists
+ """Test: read_resource should not be called at all."""
+
mock_resource = MagicMock()
mock_resource.model_dump.return_value = {"id": 1, "uri": "/test/resource"}
mock_get_resource.return_value = mock_resource
- # But reading content fails
mock_read_resource.side_effect = IOError("Cannot read resource content")
- with pytest.raises(IOError):
- await admin_get_resource("1", mock_db, "test-user")
+ result = await admin_get_resource("1", mock_db, "test-user")
+
+ assert result["resource"]["id"] == 1
+ mock_read_resource.assert_not_called()
+
@patch.object(ResourceService, "register_resource")
async def test_admin_add_resource_with_valid_mime_type(self, mock_register_resource, mock_request, mock_db):
@@ -1357,7 +1359,8 @@ async def test_admin_test_gateway_various_methods(self):
mock_client_class.return_value = mock_client
- result = await admin_test_gateway(request, "test-user")
+ mock_db = MagicMock()
+ result = await admin_test_gateway(request, None, "test-user", mock_db)
assert result.status_code == 200
mock_client.request.assert_called_once()
@@ -1396,7 +1399,8 @@ async def test_admin_test_gateway_url_construction(self):
mock_client_class.return_value = mock_client
- await admin_test_gateway(request, "test-user")
+ mock_db = MagicMock()
+ await admin_test_gateway(request, None, "test-user", mock_db)
call_args = mock_client.request.call_args
assert call_args[1]["url"] == expected_url
@@ -1422,7 +1426,8 @@ async def test_admin_test_gateway_timeout_handling(self):
mock_client_class.return_value = mock_client
- result = await admin_test_gateway(request, "test-user")
+ mock_db = MagicMock()
+ result = await admin_test_gateway(request, None, "test-user", mock_db)
assert result.status_code == 502
assert "Request timed out" in str(result.body)
@@ -1459,7 +1464,8 @@ async def test_admin_test_gateway_non_json_response(self):
mock_client_class.return_value = mock_client
- result = await admin_test_gateway(request, "test-user")
+ mock_db = MagicMock()
+ result = await admin_test_gateway(request, None, "test-user", mock_db)
assert result.status_code == 200
assert result.body["details"] == response_text
diff --git a/tests/unit/mcpgateway/test_main.py b/tests/unit/mcpgateway/test_main.py
index 94e0beb80..fabf401bc 100644
--- a/tests/unit/mcpgateway/test_main.py
+++ b/tests/unit/mcpgateway/test_main.py
@@ -193,13 +193,20 @@ def test_client(app):
# Patch the auth function used by DocsAuthMiddleware
# Standard
- from unittest.mock import patch
+ from unittest.mock import MagicMock, patch
# Third-Party
from fastapi import HTTPException, status
# First-Party
+ # Mock security_logger to prevent database access
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
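+    # Patch the name where it is looked up (the middleware module), per standard unittest.mock practice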
+ sec_patcher = patch("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger)
+ sec_patcher.start()
+
# Create a mock that validates JWT tokens properly
async def mock_require_auth_override(auth_header=None, jwt_token=None):
# Third-Party
@@ -270,6 +277,7 @@ async def mock_check_permission(self, user_email: str, permission: str, resource
app.dependency_overrides.pop(get_current_user, None)
app.dependency_overrides.pop(get_current_user_with_permissions, None)
patcher.stop() # Stop the require_auth_override patch
+ sec_patcher.stop() # Stop the security_logger patch
if hasattr(PermissionService, "_original_check_permission"):
PermissionService.check_permission = PermissionService._original_check_permission
diff --git a/tests/unit/mcpgateway/test_main_extended.py b/tests/unit/mcpgateway/test_main_extended.py
index ceb40f763..2079b692d 100644
--- a/tests/unit/mcpgateway/test_main_extended.py
+++ b/tests/unit/mcpgateway/test_main_extended.py
@@ -324,7 +324,7 @@ def test_server_toggle_edge_cases(self, test_client, auth_headers):
def test_client(app):
"""Test client with auth override for testing protected endpoints."""
# Standard
- from unittest.mock import patch
+ from unittest.mock import MagicMock, patch
# First-Party
from mcpgateway.auth import get_current_user
@@ -341,6 +341,13 @@ def test_client(app):
auth_provider="test",
)
+ # Mock security_logger to prevent database access
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
+ sec_patcher = patch("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger)
+ sec_patcher.start()
+
# Mock require_auth_override function
def mock_require_auth_override(user: str) -> str:
return user
@@ -390,6 +397,7 @@ async def mock_check_permission(
app.dependency_overrides.pop(get_current_user, None)
app.dependency_overrides.pop(get_current_user_with_permissions, None)
patcher.stop() # Stop the require_auth_override patch
+ sec_patcher.stop() # Stop the security_logger patch
if hasattr(PermissionService, "_original_check_permission"):
PermissionService.check_permission = PermissionService._original_check_permission
diff --git a/tests/unit/mcpgateway/test_oauth_manager.py b/tests/unit/mcpgateway/test_oauth_manager.py
index 3431119a6..01bc68614 100644
--- a/tests/unit/mcpgateway/test_oauth_manager.py
+++ b/tests/unit/mcpgateway/test_oauth_manager.py
@@ -2168,7 +2168,7 @@ def test_is_token_expired_no_expires_at(self):
result = service._is_token_expired(token_record)
- assert result is True
+ assert result is False
def test_is_token_expired_past_expiry(self):
"""Test _is_token_expired with past expiration."""
diff --git a/tests/unit/mcpgateway/test_translate_stdio_endpoint.py b/tests/unit/mcpgateway/test_translate_stdio_endpoint.py
index 708d605ed..a2464b33d 100644
--- a/tests/unit/mcpgateway/test_translate_stdio_endpoint.py
+++ b/tests/unit/mcpgateway/test_translate_stdio_endpoint.py
@@ -315,7 +315,7 @@ async def test_empty_env_vars(self, echo_script):
await endpoint.send("hello world\n")
# Wait for response
- await asyncio.sleep(0.1)
+ await asyncio.sleep(0.5)
# Check that process was started
assert endpoint._proc is not None
diff --git a/tests/unit/mcpgateway/tools/__init__.py b/tests/unit/mcpgateway/tools/__init__.py
new file mode 100644
index 000000000..eee1aa024
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/__init__.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+"""
diff --git a/tests/unit/mcpgateway/tools/builder/__init__.py b/tests/unit/mcpgateway/tools/builder/__init__.py
new file mode 100644
index 000000000..e63d648ed
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/__init__.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+"""
diff --git a/tests/unit/mcpgateway/tools/builder/test_cli.py b/tests/unit/mcpgateway/tools/builder/test_cli.py
new file mode 100644
index 000000000..5328f03c3
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/test_cli.py
@@ -0,0 +1,509 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/test_cli.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Unit tests for builder CLI commands.
+"""
+
+# Standard
+import os
+from pathlib import Path
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
+
+# Third-Party
+import pytest
+import typer
+from typer.testing import CliRunner
+
+# First-Party
+from mcpgateway.tools.builder.cli import app, main
+
+
+@pytest.fixture
+def runner():
+ """Create CLI test runner."""
+ return CliRunner()
+
+
+@pytest.fixture
+def mock_deployer():
+ """Create mock deployer instance."""
+ deployer = MagicMock()
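+    # Synchronous methods use MagicMock; operations the CLI awaits use AsyncMock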
+ deployer.validate = MagicMock()
+ deployer.build = AsyncMock()
+ deployer.generate_certificates = AsyncMock()
+ deployer.deploy = AsyncMock()
+ deployer.verify = AsyncMock()
+ deployer.destroy = AsyncMock()
+ deployer.generate_manifests = MagicMock(return_value=Path("/tmp/manifests"))
+ return deployer
+
+
+class TestCLICallback:
+ """Test CLI callback initialization."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_cli_callback_default(self, mock_factory, runner):
+ """Test CLI callback with default options (Python mode by default)."""
+ mock_deployer = MagicMock()
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["--help"])
+ assert result.exit_code == 0
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_cli_callback_verbose(self, mock_factory, runner):
+ """Test CLI callback with verbose flag (Python mode by default)."""
+ mock_deployer = MagicMock()
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["--verbose", "--help"])
+ assert result.exit_code == 0
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_cli_callback_with_dagger(self, mock_factory, runner, tmp_path):
+ """Test CLI callback with --dagger flag (opt-in)."""
+ mock_deployer = MagicMock()
+ mock_deployer.validate = MagicMock()
+ mock_factory.return_value = (mock_deployer, "dagger")
+
+ config_file = tmp_path / "test-config.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ # Use validate command which invokes the callback
+ result = runner.invoke(app, ["--dagger", "validate", str(config_file)])
+ assert result.exit_code == 0
+ # Verify dagger mode was requested
+ mock_factory.assert_called_once_with("dagger", False)
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_cli_callback_default_python(self, mock_factory, runner, tmp_path):
+ """Test CLI callback defaults to Python mode."""
+ mock_deployer = MagicMock()
+ mock_deployer.validate = MagicMock()
+ mock_factory.return_value = (mock_deployer, "python")
+
+ config_file = tmp_path / "test-config.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ # Use validate command without --dagger flag to test default
+ result = runner.invoke(app, ["validate", str(config_file)])
+ assert result.exit_code == 0
+ # Verify python mode was requested (default)
+ mock_factory.assert_called_once_with("python", False)
+
+
+class TestValidateCommand:
+ """Test validate command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_validate_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful configuration validation."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.validate.return_value = None
+
+ result = runner.invoke(app, ["validate", str(config_file)])
+ assert result.exit_code == 0
+ assert "Configuration valid" in result.stdout
+ mock_deployer.validate.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_validate_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test validation failure."""
+ config_file = tmp_path / "invalid-config.yaml"
+ config_file.write_text("invalid: yaml\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.validate.side_effect = ValueError("Invalid configuration")
+
+ result = runner.invoke(app, ["validate", str(config_file)])
+ assert result.exit_code == 1
+ assert "Validation failed" in result.stdout
+
+
+class TestBuildCommand:
+ """Test build command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_build_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful build."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("gateway:\n image: test:latest\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["build", str(config_file)])
+ assert result.exit_code == 0
+ assert "Build complete" in result.stdout
+ mock_deployer.build.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_build_plugins_only(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test building only plugins."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("plugins:\n - name: TestPlugin\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["build", str(config_file), "--plugins-only"])
+ assert result.exit_code == 0
+ # Verify plugins_only flag was passed
+ call_kwargs = mock_deployer.build.call_args[1]
+ assert call_kwargs["plugins_only"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_build_specific_plugins(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test building specific plugins."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("plugins:\n - name: Plugin1\n - name: Plugin2\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(
+ app, ["build", str(config_file), "--plugin", "Plugin1", "--plugin", "Plugin2"]
+ )
+ assert result.exit_code == 0
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_build_no_cache(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test building with --no-cache flag."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("gateway:\n image: test:latest\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["build", str(config_file), "--no-cache"])
+ assert result.exit_code == 0
+ call_kwargs = mock_deployer.build.call_args[1]
+ assert call_kwargs["no_cache"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_build_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test build failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("gateway:\n image: test:latest\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.build.side_effect = RuntimeError("Build failed")
+
+ result = runner.invoke(app, ["build", str(config_file)])
+ assert result.exit_code == 1
+ assert "Build failed" in result.stdout
+
+
+class TestCertsCommand:
+ """Test certs command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_certs_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful certificate generation."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("plugins:\n - name: TestPlugin\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["certs", str(config_file)])
+ assert result.exit_code == 0
+ assert "Certificates generated" in result.stdout
+ mock_deployer.generate_certificates.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_certs_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test certificate generation failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("plugins:\n - name: TestPlugin\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.generate_certificates.side_effect = RuntimeError("Cert generation failed")
+
+ result = runner.invoke(app, ["certs", str(config_file)])
+ assert result.exit_code == 1
+ assert "Certificate generation failed" in result.stdout
+
+
+class TestDeployCommand:
+ """Test deploy command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful deployment."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["deploy", str(config_file)])
+ assert result.exit_code == 0
+ assert "Deployment complete" in result.stdout
+ mock_deployer.deploy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_dry_run(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test dry-run deployment."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["deploy", str(config_file), "--dry-run"])
+ assert result.exit_code == 0
+ assert "Dry-run complete" in result.stdout
+ call_kwargs = mock_deployer.deploy.call_args[1]
+ assert call_kwargs["dry_run"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_skip_build(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test deployment with --skip-build."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["deploy", str(config_file), "--skip-build"])
+ assert result.exit_code == 0
+ call_kwargs = mock_deployer.deploy.call_args[1]
+ assert call_kwargs["skip_build"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_skip_certs(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test deployment with --skip-certs."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["deploy", str(config_file), "--skip-certs"])
+ assert result.exit_code == 0
+ call_kwargs = mock_deployer.deploy.call_args[1]
+ assert call_kwargs["skip_certs"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_custom_output_dir(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test deployment with custom output directory."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+ output_dir = tmp_path / "custom-output"
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["deploy", str(config_file), "--output-dir", str(output_dir)])
+ assert result.exit_code == 0
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_deploy_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test deployment failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.deploy.side_effect = RuntimeError("Deployment failed")
+
+ result = runner.invoke(app, ["deploy", str(config_file)])
+ assert result.exit_code == 1
+ assert "Deployment failed" in result.stdout
+
+
+class TestVerifyCommand:
+ """Test verify command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_verify_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful deployment verification."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["verify", str(config_file)])
+ assert result.exit_code == 0
+ assert "Deployment healthy" in result.stdout
+ mock_deployer.verify.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_verify_with_wait(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test verification with default wait behavior (wait=True by default)."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ # Default wait is True, so just run verify without any flags
+ result = runner.invoke(app, ["verify", str(config_file)])
+ assert result.exit_code == 0
+ call_kwargs = mock_deployer.verify.call_args[1]
+ assert call_kwargs["wait"] is True
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_verify_with_timeout(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test verification with custom timeout."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["verify", str(config_file), "--timeout", "600"])
+ assert result.exit_code == 0
+ call_kwargs = mock_deployer.verify.call_args[1]
+ assert call_kwargs["timeout"] == 600
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_verify_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test verification failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.verify.side_effect = RuntimeError("Verification failed")
+
+ result = runner.invoke(app, ["verify", str(config_file)])
+ assert result.exit_code == 1
+ assert "Verification failed" in result.stdout
+
+
+class TestDestroyCommand:
+ """Test destroy command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_destroy_with_force(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test destroy with --force flag."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["destroy", str(config_file), "--force"])
+ assert result.exit_code == 0
+ assert "Deployment destroyed" in result.stdout
+ mock_deployer.destroy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_destroy_with_confirmation(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test destroy with user confirmation."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+        # Simulate the user confirming with "y"
+ result = runner.invoke(app, ["destroy", str(config_file)], input="y\n")
+ assert result.exit_code == 0
+ assert "Deployment destroyed" in result.stdout
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_destroy_abort(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test aborting destroy command."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+        # Simulate the user declining with "n"
+        result = runner.invoke(app, ["destroy", str(config_file)], input="n\n")
+        assert result.exit_code != 0
+        assert "Aborted" in result.stdout
+        mock_deployer.destroy.assert_not_called()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_destroy_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test destroy failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.destroy.side_effect = RuntimeError("Destruction failed")
+
+ result = runner.invoke(app, ["destroy", str(config_file), "--force"])
+ assert result.exit_code == 1
+ assert "Destruction failed" in result.stdout
+
+
+class TestGenerateCommand:
+ """Test generate command."""
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_generate_success(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test successful manifest generation."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["generate", str(config_file)])
+ assert result.exit_code == 0
+ assert "Manifests generated" in result.stdout
+ mock_deployer.generate_manifests.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_generate_with_output_dir(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test manifest generation with custom output directory."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+ output_dir = tmp_path / "custom-manifests"
+
+ mock_factory.return_value = (mock_deployer, "python")
+
+ result = runner.invoke(app, ["generate", str(config_file), "--output", str(output_dir)])
+ assert result.exit_code == 0
+
+ @patch("mcpgateway.tools.builder.cli.DeployFactory.create_deployer")
+ def test_generate_failure(self, mock_factory, runner, tmp_path, mock_deployer):
+ """Test manifest generation failure."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_file.write_text("deployment:\n type: compose\n")
+
+ mock_factory.return_value = (mock_deployer, "python")
+ mock_deployer.generate_manifests.side_effect = ValueError("Generation failed")
+
+ result = runner.invoke(app, ["generate", str(config_file)])
+ assert result.exit_code == 1
+ assert "Manifest generation failed" in result.stdout
+
+
+class TestVersionCommand:
+ """Test version command."""
+
+ def test_version(self, runner):
+ """Test version command."""
+ result = runner.invoke(app, ["version"])
+ assert result.exit_code == 0
+ assert "MCP Deploy" in result.stdout
+ assert "Version" in result.stdout
+
+
+class TestMainFunction:
+ """Test main entry point."""
+
+ @patch("mcpgateway.tools.builder.cli.app")
+ def test_main_success(self, mock_app):
+ """Test successful main execution."""
+ mock_app.return_value = None
+ main()
+ mock_app.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.cli.app")
+ def test_main_keyboard_interrupt(self, mock_app):
+ """Test main with keyboard interrupt."""
+ mock_app.side_effect = KeyboardInterrupt()
+ with pytest.raises(SystemExit) as exc_info:
+ main()
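+        # 130 follows the shell convention of 128 + signal number (SIGINT = 2)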
+ assert exc_info.value.code == 130
+
+ @patch("mcpgateway.tools.builder.cli.app")
+ def test_main_exception_no_debug(self, mock_app):
+ """Test main with exception (no debug mode)."""
+ mock_app.side_effect = RuntimeError("Test error")
+ with pytest.raises(SystemExit) as exc_info:
+ main()
+ assert exc_info.value.code == 1
+
+ @patch("mcpgateway.tools.builder.cli.app")
+ @patch.dict(os.environ, {"MCP_DEBUG": "1"})
+ def test_main_exception_debug_mode(self, mock_app):
+ """Test main with exception (debug mode enabled)."""
+ mock_app.side_effect = RuntimeError("Test error")
+ with pytest.raises(RuntimeError, match="Test error"):
+ main()
diff --git a/tests/unit/mcpgateway/tools/builder/test_common.py b/tests/unit/mcpgateway/tools/builder/test_common.py
new file mode 100644
index 000000000..fdfc26036
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/test_common.py
@@ -0,0 +1,994 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/test_common.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Unit tests for builder common utilities.
+"""
+
+# Standard
+import os
+from pathlib import Path
+import shutil
+import subprocess
+from unittest.mock import MagicMock, Mock, patch
+
+# Third-Party
+import pytest
+import yaml
+
+# First-Party
+from mcpgateway.tools.builder.common import (
+ copy_env_template,
+ deploy_compose,
+ deploy_kubernetes,
+ destroy_compose,
+ destroy_kubernetes,
+ generate_compose_manifests,
+ generate_kubernetes_manifests,
+ generate_plugin_config,
+ get_deploy_dir,
+ get_docker_compose_command,
+ load_config,
+ run_compose,
+ verify_compose,
+ verify_kubernetes,
+)
+from mcpgateway.tools.builder.schema import MCPStackConfig
+
+
+class TestGetDeployDir:
+ """Test get_deploy_dir function."""
+
+ def test_default_deploy_dir(self):
+ """Test default deploy directory."""
+ with patch.dict(os.environ, {}, clear=True):
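+            # clear=True prevents a stray MCP_DEPLOY_DIR in the host environment from leaking in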
+ result = get_deploy_dir()
+ assert result == Path("./deploy")
+
+ def test_custom_deploy_dir(self):
+ """Test custom deploy directory from environment variable."""
+ with patch.dict(os.environ, {"MCP_DEPLOY_DIR": "/custom/deploy"}):
+ result = get_deploy_dir()
+ assert result == Path("/custom/deploy")
+
+
+class TestLoadConfig:
+ """Test load_config function."""
+
+ def test_load_valid_config(self, tmp_path):
+ """Test loading valid YAML configuration."""
+ config_file = tmp_path / "mcp-stack.yaml"
+ config_data = {
+ "deployment": {"type": "compose", "project_name": "test"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ }
+ config_file.write_text(yaml.dump(config_data))
+
+ result = load_config(str(config_file))
+ assert result.deployment.type == "compose"
+ assert result.gateway.image == "mcpgateway:latest"
+
+ def test_load_nonexistent_config(self):
+ """Test loading non-existent configuration file."""
+ with pytest.raises(FileNotFoundError, match="Configuration file not found"):
+ load_config("/nonexistent/config.yaml")
+
+
+class TestGeneratePluginConfig:
+ """Test generate_plugin_config function."""
+
+ @patch("mcpgateway.tools.builder.common.Environment")
+ def test_generate_plugin_config_compose(self, mock_env_class, tmp_path):
+ """Test generating plugin config for Docker Compose deployment."""
+ # Setup mock template
+ mock_template = MagicMock()
+ mock_template.render.return_value = "plugins:\n - name: TestPlugin\n"
+ mock_env = MagicMock()
+ mock_env.get_template.return_value = mock_template
+ mock_env_class.return_value = mock_env
+
+ # Create fake template directory
+ template_dir = tmp_path / "templates"
+ template_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "gateway": {"image": "mcpgateway:latest"},
+ "deployment": {"type": "compose"},
+ "plugins": [
+ {"name": "TestPlugin", "port": 8000, "mtls_enabled": True, "repo": "https://github.com/test/plugin.git"}
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path") as mock_path:
+ mock_path.return_value.__truediv__.return_value = template_dir
+ output_dir = tmp_path / "output"
+ output_dir.mkdir()
+
+ result = generate_plugin_config(config, output_dir)
+
+ # Verify template was called
+ mock_env.get_template.assert_called_once_with("plugins-config.yaml.j2")
+ assert result == output_dir / "plugins-config.yaml"
+
+ @patch("mcpgateway.tools.builder.common.Environment")
+ def test_generate_plugin_config_kubernetes(self, mock_env_class, tmp_path):
+ """Test generating plugin config for Kubernetes deployment."""
+ # Setup mock template
+ mock_template = MagicMock()
+ mock_template.render.return_value = "plugins:\n - name: TestPlugin\n"
+ mock_env = MagicMock()
+ mock_env.get_template.return_value = mock_template
+ mock_env_class.return_value = mock_env
+
+ # Create fake template directory
+ template_dir = tmp_path / "templates"
+ template_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "gateway": {"image": "mcpgateway:latest"},
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"},
+ "plugins": [
+ {"name": "TestPlugin", "port": 8000, "mtls_enabled": False, "repo": "https://github.com/test/plugin1.git"}
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path") as mock_path:
+ mock_path.return_value.__truediv__.return_value = template_dir
+ output_dir = tmp_path / "output"
+ output_dir.mkdir()
+
+ result = generate_plugin_config(config, output_dir)
+
+ # Verify template was called
+ assert mock_env.get_template.called
+ assert result == output_dir / "plugins-config.yaml"
+
+ @patch("mcpgateway.tools.builder.common.Environment")
+ def test_generate_plugin_config_with_overrides(self, mock_env_class, tmp_path):
+ """Test generating plugin config with plugin_overrides."""
+ # Setup mock template
+ mock_template = MagicMock()
+ mock_template.render.return_value = "plugins:\n - name: TestPlugin\n"
+ mock_env = MagicMock()
+ mock_env.get_template.return_value = mock_template
+ mock_env_class.return_value = mock_env
+
+ # Create fake template directory
+ template_dir = tmp_path / "templates"
+ template_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [
+ {
+ "name": "TestPlugin",
+ "port": 8000,
+ "plugin_overrides": {
+ "priority": 10,
+ "mode": "enforce",
+ "tags": ["security"],
+ },
+ "repo": "https://github.com/test/plugin1.git"
+ }
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path") as mock_path:
+ mock_path.return_value.__truediv__.return_value = template_dir
+ output_dir = tmp_path / "output"
+ output_dir.mkdir()
+
+ result = generate_plugin_config(config, output_dir)
+ assert result == output_dir / "plugins-config.yaml"
+
+
+class TestCopyEnvTemplate:
+ """Test copy_env_template function."""
+
+ def test_copy_env_template_success(self, tmp_path):
+ """Test successful copying of .env.template."""
+ # Create plugin build dir with .env.template
+ plugin_dir = tmp_path / "plugin"
+ plugin_dir.mkdir()
+ template_file = plugin_dir / ".env.template"
+ template_file.write_text("TEST_VAR=value\n")
+
+ # Setup deploy dir
+ deploy_dir = tmp_path / "deploy"
+
+ with patch("mcpgateway.tools.builder.common.get_deploy_dir", return_value=deploy_dir):
+ copy_env_template("TestPlugin", plugin_dir)
+
+ target_file = deploy_dir / "env" / ".env.TestPlugin"
+ assert target_file.exists()
+ assert target_file.read_text() == "TEST_VAR=value\n"
+
+ def test_copy_env_template_no_template(self, tmp_path):
+ """Test when .env.template doesn't exist."""
+ plugin_dir = tmp_path / "plugin"
+ plugin_dir.mkdir()
+
+ deploy_dir = tmp_path / "deploy"
+
+ with patch("mcpgateway.tools.builder.common.get_deploy_dir", return_value=deploy_dir):
+ # Should not raise error, just skip
+ copy_env_template("TestPlugin", plugin_dir, verbose=True)
+
+ def test_copy_env_template_target_exists(self, tmp_path):
+ """Test when target file already exists."""
+ # Create plugin build dir with .env.template
+ plugin_dir = tmp_path / "plugin"
+ plugin_dir.mkdir()
+ template_file = plugin_dir / ".env.template"
+ template_file.write_text("NEW_VAR=newvalue\n")
+
+ # Setup deploy dir with existing target
+ deploy_dir = tmp_path / "deploy"
+ deploy_dir.mkdir()
+ env_dir = deploy_dir / "env"
+ env_dir.mkdir()
+ target_file = env_dir / ".env.TestPlugin"
+ target_file.write_text("OLD_VAR=oldvalue\n")
+
+ with patch("mcpgateway.tools.builder.common.get_deploy_dir", return_value=deploy_dir):
+ copy_env_template("TestPlugin", plugin_dir)
+
+ # Should not overwrite
+ assert target_file.read_text() == "OLD_VAR=oldvalue\n"
+
+
+class TestGetDockerComposeCommand:
+ """Test get_docker_compose_command function."""
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_docker_compose_plugin(self, mock_run, mock_which):
+ """Test detecting docker compose plugin."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_run.return_value = Mock(returncode=0)
+
+ result = get_docker_compose_command()
+ assert result == ["docker", "compose"]
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_docker_compose_standalone(self, mock_run, mock_which):
+ """Test detecting standalone docker-compose."""
+
+ def which_side_effect(cmd):
+ if cmd == "docker":
+ return "/usr/bin/docker"
+ elif cmd == "docker-compose":
+ return "/usr/bin/docker-compose"
+ return None
+
+ mock_which.side_effect = which_side_effect
+ mock_run.side_effect = subprocess.CalledProcessError(1, "cmd")
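+        # Make the `docker compose` plugin probe fail so detection falls back
+        # to the standalone docker-compose binary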
+
+ result = get_docker_compose_command()
+ assert result == ["docker-compose"]
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ def test_docker_compose_not_found(self, mock_which):
+ """Test when docker compose is not available."""
+ mock_which.return_value = None
+
+ with pytest.raises(RuntimeError, match="Docker Compose not found"):
+ get_docker_compose_command()
+
+
+class TestRunCompose:
+ """Test run_compose function."""
+
+ @patch("mcpgateway.tools.builder.common.get_docker_compose_command")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_run_compose_success(self, mock_run, mock_get_cmd, tmp_path):
+ """Test successful compose command execution."""
+ compose_file = tmp_path / "docker-compose.yaml"
+ compose_file.write_text("services:\n test: {}\n")
+
+ mock_get_cmd.return_value = ["docker", "compose"]
+ mock_run.return_value = Mock(returncode=0, stdout="Success", stderr="")
+
+ result = run_compose(compose_file, ["ps"])
+ assert result.returncode == 0
+ mock_run.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.common.get_docker_compose_command")
+ def test_run_compose_file_not_found(self, mock_get_cmd, tmp_path):
+ """Test run_compose with non-existent file."""
+ compose_file = tmp_path / "nonexistent.yaml"
+ mock_get_cmd.return_value = ["docker", "compose"]
+
+ with pytest.raises(FileNotFoundError, match="Compose file not found"):
+ run_compose(compose_file, ["ps"])
+
+ @patch("mcpgateway.tools.builder.common.get_docker_compose_command")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_run_compose_command_failure(self, mock_run, mock_get_cmd, tmp_path):
+ """Test run_compose command failure."""
+ compose_file = tmp_path / "docker-compose.yaml"
+ compose_file.write_text("services:\n test: {}\n")
+
+ mock_get_cmd.return_value = ["docker", "compose"]
+ mock_run.side_effect = subprocess.CalledProcessError(
+ 1, "cmd", output="", stderr="Error"
+ )
+
+ with pytest.raises(RuntimeError, match="Docker Compose failed"):
+ run_compose(compose_file, ["up", "-d"])
+
+
+class TestDeployCompose:
+ """Test deploy_compose function."""
+
+ @patch("mcpgateway.tools.builder.common.run_compose")
+ def test_deploy_compose_success(self, mock_run, tmp_path):
+ """Test successful Docker Compose deployment."""
+ compose_file = tmp_path / "docker-compose.yaml"
+ mock_run.return_value = Mock(stdout="Deployed", stderr="")
+
+ deploy_compose(compose_file)
+ mock_run.assert_called_once_with(compose_file, ["up", "-d"], verbose=False)
+
+
+class TestVerifyCompose:
+ """Test verify_compose function."""
+
+ @patch("mcpgateway.tools.builder.common.run_compose")
+ def test_verify_compose(self, mock_run, tmp_path):
+ """Test verifying Docker Compose deployment."""
+ compose_file = tmp_path / "docker-compose.yaml"
+ mock_run.return_value = Mock(stdout="test-service running", stderr="")
+
+ result = verify_compose(compose_file)
+ assert "test-service running" in result
+ mock_run.assert_called_once_with(compose_file, ["ps"], verbose=False, check=False)
+
+
+class TestDestroyCompose:
+ """Test destroy_compose function."""
+
+ @patch("mcpgateway.tools.builder.common.run_compose")
+ def test_destroy_compose_success(self, mock_run, tmp_path):
+ """Test successful Docker Compose destruction."""
+ compose_file = tmp_path / "docker-compose.yaml"
+ compose_file.write_text("services:\n test: {}\n")
+ mock_run.return_value = Mock(stdout="Removed", stderr="")
+
+ destroy_compose(compose_file)
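+        # "down -v" tears down containers and removes named volumes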
+ mock_run.assert_called_once_with(compose_file, ["down", "-v"], verbose=False)
+
+ def test_destroy_compose_file_not_found(self, tmp_path):
+ """Test destroying with non-existent compose file."""
+ compose_file = tmp_path / "nonexistent.yaml"
+
+ # Should not raise error, just print warning
+ destroy_compose(compose_file)
+
+
+class TestDeployKubernetes:
+ """Test deploy_kubernetes function."""
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_deploy_kubernetes_success(self, mock_run, mock_which, tmp_path):
+ """Test successful Kubernetes deployment."""
+ mock_which.return_value = "/usr/bin/kubectl"
+ mock_run.return_value = Mock(returncode=0, stdout="created", stderr="")
+
+ manifests_dir = tmp_path / "manifests"
+ manifests_dir.mkdir()
+ (manifests_dir / "gateway-deployment.yaml").write_text("apiVersion: v1\n")
+ (manifests_dir / "plugins-config.yaml").write_text("plugins: []\n")
+
+ deploy_kubernetes(manifests_dir)
+ assert mock_run.called
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ def test_deploy_kubernetes_kubectl_not_found(self, mock_which, tmp_path):
+ """Test deployment when kubectl is not available."""
+ mock_which.return_value = None
+ manifests_dir = tmp_path / "manifests"
+
+ with pytest.raises(RuntimeError, match="kubectl not found"):
+ deploy_kubernetes(manifests_dir)
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_deploy_kubernetes_with_certs(self, mock_run, mock_which, tmp_path):
+ """Test Kubernetes deployment with certificate secrets."""
+ mock_which.return_value = "/usr/bin/kubectl"
+ mock_run.return_value = Mock(returncode=0, stdout="created", stderr="")
+
+ manifests_dir = tmp_path / "manifests"
+ manifests_dir.mkdir()
+ (manifests_dir / "gateway-deployment.yaml").write_text("apiVersion: v1\n")
+ (manifests_dir / "cert-secrets.yaml").write_text("apiVersion: v1\n")
+
+ deploy_kubernetes(manifests_dir)
+ assert mock_run.called
+
+
+class TestVerifyKubernetes:
+ """Test verify_kubernetes function."""
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_verify_kubernetes_success(self, mock_run, mock_which):
+ """Test successful Kubernetes verification."""
+ mock_which.return_value = "/usr/bin/kubectl"
+ mock_run.return_value = Mock(
+ returncode=0, stdout="pod-1 Running\npod-2 Running", stderr=""
+ )
+
+ result = verify_kubernetes("test-ns")
+ assert "Running" in result
+ mock_run.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ def test_verify_kubernetes_kubectl_not_found(self, mock_which):
+ """Test verification when kubectl is not available."""
+ mock_which.return_value = None
+
+ with pytest.raises(RuntimeError, match="kubectl not found"):
+ verify_kubernetes("test-ns")
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_verify_kubernetes_with_wait(self, mock_run, mock_which):
+ """Test Kubernetes verification with wait."""
+ mock_which.return_value = "/usr/bin/kubectl"
+ mock_run.return_value = Mock(returncode=0, stdout="Ready", stderr="")
+
+        verify_kubernetes("test-ns", wait=True, timeout=60)
+        assert mock_run.call_count >= 1
+
+
+class TestDestroyKubernetes:
+ """Test destroy_kubernetes function."""
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ @patch("mcpgateway.tools.builder.common.subprocess.run")
+ def test_destroy_kubernetes_success(self, mock_run, mock_which, tmp_path):
+ """Test successful Kubernetes destruction."""
+ mock_which.return_value = "/usr/bin/kubectl"
+ mock_run.return_value = Mock(returncode=0, stdout="deleted", stderr="")
+
+ manifests_dir = tmp_path / "manifests"
+ manifests_dir.mkdir()
+ (manifests_dir / "gateway-deployment.yaml").write_text("apiVersion: v1\n")
+ (manifests_dir / "plugins-config.yaml").write_text("plugins: []\n")
+
+ destroy_kubernetes(manifests_dir)
+ assert mock_run.called
+
+ @patch("mcpgateway.tools.builder.common.shutil.which")
+ def test_destroy_kubernetes_kubectl_not_found(self, mock_which, tmp_path):
+ """Test destruction when kubectl is not available."""
+ mock_which.return_value = None
+ manifests_dir = tmp_path / "manifests"
+
+ with pytest.raises(RuntimeError, match="kubectl not found"):
+ destroy_kubernetes(manifests_dir)
+
+ def test_destroy_kubernetes_dir_not_found(self, tmp_path):
+ """Test destroying with non-existent manifests directory."""
+ manifests_dir = tmp_path / "nonexistent"
+
+ with patch("mcpgateway.tools.builder.common.shutil.which", return_value="/usr/bin/kubectl"):
+ # Should not raise error, just print warning
+ destroy_kubernetes(manifests_dir)
+
+
+class TestGenerateKubernetesManifests:
+ """Test generate_kubernetes_manifests function with real template rendering."""
+
+ def test_generate_manifests_gateway_only(self, tmp_path):
+ """Test generating Kubernetes manifests for gateway only."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [],
+ })
+
+ generate_kubernetes_manifests(config, output_dir)
+
+ # Verify gateway deployment was created
+ gateway_file = output_dir / "gateway-deployment.yaml"
+ assert gateway_file.exists()
+
+ # Parse and validate YAML
+ with open(gateway_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ # Should have Deployment and Service
+ assert len(docs) >= 2
+
+ # Validate Deployment
+ deployment = next((d for d in docs if d.get("kind") == "Deployment"), None)
+ assert deployment is not None
+ assert deployment["metadata"]["name"] == "mcpgateway"
+ assert deployment["metadata"]["namespace"] == "test-ns"
+ assert deployment["spec"]["template"]["spec"]["containers"][0]["image"] == "mcpgateway:latest"
+
+ # Validate Service
+ service = next((d for d in docs if d.get("kind") == "Service"), None)
+ assert service is not None
+ assert service["metadata"]["name"] == "mcpgateway"
+ assert service["spec"]["ports"][0]["port"] == 4444
+
+ def test_generate_manifests_with_plugins(self, tmp_path):
+ """Test generating Kubernetes manifests with plugins."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "kubernetes", "namespace": "mcp-test"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [
+ {
+ "name": "TestPlugin",
+ "image": "test-plugin:v1",
+ "port": 8000,
+ "mtls_enabled": False,
+ },
+ {
+ "name": "AnotherPlugin",
+ "image": "another-plugin:v2",
+ "port": 8001,
+ "mtls_enabled": False,
+ },
+ ],
+ })
+
+ generate_kubernetes_manifests(config, output_dir)
+
+ # Verify plugin deployments were created
+ plugin1_file = output_dir / "plugin-testplugin-deployment.yaml"
+ plugin2_file = output_dir / "plugin-anotherplugin-deployment.yaml"
+
+ assert plugin1_file.exists()
+ assert plugin2_file.exists()
+
+ # Parse and validate first plugin
+ with open(plugin1_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ deployment = next((d for d in docs if d.get("kind") == "Deployment"), None)
+ assert deployment is not None
+ assert deployment["metadata"]["name"] == "mcp-plugin-testplugin"
+ assert deployment["metadata"]["namespace"] == "mcp-test"
+ assert deployment["spec"]["template"]["spec"]["containers"][0]["image"] == "test-plugin:v1"
+
+ def test_generate_manifests_with_mtls(self, tmp_path):
+ """Test generating Kubernetes manifests with mTLS enabled."""
+ # Change to tmp_path to ensure we have a valid working directory
+ original_dir = None
+ try:
+ original_dir = os.getcwd()
+ except (FileNotFoundError, OSError):
+ pass # Current directory doesn't exist
+
+ os.chdir(tmp_path)
+
+ try:
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ # Create fake certificate files in the actual location where the code looks
+ certs_dir = Path("certs/mcp")
+ ca_dir = certs_dir / "ca"
+ gateway_dir = certs_dir / "gateway"
+ plugin_dir = certs_dir / "plugins" / "SecurePlugin"
+
+ ca_dir.mkdir(parents=True, exist_ok=True)
+ gateway_dir.mkdir(parents=True, exist_ok=True)
+ plugin_dir.mkdir(parents=True, exist_ok=True)
+
+ (ca_dir / "ca.crt").write_bytes(b"fake-ca-cert")
+ (gateway_dir / "client.crt").write_bytes(b"fake-gateway-cert")
+ (gateway_dir / "client.key").write_bytes(b"fake-gateway-key")
+ (plugin_dir / "server.crt").write_bytes(b"fake-plugin-cert")
+ (plugin_dir / "server.key").write_bytes(b"fake-plugin-key")
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "kubernetes", "namespace": "secure-ns"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": True,
+ },
+ "plugins": [
+ {
+ "name": "SecurePlugin",
+ "image": "secure-plugin:v1",
+ "port": 8000,
+ "mtls_enabled": True,
+ }
+ ],
+ })
+
+ generate_kubernetes_manifests(config, output_dir)
+ finally:
+ # Clean up created certificate files
+ if Path("certs").exists():
+ shutil.rmtree("certs")
+
+ # Restore original directory if it exists
+ if original_dir and Path(original_dir).exists():
+ os.chdir(original_dir)
+
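+        # output_dir is absolute (under tmp_path), so these assertions stay
+        # valid after the working directory is restored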
+ # Verify certificate secrets were created
+ cert_secrets_file = output_dir / "cert-secrets.yaml"
+ assert cert_secrets_file.exists()
+
+ # Parse and validate secrets
+ with open(cert_secrets_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ # Should have secrets for CA, gateway, and plugin
+ secrets = [d for d in docs if d.get("kind") == "Secret"]
+ assert len(secrets) >= 2 # At least gateway and plugin secrets
+
+ def test_generate_manifests_with_infrastructure(self, tmp_path):
+ """Test generating Kubernetes manifests with PostgreSQL and Redis."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "kubernetes", "namespace": "infra-ns"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [],
+ "infrastructure": {
+ "postgres": {
+ "enabled": True,
+ "image": "postgres:17",
+ "database": "testdb",
+ "user": "testuser",
+ "password": "testpass",
+ },
+ "redis": {
+ "enabled": True,
+ "image": "redis:alpine",
+ },
+ },
+ })
+
+ generate_kubernetes_manifests(config, output_dir)
+
+ # Verify infrastructure manifests were created
+ postgres_file = output_dir / "postgres-deployment.yaml"
+ redis_file = output_dir / "redis-deployment.yaml"
+
+ assert postgres_file.exists()
+ assert redis_file.exists()
+
+ # Parse and validate PostgreSQL
+ with open(postgres_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ postgres_deployment = next((d for d in docs if d.get("kind") == "Deployment"), None)
+ assert postgres_deployment is not None
+ assert postgres_deployment["metadata"]["name"] == "postgres"
+ assert postgres_deployment["spec"]["template"]["spec"]["containers"][0]["image"] == "postgres:17"
+
+ # Parse and validate Redis
+ with open(redis_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ redis_deployment = next((d for d in docs if d.get("kind") == "Deployment"), None)
+ assert redis_deployment is not None
+ assert redis_deployment["metadata"]["name"] == "redis"
+
+ # Verify gateway has database environment variables in Secret
+ gateway_file = output_dir / "gateway-deployment.yaml"
+ with open(gateway_file) as f:
+ docs = list(yaml.safe_load_all(f))
+
+ # Find the Secret containing environment variables
+ secret = next((d for d in docs if d.get("kind") == "Secret" and d["metadata"]["name"] == "mcpgateway-env"), None)
+ assert secret is not None
+ assert "stringData" in secret
+
+ string_data = secret["stringData"]
+
+ # Check DATABASE_URL is set
+ assert "DATABASE_URL" in string_data
+ assert "postgresql://" in string_data["DATABASE_URL"]
+ assert "testuser:testpass" in string_data["DATABASE_URL"]
+
+ # Check REDIS_URL is set
+ assert "REDIS_URL" in string_data
+ assert "redis://redis:6379" in string_data["REDIS_URL"]
+
+ # Verify deployment references the Secret via envFrom
+ gateway_deployment = next((d for d in docs if d.get("kind") == "Deployment"), None)
+ assert gateway_deployment is not None
+ env_from = gateway_deployment["spec"]["template"]["spec"]["containers"][0]["envFrom"]
+ assert any(ref.get("secretRef", {}).get("name") == "mcpgateway-env" for ref in env_from)
+
+
+class TestGenerateComposeManifests:
+ """Test generate_compose_manifests function with real template rendering."""
+
+ def test_generate_compose_gateway_only(self, tmp_path):
+ """Test generating Docker Compose manifest for gateway only."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose", "project_name": "test-mcp"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "host_port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path.cwd", return_value=tmp_path):
+ generate_compose_manifests(config, output_dir)
+
+ # Verify compose file was created
+ compose_file = output_dir / "docker-compose.yaml"
+ assert compose_file.exists()
+
+ # Parse and validate
+ with open(compose_file) as f:
+ compose_data = yaml.safe_load(f)
+
+ assert "services" in compose_data
+ assert "mcpgateway" in compose_data["services"]
+
+ gateway = compose_data["services"]["mcpgateway"]
+ assert gateway["image"] == "mcpgateway:latest"
+ assert gateway["ports"] == ["4444:4444"]
+
+ def test_generate_compose_with_plugins(self, tmp_path):
+ """Test generating Docker Compose manifest with plugins."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose", "project_name": "mcp-stack"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "host_port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [
+ {
+ "name": "Plugin1",
+ "image": "plugin1:v1",
+ "port": 8000,
+ "expose_port": True,
+ "host_port": 8000,
+ "mtls_enabled": False,
+ },
+ {
+ "name": "Plugin2",
+ "image": "plugin2:v1",
+ "port": 8001,
+ "expose_port": False,
+ "mtls_enabled": False,
+ },
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path.cwd", return_value=tmp_path):
+ generate_compose_manifests(config, output_dir)
+
+ # Parse and validate
+ compose_file = output_dir / "docker-compose.yaml"
+ with open(compose_file) as f:
+ compose_data = yaml.safe_load(f)
+
+ # Verify plugins are in services
+ assert "plugin1" in compose_data["services"]
+ assert "plugin2" in compose_data["services"]
+
+ plugin1 = compose_data["services"]["plugin1"]
+ assert plugin1["image"] == "plugin1:v1"
+ assert "8000:8000" in plugin1["ports"] # Exposed
+
+        plugin2 = compose_data["services"]["plugin2"]
+        assert plugin2["image"] == "plugin2:v1"
+        # Plugin2 should not have a host port mapping since expose_port is False
+        assert "8001:8001" not in (plugin2.get("ports") or [])
+
+ def test_generate_compose_with_mtls(self, tmp_path):
+ """Test generating Docker Compose manifest with mTLS certificates."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ # Create fake certificate structure
+ certs_dir = tmp_path / "certs" / "mcp"
+ ca_dir = certs_dir / "ca"
+ gateway_dir = certs_dir / "gateway"
+ plugin_dir = certs_dir / "plugins" / "SecurePlugin"
+
+ ca_dir.mkdir(parents=True)
+ gateway_dir.mkdir(parents=True)
+ plugin_dir.mkdir(parents=True)
+
+ (ca_dir / "ca.crt").write_text("fake-ca")
+ (gateway_dir / "client.crt").write_text("fake-cert")
+ (gateway_dir / "client.key").write_text("fake-key")
+ (plugin_dir / "server.crt").write_text("fake-plugin-cert")
+ (plugin_dir / "server.key").write_text("fake-plugin-key")
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "host_port": 4444,
+ "mtls_enabled": True,
+ },
+ "plugins": [
+ {
+ "name": "SecurePlugin",
+ "image": "secure:v1",
+ "port": 8000,
+ "mtls_enabled": True,
+ }
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path.cwd", return_value=tmp_path):
+ generate_compose_manifests(config, output_dir)
+
+ # Parse and validate
+ compose_file = output_dir / "docker-compose.yaml"
+ with open(compose_file) as f:
+ compose_data = yaml.safe_load(f)
+
+ # Verify gateway has certificate volumes
+ gateway = compose_data["services"]["mcpgateway"]
+ assert "volumes" in gateway
+ # Should have volume mounts for certificates
+ volumes = gateway["volumes"]
+ assert any("certs" in str(v) or "ca.crt" in str(v) for v in volumes)
+
+ # Verify plugin has certificate volumes
+ plugin = compose_data["services"]["secureplugin"]
+ assert "volumes" in plugin
+
+ def test_generate_compose_with_env_files(self, tmp_path):
+ """Test generating Docker Compose manifest with environment files."""
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ # Create env files
+ deploy_dir = tmp_path / "deploy"
+ env_dir = deploy_dir / "env"
+ env_dir.mkdir(parents=True)
+ (env_dir / ".env.gateway").write_text("GATEWAY_VAR=value1\n")
+ (env_dir / ".env.TestPlugin").write_text("PLUGIN_VAR=value2\n")
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [
+ {
+ "name": "TestPlugin",
+ "image": "test:v1",
+ "port": 8000,
+ "mtls_enabled": False,
+ }
+ ],
+ })
+
+ with patch("mcpgateway.tools.builder.common.get_deploy_dir", return_value=deploy_dir):
+ with patch("mcpgateway.tools.builder.common.Path.cwd", return_value=tmp_path):
+ generate_compose_manifests(config, output_dir)
+
+ # Parse and validate
+ compose_file = output_dir / "docker-compose.yaml"
+ with open(compose_file) as f:
+ compose_data = yaml.safe_load(f)
+
+ # Verify env_file is set
+ gateway = compose_data["services"]["mcpgateway"]
+ assert "env_file" in gateway
+
+ plugin = compose_data["services"]["testplugin"]
+ assert "env_file" in plugin
+
+ def test_generate_compose_with_infrastructure(self, tmp_path):
+ """Test generating Docker Compose manifest with PostgreSQL and Redis.
+
+ Note: Currently the template uses hardcoded infrastructure images/config.
+ Infrastructure customization is not yet implemented for Docker Compose.
+ """
+ output_dir = tmp_path / "manifests"
+ output_dir.mkdir()
+
+ config = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {
+ "image": "mcpgateway:latest",
+ "port": 4444,
+ "mtls_enabled": False,
+ },
+ "plugins": [],
+ "infrastructure": {
+ "postgres": {
+ "enabled": True,
+ "image": "postgres:17",
+ "database": "mcpdb",
+ "user": "mcpuser",
+ "password": "secret123",
+ },
+ "redis": {
+ "enabled": True,
+ "image": "redis:7-alpine",
+ },
+ },
+ })
+
+ with patch("mcpgateway.tools.builder.common.Path.cwd", return_value=tmp_path):
+ generate_compose_manifests(config, output_dir)
+
+ # Parse and validate
+ compose_file = output_dir / "docker-compose.yaml"
+ with open(compose_file) as f:
+ compose_data = yaml.safe_load(f)
+
+ # Verify PostgreSQL service exists
+ # Note: Template uses hardcoded "postgres:17" and "mcp" database
+ assert "postgres" in compose_data["services"]
+ postgres = compose_data["services"]["postgres"]
+ assert postgres["image"] == "postgres:17" # Hardcoded in template
+ assert "environment" in postgres
+
+ # Verify database name is "mcp" (hardcoded default, not "mcpdb" from config)
+ env = postgres["environment"]
+ if isinstance(env, list):
+ assert any("POSTGRES_DB=mcp" in str(e) for e in env)
+ else:
+ assert env["POSTGRES_DB"] == "mcp"
+
+ # Verify Redis service exists
+ # Note: Template uses hardcoded "redis:latest"
+ assert "redis" in compose_data["services"]
+ redis = compose_data["services"]["redis"]
+ assert redis["image"] == "redis:latest" # Hardcoded in template
+
+ # Verify gateway has database environment variables
+ gateway = compose_data["services"]["mcpgateway"]
+ assert "environment" in gateway
+ env = gateway["environment"]
+
+ # Should have DATABASE_URL with default values
+ if isinstance(env, list):
+ db_url = next((e for e in env if "DATABASE_URL" in str(e)), None)
+ else:
+ db_url = env.get("DATABASE_URL")
+ assert db_url is not None
+ assert "postgresql://" in str(db_url)
diff --git a/tests/unit/mcpgateway/tools/builder/test_dagger_deploy.py b/tests/unit/mcpgateway/tools/builder/test_dagger_deploy.py
new file mode 100644
index 000000000..bc0f8ee87
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/test_dagger_deploy.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/test_dagger_deploy.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Unit tests for Dagger-based MCP Stack deployment.
+
+These tests are skipped if Dagger is not installed.
+"""
+
+# Standard
+from pathlib import Path
+from unittest.mock import AsyncMock, Mock, patch
+
+# Third-Party
+import pytest
+
+# Check if dagger is available
+try:
+ import dagger
+
+ DAGGER_AVAILABLE = True
+except ImportError:
+ DAGGER_AVAILABLE = False
+
+# Skip all tests in this module if Dagger is not available
+pytestmark = pytest.mark.skipif(not DAGGER_AVAILABLE, reason="Dagger not installed")
+
+# Conditional import to avoid errors when Dagger is not installed
+if DAGGER_AVAILABLE:
+ # First-Party
+ from mcpgateway.tools.builder.dagger_deploy import MCPStackDagger
+else:
+ # Create a dummy class to avoid NameError in decorators
+ MCPStackDagger = type("MCPStackDagger", (), {})
+
+
+@pytest.fixture
+def mock_dagger_connection(tmp_path):
+ """Fixture to mock Dagger connection and dag."""
+ with patch("mcpgateway.tools.builder.dagger_deploy.dagger.connection") as mock_conn:
+ with patch("mcpgateway.tools.builder.dagger_deploy.dag") as mock_dag:
+ with patch("mcpgateway.tools.builder.dagger_deploy.Path.cwd") as mock_cwd:
+ # Mock Path.cwd() to return a valid temporary directory
+ mock_cwd.return_value = tmp_path
+
+ # Mock the async context manager
+ mock_conn_ctx = AsyncMock()
+ mock_conn.return_value = mock_conn_ctx
+ mock_conn_ctx.__aenter__.return_value = None
+ mock_conn_ctx.__aexit__.return_value = None
+
+ # Setup dag mocks (use regular Mock for synchronous Dagger API)
+ mock_git = Mock()
+ mock_tree = Mock()
+ mock_container = Mock()
+ mock_container.export_image = AsyncMock() # Only export_image is async
+ mock_host = Mock()
+ mock_dir = Mock()
+ mock_dir.export = AsyncMock() # export is async
+
+ # Set up the method chain for git operations
+ mock_dag.git.return_value = mock_git
+ mock_git.branch.return_value = mock_git
+ mock_git.tree.return_value = mock_tree
+ mock_tree.docker_build.return_value = mock_container
+
+ # Set up container operations
+ mock_dag.container.return_value = mock_container
+ mock_container.from_.return_value = mock_container
+ mock_container.with_exec.return_value = mock_container
+ mock_container.with_mounted_directory.return_value = mock_container
+ mock_container.with_workdir.return_value = mock_container
+ mock_container.directory.return_value = mock_dir
+
+ # Set up host operations
+ mock_dag.host.return_value = mock_host
+ mock_host.directory.return_value = mock_dir
+
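+                    # Expose the key mocks so tests can assert on individual calls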
+ yield {"connection": mock_conn, "dag": mock_dag, "container": mock_container}
+
+
+class TestMCPStackDaggerInit:
+ """Test MCPStackDagger initialization."""
+
+ def test_init_default(self):
+ """Test default initialization."""
+ stack = MCPStackDagger()
+ assert stack.verbose is False
+
+ def test_init_verbose(self):
+ """Test initialization with verbose flag."""
+ stack = MCPStackDagger(verbose=True)
+ assert stack.verbose is True
+
+
+class TestMCPStackDaggerBuild:
+ """Test MCPStackDagger build method."""
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_build_gateway_only(self, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test building gateway container with Dagger."""
+ mock_load.return_value = {
+ "gateway": {"repo": "https://github.com/test/gateway.git", "ref": "main"},
+ "plugins": [],
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.build("test-config.yaml")
+
+ mock_load.assert_called_once_with("test-config.yaml")
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_build_plugins_only(self, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test building only plugins."""
+ mock_load.return_value = {
+ "gateway": {"repo": "https://github.com/test/gateway.git"},
+ "plugins": [
+ {"name": "Plugin1", "repo": "https://github.com/test/plugin1.git"}
+ ],
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.build("test-config.yaml", plugins_only=True)
+
+ mock_load.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_build_specific_plugins(self, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test building specific plugins only."""
+ mock_load.return_value = {
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [
+ {"name": "Plugin1", "repo": "https://github.com/test/plugin1.git"},
+ {"name": "Plugin2", "repo": "https://github.com/test/plugin2.git"},
+ ],
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.build("test-config.yaml", specific_plugins=["Plugin1"])
+
+ mock_load.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_build_no_plugins(self, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test building when no plugins are defined."""
+ mock_load.return_value = {
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ # Should not raise error
+ await stack.build("test-config.yaml", plugins_only=True)
+
+
+class TestMCPStackDaggerGenerateCertificates:
+ """Test MCPStackDagger generate_certificates method."""
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_generate_certificates(self, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test certificate generation with Dagger."""
+ mock_load.return_value = {
+ "plugins": [
+ {"name": "Plugin1"},
+ {"name": "Plugin2"},
+ ]
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.generate_certificates("test-config.yaml")
+
+ mock_load.assert_called_once()
+
+
+class TestMCPStackDaggerDeploy:
+ """Test MCPStackDagger deploy method."""
+
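+    # @patch decorators are applied bottom-up, so the injected mock arguments
+    # appear in reverse decorator order in each test signature.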
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "build")
+ @patch.object(MCPStackDagger, "generate_certificates")
+ @patch.object(MCPStackDagger, "generate_manifests")
+ @patch.object(MCPStackDagger, "_deploy_compose")
+ @pytest.mark.asyncio
+ async def test_deploy_compose_full(
+ self, mock_deploy, mock_gen_manifests, mock_certs, mock_build, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path
+ ):
+ """Test full Docker Compose deployment with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "compose", "project_name": "test"},
+ "gateway": {"repo": "https://github.com/test/gateway.git", "mtls_enabled": True},
+ "plugins": [],
+ }
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.deploy("test-config.yaml")
+
+ mock_build.assert_called_once()
+ mock_certs.assert_called_once()
+ mock_gen_manifests.assert_called_once()
+ mock_deploy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "generate_manifests")
+ @pytest.mark.asyncio
+ async def test_deploy_dry_run(self, mock_gen_manifests, mock_load, mock_dagger_connection, tmp_path):
+ """Test dry-run deployment with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ }
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+
+ stack = MCPStackDagger()
+ await stack.deploy("test-config.yaml", dry_run=True, skip_build=True, skip_certs=True)
+
+ mock_gen_manifests.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "generate_manifests")
+ @patch.object(MCPStackDagger, "_deploy_kubernetes")
+ @pytest.mark.asyncio
+ async def test_deploy_kubernetes(self, mock_deploy, mock_gen_manifests, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test Kubernetes deployment with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ "plugins": [],
+ }
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.deploy("test-config.yaml", skip_build=True, skip_certs=True)
+
+ mock_deploy.assert_called_once()
+
+
+class TestMCPStackDaggerVerify:
+ """Test MCPStackDagger verify method."""
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "_verify_kubernetes")
+ @pytest.mark.asyncio
+ async def test_verify_kubernetes(self, mock_verify_kubernetes, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test Kubernetes deployment verification with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"}
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.verify("test-config.yaml")
+
+ mock_verify_kubernetes.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "_verify_compose")
+ @pytest.mark.asyncio
+ async def test_verify_compose(self, mock_verify_compose, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test Docker Compose deployment verification with Dagger."""
+ mock_load.return_value = {"deployment": {"type": "compose"}}
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.verify("test-config.yaml")
+
+ mock_verify_compose.assert_called_once()
+
+
+class TestMCPStackDaggerDestroy:
+ """Test MCPStackDagger destroy method."""
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "_destroy_kubernetes")
+ @pytest.mark.asyncio
+ async def test_destroy_kubernetes(self, mock_destroy_kubernetes, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test Kubernetes deployment destruction with Dagger."""
+ mock_load.return_value = {"deployment": {"type": "kubernetes"}}
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.destroy("test-config.yaml")
+
+ mock_destroy_kubernetes.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch.object(MCPStackDagger, "_destroy_compose")
+ @pytest.mark.asyncio
+ async def test_destroy_compose(self, mock_destroy_compose, mock_load, mock_get_deploy, mock_dagger_connection, tmp_path):
+ """Test Docker Compose deployment destruction with Dagger."""
+ mock_load.return_value = {"deployment": {"type": "compose"}}
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ await stack.destroy("test-config.yaml")
+
+ mock_destroy_compose.assert_called_once()
+
+
+class TestMCPStackDaggerGenerateManifests:
+ """Test MCPStackDagger generate_manifests method."""
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch("mcpgateway.tools.builder.dagger_deploy.generate_plugin_config")
+ @patch("mcpgateway.tools.builder.dagger_deploy.generate_kubernetes_manifests")
+ def test_generate_manifests_kubernetes(
+ self, mock_k8s_gen, mock_plugin_gen, mock_load, tmp_path
+ ):
+ """Test generating Kubernetes manifests with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ }
+
+ stack = MCPStackDagger()
+ result = stack.generate_manifests("test-config.yaml", output_dir=str(tmp_path))
+
+ mock_plugin_gen.assert_called_once()
+ mock_k8s_gen.assert_called_once()
+ assert result == tmp_path
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ @patch("mcpgateway.tools.builder.dagger_deploy.generate_plugin_config")
+ @patch("mcpgateway.tools.builder.dagger_deploy.generate_compose_manifests")
+ def test_generate_manifests_compose(
+ self, mock_compose_gen, mock_plugin_gen, mock_load, tmp_path
+ ):
+ """Test generating Docker Compose manifests with Dagger."""
+ mock_load.return_value = {
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ }
+
+ stack = MCPStackDagger()
+ result = stack.generate_manifests("test-config.yaml", output_dir=str(tmp_path))
+
+ mock_plugin_gen.assert_called_once()
+ mock_compose_gen.assert_called_once()
+ assert result == tmp_path
+
+ @patch("mcpgateway.tools.builder.dagger_deploy.get_deploy_dir")
+ @patch("mcpgateway.tools.builder.dagger_deploy.load_config")
+ def test_generate_manifests_invalid_type(self, mock_load, mock_get_deploy, tmp_path):
+ """Test generating manifests with invalid deployment type."""
+ mock_load.return_value = {
+ "deployment": {"type": "invalid"},
+ "gateway": {"image": "mcpgateway:latest"},
+ }
+ mock_get_deploy.return_value = tmp_path / "deploy"
+
+ stack = MCPStackDagger()
+ with pytest.raises(ValueError, match="Unsupported deployment type"):
+ stack.generate_manifests("test-config.yaml")
+
+
+class TestMCPStackDaggerBuildComponent:
+ """Test MCPStackDagger _build_component_with_dagger method."""
+
+ @pytest.mark.asyncio
+ async def test_build_component_basic(self, mock_dagger_connection, tmp_path):
+ """Test basic component build with Dagger."""
+ component = {
+ "repo": "https://github.com/test/component.git",
+ "ref": "main",
+ "context": ".",
+ "containerfile": "Containerfile",
+ "image": "test-component:latest",
+ }
+
+ stack = MCPStackDagger()
+ await stack._build_component_with_dagger(component, "test-component")
+
+ # Verify Dagger operations were called (using mocks from fixture)
+ mock_dag = mock_dagger_connection["dag"]
+ mock_dag.git.assert_called_once()
+
+ # Get the mock git object
+ mock_git = mock_dag.git.return_value
+ mock_git.branch.assert_called_with("main")
+
+ # Get the mock tree object
+ mock_tree = mock_git.tree.return_value
+ mock_tree.docker_build.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_build_component_with_target(self, mock_dagger_connection, tmp_path):
+ """Test component build with multi-stage target."""
+ component = {
+ "repo": "https://github.com/test/component.git",
+ "ref": "main",
+ "context": ".",
+ "image": "test:latest",
+ "target": "production",
+ }
+
+ stack = MCPStackDagger()
+ await stack._build_component_with_dagger(component, "test")
+
+ # Verify docker_build was called with target parameter
+ mock_dag = mock_dagger_connection["dag"]
+ mock_git = mock_dag.git.return_value
+ mock_tree = mock_git.tree.return_value
+ call_args = mock_tree.docker_build.call_args
+ assert "target" in call_args[1] or call_args[0]
+
+ @pytest.mark.asyncio
+ async def test_build_component_with_env_vars(self, mock_dagger_connection, tmp_path):
+ """Test component build with environment variables."""
+ component = {
+ "repo": "https://github.com/test/component.git",
+ "ref": "main",
+ "image": "test:latest",
+ "env_vars": {"BUILD_ENV": "production", "VERSION": "1.0"},
+ }
+
+ stack = MCPStackDagger()
+ await stack._build_component_with_dagger(component, "test")
+
+ # Verify docker_build was called
+ mock_dag = mock_dagger_connection["dag"]
+ mock_git = mock_dag.git.return_value
+ mock_tree = mock_git.tree.return_value
+ mock_tree.docker_build.assert_called_once()
diff --git a/tests/unit/mcpgateway/tools/builder/test_python_deploy.py b/tests/unit/mcpgateway/tools/builder/test_python_deploy.py
new file mode 100644
index 000000000..1f46d1601
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/test_python_deploy.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/test_python_deploy.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Unit tests for plain Python MCP Stack deployment.
+"""
+
+# Standard
+from pathlib import Path
+import re
+import subprocess
+from unittest.mock import MagicMock, Mock, patch, call
+
+# Third-Party
+import pytest
+from pydantic import ValidationError
+
+# First-Party
+from mcpgateway.tools.builder.python_deploy import MCPStackPython
+from mcpgateway.tools.builder.schema import BuildableConfig, MCPStackConfig
+
+
+class TestMCPStackPython:
+ """Test MCPStackPython deployment class."""
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @pytest.mark.asyncio
+ async def test_build_no_plugins(self, mock_load, mock_which):
+ """Test building when no plugins are defined."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ })
+
+ stack = MCPStackPython()
+ # Should not raise error
+ await stack.build("test-config.yaml", plugins_only=True)
+
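+ # For reference, the test-config.yaml consumed by load_config would look
+ # roughly like this for the stack above (a sketch; load_config is mocked,
+ # so no file is actually read in these tests):
+ #
+ #     deployment:
+ #       type: compose
+ #     gateway:
+ #       image: mcpgateway:latest
+ #     plugins: []
+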
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which", return_value="/usr/bin/make")
+ @patch.object(MCPStackPython, "_run_command")
+ @pytest.mark.asyncio
+ async def test_generate_certificates(self, mock_run, mock_make, mock_load, mock_which_runtime):
+ """Test certificate generation."""
+ mock_which_runtime.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "gateway": {"image": "mcpgateway:latest"},
+ "deployment": {"type": "compose"},
+ "plugins": [
+ {"name": "Plugin1", "repo": "https://github.com/test/plugin1.git"},
+ {"name": "Plugin2", "repo": "https://github.com/test/plugin2.git"},
+ ]
+ })
+
+ stack = MCPStackPython()
+ await stack.generate_certificates("test-config.yaml")
+
+ # Should call make commands for CA, gateway, and each plugin
+ assert mock_run.call_count == 4 # CA + gateway + 2 plugins
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "build")
+ @patch.object(MCPStackPython, "generate_certificates")
+ @patch.object(MCPStackPython, "generate_manifests")
+ @patch.object(MCPStackPython, "_deploy_compose")
+ @pytest.mark.asyncio
+ async def test_deploy_compose(
+ self, mock_deploy, mock_gen_manifests, mock_certs, mock_build, mock_load, mock_which
+ ):
+ """Test full compose deployment."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose", "project_name": "test"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": True},
+ "plugins": [],
+ })
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+
+ stack = MCPStackPython()
+ await stack.deploy("test-config.yaml")
+
+ mock_build.assert_called_once()
+ mock_certs.assert_called_once()
+ mock_gen_manifests.assert_called_once()
+ mock_deploy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "build")
+ @patch.object(MCPStackPython, "generate_manifests")
+ @pytest.mark.asyncio
+ async def test_deploy_dry_run(self, mock_gen_manifests, mock_build, mock_load, mock_which):
+ """Test dry-run deployment."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ })
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+
+ stack = MCPStackPython()
+ await stack.deploy("test-config.yaml", dry_run=True, skip_build=True, skip_certs=True)
+
+ mock_gen_manifests.assert_called_once()
+ # Should not call actual deployment
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "generate_manifests")
+ @pytest.mark.asyncio
+ async def test_deploy_skip_certs_mtls_disabled(self, mock_gen_manifests, mock_load, mock_which):
+ """Test deployment with mTLS disabled."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ "plugins": [],
+ })
+ mock_gen_manifests.return_value = Path("/tmp/manifests")
+
+ stack = MCPStackPython()
+ with patch.object(stack, "generate_certificates") as mock_certs:
+ await stack.deploy("test-config.yaml", dry_run=True, skip_build=True)
+
+ # Certificates should not be generated
+ mock_certs.assert_not_called()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "_verify_kubernetes")
+ @pytest.mark.asyncio
+ async def test_verify_kubernetes(self, mock_verify, mock_load, mock_which):
+ """Test Kubernetes deployment verification."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"}
+ })
+
+ stack = MCPStackPython()
+ await stack.verify("test-config.yaml")
+
+ mock_verify.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "_verify_compose")
+ @pytest.mark.asyncio
+ async def test_verify_compose(self, mock_verify, mock_load, mock_which):
+ """Test Docker Compose deployment verification."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({"deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ })
+
+ stack = MCPStackPython()
+ await stack.verify("test-config.yaml")
+
+ mock_verify.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "_destroy_kubernetes")
+ @pytest.mark.asyncio
+ async def test_destroy_kubernetes(self, mock_destroy, mock_load, mock_which):
+ """Test Kubernetes deployment destruction."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({"deployment": {"type": "kubernetes"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ })
+
+ stack = MCPStackPython()
+ await stack.destroy("test-config.yaml")
+
+ mock_destroy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch.object(MCPStackPython, "_destroy_compose")
+ @pytest.mark.asyncio
+ async def test_destroy_compose(self, mock_destroy, mock_load, mock_which):
+ """Test Docker Compose deployment destruction."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({"deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest", "mtls_enabled": False},
+ })
+
+ stack = MCPStackPython()
+ await stack.destroy("test-config.yaml")
+
+ mock_destroy.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch("mcpgateway.tools.builder.python_deploy.generate_plugin_config")
+ @patch("mcpgateway.tools.builder.python_deploy.generate_kubernetes_manifests")
+ def test_generate_manifests_kubernetes(
+ self, mock_k8s_gen, mock_plugin_gen, mock_load, mock_which, tmp_path
+ ):
+ """Test generating Kubernetes manifests."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "kubernetes", "namespace": "test-ns"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ })
+
+ stack = MCPStackPython()
+ result = stack.generate_manifests("test-config.yaml", output_dir=str(tmp_path))
+
+ mock_plugin_gen.assert_called_once()
+ mock_k8s_gen.assert_called_once()
+ assert result == tmp_path
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch("mcpgateway.tools.builder.python_deploy.generate_plugin_config")
+ @patch("mcpgateway.tools.builder.python_deploy.generate_compose_manifests")
+ def test_generate_manifests_compose(
+ self, mock_compose_gen, mock_plugin_gen, mock_load, mock_which, tmp_path
+ ):
+ """Test generating Docker Compose manifests."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_load.return_value = MCPStackConfig.model_validate({
+ "deployment": {"type": "compose"},
+ "gateway": {"image": "mcpgateway:latest"},
+ "plugins": [],
+ })
+
+ stack = MCPStackPython()
+ result = stack.generate_manifests("test-config.yaml", output_dir=str(tmp_path))
+
+ mock_plugin_gen.assert_called_once()
+ mock_compose_gen.assert_called_once()
+ assert result == tmp_path
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.load_config")
+ @patch("mcpgateway.tools.builder.python_deploy.get_deploy_dir")
+ def test_generate_manifests_invalid_type(self, mock_get_deploy, mock_load, mock_which):
+ """Test generating manifests with invalid deployment type."""
+ mock_which.return_value = "/usr/bin/docker"
+ with pytest.raises(ValidationError, match="Input should be 'kubernetes' or 'compose'"):
+ MCPStackConfig.model_validate({
+ "deployment": {"type": "invalid"},
+ "gateway": {"image": "mcpgateway:latest"},
+ })
+
+
+class TestRunCommand:
+ """Test _run_command method."""
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.subprocess.run")
+ def test_run_command_success(self, mock_run, mock_which):
+ """Test successful command execution."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_run.return_value = Mock(returncode=0, stdout="Success", stderr="")
+
+ stack = MCPStackPython()
+ result = stack._run_command(["echo", "test"])
+
+ assert result.returncode == 0
+ mock_run.assert_called_once()
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.subprocess.run")
+ def test_run_command_failure(self, mock_run, mock_which):
+ """Test command execution failure."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_run.side_effect = subprocess.CalledProcessError(1, "cmd")
+
+ stack = MCPStackPython()
+ with pytest.raises(subprocess.CalledProcessError):
+ stack._run_command(["false"])
+
+ @patch("mcpgateway.tools.builder.python_deploy.shutil.which")
+ @patch("mcpgateway.tools.builder.python_deploy.subprocess.run")
+ def test_run_command_with_cwd(self, mock_run, mock_which, tmp_path):
+ """Test command execution with working directory."""
+ mock_which.return_value = "/usr/bin/docker"
+ mock_run.return_value = Mock(returncode=0)
+
+ stack = MCPStackPython()
+ stack._run_command(["ls"], cwd=tmp_path)
+
+ assert mock_run.call_args[1]["cwd"] == tmp_path
diff --git a/tests/unit/mcpgateway/tools/builder/test_schema.py b/tests/unit/mcpgateway/tools/builder/test_schema.py
new file mode 100644
index 000000000..63897bdaf
--- /dev/null
+++ b/tests/unit/mcpgateway/tools/builder/test_schema.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/tools/builder/test_schema.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Teryl Taylor
+
+Unit tests for builder schema validation (Pydantic models).
+"""
+
+# Third-Party
+import pytest
+from pydantic import ValidationError
+
+# First-Party
+from mcpgateway.tools.builder.schema import (
+ CertificatesConfig,
+ DeploymentConfig,
+ GatewayConfig,
+ InfrastructureConfig,
+ MCPStackConfig,
+ PluginConfig,
+ PostgresConfig,
+ RedisConfig,
+)
+
+
+class TestDeploymentConfig:
+ """Test DeploymentConfig validation."""
+
+ def test_valid_kubernetes_deployment(self):
+ """Test valid Kubernetes deployment configuration."""
+ config = DeploymentConfig(type="kubernetes", namespace="test-ns")
+ assert config.type == "kubernetes"
+ assert config.namespace == "test-ns"
+ assert config.project_name is None
+
+ def test_valid_compose_deployment(self):
+ """Test valid Docker Compose deployment configuration."""
+ config = DeploymentConfig(type="compose", project_name="test-project")
+ assert config.type == "compose"
+ assert config.project_name == "test-project"
+ assert config.namespace is None
+
+ def test_invalid_deployment_type(self):
+ """Test invalid deployment type."""
+ with pytest.raises(ValidationError):
+ DeploymentConfig(type="invalid")
+
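+# The model under test is presumably declared along these lines (a sketch;
+# the Literal choices are confirmed by the validation error message asserted
+# in the python_deploy tests, the optional fields are inferred from the
+# assertions above):
+#
+#     from typing import Literal, Optional
+#
+#     class DeploymentConfig(BaseModel):
+#         type: Literal["kubernetes", "compose"]
+#         namespace: Optional[str] = None
+#         project_name: Optional[str] = None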
+
+class TestGatewayConfig:
+ """Test GatewayConfig validation."""
+
+ def test_gateway_with_image(self):
+ """Test gateway config with pre-built image."""
+ config = GatewayConfig(image="mcpgateway:latest", port=4444)
+ assert config.image == "mcpgateway:latest"
+ assert config.port == 4444
+ assert config.repo is None
+
+ def test_gateway_with_repo(self):
+ """Test gateway config with repository build."""
+ config = GatewayConfig(
+ repo="https://github.com/org/repo.git",
+ ref="main",
+ context=".",
+ port=4444
+ )
+ assert config.repo == "https://github.com/org/repo.git"
+ assert config.ref == "main"
+ assert config.image is None
+
+ def test_gateway_without_image_or_repo(self):
+ """Test that gateway requires either image or repo."""
+ with pytest.raises(ValueError, match="must specify either 'image' or 'repo'"):
+ GatewayConfig(port=4444)
+
+ def test_gateway_defaults(self):
+ """Test gateway default values."""
+ config = GatewayConfig(image="test:latest")
+ assert config.port == 4444
+ assert config.mtls_enabled is True
+ assert config.ref == "main"
+ assert config.context == "."
+ assert config.containerfile == "Containerfile"
+
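+# The either/or rule asserted in test_gateway_without_image_or_repo is
+# consistent with a model-level validator, e.g. (a sketch, not the actual
+# source):
+#
+#     from pydantic import model_validator
+#
+#     @model_validator(mode="after")
+#     def _require_image_or_repo(self):
+#         if not self.image and not self.repo:
+#             raise ValueError("must specify either 'image' or 'repo'")
+#         return self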
+
+class TestPluginConfig:
+ """Test PluginConfig validation."""
+
+ def test_plugin_with_image(self):
+ """Test plugin config with pre-built image."""
+ config = PluginConfig(name="TestPlugin", image="test:latest")
+ assert config.name == "TestPlugin"
+ assert config.image == "test:latest"
+ assert config.repo is None
+
+ def test_plugin_with_repo(self):
+ """Test plugin config with repository build."""
+ config = PluginConfig(
+ name="TestPlugin",
+ repo="https://github.com/org/plugin.git",
+ ref="v1.0.0",
+ context="plugins/test"
+ )
+ assert config.name == "TestPlugin"
+ assert config.repo == "https://github.com/org/plugin.git"
+ assert config.ref == "v1.0.0"
+ assert config.context == "plugins/test"
+
+ def test_plugin_without_name(self):
+ """Test that plugin requires name."""
+ with pytest.raises(ValidationError):
+ PluginConfig(image="test:latest")
+
+ def test_plugin_empty_name(self):
+ """Test that plugin name cannot be empty."""
+ with pytest.raises(ValidationError, match="Plugin name cannot be empty"):
+ PluginConfig(name="", image="test:latest")
+
+ def test_plugin_whitespace_name(self):
+ """Test that plugin name cannot be whitespace only."""
+ with pytest.raises(ValidationError, match="Plugin name cannot be empty"):
+ PluginConfig(name=" ", image="test:latest")
+
+ def test_plugin_defaults(self):
+ """Test plugin default values."""
+ config = PluginConfig(name="TestPlugin", image="test:latest")
+ assert config.port == 8000
+ assert config.expose_port is False
+ assert config.mtls_enabled is True
+ assert config.plugin_overrides == {}
+
+ def test_plugin_overrides(self):
+ """Test plugin with overrides."""
+ config = PluginConfig(
+ name="TestPlugin",
+ image="test:latest",
+ plugin_overrides={
+ "priority": 10,
+ "mode": "enforce",
+ "tags": ["security", "filter"]
+ }
+ )
+ assert config.plugin_overrides["priority"] == 10
+ assert config.plugin_overrides["mode"] == "enforce"
+ assert config.plugin_overrides["tags"] == ["security", "filter"]
+
+
+class TestCertificatesConfig:
+ """Test CertificatesConfig validation."""
+
+ def test_certificates_defaults(self):
+ """Test certificates default values."""
+ config = CertificatesConfig()
+ assert config.validity_days == 825
+ assert config.auto_generate is True
+ assert config.ca_path == "./certs/mcp/ca"
+ assert config.gateway_path == "./certs/mcp/gateway"
+ assert config.plugins_path == "./certs/mcp/plugins"
+
+ def test_certificates_custom_values(self):
+ """Test certificates with custom values."""
+ config = CertificatesConfig(
+ validity_days=365,
+ auto_generate=False,
+ ca_path="/custom/ca",
+ gateway_path="/custom/gateway",
+ plugins_path="/custom/plugins"
+ )
+ assert config.validity_days == 365
+ assert config.auto_generate is False
+ assert config.ca_path == "/custom/ca"
+
+
+class TestInfrastructureConfig:
+ """Test InfrastructureConfig validation."""
+
+ def test_postgres_defaults(self):
+ """Test PostgreSQL default configuration."""
+ config = PostgresConfig()
+ assert config.enabled is True
+ assert config.image == "quay.io/sclorg/postgresql-15-c9s:latest"
+ assert config.database == "mcp"
+ assert config.user == "postgres"
+ assert config.password == "mysecretpassword"
+ assert config.storage_size == "10Gi"
+
+ def test_postgres_custom(self):
+ """Test PostgreSQL custom configuration."""
+ config = PostgresConfig(
+ enabled=True,
+ image="postgres:16",
+ database="customdb",
+ user="customuser",
+ password="custompass",
+ storage_size="20Gi",
+ storage_class="fast-ssd"
+ )
+ assert config.image == "postgres:16"
+ assert config.database == "customdb"
+ assert config.storage_class == "fast-ssd"
+
+ def test_redis_defaults(self):
+ """Test Redis default configuration."""
+ config = RedisConfig()
+ assert config.enabled is True
+ assert config.image == "redis:latest"
+
+ def test_infrastructure_defaults(self):
+ """Test infrastructure with default values."""
+ config = InfrastructureConfig()
+ assert config.postgres.enabled is True
+ assert config.redis.enabled is True
+
+
+class TestMCPStackConfig:
+ """Test complete MCPStackConfig validation."""
+
+ def test_minimal_config(self):
+ """Test minimal valid configuration."""
+ config = MCPStackConfig(
+ deployment=DeploymentConfig(type="compose", project_name="test"),
+ gateway=GatewayConfig(image="mcpgateway:latest")
+ )
+ assert config.deployment.type == "compose"
+ assert config.gateway.image == "mcpgateway:latest"
+ assert config.plugins == []
+
+ def test_full_config(self):
+ """Test full configuration with all options."""
+ config = MCPStackConfig(
+ deployment=DeploymentConfig(type="kubernetes", namespace="prod"),
+ gateway=GatewayConfig(
+ image="mcpgateway:latest",
+ port=4444,
+ mtls_enabled=True
+ ),
+ plugins=[
+ PluginConfig(name="Plugin1", image="plugin1:latest"),
+ PluginConfig(name="Plugin2", image="plugin2:latest")
+ ],
+ certificates=CertificatesConfig(validity_days=365),
+ infrastructure=InfrastructureConfig()
+ )
+ assert config.deployment.namespace == "prod"
+ assert len(config.plugins) == 2
+ assert config.certificates.validity_days == 365
+
+ def test_duplicate_plugin_names(self):
+ """Test that duplicate plugin names are rejected."""
+ with pytest.raises(ValidationError, match="Duplicate plugin names found"):
+ MCPStackConfig(
+ deployment=DeploymentConfig(type="compose"),
+ gateway=GatewayConfig(image="test:latest"),
+ plugins=[
+ PluginConfig(name="DuplicatePlugin", image="plugin1:latest"),
+ PluginConfig(name="DuplicatePlugin", image="plugin2:latest")
+ ]
+ )
+
+ def test_unique_plugin_names(self):
+ """Test that unique plugin names are accepted."""
+ config = MCPStackConfig(
+ deployment=DeploymentConfig(type="compose"),
+ gateway=GatewayConfig(image="test:latest"),
+ plugins=[
+ PluginConfig(name="Plugin1", image="plugin1:latest"),
+ PluginConfig(name="Plugin2", image="plugin2:latest"),
+ PluginConfig(name="Plugin3", image="plugin3:latest")
+ ]
+ )
+ assert len(config.plugins) == 3
+ assert [p.name for p in config.plugins] == ["Plugin1", "Plugin2", "Plugin3"]
+
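+ # Duplicate detection like the above is typically a model-level validator
+ # on MCPStackConfig; a sketch under that assumption:
+ #
+ #     @model_validator(mode="after")
+ #     def _check_unique_plugin_names(self):
+ #         names = [p.name for p in self.plugins]
+ #         if len(names) != len(set(names)):
+ #             raise ValueError("Duplicate plugin names found")
+ #         return self
+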
+ def test_config_with_repo_builds(self):
+ """Test configuration with repository builds."""
+ config = MCPStackConfig(
+ deployment=DeploymentConfig(type="compose"),
+ gateway=GatewayConfig(
+ repo="https://github.com/org/gateway.git",
+ ref="v2.0.0"
+ ),
+ plugins=[
+ PluginConfig(
+ name="BuiltPlugin",
+ repo="https://github.com/org/plugin.git",
+ ref="main",
+ context="plugins/src"
+ )
+ ]
+ )
+ assert config.gateway.repo is not None
+ assert config.gateway.ref == "v2.0.0"
+ assert config.plugins[0].repo is not None
+ assert config.plugins[0].context == "plugins/src"
+
+
+class TestBuildableConfig:
+ """Test BuildableConfig base class validation."""
+
+ def test_mtls_defaults(self):
+ """Test mTLS default settings."""
+ config = GatewayConfig(image="test:latest")
+ assert config.mtls_enabled is True
+
+ def test_mtls_disabled(self):
+ """Test mTLS can be disabled."""
+ config = GatewayConfig(image="test:latest", mtls_enabled=False)
+ assert config.mtls_enabled is False
+
+ def test_env_vars(self):
+ """Test environment variables."""
+ config = GatewayConfig(
+ image="test:latest",
+ env_vars={"LOG_LEVEL": "DEBUG", "PORT": "4444"}
+ )
+ assert config.env_vars["LOG_LEVEL"] == "DEBUG"
+ assert config.env_vars["PORT"] == "4444"
+
+ def test_multi_stage_build(self):
+ """Test multi-stage build target."""
+ config = PluginConfig(
+ name="TestPlugin",
+ repo="https://github.com/org/plugin.git",
+ containerfile="Dockerfile",
+ target="production"
+ )
+ assert config.containerfile == "Dockerfile"
+ assert config.target == "production"
diff --git a/tests/unit/mcpgateway/utils/test_correlation_id.py b/tests/unit/mcpgateway/utils/test_correlation_id.py
new file mode 100644
index 000000000..6b80ae163
--- /dev/null
+++ b/tests/unit/mcpgateway/utils/test_correlation_id.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""Tests for correlation ID utilities."""
+
+# Standard
+import asyncio
+
+# Third-Party
+import pytest
+
+# First-Party
+from mcpgateway.utils.correlation_id import (
+ clear_correlation_id,
+ extract_correlation_id_from_headers,
+ generate_correlation_id,
+ get_correlation_id,
+ get_or_generate_correlation_id,
+ set_correlation_id,
+ validate_correlation_id,
+)
+
+
+def test_generate_correlation_id():
+ """Test correlation ID generation."""
+ id1 = generate_correlation_id()
+ id2 = generate_correlation_id()
+
+ assert id1 is not None
+ assert id2 is not None
+ assert id1 != id2
+ assert len(id1) == 32 # UUID4 hex is 32 characters
+ assert len(id2) == 32
+
+
+def test_set_and_get_correlation_id():
+ """Test setting and getting correlation ID."""
+ test_id = "test-correlation-123"
+
+ set_correlation_id(test_id)
+ retrieved_id = get_correlation_id()
+
+ assert retrieved_id == test_id
+
+ clear_correlation_id()
+
+
+def test_clear_correlation_id():
+ """Test clearing correlation ID."""
+ test_id = "test-correlation-456"
+
+ set_correlation_id(test_id)
+ assert get_correlation_id() == test_id
+
+ clear_correlation_id()
+ assert get_correlation_id() is None
+
+
+def test_get_correlation_id_returns_none_when_not_set():
+ """Test getting correlation ID when not set."""
+ clear_correlation_id()
+ assert get_correlation_id() is None
+
+
+def test_extract_correlation_id_from_headers():
+ """Test extracting correlation ID from headers."""
+ headers = {"X-Correlation-ID": "header-correlation-789"}
+
+ correlation_id = extract_correlation_id_from_headers(headers)
+ assert correlation_id == "header-correlation-789"
+
+
+def test_extract_correlation_id_from_headers_case_insensitive():
+ """Test case-insensitive header extraction."""
+ headers = {"x-correlation-id": "lowercase-id"}
+
+ correlation_id = extract_correlation_id_from_headers(headers)
+ assert correlation_id == "lowercase-id"
+
+
+def test_extract_correlation_id_from_headers_custom_header():
+ """Test extracting from custom header name."""
+ headers = {"X-Request-ID": "custom-request-id"}
+
+ correlation_id = extract_correlation_id_from_headers(headers, "X-Request-ID")
+ assert correlation_id == "custom-request-id"
+
+
+def test_extract_correlation_id_from_headers_not_found():
+ """Test when correlation ID header is not present."""
+ headers = {"Content-Type": "application/json"}
+
+ correlation_id = extract_correlation_id_from_headers(headers)
+ assert correlation_id is None
+
+
+def test_extract_correlation_id_from_headers_empty_value():
+ """Test when correlation ID header has empty value."""
+ headers = {"X-Correlation-ID": " "}
+
+ correlation_id = extract_correlation_id_from_headers(headers)
+ assert correlation_id is None
+
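+# The header tests above and below pin down behaviour consistent with a
+# case-insensitive, whitespace-stripping lookup (a sketch, not the actual
+# implementation):
+#
+#     def extract_correlation_id_from_headers(headers, header_name="X-Correlation-ID"):
+#         for key, value in headers.items():
+#             if key.lower() == header_name.lower():
+#                 value = value.strip()
+#                 return value or None
+#         return None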
+
+def test_get_or_generate_correlation_id_when_not_set():
+ """Test get_or_generate when ID is not set."""
+ clear_correlation_id()
+
+ correlation_id = get_or_generate_correlation_id()
+
+ assert correlation_id is not None
+ assert len(correlation_id) == 32
+ assert get_correlation_id() == correlation_id # Should be stored
+
+ clear_correlation_id()
+
+
+def test_get_or_generate_correlation_id_when_already_set():
+ """Test get_or_generate when ID is already set."""
+ test_id = "existing-correlation-id"
+ set_correlation_id(test_id)
+
+ correlation_id = get_or_generate_correlation_id()
+
+ assert correlation_id == test_id
+
+ clear_correlation_id()
+
+
+def test_validate_correlation_id_valid():
+ """Test validation of valid correlation IDs."""
+ assert validate_correlation_id("abc-123") is True
+ assert validate_correlation_id("test_id_456") is True
+ assert validate_correlation_id("UPPER-lower-123_mix") is True
+
+
+def test_validate_correlation_id_invalid():
+ """Test validation of invalid correlation IDs."""
+ assert validate_correlation_id(None) is False
+ assert validate_correlation_id("") is False
+ assert validate_correlation_id(" ") is False
+ assert validate_correlation_id("id with spaces") is False
+ assert validate_correlation_id("id@special!chars") is False
+
+
+def test_validate_correlation_id_too_long():
+ """Test validation rejects overly long IDs."""
+ long_id = "a" * 256 # Default max is 255
+
+ assert validate_correlation_id(long_id) is False
+ assert validate_correlation_id(long_id, max_length=300) is True
+
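+# The accepted and rejected inputs above are consistent with a simple
+# character-class check (a sketch; the real pattern may differ):
+#
+#     import re
+#
+#     def validate_correlation_id(value, max_length=255):
+#         if not value or len(value) > max_length:
+#             return False
+#         return re.fullmatch(r"[A-Za-z0-9_-]+", value) is not None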
+
+@pytest.mark.asyncio
+async def test_correlation_id_isolation_between_async_tasks():
+ """Test that correlation IDs are isolated between concurrent async tasks."""
+ results = []
+
+ async def task_with_id(task_id: str):
+ set_correlation_id(task_id)
+ await asyncio.sleep(0.01) # Simulate async work
+ retrieved_id = get_correlation_id()
+ results.append((task_id, retrieved_id))
+ clear_correlation_id()
+
+ # Run multiple tasks concurrently
+ await asyncio.gather(
+ task_with_id("task-1"),
+ task_with_id("task-2"),
+ task_with_id("task-3"),
+ )
+
+ # Each task should have retrieved its own ID
+ assert len(results) == 3
+ for task_id, retrieved_id in results:
+ assert task_id == retrieved_id
+
+
+@pytest.mark.asyncio
+async def test_correlation_id_inheritance_in_nested_tasks():
+ """Test that correlation ID is inherited by child async tasks."""
+
+ async def parent_task():
+ set_correlation_id("parent-id")
+ parent_id = get_correlation_id()
+
+ async def child_task():
+ return get_correlation_id()
+
+ child_id = await child_task()
+
+ clear_correlation_id()
+ return parent_id, child_id
+
+ parent_id, child_id = await parent_task()
+
+ # Child should inherit parent's correlation ID
+ assert parent_id == "parent-id"
+ assert child_id == "parent-id"
+
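+# Both behaviours above fall out of storing the ID in a contextvars.ContextVar:
+# asyncio.gather wraps each coroutine in a Task that copies the current context
+# (isolation), while a directly awaited coroutine shares the caller's context
+# (inheritance). A sketch of the assumed storage:
+#
+#     import contextvars
+#
+#     _correlation_id: contextvars.ContextVar = contextvars.ContextVar(
+#         "correlation_id", default=None)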
+
+def test_correlation_id_context_isolation():
+ """Test that correlation ID is properly isolated per context."""
+ clear_correlation_id()
+
+ # Set ID in one context
+ set_correlation_id("context-1")
+ assert get_correlation_id() == "context-1"
+
+ # Overwrite with new ID
+ set_correlation_id("context-2")
+ assert get_correlation_id() == "context-2"
+
+ clear_correlation_id()
+ assert get_correlation_id() is None
+
+
+def test_extract_correlation_id_strips_whitespace():
+ """Test that extracted correlation ID has whitespace stripped."""
+ headers = {"X-Correlation-ID": " trimmed-id "}
+
+ correlation_id = extract_correlation_id_from_headers(headers)
+ assert correlation_id == "trimmed-id"
diff --git a/tests/unit/mcpgateway/utils/test_ssl_key_manager.py b/tests/unit/mcpgateway/utils/test_ssl_key_manager.py
new file mode 100644
index 000000000..b1a4291c4
--- /dev/null
+++ b/tests/unit/mcpgateway/utils/test_ssl_key_manager.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/utils/test_ssl_key_manager.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Keval Mahajan
+
+Unit tests for SSL key manager utility.
+"""
+
+# Standard
+import os
+from pathlib import Path
+
+# Third-Party
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+import pytest
+
+# First-Party
+from mcpgateway.utils.ssl_key_manager import SSLKeyManager, prepare_ssl_key
+
+
+@pytest.fixture
+def temp_cert_dir(tmp_path):
+ """Create a temporary directory for test certificates."""
+ cert_dir = tmp_path / "certs"
+ cert_dir.mkdir()
+ return cert_dir
+
+
+@pytest.fixture
+def unencrypted_key(temp_cert_dir):
+ """Generate an unencrypted RSA private key for testing."""
+ # Generate a test RSA key
+ private_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ )
+
+ # Save as unencrypted PEM
+ key_path = temp_cert_dir / "key.pem"
+ with open(key_path, "wb") as f:
+ f.write(
+ private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption(),
+ )
+ )
+
+ return key_path
+
+
+@pytest.fixture
+def encrypted_key(temp_cert_dir):
+ """Generate a passphrase-protected RSA private key for testing."""
+ # Generate a test RSA key
+ private_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ )
+
+ # Save as encrypted PEM with passphrase "test123"
+ key_path = temp_cert_dir / "key-encrypted.pem"
+ with open(key_path, "wb") as f:
+ f.write(
+ private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.BestAvailableEncryption(b"test123"),
+ )
+ )
+
+ return key_path, "test123"
+
+
+class TestSSLKeyManager:
+ """Test suite for SSLKeyManager class."""
+
+ def test_prepare_key_file_unencrypted(self, unencrypted_key):
+ """Test that unencrypted keys are returned as-is."""
+ manager = SSLKeyManager()
+
+ result = manager.prepare_key_file(str(unencrypted_key))
+
+ # Should return the original path
+ assert result == str(unencrypted_key)
+
+ # No temporary file should be created
+ assert manager._temp_key_file is None
+
+ def test_prepare_key_file_encrypted(self, encrypted_key):
+ """Test that encrypted keys are decrypted to temporary files."""
+ key_path, passphrase = encrypted_key
+ manager = SSLKeyManager()
+
+ result = manager.prepare_key_file(str(key_path), passphrase)
+
+ # Should return a different path (temporary file)
+ assert result != str(key_path)
+
+ # Temporary file should exist
+ temp_path = Path(result)
+ assert temp_path.exists()
+
+ # Temporary file should have restrictive permissions (0o600)
+ stat_info = os.stat(result)
+ permissions = stat_info.st_mode & 0o777
+ assert permissions == 0o600
+
+ # Temporary file should be tracked
+ assert manager._temp_key_file == temp_path
+
+ # Verify the decrypted key is valid
+ with open(result, "rb") as f:
+ key_data = f.read()
+ # Should be able to load without password
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ private_key = load_pem_private_key(key_data, password=None)
+ assert private_key is not None
+
+ # Cleanup
+ manager.cleanup()
+ assert not temp_path.exists()
+
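+ # Decryption inside prepare_key_file presumably follows the standard
+ # cryptography round-trip (a sketch, not the verified implementation):
+ #
+ #     key = serialization.load_pem_private_key(pem_bytes, password=passphrase.encode())
+ #     plain = key.private_bytes(
+ #         serialization.Encoding.PEM,
+ #         serialization.PrivateFormat.TraditionalOpenSSL,
+ #         serialization.NoEncryption(),
+ #     )
+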
+ def test_prepare_key_file_wrong_passphrase(self, encrypted_key):
+ """Test that wrong passphrase raises ValueError."""
+ key_path, _ = encrypted_key
+ manager = SSLKeyManager()
+
+ with pytest.raises(ValueError, match="Failed to decrypt private key"):
+ manager.prepare_key_file(str(key_path), "wrong_password")
+
+ # Ensure cleanup was called
+ assert manager._temp_key_file is None
+
+ def test_prepare_key_file_missing_file(self, temp_cert_dir):
+ """Test that missing key file raises FileNotFoundError."""
+ manager = SSLKeyManager()
+ missing_path = temp_cert_dir / "nonexistent.pem"
+
+ with pytest.raises(FileNotFoundError, match="Key file not found"):
+ manager.prepare_key_file(str(missing_path))
+
+ def test_cleanup_removes_temp_file(self, encrypted_key):
+ """Test that cleanup removes temporary files."""
+ key_path, passphrase = encrypted_key
+ manager = SSLKeyManager()
+
+ # Create temporary file
+ temp_path = manager.prepare_key_file(str(key_path), passphrase)
+ assert Path(temp_path).exists()
+
+ # Cleanup should remove it
+ manager.cleanup()
+ assert not Path(temp_path).exists()
+ assert manager._temp_key_file is None
+
+ def test_cleanup_idempotent(self):
+ """Test that cleanup can be called multiple times safely."""
+ manager = SSLKeyManager()
+
+ # Should not raise even if no temp file exists
+ manager.cleanup()
+ manager.cleanup()
+
+ def test_prepare_ssl_key_convenience_function(self, unencrypted_key):
+ """Test the convenience function prepare_ssl_key."""
+ result = prepare_ssl_key(str(unencrypted_key))
+
+ # Should work the same as the manager method
+ assert result == str(unencrypted_key)
+
+ def test_prepare_ssl_key_with_passphrase(self, encrypted_key):
+ """Test convenience function with passphrase."""
+ key_path, passphrase = encrypted_key
+
+ result = prepare_ssl_key(str(key_path), passphrase)
+
+ # Should return a temporary file path
+ assert result != str(key_path)
+ assert Path(result).exists()
+
+ # Verify it's a valid unencrypted key
+ with open(result, "rb") as f:
+ key_data = f.read()
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ private_key = load_pem_private_key(key_data, password=None)
+ assert private_key is not None
+
+
+class TestSSLKeyManagerIntegration:
+ """Integration tests for SSL key manager."""
+
+ def test_atexit_cleanup(self, encrypted_key):
+ """Test that atexit handler is registered for cleanup."""
+ import atexit
+
+ key_path, passphrase = encrypted_key
+ manager = SSLKeyManager()
+
+ # Get initial atexit handlers count
+ initial_handlers = len(atexit._exithandlers) if hasattr(atexit, '_exithandlers') else 0
+
+ # Prepare key (should register cleanup)
+ manager.prepare_key_file(str(key_path), passphrase)
+
+ # Verify an atexit handler was registered
+ # Note: CPython 3 no longer exposes atexit._exithandlers, so this check
+ # is skipped there; it only runs on interpreters that provide it
+ if hasattr(atexit, '_exithandlers'):
+ assert len(atexit._exithandlers) > initial_handlers
+
+ # Manual cleanup for test
+ manager.cleanup()
+
+ def test_multiple_keys(self, temp_cert_dir):
+ """Test handling multiple keys (should only track the last one)."""
+ # Generate two encrypted keys
+ key1 = rsa.generate_private_key(public_exponent=65537, key_size=2048)
+ key2 = rsa.generate_private_key(public_exponent=65537, key_size=2048)
+
+ key1_path = temp_cert_dir / "key1.pem"
+ key2_path = temp_cert_dir / "key2.pem"
+
+ for key, path in [(key1, key1_path), (key2, key2_path)]:
+ with open(path, "wb") as f:
+ f.write(
+ key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.BestAvailableEncryption(b"test"),
+ )
+ )
+
+ manager = SSLKeyManager()
+
+ # Prepare first key
+ temp1 = manager.prepare_key_file(str(key1_path), "test")
+ temp1_path = Path(temp1)
+ assert temp1_path.exists()
+
+ # Prepare second key (should replace the first)
+ temp2 = manager.prepare_key_file(str(key2_path), "test")
+ temp2_path = Path(temp2)
+ assert temp2_path.exists()
+
+ # Only the second temp file should be tracked
+ assert manager._temp_key_file == temp2_path
+
+ # Cleanup should only remove the second file
+ manager.cleanup()
+ assert not temp2_path.exists()
diff --git a/tests/unit/mcpgateway/utils/test_verify_credentials.py b/tests/unit/mcpgateway/utils/test_verify_credentials.py
index dabf49f63..942f2f800 100644
--- a/tests/unit/mcpgateway/utils/test_verify_credentials.py
+++ b/tests/unit/mcpgateway/utils/test_verify_credentials.py
@@ -281,9 +281,16 @@ async def test_require_auth_override_basic_auth_disabled(monkeypatch):
@pytest.fixture
-def test_client():
- if app is None:
- pytest.skip("FastAPI app not importable")
+def test_client(app, monkeypatch):
+ """Create a test client with the properly configured app fixture from conftest."""
+ from unittest.mock import MagicMock
+
+ # Patch security_logger at the middleware level where it's imported and called
+ mock_sec_logger = MagicMock()
+ mock_sec_logger.log_authentication_attempt = MagicMock(return_value=None)
+ mock_sec_logger.log_security_event = MagicMock(return_value=None)
+ monkeypatch.setattr("mcpgateway.middleware.auth_middleware.security_logger", mock_sec_logger)
+
return TestClient(app)
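+
+# Example usage of the patched fixture (a sketch; the route and expected
+# status codes are assumptions):
+#
+#     def test_example(test_client):
+#         response = test_client.get("/some/protected/route")
+#         assert response.status_code in (401, 403)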