From 33c4be6b66ea9297af90e2855935e6827408a73e Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Sun, 28 Dec 2025 13:13:15 -0500 Subject: [PATCH 01/20] use bindfs for the src dir for permission handling --- src/commands/runtime/provision.rs | 189 +++------------------------ src/commands/sdk/run.rs | 93 +------------- src/utils/config.rs | 67 ++++++++++ src/utils/container.rs | 204 +++++++++++++++++++++++++++++- 4 files changed, 290 insertions(+), 263 deletions(-) diff --git a/src/commands/runtime/provision.rs b/src/commands/runtime/provision.rs index ab70267..bee72f4 100644 --- a/src/commands/runtime/provision.rs +++ b/src/commands/runtime/provision.rs @@ -308,10 +308,8 @@ impl RuntimeProvisionCommand { return Err(anyhow::anyhow!("Failed to provision runtime")); } - // Fix file ownership if --out was specified - if let Some(out_path) = &self.config.out { - self.fix_output_permissions(out_path).await?; - } + // Note: File ownership is automatically handled by bindfs permission translation, + // so no explicit chown is needed for the output directory. // Copy state file back from container if it exists if let Some((ref state_file_path, ref container_state_path)) = state_file_info { @@ -492,123 +490,6 @@ impl RuntimeProvisionCommand { Ok(()) } - /// Fix file ownership of output directory to match calling user - async fn fix_output_permissions(&self, out_path: &str) -> Result<()> { - // Get the absolute path to the output directory - let src_dir = std::env::current_dir()?; - let out_dir = src_dir.join(out_path); - - // Only proceed if the directory exists - if !out_dir.exists() { - if self.config.verbose { - print_info( - &format!("Output directory does not exist yet: {}", out_dir.display()), - OutputLevel::Verbose, - ); - } - return Ok(()); - } - - // Get current user's UID and GID - #[cfg(unix)] - { - // Get the UID and GID of the calling user - let uid = unsafe { libc::getuid() }; - let gid = unsafe { libc::getgid() }; - - if self.config.verbose { - print_info( - &format!( - "Fixing ownership of {} to {}:{}", - out_dir.display(), - uid, - gid - ), - OutputLevel::Verbose, - ); - } - - // Load configuration to get container image - let config = load_config(&self.config.config_path)?; - let container_image = config - .get_sdk_image() - .context("No SDK container image specified in configuration")?; - - // Build the chown command to run inside the container - let container_out_path = format!("/opt/src/{}", out_path); - let chown_script = format!("chown -R {}:{} '{}'", uid, gid, container_out_path); - - // Run chown inside a container with the same volume mounts - let container_tool = "docker"; - let volume_manager = - VolumeManager::new(container_tool.to_string(), self.config.verbose); - let volume_state = volume_manager.get_or_create_volume(&src_dir).await?; - - let mut chown_cmd = vec![ - container_tool.to_string(), - "run".to_string(), - "--rm".to_string(), - ]; - - // Mount the source directory - chown_cmd.push("-v".to_string()); - chown_cmd.push(format!("{}:/opt/src:rw", src_dir.display())); - - // Mount the volume - chown_cmd.push("-v".to_string()); - chown_cmd.push(format!("{}:/opt/_avocado:rw", volume_state.volume_name)); - - // Add the container image - chown_cmd.push(container_image.to_string()); - - // Add the command - chown_cmd.push("bash".to_string()); - chown_cmd.push("-c".to_string()); - chown_cmd.push(chown_script); - - if self.config.verbose { - print_info( - &format!("Running: {}", chown_cmd.join(" ")), - OutputLevel::Verbose, - ); - } - - let mut cmd = 
tokio::process::Command::new(&chown_cmd[0]); - cmd.args(&chown_cmd[1..]); - cmd.stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()); - - let status = cmd - .status() - .await - .context("Failed to execute chown command")?; - - if !status.success() { - print_info( - "Warning: Failed to fix ownership of output directory. Files may be owned by root.", - OutputLevel::Normal, - ); - } else if self.config.verbose { - print_info( - "Successfully fixed output directory ownership", - OutputLevel::Verbose, - ); - } - } - - #[cfg(not(unix))] - { - if self.config.verbose { - print_info( - "Skipping ownership fix on non-Unix platform", - OutputLevel::Verbose, - ); - } - } - - Ok(()) - } - fn create_provision_script(&self, target_arch: &str) -> Result { let script = format!( r#" @@ -812,68 +693,28 @@ avocado-provision-{} {} ); } - // Get current user's UID and GID for proper ownership - #[cfg(unix)] - let (uid, gid) = { - let uid = unsafe { libc::getuid() }; - let gid = unsafe { libc::getgid() }; - (uid, gid) - }; - - #[cfg(not(unix))] - let (uid, gid) = (0u32, 0u32); - - // Ensure parent directory exists on host and copy file with correct ownership + // Ensure parent directory exists on host if let Some(parent) = host_state_file.parent() { std::fs::create_dir_all(parent)?; } - // Copy from container to host src_dir with proper ownership + // Copy from container to host src_dir + // Note: File ownership is automatically handled by bindfs permission translation let copy_script = format!( - "cp '{}' '/opt/src/{}' && chown {}:{} '/opt/src/{}'", - container_state_path, state_file_path, uid, gid, state_file_path + "cp '{}' '/opt/src/{}'", + container_state_path, state_file_path ); - let mut copy_cmd = vec![ - container_tool.to_string(), - "run".to_string(), - "--rm".to_string(), - ]; - - // Mount the source directory (read-write to copy file back) - copy_cmd.push("-v".to_string()); - copy_cmd.push(format!("{}:/opt/src:rw", src_dir.display())); - - // Mount the volume - copy_cmd.push("-v".to_string()); - copy_cmd.push(format!("{}:/opt/_avocado:ro", volume_state.volume_name)); - - // Add the container image - copy_cmd.push(container_image.to_string()); - - // Add the command - copy_cmd.push("bash".to_string()); - copy_cmd.push("-c".to_string()); - copy_cmd.push(copy_script); - - if self.config.verbose { - print_info( - &format!("Running: {}", copy_cmd.join(" ")), - OutputLevel::Verbose, - ); - } - - let mut cmd = tokio::process::Command::new(©_cmd[0]); - cmd.args(©_cmd[1..]); - cmd.stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()); + // Use shared SdkContainer for running the command + let container_helper = SdkContainer::new() + .with_src_dir(Some(src_dir.to_path_buf())) + .verbose(self.config.verbose); - let status = cmd - .status() - .await - .context("Failed to copy state file from container")?; + let success = container_helper + .run_simple_command(&container_image, ©_script, true) + .await?; - if !status.success() { + if !success { print_info( "Warning: Failed to copy state file from container", OutputLevel::Normal, diff --git a/src/commands/sdk/run.rs b/src/commands/sdk/run.rs index 0c7bc5e..92601e0 100644 --- a/src/commands/sdk/run.rs +++ b/src/commands/sdk/run.rs @@ -5,9 +5,8 @@ use anyhow::{Context, Result}; use crate::utils::{ config::Config, container::{RunConfig, SdkContainer}, - output::{print_error, print_success, OutputLevel}, + output::{print_success, OutputLevel}, target::validate_and_log_target, - volume::VolumeManager, }; /// Implementation of 
the 'sdk run' command. @@ -159,11 +158,14 @@ impl SdkRunCommand { let container_helper = SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); - // Create shared RunConfig for all execution modes + // Create RunConfig - detach mode is now handled by the shared run_in_container let run_config = RunConfig { container_image: container_image.to_string(), target: target.clone(), command: command.clone(), + container_name: self.name.clone(), + detach: self.detach, + rm: self.rm, verbose: self.verbose, source_environment: self.env, interactive: self.interactive, @@ -177,12 +179,8 @@ impl SdkRunCommand { ..Default::default() }; - let success = if self.detach { - self.run_detached_container(&container_helper, &run_config) - .await? - } else { - container_helper.run_in_container(run_config).await? - }; + // Use shared run_in_container for both detached and non-detached modes + let success = container_helper.run_in_container(run_config).await?; if success { print_success("SDK command completed successfully.", OutputLevel::Normal); @@ -190,83 +188,6 @@ impl SdkRunCommand { Ok(()) } - - /// Run container in detached mode - async fn run_detached_container( - &self, - container_helper: &SdkContainer, - config: &RunConfig, - ) -> Result { - // Get or create docker volume for persistent state - let volume_manager = VolumeManager::new(container_helper.container_tool.clone(), false); - let volume_state = volume_manager - .get_or_create_volume(&container_helper.cwd) - .await?; - // Build container command for detached mode - let mut container_cmd = vec![ - container_helper.container_tool.clone(), - "run".to_string(), - "-d".to_string(), - ]; - - if self.rm { - container_cmd.push("--rm".to_string()); - } - - if let Some(ref name) = self.name { - container_cmd.push("--name".to_string()); - container_cmd.push(name.clone()); - } - - // Volume mounts: docker volume for persistent state, bind mount for source - container_cmd.push("-v".to_string()); - let src_path = container_helper - .src_dir - .as_ref() - .unwrap_or(&container_helper.cwd); - container_cmd.push(format!("{}:/opt/src:rw", src_path.display())); - container_cmd.push("-v".to_string()); - container_cmd.push(format!("{}:/opt/_avocado:rw", volume_state.volume_name)); - - // Add environment variables - container_cmd.push("-e".to_string()); - container_cmd.push(format!("AVOCADO_TARGET={}", config.target)); - container_cmd.push("-e".to_string()); - container_cmd.push(format!("AVOCADO_SDK_TARGET={}", config.target)); - - // Add merged container args - if let Some(args) = &config.container_args { - container_cmd.extend_from_slice(args); - } - - // Add the container image - container_cmd.push(config.container_image.clone()); - - // Add the command - container_cmd.push("bash".to_string()); - container_cmd.push("-c".to_string()); - container_cmd.push(config.command.clone()); - - // Execute using tokio Command - let output = tokio::process::Command::new(&container_cmd[0]) - .args(&container_cmd[1..]) - .output() - .await - .with_context(|| "Failed to execute detached container command")?; - - if output.status.success() { - let container_id = String::from_utf8_lossy(&output.stdout).trim().to_string(); - println!("Container started in detached mode with ID: {container_id}"); - Ok(true) - } else { - let stderr = String::from_utf8_lossy(&output.stderr); - print_error( - &format!("Container execution failed: {stderr}"), - OutputLevel::Normal, - ); - Ok(false) - } - } } #[cfg(test)] diff --git a/src/utils/config.rs b/src/utils/config.rs index 
762aa26..2ba9c2c 100644
--- a/src/utils/config.rs
+++ b/src/utils/config.rs
@@ -201,6 +201,10 @@ pub struct SdkConfig {
     #[serde(default, deserialize_with = "container_args_deserializer::deserialize")]
     pub container_args: Option<Vec<String>>,
     pub disable_weak_dependencies: Option<bool>,
+    /// Host UID for bindfs permission translation (overrides libc::getuid())
+    pub host_uid: Option<u32>,
+    /// Host GID for bindfs permission translation (overrides libc::getgid())
+    pub host_gid: Option<u32>,
 }
 
 /// Compile configuration for SDK
@@ -2520,6 +2524,12 @@ fn merge_sdk_configs(mut base: SdkConfig, target: SdkConfig) -> SdkConfig {
     if target.container_args.is_some() {
         base.container_args = target.container_args;
     }
+    if target.host_uid.is_some() {
+        base.host_uid = target.host_uid;
+    }
+    if target.host_gid.is_some() {
+        base.host_gid = target.host_gid;
+    }
 
     // For dependencies and compile, merge the HashMaps
     if let Some(target_deps) = target.dependencies {
@@ -2555,6 +2565,63 @@ fn merge_sdk_configs(mut base: SdkConfig, target: SdkConfig) -> SdkConfig {
     base
 }
 
+/// Resolve host UID/GID for bindfs permission translation.
+///
+/// Priority (highest first):
+/// 1. Environment variables: `AVOCADO_HOST_UID` / `AVOCADO_HOST_GID`
+/// 2. Config file: `sdk.host_uid` / `sdk.host_gid`
+/// 3. libc calls: `libc::getuid()` / `libc::getgid()` (default fallback)
+///
+/// # Arguments
+/// * `config` - Optional SDK configuration to check for host_uid/host_gid
+///
+/// # Returns
+/// Tuple of (uid, gid) resolved according to the priority chain
+pub fn resolve_host_uid_gid(config: Option<&SdkConfig>) -> (u32, u32) {
+    // Get fallback values from libc
+    #[cfg(unix)]
+    let (fallback_uid, fallback_gid) = {
+        let uid = unsafe { libc::getuid() };
+        let gid = unsafe { libc::getgid() };
+        (uid, gid)
+    };
+
+    #[cfg(not(unix))]
+    let (fallback_uid, fallback_gid) = (0u32, 0u32);
+
+    // Resolve UID: env var > config > libc
+    let uid = if let Ok(env_uid) = env::var("AVOCADO_HOST_UID") {
+        env_uid.parse::<u32>().unwrap_or_else(|_| {
+            eprintln!(
+                "Warning: Invalid AVOCADO_HOST_UID '{}', using fallback",
+                env_uid
+            );
+            fallback_uid
+        })
+    } else if let Some(cfg) = config {
+        cfg.host_uid.unwrap_or(fallback_uid)
+    } else {
+        fallback_uid
+    };
+
+    // Resolve GID: env var > config > libc
+    let gid = if let Ok(env_gid) = env::var("AVOCADO_HOST_GID") {
+        env_gid.parse::<u32>().unwrap_or_else(|_| {
+            eprintln!(
+                "Warning: Invalid AVOCADO_HOST_GID '{}', using fallback",
+                env_gid
+            );
+            fallback_gid
+        })
+    } else if let Some(cfg) = config {
+        cfg.host_gid.unwrap_or(fallback_gid)
+    } else {
+        fallback_gid
+    };
+
+    (uid, gid)
+}
+
 /// Convenience function to load a config file
 #[allow(dead_code)]
 pub fn load_config<P: AsRef<Path>>(config_path: P) -> Result<Config> {
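
The three-level precedence is easiest to see in a short sketch. This is illustrative only; the `SdkConfig` literal is hypothetical and assumes the struct derives `Default`:

    // Hypothetical values; shows env var > config > libc precedence.
    std::env::set_var("AVOCADO_HOST_UID", "1000");
    let sdk = SdkConfig { host_uid: Some(2000), host_gid: Some(2000), ..Default::default() };
    let (uid, _gid) = resolve_host_uid_gid(Some(&sdk));
    assert_eq!(uid, 1000); // env var wins over sdk.host_uid
    std::env::remove_var("AVOCADO_HOST_UID");
    let (uid, _gid) = resolve_host_uid_gid(Some(&sdk));
    assert_eq!(uid, 2000); // config wins over libc::getuid()

diff --git a/src/utils/container.rs b/src/utils/container.rs
index ec607d7..6d90c43 100644
--- a/src/utils/container.rs
+++ b/src/utils/container.rs
@@ -218,10 +218,17 @@ impl SdkContainer {
             container_cmd.push("-t".to_string());
         }
 
+        // Add FUSE device and capability for bindfs support
+        container_cmd.push("--device".to_string());
+        container_cmd.push("/dev/fuse".to_string());
+        container_cmd.push("--cap-add".to_string());
+        container_cmd.push("SYS_ADMIN".to_string());
+
         // Volume mounts: docker volume for persistent state, bind mount for source
+        // Source is mounted to /mnt/src, then bindfs remounts it to /opt/src with permission translation
         container_cmd.push("-v".to_string());
         let src_path = self.src_dir.as_ref().unwrap_or(&self.cwd);
-        container_cmd.push(format!("{}:/opt/src:rw", src_path.display()));
+        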
container_cmd.push(format!("{}:/mnt/src:rw", src_path.display())); container_cmd.push("-v".to_string()); container_cmd.push(format!("{}:/opt/_avocado:rw", volume_state.volume_name)); @@ -270,6 +277,13 @@ impl SdkContainer { container_cmd.push("-e".to_string()); container_cmd.push("AVOCADO_SRC_DIR=/opt/src".to_string()); + // Pass host UID/GID for bindfs permission translation + let (host_uid, host_gid) = crate::utils::config::resolve_host_uid_gid(None); + container_cmd.push("-e".to_string()); + container_cmd.push(format!("AVOCADO_HOST_UID={}", host_uid)); + container_cmd.push("-e".to_string()); + container_cmd.push(format!("AVOCADO_HOST_GID={}", host_gid)); + // Add signing-related environment variables if config.signing_socket_path.is_some() { container_cmd.push("-e".to_string()); @@ -379,7 +393,7 @@ impl SdkContainer { if config.verbose || self.verbose { print_info( &format!( - "Mounting source directory: {} -> /opt/src", + "Mounting source directory: {} -> /mnt/src (bindfs -> /opt/src)", self.cwd.display() ), OutputLevel::Normal, @@ -532,7 +546,7 @@ impl SdkContainer { if verbose { print_info( &format!( - "Mounting source directory: {} -> /opt/src", + "Mounting source directory: {} -> /mnt/src (bindfs -> /opt/src)", self.cwd.display() ), OutputLevel::Normal, @@ -579,6 +593,123 @@ impl SdkContainer { } } + /// Run a simple command in the container without the full SDK entrypoint. + /// + /// This is useful for quick one-off operations like chown, cp, etc. + /// that don't need the SDK environment setup. + /// + /// # Arguments + /// * `container_image` - The container image to use + /// * `command` - The bash command to run + /// * `rm` - Whether to remove the container after exit + /// + /// # Returns + /// `true` if the command succeeded, `false` otherwise + pub async fn run_simple_command( + &self, + container_image: &str, + command: &str, + rm: bool, + ) -> Result { + // Get or create docker volume for persistent state + let volume_manager = VolumeManager::new(self.container_tool.clone(), self.verbose); + let volume_state = volume_manager.get_or_create_volume(&self.cwd).await?; + + let mut container_cmd = vec![self.container_tool.clone(), "run".to_string()]; + + if rm { + container_cmd.push("--rm".to_string()); + } + + // Add FUSE device and capability for bindfs support + container_cmd.push("--device".to_string()); + container_cmd.push("/dev/fuse".to_string()); + container_cmd.push("--cap-add".to_string()); + container_cmd.push("SYS_ADMIN".to_string()); + + // Volume mounts: docker volume for persistent state, bind mount for source + container_cmd.push("-v".to_string()); + let src_path = self.src_dir.as_ref().unwrap_or(&self.cwd); + container_cmd.push(format!("{}:/mnt/src:rw", src_path.display())); + container_cmd.push("-v".to_string()); + container_cmd.push(format!("{}:/opt/_avocado:rw", volume_state.volume_name)); + + // Pass host UID/GID for bindfs permission translation + let (host_uid, host_gid) = crate::utils::config::resolve_host_uid_gid(None); + container_cmd.push("-e".to_string()); + container_cmd.push(format!("AVOCADO_HOST_UID={}", host_uid)); + container_cmd.push("-e".to_string()); + container_cmd.push(format!("AVOCADO_HOST_GID={}", host_gid)); + + // Add the container image + container_cmd.push(container_image.to_string()); + + // Add the command + container_cmd.push("bash".to_string()); + container_cmd.push("-c".to_string()); + + // Prepend bindfs check and setup to the command + // If host UID is 0 (root), skip bindfs and use simple bind mount + let full_command = if 
host_uid == 0 && host_gid == 0 { + format!( + "mkdir -p /opt/src && mount --bind /mnt/src /opt/src && {}", + command + ) + } else { + format!( + r#"if ! command -v bindfs >/dev/null 2>&1; then + echo "[ERROR] bindfs is not installed in this container image." + echo "" + echo "bindfs is required for proper file permission handling between the host and container." + echo "" + echo "To install bindfs in your container image, add one of the following to your Dockerfile:" + echo "" + echo " # For Ubuntu/Debian-based images:" + echo " RUN apt-get update && apt-get install -y bindfs" + echo "" + echo " # For Fedora/RHEL-based images:" + echo " RUN dnf install -y bindfs" + echo "" + echo " # For Alpine-based images:" + echo " RUN apk add --no-cache bindfs" + echo "" + echo " # For Arch-based images:" + echo " RUN pacman -S --noconfirm bindfs" + echo "" + exit 1 +fi +mkdir -p /opt/src && bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src && {}"#, + command + ) + }; + container_cmd.push(full_command); + + if self.verbose { + print_info( + &format!( + "Mounting source directory: {} -> /mnt/src (bindfs -> /opt/src)", + src_path.display() + ), + OutputLevel::Normal, + ); + print_info( + &format!("Simple container command: {}", container_cmd.join(" ")), + OutputLevel::Normal, + ); + } + + let mut cmd = AsyncCommand::new(&container_cmd[0]); + cmd.args(&container_cmd[1..]); + cmd.stdout(Stdio::null()).stderr(Stdio::null()); + + let status = cmd + .status() + .await + .with_context(|| "Failed to execute simple container command")?; + + Ok(status.success()) + } + /// Create the entrypoint script for SDK initialization pub fn create_entrypoint_script( &self, @@ -600,6 +731,51 @@ impl SdkContainer { r#" set -e +# Remount source directory with permission translation via bindfs +# This maps host UID/GID to root inside the container for seamless file access +mkdir -p /opt/src + +# Check if bindfs is available +if ! command -v bindfs >/dev/null 2>&1; then + echo "[ERROR] bindfs is not installed in this container image." + echo "" + echo "bindfs is required for proper file permission handling between the host and container." 
+ echo "" + echo "To install bindfs in your container image, add one of the following to your Dockerfile:" + echo "" + echo " # For Ubuntu/Debian-based images:" + echo " RUN apt-get update && apt-get install -y bindfs" + echo "" + echo " # For Fedora/RHEL-based images:" + echo " RUN dnf install -y bindfs" + echo "" + echo " # For Alpine-based images:" + echo " RUN apk add --no-cache bindfs" + echo "" + echo " # For Arch-based images:" + echo " RUN pacman -S --noconfirm bindfs" + echo "" + exit 1 +fi + +if [ -n "$AVOCADO_HOST_UID" ] && [ -n "$AVOCADO_HOST_GID" ]; then + # If host user is already root (UID 0), no mapping needed - just bind mount + if [ "$AVOCADO_HOST_UID" = "0" ] && [ "$AVOCADO_HOST_GID" = "0" ]; then + mount --bind /mnt/src /opt/src + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (host is root, no mapping needed)"; fi + else + # Use --map with colon-separated user and group mappings + # Maps host UID -> 0 (root) and host GID -> 0 (root group) + # Format: --map=uid1/uid2:@gid1/@gid2 + bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src with UID/GID mapping ($AVOCADO_HOST_UID:$AVOCADO_HOST_GID -> 0:0)"; fi + fi +else + # Fallback: simple bind mount without permission translation + mount --bind /mnt/src /opt/src + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (no UID/GID mapping)"; fi +fi + # Get repo url from environment or default to prod if [ -n "$AVOCADO_SDK_REPO_URL" ]; then REPO_URL="$AVOCADO_SDK_REPO_URL" @@ -986,6 +1162,19 @@ mod tests { assert!(cmd.contains(&"test".to_string())); // Verify AVOCADO_SRC_DIR is set assert!(cmd.contains(&"AVOCADO_SRC_DIR=/opt/src".to_string())); + // Verify FUSE device and capability for bindfs support + assert!(cmd.contains(&"--device".to_string())); + assert!(cmd.contains(&"/dev/fuse".to_string())); + assert!(cmd.contains(&"--cap-add".to_string())); + assert!(cmd.contains(&"SYS_ADMIN".to_string())); + // Verify host UID/GID are passed as env vars + let has_uid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_UID=")); + let has_gid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_GID=")); + assert!(has_uid_env, "AVOCADO_HOST_UID should be set"); + assert!(has_gid_env, "AVOCADO_HOST_GID should be set"); + // Verify source mount uses /mnt/src (bindfs will remount to /opt/src) + let has_mnt_src_mount = cmd.iter().any(|s| s.contains(":/mnt/src:")); + assert!(has_mnt_src_mount, "Source should be mounted to /mnt/src"); } #[test] @@ -996,6 +1185,15 @@ mod tests { assert!(script.contains("DNF_SDK_HOST")); assert!(script.contains("environment-setup")); assert!(script.contains("cd /opt/src")); + // Verify bindfs check is included + assert!(script.contains("command -v bindfs")); + assert!(script.contains("[ERROR] bindfs is not installed")); + // Verify bindfs setup is included with correct syntax + // --map=uid1/uid2:@gid1/@gid2 for combined user and group mapping + assert!(script.contains( + "bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src" + )); + assert!(script.contains("mkdir -p /opt/src")); } #[test] From c9ef1e124b5d2000f1779a7cd5197f6ba4c6ca50 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Sun, 28 Dec 2025 15:57:38 -0500 Subject: [PATCH 02/20] add --runs-on for executing remote commands using ssh --- src/commands/build.rs | 43 +- src/commands/ext/build.rs | 77 +- src/commands/ext/image.rs | 15 + src/commands/ext/install.rs | 19 + 
src/commands/hitl/server.rs | 45 +- src/commands/install.rs | 22 +- src/commands/provision.rs | 14 + src/commands/runtime/build.rs | 19 + src/commands/runtime/install.rs | 19 + src/commands/runtime/provision.rs | 22 + src/commands/sdk/install.rs | 25 + src/main.rs | 21 +- src/utils/container.rs | 317 +++++++- src/utils/mod.rs | 3 + src/utils/nfs_server.rs | 732 +++++++++++++++++ src/utils/remote.rs | 675 ++++++++++++++++ src/utils/runs_on.rs | 504 ++++++++++++ tests/runs_on_integration.rs | 1249 +++++++++++++++++++++++++++++ 18 files changed, 3780 insertions(+), 41 deletions(-) create mode 100644 src/utils/nfs_server.rs create mode 100644 src/utils/remote.rs create mode 100644 src/utils/runs_on.rs create mode 100644 tests/runs_on_integration.rs diff --git a/src/commands/build.rs b/src/commands/build.rs index c10592c..af8a57e 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -41,6 +41,10 @@ pub struct BuildCommand { pub dnf_args: Option>, /// Disable stamp validation and writing pub no_stamps: bool, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, } impl BuildCommand { @@ -63,6 +67,8 @@ impl BuildCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -72,6 +78,13 @@ impl BuildCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + /// Execute the build command pub async fn execute(&self) -> Result<()> { // Early target validation - load basic config first @@ -143,7 +156,8 @@ impl BuildCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_build_cmd.execute().await.with_context(|| { format!("Failed to build extension '{extension_name}'") })?; @@ -212,7 +226,8 @@ impl BuildCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{extension_name}'") })?; @@ -275,7 +290,8 @@ impl BuildCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); runtime_build_cmd .execute() .await @@ -600,7 +616,8 @@ impl BuildCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); // Execute the extension build using the external config match ext_build_cmd.execute().await { @@ -661,7 +678,8 @@ impl BuildCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); // Execute the image creation ext_image_cmd.execute().await.with_context(|| { @@ -917,7 +935,8 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_build_cmd .execute() .await @@ -954,7 +973,8 @@ echo "Successfully created image for 
versioned extension '$EXT_NAME-$EXT_VERSION self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{ext_name}'") })?; @@ -1070,7 +1090,8 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_build_cmd.execute().await.with_context(|| { format!("Failed to build extension '{extension_name}'") })?; @@ -1084,7 +1105,8 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{extension_name}'") })?; @@ -1150,7 +1172,8 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); runtime_build_cmd .execute() .await diff --git a/src/commands/ext/build.rs b/src/commands/ext/build.rs index e2b159a..8fd8afa 100644 --- a/src/commands/ext/build.rs +++ b/src/commands/ext/build.rs @@ -24,13 +24,31 @@ enum OverlayMode { } pub struct ExtBuildCommand { - extension: String, - config_path: String, - verbose: bool, - target: Option, - container_args: Option>, - dnf_args: Option>, - no_stamps: bool, + pub extension: String, + pub config_path: String, + pub verbose: bool, + pub target: Option, + pub container_args: Option>, + pub dnf_args: Option>, + pub no_stamps: bool, + pub runs_on: Option, + pub nfs_port: Option, +} + +impl Default for ExtBuildCommand { + fn default() -> Self { + Self { + extension: String::new(), + config_path: String::new(), + verbose: false, + target: None, + container_args: None, + dnf_args: None, + no_stamps: false, + runs_on: None, + nfs_port: None, + } + } } impl ExtBuildCommand { @@ -50,6 +68,8 @@ impl ExtBuildCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -59,6 +79,13 @@ impl ExtBuildCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + pub async fn execute(&self) -> Result<()> { // Load configuration and parse raw TOML let config = Config::load(&self.config_path)?; @@ -1616,6 +1643,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -1673,6 +1701,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_confext_build_script( @@ -1727,6 +1756,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -1755,6 +1785,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_confext_build_script( @@ -1783,6 +1814,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, 
+ ..Default::default() }; let enable_services = vec!["peridiod.service".to_string(), "test.service".to_string()]; @@ -1835,6 +1867,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -1868,6 +1901,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let overlay_config = OverlayConfig { @@ -1907,6 +1941,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let overlay_config = OverlayConfig { @@ -1946,6 +1981,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let overlay_config = OverlayConfig { @@ -1987,6 +2023,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let overlay_config = OverlayConfig { @@ -2028,6 +2065,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script_sysext = cmd.create_sysext_build_script( @@ -2072,6 +2110,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let modprobe_modules = vec!["nfs".to_string(), "ext4".to_string()]; @@ -2116,6 +2155,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -2153,6 +2193,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_confext_build_script( @@ -2187,6 +2228,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_confext_build_script( @@ -2220,6 +2262,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let on_merge_commands = vec![ @@ -2259,6 +2302,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let on_unmerge_commands = vec![ @@ -2301,6 +2345,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let on_unmerge_commands = vec!["systemctl stop myservice.service".to_string()]; @@ -2335,6 +2380,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let on_merge_commands = vec!["systemctl restart sshd.socket".to_string()]; @@ -2369,6 +2415,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -2401,6 +2448,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -2431,6 +2479,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Test with the example modules from the user's request @@ -2479,6 +2528,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Test with both modprobe modules and custom commands @@ -2542,6 +2592,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create users config matching the example in the user request @@ -2584,6 +2635,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_users_script_section(None, None); @@ -2602,6 +2654,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create 
users config with a user that has a non-empty password @@ -2639,6 +2692,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create users config with a user that has a non-string password @@ -2672,6 +2726,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create users config matching the example in the user request @@ -2726,6 +2781,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create users config matching the example in the user request @@ -2778,6 +2834,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Test empty password - should show warning @@ -2825,6 +2882,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create comprehensive groups config @@ -2909,6 +2967,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Create comprehensive users configuration using mixed approach @@ -3188,6 +3247,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; // Test user with just name (no fields at all) @@ -3217,6 +3277,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_sysext_build_script( @@ -3245,6 +3306,7 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let script = cmd.create_confext_build_script( @@ -3337,6 +3399,7 @@ sdk: container_args: None, dnf_args: None, no_stamps: false, + ..Default::default() }; let src_dir = "src"; diff --git a/src/commands/ext/image.rs b/src/commands/ext/image.rs index 7dbc0d8..82a6145 100644 --- a/src/commands/ext/image.rs +++ b/src/commands/ext/image.rs @@ -18,6 +18,8 @@ pub struct ExtImageCommand { container_args: Option>, dnf_args: Option>, no_stamps: bool, + runs_on: Option, + nfs_port: Option, } impl ExtImageCommand { @@ -37,6 +39,8 @@ impl ExtImageCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -46,6 +50,13 @@ impl ExtImageCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + pub async fn execute(&self) -> Result<()> { // Load configuration and parse raw TOML let config = Config::load(&self.config_path)?; @@ -91,6 +102,8 @@ impl ExtImageCommand { repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -303,6 +316,8 @@ impl ExtImageCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let result = container_helper.run_in_container(config).await?; diff --git a/src/commands/ext/install.rs b/src/commands/ext/install.rs index 939ae9f..f010cfe 100644 --- a/src/commands/ext/install.rs +++ b/src/commands/ext/install.rs @@ -19,6 +19,8 @@ pub struct ExtInstallCommand { container_args: Option>, dnf_args: Option>, no_stamps: bool, + runs_on: Option, + nfs_port: Option, } impl ExtInstallCommand { @@ -40,6 +42,8 @@ impl ExtInstallCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -49,6 
+53,13 @@ impl ExtInstallCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + pub async fn execute(&self) -> Result<()> { // Load the composed configuration (merges external configs, applies interpolation) let composed = Config::load_composed(&self.config_path, self.target.as_deref()) @@ -256,6 +267,8 @@ impl ExtInstallCommand { repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -314,6 +327,8 @@ impl ExtInstallCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let sysroot_exists = container_helper.run_in_container(run_config).await?; @@ -331,6 +346,8 @@ impl ExtInstallCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let success = container_helper.run_in_container(run_config).await?; @@ -535,6 +552,8 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies, + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let install_success = container_helper.run_in_container(run_config).await?; diff --git a/src/commands/hitl/server.rs b/src/commands/hitl/server.rs index 2623d7c..fe704a7 100644 --- a/src/commands/hitl/server.rs +++ b/src/commands/hitl/server.rs @@ -1,5 +1,6 @@ use crate::utils::config::Config; use crate::utils::container::{RunConfig, SdkContainer}; +use crate::utils::nfs_server::{NfsExport, HITL_DEFAULT_PORT}; use crate::utils::output::{print_debug, print_info, OutputLevel}; use crate::utils::stamps::{ generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement, @@ -7,6 +8,7 @@ use crate::utils::stamps::{ use crate::utils::target::validate_and_log_target; use anyhow::Result; use clap::Args; +use std::path::PathBuf; #[derive(Args, Debug)] pub struct HitlServerCommand { @@ -204,7 +206,8 @@ impl HitlServerCommand { commands.push(update_config_cmd); // Update NFS_Port if a port is specified (it's nested inside NFS_Core_Param block) - if let Some(port) = self.port { + let port = self.port.unwrap_or(HITL_DEFAULT_PORT); + if self.port.is_some() { let port_update_cmd = format!( "sed -i '/NFS_Core_Param {{/,/}}/s/NFS_Port = [0-9]\\+;/NFS_Port = {port};/' {config_file}" ); @@ -221,27 +224,22 @@ impl HitlServerCommand { return format!("{} &&", commands.join(" && ")); } + // Use shared NfsExport to generate export configurations for (index, extension) in self.extensions.iter().enumerate() { - let export_id = index + 1; - - // Expand the AVOCADO_PREFIX variable to its actual path + let export_id = (index + 1) as u32; let extensions_path = format!("/opt/_avocado/{target}/extensions/{extension}"); + let pseudo_path = format!("/{extension}"); - let export_content = format!( - "EXPORT {{\n\ - \x20\x20Export_Id = {export_id};\n\ - \x20\x20Path = {extensions_path};\n\ - \x20\x20Pseudo = /{extension};\n\ - \x20\x20FSAL {{\n\ - \x20\x20\x20\x20name = VFS;\n\ - \x20\x20}}\n\ - }}" - ); + // Create NfsExport using the shared type + let export = NfsExport::new(export_id, 
PathBuf::from(&extensions_path), pseudo_path); + + // Generate the export config content using the shared method + let export_content = Self::generate_ganesha_export_block(&export); let export_file = format!("${{AVOCADO_SDK_PREFIX}}/etc/avocado/exports.d/{extension}.conf"); - // Create a command that writes the export content to the file using echo -e to avoid here-doc issues + // Create a command that writes the export content to the file let escaped_content = export_content.replace('\\', "\\\\").replace('"', "\\\""); let write_command = format!("echo -e \"{escaped_content}\" > {export_file}"); @@ -256,6 +254,23 @@ impl HitlServerCommand { format!("{} &&", commands.join(" && ")) } + + /// Generate a Ganesha EXPORT block for the given export config + fn generate_ganesha_export_block(export: &NfsExport) -> String { + format!( + "EXPORT {{\n\ + \x20\x20Export_Id = {};\n\ + \x20\x20Path = {};\n\ + \x20\x20Pseudo = {};\n\ + \x20\x20FSAL {{\n\ + \x20\x20\x20\x20name = VFS;\n\ + \x20\x20}}\n\ + }}", + export.export_id, + export.local_path.display(), + export.pseudo_path + ) + } } #[cfg(test)] diff --git a/src/commands/install.rs b/src/commands/install.rs index b6a8f01..d796868 100644 --- a/src/commands/install.rs +++ b/src/commands/install.rs @@ -43,6 +43,10 @@ pub struct InstallCommand { pub dnf_args: Option>, /// Disable stamp validation and writing pub no_stamps: bool, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, } impl InstallCommand { @@ -65,6 +69,8 @@ impl InstallCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -74,6 +80,13 @@ impl InstallCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + /// Execute the install command pub async fn execute(&self) -> Result<()> { // Early target validation - load basic config first to validate target @@ -116,7 +129,8 @@ impl InstallCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); sdk_install_cmd .execute() .await @@ -151,7 +165,8 @@ impl InstallCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); ext_install_cmd.execute().await.with_context(|| { format!( "Failed to install extension dependencies for '{extension_name}'" @@ -242,7 +257,8 @@ impl InstallCommand { self.container_args.clone(), self.dnf_args.clone(), ) - .with_no_stamps(self.no_stamps); + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port); runtime_install_cmd.execute().await.with_context(|| { format!("Failed to install runtime dependencies for '{runtime_name}'") })?; diff --git a/src/commands/provision.rs b/src/commands/provision.rs index 02ada8e..d61ddcc 100644 --- a/src/commands/provision.rs +++ b/src/commands/provision.rs @@ -29,6 +29,10 @@ pub struct ProvisionConfig { pub dnf_args: Option>, /// Disable stamp validation and writing pub no_stamps: bool, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, } /// Implementation of the 'provision' command that calls through to runtime provision. 
@@ -70,6 +74,8 @@ impl ProvisionCommand { dnf_args: self.config.dnf_args.clone(), state_file, no_stamps: self.config.no_stamps, + runs_on: self.config.runs_on.clone(), + nfs_port: self.config.nfs_port, }, ); @@ -98,6 +104,8 @@ mod tests { container_args: Some(vec!["--privileged".to_string()]), dnf_args: Some(vec!["--nogpgcheck".to_string()]), no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = ProvisionCommand::new(config); @@ -129,6 +137,8 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = ProvisionCommand::new(config); @@ -163,6 +173,8 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = ProvisionCommand::new(config); @@ -184,6 +196,8 @@ mod tests { container_args: None, dnf_args: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = ProvisionCommand::new(config); diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index 80aedb2..26d537d 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -19,6 +19,8 @@ pub struct RuntimeBuildCommand { container_args: Option>, dnf_args: Option>, no_stamps: bool, + runs_on: Option, + nfs_port: Option, } impl RuntimeBuildCommand { @@ -38,6 +40,8 @@ impl RuntimeBuildCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -47,6 +51,13 @@ impl RuntimeBuildCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + pub async fn execute(&self) -> Result<()> { // Load configuration and parse raw TOML let config = load_config(&self.config_path)?; @@ -117,6 +128,8 @@ impl RuntimeBuildCommand { repo_release: repo_release.clone(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -228,6 +241,8 @@ impl RuntimeBuildCommand { container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), env_vars, + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let complete_result = container_helper @@ -265,6 +280,8 @@ impl RuntimeBuildCommand { repo_release: repo_release.clone(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -856,6 +873,8 @@ rpm --root="$AVOCADO_EXT_SYSROOTS/{ext_name}" --dbpath=/var/lib/extension.d/rpm verbose: self.verbose, source_environment: true, interactive: false, + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; diff --git a/src/commands/runtime/install.rs b/src/commands/runtime/install.rs index ffb7773..ed7bef1 100644 --- a/src/commands/runtime/install.rs +++ b/src/commands/runtime/install.rs @@ -19,6 +19,8 @@ pub struct RuntimeInstallCommand { container_args: Option>, dnf_args: Option>, no_stamps: bool, + runs_on: Option, + nfs_port: Option, } impl RuntimeInstallCommand { @@ -40,6 +42,8 @@ impl RuntimeInstallCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -49,6 +53,13 @@ impl RuntimeInstallCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + pub async fn 
execute(&self) -> Result<()> { // Load the configuration and parse raw TOML let config = Config::load(&self.config_path)?; @@ -197,6 +208,8 @@ impl RuntimeInstallCommand { repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -268,6 +281,8 @@ impl RuntimeInstallCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let installroot_exists = container_helper.run_in_container(run_config).await?; @@ -285,6 +300,8 @@ impl RuntimeInstallCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let success = container_helper.run_in_container(run_config).await?; @@ -425,6 +442,8 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let success = container_helper.run_in_container(run_config).await?; diff --git a/src/commands/runtime/provision.rs b/src/commands/runtime/provision.rs index bee72f4..fe1af3d 100644 --- a/src/commands/runtime/provision.rs +++ b/src/commands/runtime/provision.rs @@ -31,6 +31,10 @@ pub struct RuntimeProvisionConfig { pub state_file: Option, /// Disable stamp validation and writing pub no_stamps: bool, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, } pub struct RuntimeProvisionCommand { @@ -105,6 +109,8 @@ impl RuntimeProvisionCommand { verbose: false, source_environment: true, interactive: false, + runs_on: self.config.runs_on.clone(), + nfs_port: self.config.nfs_port, ..Default::default() }; @@ -283,6 +289,8 @@ impl RuntimeProvisionCommand { self.config.container_args.as_ref(), ), dnf_args: self.config.dnf_args.clone(), + runs_on: self.config.runs_on.clone(), + nfs_port: self.config.nfs_port, ..Default::default() }; @@ -349,6 +357,8 @@ impl RuntimeProvisionCommand { verbose: self.config.verbose, source_environment: true, interactive: false, + runs_on: self.config.runs_on.clone(), + nfs_port: self.config.nfs_port, ..Default::default() }; @@ -869,6 +879,8 @@ rpm --root="$AVOCADO_EXT_SYSROOTS/{ext_name}" --dbpath=/var/lib/extension.d/rpm verbose: self.config.verbose, source_environment: true, interactive: false, + runs_on: self.config.runs_on.clone(), + nfs_port: self.config.nfs_port, ..Default::default() }; @@ -919,6 +931,8 @@ mod tests { dnf_args: None, state_file: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -946,6 +960,8 @@ mod tests { dnf_args: None, state_file: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -995,6 +1011,8 @@ runtime: dnf_args: None, state_file: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let command = RuntimeProvisionCommand::new(provision_config); @@ -1038,6 +1056,8 @@ runtime: dnf_args: dnf_args.clone(), state_file: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -1074,6 +1094,8 @@ runtime: dnf_args: None, 
state_file: None, no_stamps: false, + runs_on: None, + nfs_port: None, }; let cmd = RuntimeProvisionCommand::new(config); diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index 8c489b1..d2d187f 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -29,6 +29,10 @@ pub struct SdkInstallCommand { pub dnf_args: Option>, /// Disable stamp validation and writing pub no_stamps: bool, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, } impl SdkInstallCommand { @@ -49,6 +53,8 @@ impl SdkInstallCommand { container_args, dnf_args, no_stamps: false, + runs_on: None, + nfs_port: None, } } @@ -58,6 +64,13 @@ impl SdkInstallCommand { self } + /// Set remote execution options + pub fn with_runs_on(mut self, runs_on: Option, nfs_port: Option) -> Self { + self.runs_on = runs_on; + self.nfs_port = nfs_port; + self + } + /// Execute the sdk install command pub async fn execute(&self) -> Result<()> { // Early target validation - load basic config first @@ -287,6 +300,8 @@ MACROS_EOF container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -344,6 +359,8 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -388,6 +405,8 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -435,6 +454,8 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -485,6 +506,8 @@ fi container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -648,6 +671,8 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; diff --git a/src/main.rs b/src/main.rs index b206eaf..99bcdb0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -47,6 +47,14 @@ struct Cli { /// Disable stamp validation and writing #[arg(long)] no_stamps: bool, + + /// Run command on remote host using local volume via NFS (format: user@host) + #[arg(long, value_name = "USER@HOST", global = true)] + runs_on: Option, + + /// NFS port for remote execution (auto-selects from 12050-12099 if not specified) + #[arg(long, global = true)] + nfs_port: Option, } #[derive(Subcommand)] @@ -749,7 +757,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_runs_on(cli.runs_on.clone(), cli.nfs_port); install_cmd.execute().await?; Ok(()) } @@ -771,7 +780,8 @@ async fn main() 
-> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_runs_on(cli.runs_on.clone(), cli.nfs_port); build_cmd.execute().await?; Ok(()) } @@ -826,6 +836,8 @@ async fn main() -> Result<()> { container_args, dnf_args, no_stamps: cli.no_stamps, + runs_on: cli.runs_on.clone(), + nfs_port: cli.nfs_port, }); provision_cmd.execute().await?; Ok(()) @@ -974,6 +986,8 @@ async fn main() -> Result<()> { dnf_args, state_file: None, // Resolved from config during execution no_stamps: cli.no_stamps, + runs_on: cli.runs_on.clone(), + nfs_port: cli.nfs_port, }, ); provision_cmd.execute().await?; @@ -1280,7 +1294,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_runs_on(cli.runs_on.clone(), cli.nfs_port); install_cmd.execute().await?; Ok(()) } diff --git a/src/utils/container.rs b/src/utils/container.rs index 6d90c43..c6f3109 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -36,6 +36,10 @@ pub struct RunConfig { pub signing_helper_script_path: Option, pub signing_key_name: Option, pub signing_checksum_algorithm: Option, + /// Remote host to run on (format: user@host) + pub runs_on: Option, + /// NFS port for remote execution (auto-selected if None) + pub nfs_port: Option, } impl Default for RunConfig { @@ -64,6 +68,8 @@ impl Default for RunConfig { signing_helper_script_path: None, signing_key_name: None, signing_checksum_algorithm: None, + runs_on: None, + nfs_port: None, } } } @@ -124,6 +130,11 @@ impl SdkContainer { /// Run a command in the container pub async fn run_in_container(&self, config: RunConfig) -> Result { + // Check if we should run on a remote host + if let Some(ref runs_on) = config.runs_on { + return self.run_in_container_remote(&config, runs_on).await; + } + // Get or create docker volume for persistent state let volume_manager = VolumeManager::new(self.container_tool.clone(), self.verbose); let volume_state = volume_manager.get_or_create_volume(&self.cwd).await?; @@ -192,6 +203,133 @@ impl SdkContainer { .await } + /// Run a command in a container on a remote host via NFS + async fn run_in_container_remote(&self, config: &RunConfig, runs_on: &str) -> Result { + use crate::utils::runs_on::RunsOnContext; + + // Get or create local docker volume (we need this to export via NFS) + let volume_manager = VolumeManager::new(self.container_tool.clone(), self.verbose); + let volume_state = volume_manager.get_or_create_volume(&self.cwd).await?; + + let src_dir = self.src_dir.as_ref().unwrap_or(&self.cwd); + + print_info( + &format!("Setting up remote execution on {}...", runs_on), + OutputLevel::Normal, + ); + + // Setup remote execution context + let mut context = RunsOnContext::setup( + runs_on, + config.nfs_port, + src_dir, + &volume_state.volume_name, + &self.container_tool, + &config.container_image, + config.verbose || self.verbose, + ) + .await + .context("Failed to setup remote execution")?; + + // Setup signing tunnel if signing is configured + #[cfg(unix)] + if let Some(ref socket_path) = config.signing_socket_path { + let _ = context.setup_signing_tunnel(socket_path).await; + } + + // Build environment variables + let mut env_vars = config.env_vars.clone().unwrap_or_default(); + + // Set host platform - the remote is running the container + env_vars.insert("AVOCADO_HOST_PLATFORM".to_string(), "linux".to_string()); + + if let Some(url) = &config.repo_url { + env_vars.insert("AVOCADO_SDK_REPO_URL".to_string(), 
url.clone()); + } + if let Some(release) = &config.repo_release { + env_vars.insert("AVOCADO_SDK_REPO_RELEASE".to_string(), release.clone()); + } + if let Some(dnf_args) = &config.dnf_args { + env_vars.insert("AVOCADO_DNF_ARGS".to_string(), dnf_args.join(" ")); + } + if config.verbose || self.verbose { + env_vars.insert("AVOCADO_VERBOSE".to_string(), "1".to_string()); + } + + // Set target and SDK-related env vars + env_vars.insert("AVOCADO_TARGET".to_string(), config.target.clone()); + env_vars.insert("AVOCADO_SDK_TARGET".to_string(), config.target.clone()); + env_vars.insert("AVOCADO_SRC_DIR".to_string(), "/opt/src".to_string()); + + // Set host UID/GID for bindfs permission mapping on remote + // This maps the local host user's files to root inside the container + let (host_uid, host_gid) = crate::utils::config::resolve_host_uid_gid(None); + env_vars.insert("AVOCADO_HOST_UID".to_string(), host_uid.to_string()); + env_vars.insert("AVOCADO_HOST_GID".to_string(), host_gid.to_string()); + + // Build the complete command with entrypoint + // NFS src volume is mounted to /mnt/src, bindfs remaps to /opt/src with UID translation + let mut full_command = String::new(); + if config.use_entrypoint { + full_command.push_str(&self.create_entrypoint_script_for_remote( + config.source_environment, + config.extension_sysroot.as_deref(), + config.runtime_sysroot.as_deref(), + &config.target, + config.no_bootstrap, + config.disable_weak_dependencies, + )); + full_command.push('\n'); + } + full_command.push_str(&config.command); + + // Build extra Docker args + let mut extra_args: Vec = vec![ + "--device".to_string(), + "/dev/fuse".to_string(), + "--cap-add".to_string(), + "SYS_ADMIN".to_string(), + ]; + + if let Some(ref args) = config.container_args { + extra_args.extend(args.clone()); + } + + if config.interactive { + extra_args.push("-it".to_string()); + } + + let extra_args_refs: Vec<&str> = extra_args.iter().map(|s| s.as_str()).collect(); + + print_info( + &format!("Running command on remote host {}...", runs_on), + OutputLevel::Normal, + ); + + // Run the container on the remote + let result = context + .run_container_command( + &config.container_image, + &full_command, + env_vars, + &extra_args_refs + .iter() + .map(|s| s.to_string()) + .collect::>(), + ) + .await; + + // Always cleanup, even on error + if let Err(e) = context.teardown().await { + print_error( + &format!("Warning: Failed to cleanup remote resources: {}", e), + OutputLevel::Normal, + ); + } + + result.context("Remote container execution failed") + } + /// Build the complete container command fn build_container_command( &self, @@ -710,6 +848,178 @@ mkdir -p /opt/src && bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt Ok(status.success()) } + /// Create the entrypoint script for remote execution (NFS volumes) + /// This skips the bindfs setup since NFS volumes are already mounted to /opt/src and /opt/_avocado + pub fn create_entrypoint_script_for_remote( + &self, + source_environment: bool, + extension_sysroot: Option<&str>, + runtime_sysroot: Option<&str>, + target: &str, + _no_bootstrap: bool, + disable_weak_dependencies: bool, + ) -> String { + // Conditionally add install_weak_deps flag + let weak_deps_flag = if disable_weak_dependencies { + "--setopt=install_weak_deps=0 \\\n" + } else { + "" + }; + + // For remote execution: + // - NFS src volume is mounted to /mnt/src (needs bindfs for UID mapping) + // - NFS state volume is mounted directly to /opt/_avocado (no mapping needed) + let mut script = format!( + r#" +set 
-e
+
+# Remote execution mode - NFS volumes mounted
+if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Remote execution mode - using NFS-mounted volumes"; fi
+
+# Remount source directory with permission translation via bindfs
+# This maps host UID/GID to root inside the container for seamless file access
+mkdir -p /opt/src
+
+# Check if bindfs is available
+if ! command -v bindfs >/dev/null 2>&1; then
+    echo "[ERROR] bindfs is not installed in this container image."
+    echo ""
+    echo "bindfs is required for proper file permission handling."
+    exit 1
+fi
+
+if [ -n "$AVOCADO_HOST_UID" ] && [ -n "$AVOCADO_HOST_GID" ]; then
+    # If host user is already root (UID 0), no mapping needed - just bind mount
+    if [ "$AVOCADO_HOST_UID" = "0" ] && [ "$AVOCADO_HOST_GID" = "0" ]; then
+        mount --bind /mnt/src /opt/src
+        if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (host is root, no mapping needed)"; fi
+    else
+        # Use --map with colon-separated user and group mappings
+        # Maps host UID -> 0 (root) and host GID -> 0 (root group)
+        bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src
+        if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src with UID/GID mapping ($AVOCADO_HOST_UID:$AVOCADO_HOST_GID -> 0:0)"; fi
+    fi
+else
+    # Fallback: simple bind mount without permission translation
+    mount --bind /mnt/src /opt/src
+    if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (no UID/GID mapping)"; fi
+fi
+
+# Get repo url from environment or default to prod
+if [ -n "$AVOCADO_SDK_REPO_URL" ]; then
+    REPO_URL="$AVOCADO_SDK_REPO_URL"
+else
+    REPO_URL="https://repo.avocadolinux.org"
+fi
+
+if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Using repo URL: '$REPO_URL'"; fi
+
+# Get repo release from environment, or detect it from os-release
+if [ -n "$AVOCADO_SDK_REPO_RELEASE" ]; then
+    REPO_RELEASE="$AVOCADO_SDK_REPO_RELEASE"
+else
+    # Read VERSION_CODENAME from os-release, defaulting to "dev" if not found
+    if [ -f /etc/os-release ]; then
+        REPO_RELEASE=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d= -f2 | tr -d '"')
+    fi
+    REPO_RELEASE=${{REPO_RELEASE:-dev}}
+fi
+
+if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Using repo release: '$REPO_RELEASE'"; fi
+
+export AVOCADO_PREFIX="/opt/_avocado/${{AVOCADO_TARGET}}"
+export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk"
+export AVOCADO_EXT_SYSROOTS="${{AVOCADO_PREFIX}}/extensions"
+export DNF_SDK_HOST_PREFIX="${{AVOCADO_SDK_PREFIX}}"
+export DNF_SDK_TARGET_PREFIX="${{AVOCADO_SDK_PREFIX}}/target-repoconf"
+export DNF_SDK_HOST="\
+dnf \
+--releasever="$REPO_RELEASE" \
+--best \
+{weak_deps_flag}--setopt=check_config_file_age=0 \
+${{AVOCADO_DNF_ARGS:-}} \
+"
+
+export DNF_NO_SCRIPTS="--setopt=tsflags=noscripts"
+export SSL_CERT_FILE=${{AVOCADO_SDK_PREFIX}}/etc/ssl/certs/ca-certificates.crt
+
+export DNF_SDK_HOST_OPTS="\
+--setopt=cachedir=${{DNF_SDK_HOST_PREFIX}}/var/cache \
+--setopt=logdir=${{DNF_SDK_HOST_PREFIX}}/var/log \
+--setopt=persistdir=${{DNF_SDK_HOST_PREFIX}}/var/lib/dnf \
+"
+
+export DNF_SDK_HOST_REPO_CONF="\
+--setopt=varsdir=${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars \
+--setopt=reposdir=${{DNF_SDK_HOST_PREFIX}}/etc/yum.repos.d \
+"
+
+export DNF_SDK_REPO_CONF="\
+--setopt=varsdir=${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars \
+--setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \
+"
+
+export DNF_SDK_TARGET_REPO_CONF="\
+--setopt=varsdir=${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars \
+--setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ +" + +mkdir -p /etc/dnf/vars +mkdir -p ${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars +mkdir -p ${{AVOCADO_SDK_PREFIX}}/target-repoconf/etc/dnf/vars + +echo "${{REPO_URL}}" > /etc/dnf/vars/repo_url +echo "${{REPO_URL}}" > ${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars/repo_url +echo "${{REPO_URL}}" > ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars/repo_url +"# + ); + + script.push_str( + r#" +export RPM_ETCCONFIGDIR="$AVOCADO_SDK_PREFIX" + +"#, + ); + + // Conditionally change to sysroot directory or default to /opt/src + if let Some(extension_name) = extension_sysroot { + script.push_str(&format!( + "cd /opt/_avocado/{target}/extensions/{extension_name}\n" + )); + } else if let Some(runtime_name) = runtime_sysroot { + script.push_str(&format!( + "cd /opt/_avocado/{target}/runtimes/{runtime_name}\n" + )); + } else { + script.push_str("cd /opt/src\n"); + } + + // Conditionally add environment sourcing based on the source_environment parameter + if source_environment { + script.push_str( + r#" +# Source the environment setup if it exists +if [ -f "${AVOCADO_SDK_PREFIX}/environment-setup" ]; then + source "${AVOCADO_SDK_PREFIX}/environment-setup" +fi + +# Add SSL certificate path to DNF options and CURL if it exists +if [ -f "${AVOCADO_SDK_PREFIX}/etc/ssl/certs/ca-certificates.crt" ]; then + export DNF_SDK_HOST_OPTS="${DNF_SDK_HOST_OPTS} \ + --setopt=sslcacert=${SSL_CERT_FILE} \ +" + + export CURL_CA_BUNDLE=${AVOCADO_SDK_PREFIX}/etc/ssl/certs/ca-certificates.crt +fi +"#, + ); + } + + script + } + /// Create the entrypoint script for SDK initialization pub fn create_entrypoint_script( &self, @@ -1148,6 +1458,8 @@ mod tests { signing_helper_script_path: None, signing_key_name: None, signing_checksum_algorithm: None, + runs_on: None, + nfs_port: None, }; let result = container.build_container_command(&config, &command, &env_vars, &volume_state); @@ -1190,9 +1502,8 @@ mod tests { assert!(script.contains("[ERROR] bindfs is not installed")); // Verify bindfs setup is included with correct syntax // --map=uid1/uid2:@gid1/@gid2 for combined user and group mapping - assert!(script.contains( - "bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src" - )); + assert!(script + .contains("bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 /mnt/src /opt/src")); assert!(script.contains("mkdir -p /opt/src")); } diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 6789f86..c37ea6d 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -3,8 +3,11 @@ pub mod container; pub mod image_signing; pub mod interpolation; pub mod lockfile; +pub mod nfs_server; pub mod output; pub mod pkcs11_devices; +pub mod remote; +pub mod runs_on; pub mod signing_keys; #[cfg(unix)] pub mod signing_service; diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs new file mode 100644 index 0000000..9f11fcb --- /dev/null +++ b/src/utils/nfs_server.rs @@ -0,0 +1,732 @@ +//! NFS Server utilities using Ganesha for remote volume sharing. +//! +//! This module provides a shared NFS server implementation that can be used +//! by both the HITL server command and the runs-on remote execution feature. 
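+//!
+//! A minimal usage sketch of the builder API defined below (the path and the
+//! surrounding async context here are illustrative, and error handling is
+//! elided):
+//!
+//! ```no_run
+//! use avocado_cli::utils::nfs_server::NfsServerBuilder;
+//!
+//! # async fn example() -> anyhow::Result<()> {
+//! // Auto-select a free port in 12050-12099 and export one directory.
+//! let server = NfsServerBuilder::new()?
+//!     .verbose(true)
+//!     .add_export("/home/user/project", "/src")
+//!     .start()
+//!     .await?;
+//! // ... remote hosts can now mount the "/src" pseudo path ...
+//! server.stop().await?;
+//! # Ok(())
+//! # }
+//! ```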
+ +use anyhow::{Context, Result}; +use std::net::TcpListener; +use std::ops::RangeInclusive; +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use tokio::process::{Child, Command as AsyncCommand}; + +use crate::utils::output::{print_info, OutputLevel}; + +/// Default port range for NFS server auto-selection +pub const DEFAULT_NFS_PORT_RANGE: RangeInclusive = 12050..=12099; + +/// Default NFS port used by HITL server +pub const HITL_DEFAULT_PORT: u16 = 12049; + +/// An NFS export configuration +#[derive(Debug, Clone)] +pub struct NfsExport { + /// Unique export ID (1-based) + pub export_id: u32, + /// Local filesystem path to export + pub local_path: PathBuf, + /// NFS pseudo path (e.g., "/src", "/state") + pub pseudo_path: String, +} + +impl NfsExport { + /// Create a new NFS export + pub fn new(export_id: u32, local_path: PathBuf, pseudo_path: String) -> Self { + Self { + export_id, + local_path, + pseudo_path, + } + } + + /// Generate Ganesha EXPORT block for this export + pub fn to_ganesha_config(&self) -> String { + format!( + r#"EXPORT {{ + Export_Id = {}; + Path = {}; + Pseudo = {}; + FSAL {{ + name = VFS; + }} +}} +"#, + self.export_id, + self.local_path.display(), + self.pseudo_path + ) + } +} + +/// Configuration for the NFS server +#[derive(Debug, Clone)] +pub struct NfsServerConfig { + /// Port to listen on + pub port: u16, + /// List of exports + pub exports: Vec, + /// Enable verbose logging + pub verbose: bool, + /// Bind address (default: 0.0.0.0) + pub bind_addr: String, +} + +impl Default for NfsServerConfig { + fn default() -> Self { + Self { + port: *DEFAULT_NFS_PORT_RANGE.start(), + exports: Vec::new(), + verbose: false, + bind_addr: "0.0.0.0".to_string(), + } + } +} + +impl NfsServerConfig { + /// Create a new NFS server config with the given port + pub fn new(port: u16) -> Self { + Self { + port, + ..Default::default() + } + } + + /// Set verbose mode + #[allow(dead_code)] + pub fn with_verbose(mut self, verbose: bool) -> Self { + self.verbose = verbose; + self + } + + /// Add an export to the configuration + pub fn add_export(&mut self, local_path: PathBuf, pseudo_path: String) -> &mut Self { + let export_id = (self.exports.len() + 1) as u32; + self.exports + .push(NfsExport::new(export_id, local_path, pseudo_path)); + self + } + + /// Generate the complete Ganesha configuration file content + pub fn generate_ganesha_config(&self) -> String { + let log_level = if self.verbose { "DEBUG" } else { "EVENT" }; + + let mut config = format!( + r#"# Auto-generated Ganesha NFS configuration +LOG {{ + Default_Log_Level = {log_level}; +}} + +NFS_Core_Param {{ + NFS_Port = {}; + Enable_NLM = false; + Enable_RQUOTA = false; + Enable_UDP = false; + Protocols = 4; + allow_set_io_flusher_fail = true; + Nb_Max_Fd = 65536; + Max_Open_Files = 10000; + DRC_Max_Size = 32768; + Attr_Expiration_Time = 60; + Nb_Worker = 256; + Bind_addr = {}; +}} + +NFSV4 {{ + Graceless = false; + Allow_Numeric_Owners = true; + Only_Numeric_Owners = true; +}} + +# Defaults that all EXPORT{{}} blocks inherit unless they override +EXPORT_DEFAULTS {{ + Access_Type = RW; + Squash = No_Root_Squash; + Transports = TCP; + Protocols = 4; + SecType = none; + Disable_ACL = true; + Manage_Gids = false; + Anonymous_uid = 0; + Anonymous_gid = 0; + + CLIENT {{ + Clients = *; + Access_Type = RW; + }} +}} + +"#, + self.port, self.bind_addr + ); + + // Add export blocks + for export in &self.exports { + config.push_str(&export.to_ganesha_config()); + config.push('\n'); + } + + config + } +} + +/// Find an 
available port in the given range +/// +/// Returns the first available port, or None if all ports are in use. +pub fn find_available_port(range: RangeInclusive) -> Option { + range.into_iter().find(|&port| is_port_available(port)) +} + +/// Check if a port is available for binding +pub fn is_port_available(port: u16) -> bool { + TcpListener::bind(("0.0.0.0", port)).is_ok() +} + +/// A running NFS server instance +pub struct NfsServer { + /// The Ganesha child process (when running directly on host) + process: Option, + /// Container name (when running in a container) + container_name: Option, + /// Container tool used (docker/podman) + container_tool: Option, + /// Path to the config file (kept for potential future use) + #[allow(dead_code)] + config_path: PathBuf, + /// Path to the PID file + pid_path: PathBuf, + /// Temporary directory holding config files + #[allow(dead_code)] + temp_dir: tempfile::TempDir, + /// The port the server is running on + #[allow(dead_code)] + port: u16, + /// Whether verbose mode is enabled + verbose: bool, +} + +impl NfsServer { + /// Start a new NFS server with the given configuration + /// + /// This will: + /// 1. Generate the Ganesha configuration file + /// 2. Start ganesha.nfsd in foreground mode + /// 3. Return the running server handle + pub async fn start(config: NfsServerConfig) -> Result { + // Verify ganesha.nfsd is available + let ganesha_check = AsyncCommand::new("which") + .arg("ganesha.nfsd") + .output() + .await; + + if ganesha_check.is_err() || !ganesha_check.unwrap().status.success() { + anyhow::bail!( + "ganesha.nfsd not found. Please ensure NFS-Ganesha is installed.\n\ + On Ubuntu/Debian: apt install nfs-ganesha nfs-ganesha-vfs\n\ + On Fedora/RHEL: dnf install nfs-ganesha nfs-ganesha-vfs" + ); + } + + // Create temporary directory for config and PID files + let temp_dir = + tempfile::tempdir().context("Failed to create temp directory for NFS config")?; + let config_path = temp_dir.path().join("ganesha.conf"); + let pid_path = temp_dir.path().join("ganesha.pid"); + + // Generate and write configuration + let config_content = config.generate_ganesha_config(); + std::fs::write(&config_path, &config_content).with_context(|| { + format!( + "Failed to write Ganesha config to {}", + config_path.display() + ) + })?; + + if config.verbose { + print_info( + &format!( + "Starting NFS server on port {} with {} exports", + config.port, + config.exports.len() + ), + OutputLevel::Normal, + ); + print_info( + &format!("Config file: {}", config_path.display()), + OutputLevel::Verbose, + ); + } + + // Start ganesha.nfsd in foreground mode + let mut cmd = AsyncCommand::new("ganesha.nfsd"); + cmd.arg("-f") + .arg(&config_path) + .arg("-p") + .arg(&pid_path) + .arg("-F") // Foreground mode + .arg("-L") + .arg("/dev/stderr") // Log to stderr + .stdout(Stdio::null()) + .stderr(if config.verbose { + Stdio::inherit() + } else { + Stdio::null() + }); + + let process = cmd.spawn().context("Failed to start ganesha.nfsd")?; + + // Give it a moment to start + tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; + + Ok(Self { + process: Some(process), + container_name: None, + container_tool: None, + config_path, + pid_path, + temp_dir, + port: config.port, + verbose: config.verbose, + }) + } + + /// Start a new NFS server inside a container + /// + /// This runs ganesha.nfsd inside the SDK container, which has ganesha installed. + /// The container mounts the paths to export and maps the NFS port. 
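+    ///
+    /// The assembled invocation is roughly the following (angle-bracket names
+    /// are runtime placeholders):
+    ///
+    /// ```text
+    /// docker run --rm -d --name avocado-nfs-<id> --privileged --network host \
+    ///     -v <tmpdir>/ganesha.conf:/etc/ganesha/ganesha.conf:ro \
+    ///     -v <tmpdir>:/var/run/ganesha \
+    ///     -v <export-path>:<export-path> \
+    ///     <image> ganesha.nfsd -f /etc/ganesha/ganesha.conf -F -L /dev/stderr
+    /// ```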
+ /// + /// # Arguments + /// * `config` - NFS server configuration + /// * `container_tool` - Container tool to use (docker/podman) + /// * `container_image` - SDK container image to use + /// * `volume_mounts` - Additional volume mounts needed (for the docker volume) + pub async fn start_in_container( + config: NfsServerConfig, + container_tool: &str, + container_image: &str, + volume_mounts: Vec<(String, String)>, // (host_path, container_path) + ) -> Result { + // Create temporary directory for config files + let temp_dir = + tempfile::tempdir().context("Failed to create temp directory for NFS config")?; + let config_path = temp_dir.path().join("ganesha.conf"); + let pid_path = temp_dir.path().join("ganesha.pid"); + + // Generate and write configuration + let config_content = config.generate_ganesha_config(); + std::fs::write(&config_path, &config_content).with_context(|| { + format!( + "Failed to write Ganesha config to {}", + config_path.display() + ) + })?; + + // Generate unique container name + let container_name = format!( + "avocado-nfs-{}", + uuid::Uuid::new_v4() + .to_string() + .split('-') + .next() + .unwrap_or("temp") + ); + + if config.verbose { + print_info( + &format!( + "Starting NFS server in container on port {} with {} exports", + config.port, + config.exports.len() + ), + OutputLevel::Normal, + ); + } + + // Build container command + let mut args: Vec = vec![ + "run".to_string(), + "--rm".to_string(), + "-d".to_string(), // Detached mode + "--name".to_string(), + container_name.clone(), + "--privileged".to_string(), // Required for NFS + "--network".to_string(), + "host".to_string(), // Use host networking for NFS port + ]; + + // Mount the config file + args.push("-v".to_string()); + args.push(format!( + "{}:/etc/ganesha/ganesha.conf:ro", + config_path.display() + )); + + // Mount the PID file location + args.push("-v".to_string()); + args.push(format!("{}:/var/run/ganesha", temp_dir.path().display())); + + // Add volume mounts for exported paths + for (host_path, container_path) in &volume_mounts { + args.push("-v".to_string()); + args.push(format!("{}:{}", host_path, container_path)); + } + + // Also mount the exported paths from config + for export in &config.exports { + args.push("-v".to_string()); + args.push(format!( + "{}:{}", + export.local_path.display(), + export.local_path.display() + )); + } + + // Container image and command + args.push(container_image.to_string()); + args.push("ganesha.nfsd".to_string()); + args.push("-f".to_string()); + args.push("/etc/ganesha/ganesha.conf".to_string()); + args.push("-F".to_string()); // Foreground mode + args.push("-L".to_string()); + args.push("/dev/stderr".to_string()); + + if config.verbose { + print_info( + &format!("Running: {} {}", container_tool, args.join(" ")), + OutputLevel::Verbose, + ); + } + + // Start the container + let output = AsyncCommand::new(container_tool) + .args(&args) + .output() + .await + .context("Failed to start NFS server container")?; + + if !output.status.success() { + anyhow::bail!( + "Failed to start NFS server container: {}", + String::from_utf8_lossy(&output.stderr).trim() + ); + } + + // Give it a moment to start + tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; + + // Verify container is running + let check_output = AsyncCommand::new(container_tool) + .args(["inspect", "-f", "{{.State.Running}}", &container_name]) + .output() + .await?; + + if !check_output.status.success() + || String::from_utf8_lossy(&check_output.stdout).trim() != "true" + { + // Get 
container logs for debugging + let logs = AsyncCommand::new(container_tool) + .args(["logs", &container_name]) + .output() + .await + .ok(); + + let log_output = logs + .map(|l| String::from_utf8_lossy(&l.stderr).to_string()) + .unwrap_or_default(); + + anyhow::bail!( + "NFS server container failed to start. Logs:\n{}", + log_output + ); + } + + if config.verbose { + print_info( + &format!( + "NFS server container '{}' started successfully", + container_name + ), + OutputLevel::Normal, + ); + } + + Ok(Self { + process: None, + container_name: Some(container_name), + container_tool: Some(container_tool.to_string()), + config_path, + pid_path, + temp_dir, + port: config.port, + verbose: config.verbose, + }) + } + + /// Get the port the server is running on + #[allow(dead_code)] + pub fn port(&self) -> u16 { + self.port + } + + /// Stop the NFS server gracefully + pub async fn stop(mut self) -> Result<()> { + // Handle container-based NFS server + if let (Some(container_name), Some(container_tool)) = + (&self.container_name, &self.container_tool) + { + if self.verbose { + print_info( + &format!("Stopping NFS server container '{}'...", container_name), + OutputLevel::Normal, + ); + } + + // Stop the container + let _ = AsyncCommand::new(container_tool) + .args(["stop", "-t", "2", container_name]) + .output() + .await; + + // Remove the container (should already be removed due to --rm, but just in case) + let _ = AsyncCommand::new(container_tool) + .args(["rm", "-f", container_name]) + .output() + .await; + + if self.verbose { + print_info("NFS server container stopped", OutputLevel::Normal); + } + } + + // Handle direct process-based NFS server + if let Some(mut process) = self.process.take() { + if self.verbose { + print_info("Stopping NFS server...", OutputLevel::Normal); + } + + // Try graceful shutdown first + #[cfg(unix)] + { + if let Some(pid) = process.id() { + // Send SIGTERM + unsafe { + libc::kill(pid as i32, libc::SIGTERM); + } + } + } + + // Wait up to 2 seconds for graceful shutdown + let timeout = + tokio::time::timeout(tokio::time::Duration::from_secs(2), process.wait()).await; + + if timeout.is_err() { + if self.verbose { + print_info("Force killing NFS server...", OutputLevel::Normal); + } + // Force kill if it didn't stop gracefully + let _ = process.kill().await; + } + + if self.verbose { + print_info("NFS server stopped", OutputLevel::Normal); + } + } + + // Clean up PID file if it exists + if self.pid_path.exists() { + let _ = std::fs::remove_file(&self.pid_path); + } + + Ok(()) + } +} + +impl Drop for NfsServer { + fn drop(&mut self) { + // Try to kill the process if it's still running + if let Some(ref mut process) = self.process { + #[cfg(unix)] + { + if let Some(pid) = process.id() { + unsafe { + libc::kill(pid as i32, libc::SIGKILL); + } + } + } + } + } +} + +/// Builder for creating NFS server configurations +pub struct NfsServerBuilder { + config: NfsServerConfig, +} + +impl NfsServerBuilder { + /// Create a new builder with auto-selected port + pub fn new() -> Result { + let port = find_available_port(DEFAULT_NFS_PORT_RANGE) + .context("No available ports in range 12050-12099 for NFS server")?; + + Ok(Self { + config: NfsServerConfig::new(port), + }) + } + + /// Create a new builder with a specific port + pub fn with_port(port: u16) -> Result { + if !is_port_available(port) { + anyhow::bail!("Port {} is not available for NFS server", port); + } + + Ok(Self { + config: NfsServerConfig::new(port), + }) + } + + /// Set verbose mode + pub fn verbose(mut self, verbose: 
bool) -> Self { + self.config.verbose = verbose; + self + } + + /// Add an export + pub fn add_export( + mut self, + local_path: impl AsRef, + pseudo_path: impl Into, + ) -> Self { + self.config + .add_export(local_path.as_ref().to_path_buf(), pseudo_path.into()); + self + } + + /// Build and return the configuration + #[allow(dead_code)] + pub fn build(self) -> NfsServerConfig { + self.config + } + + /// Build and start the NFS server + pub async fn start(self) -> Result { + NfsServer::start(self.config).await + } +} + +impl Default for NfsServerBuilder { + fn default() -> Self { + Self::new().expect("Failed to find available port for NFS server") + } +} + +/// Get the mountpoint of a Docker volume on the host filesystem +/// +/// This queries Docker for the volume's mountpoint, which is needed to export +/// the volume contents via NFS. +pub async fn get_docker_volume_mountpoint( + container_tool: &str, + volume_name: &str, +) -> Result { + let output = AsyncCommand::new(container_tool) + .args([ + "volume", + "inspect", + volume_name, + "--format", + "{{.Mountpoint}}", + ]) + .output() + .await + .with_context(|| format!("Failed to inspect Docker volume '{}'", volume_name))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + anyhow::bail!( + "Failed to get mountpoint for volume '{}': {}", + volume_name, + stderr + ); + } + + let mountpoint = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if mountpoint.is_empty() { + anyhow::bail!("Docker volume '{}' has no mountpoint", volume_name); + } + + Ok(PathBuf::from(mountpoint)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_nfs_export_config_generation() { + let export = NfsExport::new(1, PathBuf::from("/home/user/project"), "/src".to_string()); + + let config = export.to_ganesha_config(); + + assert!(config.contains("Export_Id = 1")); + assert!(config.contains("Path = /home/user/project")); + assert!(config.contains("Pseudo = /src")); + assert!(config.contains("FSAL {")); + assert!(config.contains("name = VFS")); + } + + #[test] + fn test_nfs_server_config_generation() { + let mut config = NfsServerConfig::new(12050); + config.add_export(PathBuf::from("/home/user/src"), "/src".to_string()); + config.add_export( + PathBuf::from("/var/lib/docker/volumes/avo-123/_data"), + "/state".to_string(), + ); + + let ganesha_config = config.generate_ganesha_config(); + + assert!(ganesha_config.contains("NFS_Port = 12050")); + assert!(ganesha_config.contains("Export_Id = 1")); + assert!(ganesha_config.contains("Export_Id = 2")); + assert!(ganesha_config.contains("Pseudo = /src")); + assert!(ganesha_config.contains("Pseudo = /state")); + assert!(ganesha_config.contains("Protocols = 4")); + assert!(ganesha_config.contains("Squash = No_Root_Squash")); + } + + #[test] + fn test_nfs_server_config_verbose_logging() { + let config = NfsServerConfig::new(12050).with_verbose(true); + let ganesha_config = config.generate_ganesha_config(); + + assert!(ganesha_config.contains("Default_Log_Level = DEBUG")); + } + + #[test] + fn test_nfs_server_config_default_logging() { + let config = NfsServerConfig::new(12050); + let ganesha_config = config.generate_ganesha_config(); + + assert!(ganesha_config.contains("Default_Log_Level = EVENT")); + } + + #[test] + fn test_find_available_port_in_range() { + // This test may be flaky depending on what ports are in use + // but it should generally find at least one available port + let port = find_available_port(50000..=50010); + assert!(port.is_some()); + 
} + + #[test] + fn test_nfs_server_builder() { + let config = NfsServerBuilder::with_port(50099) + .expect("Port should be available") + .verbose(true) + .add_export("/tmp/test", "/test") + .build(); + + assert_eq!(config.port, 50099); + assert!(config.verbose); + assert_eq!(config.exports.len(), 1); + assert_eq!(config.exports[0].pseudo_path, "/test"); + } +} diff --git a/src/utils/remote.rs b/src/utils/remote.rs new file mode 100644 index 0000000..0cd6acd --- /dev/null +++ b/src/utils/remote.rs @@ -0,0 +1,675 @@ +//! Remote execution utilities for SSH-based command execution and volume management. +//! +//! This module provides utilities for running avocado commands on remote hosts +//! while using NFS-backed volumes from the local machine. + +use anyhow::{Context, Result}; +use std::net::IpAddr; +use std::path::Path; +use std::process::Stdio; +use tokio::process::Command as AsyncCommand; + +use crate::utils::output::{print_info, OutputLevel}; + +/// Represents a remote host in user@host or just host format +#[derive(Debug, Clone)] +pub struct RemoteHost { + /// Username for SSH connection (None means use current user) + pub user: Option, + /// Hostname or IP address + pub host: String, +} + +impl RemoteHost { + /// Parse a remote host specification in the format "user@host" or just "host" + /// If no user is specified, SSH will use the current user. + pub fn parse(spec: &str) -> Result { + let spec = spec.trim(); + + if spec.is_empty() { + anyhow::bail!("Remote host specification cannot be empty"); + } + + if spec.contains('@') { + let parts: Vec<&str> = spec.splitn(2, '@').collect(); + let user = parts[0].to_string(); + let host = parts[1].to_string(); + + if user.is_empty() { + anyhow::bail!("Username cannot be empty in '{}'", spec); + } + + if host.is_empty() { + anyhow::bail!("Hostname cannot be empty in '{}'", spec); + } + + Ok(Self { + user: Some(user), + host, + }) + } else { + // No @ sign - just a hostname, SSH will infer the current user + Ok(Self { + user: None, + host: spec.to_string(), + }) + } + } + + /// Get the SSH target string (user@host or just host) + pub fn ssh_target(&self) -> String { + match &self.user { + Some(user) => format!("{}@{}", user, self.host), + None => self.host.clone(), + } + } +} + +/// SSH client for remote command execution +pub struct SshClient { + remote: RemoteHost, + verbose: bool, +} + +impl SshClient { + /// Create a new SSH client for the given remote host + pub fn new(remote: RemoteHost) -> Self { + Self { + remote, + verbose: false, + } + } + + /// Set verbose mode + pub fn with_verbose(mut self, verbose: bool) -> Self { + self.verbose = verbose; + self + } + + /// Check SSH connectivity to the remote host + /// + /// This runs a simple command to verify we can connect via SSH. + pub async fn check_connectivity(&self) -> Result<()> { + if self.verbose { + print_info( + &format!( + "Checking SSH connectivity to {}...", + self.remote.ssh_target() + ), + OutputLevel::Normal, + ); + } + + let output = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "ConnectTimeout=10", + "-o", + "StrictHostKeyChecking=accept-new", + &self.remote.ssh_target(), + "echo", + "ok", + ]) + .output() + .await + .context("Failed to execute SSH command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + anyhow::bail!( + "Cannot connect to '{}' via SSH. Ensure:\n\ + 1. SSH key-based authentication is configured\n\ + 2. The remote host is reachable\n\ + 3. 
The username is correct\n\ + Error: {}", + self.remote.ssh_target(), + stderr.trim() + ); + } + + if self.verbose { + print_info( + &format!("SSH connection to {} successful", self.remote.ssh_target()), + OutputLevel::Normal, + ); + } + + Ok(()) + } + + /// Check that the remote avocado CLI version is compatible + /// + /// The remote version must be equal to or greater than the local version. + /// Returns the remote version string if compatible. + /// + /// For localhost/127.0.0.1, this check is skipped since it's the same machine. + pub async fn check_cli_version(&self) -> Result { + let local_version = env!("CARGO_PKG_VERSION"); + + // Skip version check for localhost - it's the same machine + if self.remote.host == "localhost" || self.remote.host == "127.0.0.1" { + if self.verbose { + print_info( + "Skipping version check for localhost (same machine)", + OutputLevel::Normal, + ); + } + return Ok(local_version.to_string()); + } + + if self.verbose { + print_info( + &format!( + "Checking avocado CLI version on {}...", + self.remote.ssh_target() + ), + OutputLevel::Normal, + ); + } + + // Try to get the remote avocado version + let output = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "ConnectTimeout=10", + "-o", + "StrictHostKeyChecking=accept-new", + &self.remote.ssh_target(), + "avocado --version 2>/dev/null || echo 'not-installed'", + ]) + .output() + .await + .context("Failed to check remote avocado version")?; + + if !output.status.success() { + anyhow::bail!( + "Failed to check avocado version on '{}': {}", + self.remote.ssh_target(), + String::from_utf8_lossy(&output.stderr).trim() + ); + } + + let version_output = String::from_utf8_lossy(&output.stdout).trim().to_string(); + + if version_output == "not-installed" || version_output.is_empty() { + anyhow::bail!( + "avocado CLI is not installed on '{}'. Please install avocado {} or later.", + self.remote.ssh_target(), + local_version + ); + } + + // Parse version from output like "avocado 0.20.0" + let remote_version = version_output + .split_whitespace() + .last() + .unwrap_or(&version_output); + + // Compare versions + if !is_version_compatible(local_version, remote_version) { + anyhow::bail!( + "Remote avocado version '{}' is older than local version '{}'. 
\ + Please upgrade avocado on '{}' to version {} or later.", + remote_version, + local_version, + self.remote.ssh_target(), + local_version + ); + } + + if self.verbose { + print_info( + &format!( + "Remote avocado version: {} (local: {})", + remote_version, local_version + ), + OutputLevel::Normal, + ); + } + + Ok(remote_version.to_string()) + } + + /// Run a command on the remote host and return the output + pub async fn run_command(&self, command: &str) -> Result { + if self.verbose { + print_info( + &format!("Running remote command: {}", command), + OutputLevel::Verbose, + ); + } + + let output = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "StrictHostKeyChecking=accept-new", + &self.remote.ssh_target(), + command, + ]) + .output() + .await + .with_context(|| format!("Failed to run command on remote: {}", command))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + anyhow::bail!( + "Remote command failed: {}\nError: {}", + command, + stderr.trim() + ); + } + + Ok(String::from_utf8_lossy(&output.stdout).trim().to_string()) + } + + /// Run a command on the remote host, inheriting stdout/stderr + pub async fn run_command_interactive(&self, command: &str) -> Result { + if self.verbose { + print_info( + &format!("Running remote command (interactive): {}", command), + OutputLevel::Verbose, + ); + } + + let status = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "StrictHostKeyChecking=accept-new", + "-t", // Force pseudo-terminal allocation for interactive commands + &self.remote.ssh_target(), + command, + ]) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .status() + .await + .with_context(|| format!("Failed to run command on remote: {}", command))?; + + Ok(status.success()) + } + + /// Get the remote host reference + #[allow(dead_code)] + pub fn remote(&self) -> &RemoteHost { + &self.remote + } +} + +/// Manager for creating and removing NFS-backed Docker volumes on remote hosts +pub struct RemoteVolumeManager { + ssh: SshClient, + container_tool: String, +} + +impl RemoteVolumeManager { + /// Create a new remote volume manager + pub fn new(ssh: SshClient, container_tool: String) -> Self { + Self { + ssh, + container_tool, + } + } + + /// Create an NFS-backed Docker volume on the remote host + /// + /// # Arguments + /// * `volume_name` - Name for the new volume + /// * `nfs_host` - NFS server hostname or IP + /// * `nfs_port` - NFS server port + /// * `export_path` - NFS pseudo path to mount (e.g., "/src", "/state") + pub async fn create_nfs_volume( + &self, + volume_name: &str, + nfs_host: &str, + nfs_port: u16, + export_path: &str, + ) -> Result<()> { + let command = format!( + "{} volume create \ + --driver local \ + --opt type=nfs \ + --opt o=addr={},rw,nfsvers=4,port={} \ + --opt device=:{} \ + {}", + self.container_tool, nfs_host, nfs_port, export_path, volume_name + ); + + self.ssh.run_command(&command).await?; + + if self.ssh.verbose { + print_info( + &format!("Created NFS volume '{}' on remote", volume_name), + OutputLevel::Normal, + ); + } + + Ok(()) + } + + /// Remove a Docker volume from the remote host + pub async fn remove_volume(&self, volume_name: &str) -> Result<()> { + let command = format!("{} volume rm -f {}", self.container_tool, volume_name); + + // Ignore errors - volume might not exist + let _ = self.ssh.run_command(&command).await; + + if self.ssh.verbose { + print_info( + &format!("Removed volume '{}' from remote", volume_name), + OutputLevel::Normal, + ); + } 
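+        // Removal is best-effort: errors from `volume rm` are ignored above, so
+        // reaching this point does not guarantee the volume actually existed.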
+ + Ok(()) + } + + /// Check if a volume exists on the remote host + #[allow(dead_code)] + pub async fn volume_exists(&self, volume_name: &str) -> Result { + let command = format!( + "{} volume inspect {} >/dev/null 2>&1 && echo 'exists' || echo 'not found'", + self.container_tool, volume_name + ); + + let output = self.ssh.run_command(&command).await?; + Ok(output.trim() == "exists") + } + + /// Run a Docker container on the remote host with the given volume mappings + /// + /// # Arguments + /// * `image` - Container image to run + /// * `volumes` - Volume mappings (host_volume:container_path) + /// * `env_vars` - Environment variables + /// * `command` - Command to run in the container + /// * `extra_args` - Additional Docker arguments + #[allow(dead_code)] + pub async fn run_container( + &self, + image: &str, + volumes: &[(&str, &str)], + env_vars: &[(&str, &str)], + command: &str, + extra_args: &[&str], + ) -> Result { + let mut docker_cmd = format!("{} run --rm", self.container_tool); + + // Add volume mappings + for (host_vol, container_path) in volumes { + docker_cmd.push_str(&format!(" -v {}:{}", host_vol, container_path)); + } + + // Add environment variables + for (key, value) in env_vars { + docker_cmd.push_str(&format!(" -e {}={}", key, value)); + } + + // Add extra arguments + for arg in extra_args { + docker_cmd.push_str(&format!(" {}", arg)); + } + + // Add image and command + docker_cmd.push_str(&format!( + " {} bash -c '{}'", + image, + command.replace('\'', "'\\''") + )); + + self.ssh.run_command_interactive(&docker_cmd).await + } +} + +/// SSH tunnel for forwarding Unix sockets +#[cfg(unix)] +pub struct SshTunnel { + /// The SSH process + process: Option, + /// Remote socket path + remote_socket: String, + /// Local socket path (stored for potential debugging/logging) + #[allow(dead_code)] + local_socket: std::path::PathBuf, +} + +#[cfg(unix)] +impl SshTunnel { + /// Create an SSH tunnel forwarding a Unix socket from remote to local + /// + /// This uses SSH's `-R` option to forward a remote Unix socket to a local one, + /// allowing the remote process to communicate with a local service. 
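+    ///
+    /// The spawned process is roughly equivalent to (socket paths illustrative):
+    ///
+    /// ```text
+    /// ssh -N -o ExitOnForwardFailure=yes \
+    ///     -R /tmp/avocado-sign-<id>.sock:<local-socket-path> <user@host>
+    /// ```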
+ pub async fn create( + remote: &RemoteHost, + local_socket: &Path, + remote_socket: &str, + ) -> Result { + // Ensure the local socket exists + if !local_socket.exists() { + anyhow::bail!("Local socket does not exist: {}", local_socket.display()); + } + + // Start SSH with socket forwarding + // -R remote_socket:local_socket forwards from remote to local + let process = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "StrictHostKeyChecking=accept-new", + "-o", + "ExitOnForwardFailure=yes", + "-N", // Don't execute a remote command + "-R", + &format!("{}:{}", remote_socket, local_socket.display()), + &remote.ssh_target(), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to create SSH tunnel")?; + + // Give it a moment to establish + tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; + + Ok(Self { + process: Some(process), + remote_socket: remote_socket.to_string(), + local_socket: local_socket.to_path_buf(), + }) + } + + /// Get the remote socket path + pub fn remote_socket(&self) -> &str { + &self.remote_socket + } + + /// Close the SSH tunnel + pub async fn close(mut self) -> Result<()> { + if let Some(mut process) = self.process.take() { + let _ = process.kill().await; + } + Ok(()) + } +} + +#[cfg(unix)] +impl Drop for SshTunnel { + fn drop(&mut self) { + if let Some(ref mut process) = self.process { + // Best effort kill + #[cfg(unix)] + { + if let Some(pid) = process.id() { + unsafe { + libc::kill(pid as i32, libc::SIGKILL); + } + } + } + } + } +} + +/// Get the local machine's IP address that is reachable from the remote host +/// +/// This tries to determine the local IP address that the remote host can use +/// to connect back to this machine (for NFS). +pub async fn get_local_ip_for_remote(remote_host: &str) -> Result { + // Try to resolve the remote host and get the local IP used to reach it + // This is done by creating a UDP socket and "connecting" to the remote + // (no actual connection is made for UDP, but the OS figures out which + // local interface would be used) + + use std::net::UdpSocket; + + // First, try to resolve the remote host + let remote_addrs: Vec<_> = tokio::net::lookup_host(format!("{}:22", remote_host)) + .await + .with_context(|| format!("Failed to resolve remote host '{}'", remote_host))? + .collect(); + + if remote_addrs.is_empty() { + anyhow::bail!("Could not resolve remote host '{}'", remote_host); + } + + // Create a UDP socket and "connect" to the remote to determine local interface + let socket = UdpSocket::bind("0.0.0.0:0").context("Failed to create UDP socket")?; + + socket + .connect(remote_addrs[0]) + .context("Failed to determine route to remote host")?; + + let local_addr = socket.local_addr().context("Failed to get local address")?; + + Ok(local_addr.ip()) +} + +/// Check if a remote version is compatible with the local version +/// +/// The remote version must be equal to or greater than the local version. +/// Uses semantic versioning comparison. 
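+///
+/// For example:
+///
+/// ```
+/// use avocado_cli::utils::remote::is_version_compatible;
+///
+/// assert!(is_version_compatible("0.20.0", "0.21.0")); // remote newer: ok
+/// assert!(!is_version_compatible("0.21.0", "0.20.0")); // remote older: rejected
+/// ```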
+pub fn is_version_compatible(local_version: &str, remote_version: &str) -> bool { + let parse_version = |v: &str| -> Option<(u32, u32, u32)> { + let parts: Vec<&str> = v.split('.').collect(); + if parts.len() >= 3 { + Some(( + parts[0].parse().ok()?, + parts[1].parse().ok()?, + parts[2].split('-').next()?.parse().ok()?, // Handle pre-release like 0.20.0-beta + )) + } else if parts.len() == 2 { + Some((parts[0].parse().ok()?, parts[1].parse().ok()?, 0)) + } else { + None + } + }; + + match (parse_version(local_version), parse_version(remote_version)) { + (Some(local), Some(remote)) => { + // Remote must be >= local + remote >= local + } + _ => { + // If we can't parse versions, assume compatible (fail open) + true + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_remote_host_parse_valid() { + let host = RemoteHost::parse("jschneck@riptide.local").unwrap(); + assert_eq!(host.user, Some("jschneck".to_string())); + assert_eq!(host.host, "riptide.local"); + assert_eq!(host.ssh_target(), "jschneck@riptide.local"); + } + + #[test] + fn test_remote_host_parse_ip() { + let host = RemoteHost::parse("user@192.168.1.100").unwrap(); + assert_eq!(host.user, Some("user".to_string())); + assert_eq!(host.host, "192.168.1.100"); + } + + #[test] + fn test_remote_host_parse_hostname_only() { + // SSH can infer the current user when no user is specified + let host = RemoteHost::parse("hostname").unwrap(); + assert_eq!(host.user, None); + assert_eq!(host.host, "hostname"); + assert_eq!(host.ssh_target(), "hostname"); + } + + #[test] + fn test_remote_host_parse_localhost() { + let host = RemoteHost::parse("localhost").unwrap(); + assert_eq!(host.user, None); + assert_eq!(host.host, "localhost"); + assert_eq!(host.ssh_target(), "localhost"); + } + + #[test] + fn test_remote_host_parse_invalid_empty_user() { + let result = RemoteHost::parse("@hostname"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Username")); + } + + #[test] + fn test_remote_host_parse_invalid_empty_host() { + let result = RemoteHost::parse("user@"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Hostname")); + } + + #[test] + fn test_version_compatible_equal() { + assert!(is_version_compatible("0.20.0", "0.20.0")); + assert!(is_version_compatible("1.0.0", "1.0.0")); + } + + #[test] + fn test_version_compatible_remote_newer() { + assert!(is_version_compatible("0.20.0", "0.21.0")); + assert!(is_version_compatible("0.20.0", "1.0.0")); + assert!(is_version_compatible("0.20.0", "0.20.1")); + } + + #[test] + fn test_version_incompatible_remote_older() { + assert!(!is_version_compatible("0.21.0", "0.20.0")); + assert!(!is_version_compatible("1.0.0", "0.20.0")); + assert!(!is_version_compatible("0.20.1", "0.20.0")); + } + + #[test] + fn test_version_compatible_major_minor_only() { + assert!(is_version_compatible("0.20", "0.20.0")); + assert!(is_version_compatible("0.20.0", "0.21")); + } + + #[test] + fn test_version_compatible_with_prerelease() { + // Pre-release versions should still compare by numbers + assert!(is_version_compatible("0.20.0-beta", "0.20.0")); + assert!(is_version_compatible("0.20.0", "0.20.1-rc1")); + } + + #[test] + fn test_version_compatible_unparseable() { + // Unparseable versions should fail open (assume compatible) + assert!(is_version_compatible("unparseable", "0.20.0")); + assert!(is_version_compatible("0.20.0", "unparseable")); + } +} diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs new file mode 100644 index 
0000000..ee316ed --- /dev/null +++ b/src/utils/runs_on.rs @@ -0,0 +1,504 @@ +//! RunsOn orchestration for remote execution workflow. +//! +//! This module provides the high-level orchestration for running avocado commands +//! on remote hosts while using NFS-backed volumes from the local machine. + +use anyhow::{Context, Result}; +use std::collections::HashMap; +use std::path::Path; +use uuid::Uuid; + +use crate::utils::nfs_server::{ + find_available_port, get_docker_volume_mountpoint, is_port_available, NfsExport, NfsServer, + NfsServerConfig, DEFAULT_NFS_PORT_RANGE, +}; +use crate::utils::output::{print_info, print_success, OutputLevel}; +use crate::utils::remote::{get_local_ip_for_remote, RemoteHost, RemoteVolumeManager, SshClient}; + +#[cfg(unix)] +use crate::utils::remote::SshTunnel; + +/// Context for remote execution via `--runs-on` +/// +/// This manages the lifecycle of: +/// - NFS server on the local host +/// - NFS-backed Docker volumes on the remote host +/// - SSH tunnel for signing (if needed) +pub struct RunsOnContext { + /// The remote host + remote_host: RemoteHost, + /// SSH client for remote operations + ssh: SshClient, + /// The running NFS server + nfs_server: Option, + /// NFS port being used + #[allow(dead_code)] + nfs_port: u16, + /// Local IP address reachable from remote + #[allow(dead_code)] + local_ip: String, + /// Container tool (docker/podman) + container_tool: String, + /// Session UUID for unique volume names + session_id: String, + /// Remote volume for src_dir + remote_src_volume: Option, + /// Remote volume for _avocado state + remote_state_volume: Option, + /// SSH tunnel for signing + #[cfg(unix)] + signing_tunnel: Option, + /// Enable verbose output + verbose: bool, +} + +impl RunsOnContext { + /// Create and set up a new RunsOn context + /// + /// This will: + /// 1. Validate SSH connectivity + /// 2. Start NFS server with exports for src_dir and the avocado volume + /// 3. 
Create NFS-backed Docker volumes on the remote host + /// + /// # Arguments + /// * `runs_on` - Remote host specification (user@host) + /// * `nfs_port` - Optional specific port (None = auto-select) + /// * `src_dir` - Local source directory to export + /// * `local_volume_name` - Local Docker volume name (e.g., "avo-{uuid}") + /// * `container_tool` - Container tool to use (docker/podman) + /// * `container_image` - SDK container image to use for NFS server + /// * `verbose` - Enable verbose output + pub async fn setup( + runs_on: &str, + nfs_port: Option, + src_dir: &Path, + local_volume_name: &str, + container_tool: &str, + container_image: &str, + verbose: bool, + ) -> Result { + // Parse remote host + let remote_host = RemoteHost::parse(runs_on)?; + + // Print banner to indicate runs-on mode + println!(); + print_info( + &format!( + "🌐 Remote execution mode: running on {}", + remote_host.ssh_target() + ), + OutputLevel::Normal, + ); + println!(); + + // Create SSH client and verify connectivity + print_info("Checking SSH connectivity...", OutputLevel::Normal); + let ssh = SshClient::new(remote_host.clone()).with_verbose(verbose); + ssh.check_connectivity().await?; + + // Check remote CLI version compatibility + print_info("Checking remote avocado version...", OutputLevel::Normal); + let remote_version = ssh.check_cli_version().await?; + print_success( + &format!("Remote avocado version: {} ✓", remote_version), + OutputLevel::Normal, + ); + + // Determine which port to use + let port = match nfs_port { + Some(p) => { + if !is_port_available(p) { + anyhow::bail!("Specified NFS port {} is not available", p); + } + p + } + None => find_available_port(DEFAULT_NFS_PORT_RANGE) + .context("No available ports in range 12050-12099 for NFS server")?, + }; + + if verbose { + print_info( + &format!("Using NFS port {} for remote execution", port), + OutputLevel::Normal, + ); + } + + // Get local IP that the remote can reach + let local_ip = get_local_ip_for_remote(&remote_host.host) + .await + .context("Failed to determine local IP for NFS server")?; + + if verbose { + print_info( + &format!("Local IP for NFS: {}", local_ip), + OutputLevel::Normal, + ); + } + + // Get the mountpoint of the local Docker volume + let volume_mountpoint = get_docker_volume_mountpoint(container_tool, local_volume_name) + .await + .with_context(|| { + format!( + "Failed to get mountpoint for volume '{}'", + local_volume_name + ) + })?; + + if verbose { + print_info( + &format!("Local volume mountpoint: {}", volume_mountpoint.display()), + OutputLevel::Normal, + ); + } + + // Create and start NFS server inside the SDK container + // The container has ganesha.nfsd installed + let config = NfsServerConfig { + port, + exports: vec![ + NfsExport::new(1, src_dir.to_path_buf(), "/src".to_string()), + NfsExport::new(2, volume_mountpoint.clone(), "/state".to_string()), + ], + verbose, + bind_addr: "0.0.0.0".to_string(), + }; + + // Volume mounts for the container to access the paths + let volume_mounts = vec![ + ( + src_dir.to_string_lossy().to_string(), + src_dir.to_string_lossy().to_string(), + ), + ( + volume_mountpoint.to_string_lossy().to_string(), + volume_mountpoint.to_string_lossy().to_string(), + ), + ]; + + let nfs_server = + NfsServer::start_in_container(config, container_tool, container_image, volume_mounts) + .await + .context("Failed to start NFS server")?; + + print_success( + &format!("NFS server started on port {}", port), + OutputLevel::Normal, + ); + + // Generate unique session ID for volume names + let 
session_id = Uuid::new_v4().to_string()[..8].to_string(); + let src_volume_name = format!("avocado-src-{}", session_id); + let state_volume_name = format!("avocado-state-{}", session_id); + + // Create NFS-backed volumes on remote + print_info( + "Creating NFS volumes on remote host...", + OutputLevel::Normal, + ); + let remote_vm = RemoteVolumeManager::new( + SshClient::new(remote_host.clone()).with_verbose(verbose), + container_tool.to_string(), + ); + + // Create source volume + remote_vm + .create_nfs_volume(&src_volume_name, &local_ip.to_string(), port, "/src") + .await + .with_context(|| { + format!( + "Failed to create NFS volume '{}' on remote", + src_volume_name + ) + })?; + + // Create state volume + remote_vm + .create_nfs_volume(&state_volume_name, &local_ip.to_string(), port, "/state") + .await + .with_context(|| { + format!( + "Failed to create NFS volume '{}' on remote", + state_volume_name + ) + })?; + + print_success("Remote NFS volumes ready ✓", OutputLevel::Normal); + println!(); + print_info( + &format!("📂 src_dir: {} → remote:/opt/src", src_dir.display()), + OutputLevel::Normal, + ); + print_info( + &format!( + "📂 _avocado: {} → remote:/opt/_avocado", + volume_mountpoint.display() + ), + OutputLevel::Normal, + ); + println!(); + + Ok(Self { + remote_host, + ssh, + nfs_server: Some(nfs_server), + nfs_port: port, + local_ip: local_ip.to_string(), + container_tool: container_tool.to_string(), + session_id, + remote_src_volume: Some(src_volume_name), + remote_state_volume: Some(state_volume_name), + #[cfg(unix)] + signing_tunnel: None, + verbose, + }) + } + + /// Get the NFS port being used + #[allow(dead_code)] + pub fn nfs_port(&self) -> u16 { + self.nfs_port + } + + /// Get the session ID + #[allow(dead_code)] + pub fn session_id(&self) -> &str { + &self.session_id + } + + /// Get the remote source volume name + #[allow(dead_code)] + pub fn src_volume(&self) -> Option<&str> { + self.remote_src_volume.as_deref() + } + + /// Get the remote state volume name + #[allow(dead_code)] + pub fn state_volume(&self) -> Option<&str> { + self.remote_state_volume.as_deref() + } + + /// Setup SSH tunnel for signing + /// + /// This creates an SSH tunnel that forwards signing requests from the remote + /// back to the local signing service. + #[cfg(unix)] + pub async fn setup_signing_tunnel(&mut self, local_socket: &Path) -> Result { + let remote_socket = format!("/tmp/avocado-sign-{}.sock", self.session_id); + + if self.verbose { + print_info( + &format!( + "Setting up signing tunnel: {} -> {}", + remote_socket, + local_socket.display() + ), + OutputLevel::Normal, + ); + } + + let tunnel = SshTunnel::create(&self.remote_host, local_socket, &remote_socket) + .await + .context("Failed to create SSH tunnel for signing")?; + + let socket_path = tunnel.remote_socket().to_string(); + self.signing_tunnel = Some(tunnel); + + Ok(socket_path) + } + + /// Signing tunnel stub for non-Unix platforms + #[cfg(not(unix))] + pub async fn setup_signing_tunnel(&mut self, _local_socket: &Path) -> Result { + anyhow::bail!("Signing tunnel is only supported on Unix platforms") + } + + /// Run a command on the remote host inside a container + /// + /// This executes the given command in a container on the remote host, + /// with the NFS volumes mounted appropriately. 
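+    ///
+    /// The command executed over SSH looks roughly like this (volume, image,
+    /// and env values are placeholders):
+    ///
+    /// ```text
+    /// docker run --rm -v avocado-src-<id>:/mnt/src:rw \
+    ///     -v avocado-state-<id>:/opt/_avocado:rw \
+    ///     --device /dev/fuse --cap-add SYS_ADMIN \
+    ///     -e AVOCADO_TARGET='<target>' ... <image> bash -c '<command>'
+    /// ```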
+ /// + /// # Arguments + /// * `image` - Container image to use + /// * `command` - Command to run inside the container + /// * `env_vars` - Environment variables to set + /// * `extra_docker_args` - Additional Docker arguments + pub async fn run_container_command( + &self, + image: &str, + command: &str, + env_vars: HashMap, + extra_docker_args: &[String], + ) -> Result { + let src_volume = self + .remote_src_volume + .as_ref() + .context("Source volume not created")?; + let state_volume = self + .remote_state_volume + .as_ref() + .context("State volume not created")?; + + print_info( + &format!("▶ Executing on {}...", self.remote_host.ssh_target()), + OutputLevel::Normal, + ); + println!(); + + // Build the docker run command with --rm to ensure cleanup + // Mount src volume to /mnt/src so bindfs can remap to /opt/src with UID translation + // Mount state volume directly to /opt/_avocado (no UID mapping needed) + let mut docker_cmd = format!( + "{} run --rm \ + -v {}:/mnt/src:rw \ + -v {}:/opt/_avocado:rw \ + --device /dev/fuse \ + --cap-add SYS_ADMIN", + self.container_tool, src_volume, state_volume + ); + + // Add environment variables + for (key, value) in &env_vars { + docker_cmd.push_str(&format!(" -e {}={}", key, shell_escape(value))); + } + + // Add signing socket if tunnel is active + #[cfg(unix)] + if let Some(ref tunnel) = self.signing_tunnel { + docker_cmd.push_str(&format!( + " -v {}:{} -e AVOCADO_SIGNING_SOCKET={}", + tunnel.remote_socket(), + tunnel.remote_socket(), + tunnel.remote_socket() + )); + } + + // Add extra Docker arguments + for arg in extra_docker_args { + docker_cmd.push_str(&format!(" {}", arg)); + } + + // Add image and command + docker_cmd.push_str(&format!(" {} bash -c {}", image, shell_escape(command))); + + if self.verbose { + print_info( + &format!("Running on remote: {}", docker_cmd), + OutputLevel::Verbose, + ); + } + + // Execute on remote + self.ssh.run_command_interactive(&docker_cmd).await + } + + /// Clean up all resources + /// + /// This will: + /// - Remove NFS-backed volumes from remote + /// - Close SSH tunnel (if any) + /// - Stop NFS server + pub async fn teardown(mut self) -> Result<()> { + println!(); + print_info("🧹 Cleaning up remote resources...", OutputLevel::Normal); + + // Close signing tunnel first + #[cfg(unix)] + if let Some(tunnel) = self.signing_tunnel.take() { + let _ = tunnel.close().await; + } + + // Remove remote volumes + let remote_vm = RemoteVolumeManager::new( + SshClient::new(self.remote_host.clone()).with_verbose(self.verbose), + self.container_tool.clone(), + ); + + let mut cleanup_errors = Vec::new(); + + if let Some(ref volume) = self.remote_src_volume { + if self.verbose { + print_info( + &format!("Removing remote volume: {}", volume), + OutputLevel::Normal, + ); + } + if let Err(e) = remote_vm.remove_volume(volume).await { + cleanup_errors.push(format!("Failed to remove {}: {}", volume, e)); + } + } + + if let Some(ref volume) = self.remote_state_volume { + if self.verbose { + print_info( + &format!("Removing remote volume: {}", volume), + OutputLevel::Normal, + ); + } + if let Err(e) = remote_vm.remove_volume(volume).await { + cleanup_errors.push(format!("Failed to remove {}: {}", volume, e)); + } + } + + // Stop NFS server + if self.verbose { + print_info("Stopping NFS server...", OutputLevel::Normal); + } + if let Some(server) = self.nfs_server.take() { + if let Err(e) = server.stop().await { + cleanup_errors.push(format!("Failed to stop NFS server: {}", e)); + } + } + + // Report any cleanup errors (but 
+    format!("'{}'", s.replace('\'', "'\\''"))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_shell_escape_simple() {
+        assert_eq!(shell_escape("hello"), "'hello'");
+    }
+
+    #[test]
+    fn test_shell_escape_with_spaces() {
+        assert_eq!(shell_escape("hello world"), "'hello world'");
+    }
+
+    #[test]
+    fn test_shell_escape_with_quotes() {
+        assert_eq!(shell_escape("it's"), "'it'\\''s'");
+    }
+
+    #[test]
+    fn test_shell_escape_complex() {
+        assert_eq!(
+            shell_escape("echo 'hello' && rm -rf /"),
+            "'echo '\\''hello'\\'' && rm -rf /'"
+        );
+    }
+}
diff --git a/tests/runs_on_integration.rs b/tests/runs_on_integration.rs
new file mode 100644
index 0000000..ca0c3b0
--- /dev/null
+++ b/tests/runs_on_integration.rs
@@ -0,0 +1,1249 @@
+//! Integration tests for the `--runs-on` remote execution feature.
+//!
+//! These tests verify:
+//! - SSH connectivity and command execution
+//! - NFS server configuration and exports
+//! - Remote NFS volume creation
+//! - Signing via SSH tunnel
+//! - File access and permission mapping
+//! - Read/write operations to both src_dir and _avocado volumes
+//!
+//! ## Running Tests
+//!
+//! Most tests use localhost and require:
+//! - SSH key-based auth configured for localhost (ssh localhost should work without password)
+//! - Docker available locally
+//!
+//! To set up localhost SSH (if not already configured):
+//!     ssh-keygen -t ed25519   # if you don't have a key
+//!     cat ~/.ssh/id_ed25519.pub >> ~/.ssh/authorized_keys
+//!     chmod 600 ~/.ssh/authorized_keys
+//!
+//! Run localhost tests:
+//!     cargo test --test runs_on_integration -- --ignored localhost
+//!
+//! Run with custom remote host:
+//!     RUNS_ON_TEST_HOST=user@hostname cargo test --test runs_on_integration -- --ignored
+
+#![allow(dead_code)]
+
+use std::fs;
+#[cfg(unix)]
+use std::os::unix::fs::PermissionsExt;
+
+mod common;
+
+/// Get the test remote host - defaults to current_user@localhost
+fn get_test_host() -> String {
+    std::env::var("RUNS_ON_TEST_HOST").unwrap_or_else(|_| {
+        let user = std::env::var("USER").unwrap_or_else(|_| "root".to_string());
+        format!("{}@localhost", user)
+    })
+}
+
+/// Check if localhost SSH is available
+fn localhost_ssh_available() -> bool {
+    std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            "-o",
+            "ConnectTimeout=2",
+            "localhost",
+            "true",
+        ])
+        .output()
+        .map(|o| o.status.success())
+        .unwrap_or(false)
+}
+
+// =============================================================================
+// Unit Tests (run without network)
+// =============================================================================
+
+mod nfs_config_tests {
+    use avocado_cli::utils::nfs_server::{NfsExport, NfsServerConfig};
+    use std::path::PathBuf;
+
+    #[test]
+    fn test_single_export_config() {
+        let mut config = NfsServerConfig::new(12050);
+        config.add_export(PathBuf::from("/home/user/src"), "/src".to_string());
+
+        let ganesha_config = config.generate_ganesha_config();
+
+        assert!(ganesha_config.contains("NFS_Port = 12050"));
+        assert!(ganesha_config.contains("Export_Id = 1"));
+        assert!(ganesha_config.contains("Path = /home/user/src"));
+        assert!(ganesha_config.contains("Pseudo = /src"));
+    }
+
+    #[test]
+    fn test_dual_export_config_for_runs_on() {
+        let mut config = NfsServerConfig::new(12051);
+        config.add_export(PathBuf::from("/home/user/project"), "/src".to_string());
+        config.add_export(
+            PathBuf::from("/var/lib/docker/volumes/avo-abc123/_data"),
+            "/state".to_string(),
+        );
+
+        let ganesha_config = config.generate_ganesha_config();
+
+        // Verify both exports are present
+        assert!(ganesha_config.contains("Export_Id = 1"));
+        assert!(ganesha_config.contains("Export_Id = 2"));
+        assert!(ganesha_config.contains("Pseudo = /src"));
+        assert!(ganesha_config.contains("Pseudo = /state"));
+
+        // Verify security settings for remote access
+        assert!(ganesha_config.contains("Squash = No_Root_Squash"));
+        assert!(ganesha_config.contains("Access_Type = RW"));
+        assert!(ganesha_config.contains("SecType = none"));
+    }
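+
+    // For reference, each export above renders as a block of roughly this
+    // shape (illustrative only — inferred from the assertions here and in
+    // `test_nfs_export_ganesha_block` below, not copied from the generator):
+    //
+    //   EXPORT {
+    //       Export_Id = 1;
+    //       Path = /home/user/project;
+    //       Pseudo = /src;
+    //       Access_Type = RW;
+    //       Squash = No_Root_Squash;
+    //       SecType = none;
+    //       FSAL { name = VFS; }
+    //   }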
+
+    #[test]
+    fn test_export_id_auto_increment() {
+        let mut config = NfsServerConfig::new(12050);
+        config.add_export(PathBuf::from("/path1"), "/export1".to_string());
+        config.add_export(PathBuf::from("/path2"), "/export2".to_string());
+        config.add_export(PathBuf::from("/path3"), "/export3".to_string());
+
+        assert_eq!(config.exports.len(), 3);
+        assert_eq!(config.exports[0].export_id, 1);
+        assert_eq!(config.exports[1].export_id, 2);
+        assert_eq!(config.exports[2].export_id, 3);
+    }
+
+    #[test]
+    fn test_nfs_export_ganesha_block() {
+        let export = NfsExport::new(
+            42,
+            PathBuf::from("/var/lib/docker/volumes/test/_data"),
+            "/state".to_string(),
+        );
+
+        let block = export.to_ganesha_config();
+
+        assert!(block.contains("EXPORT {"));
+        assert!(block.contains("Export_Id = 42"));
+        assert!(block.contains("Path = /var/lib/docker/volumes/test/_data"));
+        assert!(block.contains("Pseudo = /state"));
+        assert!(block.contains("FSAL {"));
+        assert!(block.contains("name = VFS"));
+    }
+
+    #[test]
+    fn test_verbose_logging_config() {
+        let config = NfsServerConfig::new(12050).with_verbose(true);
+        let ganesha_config = config.generate_ganesha_config();
+
+        assert!(ganesha_config.contains("Default_Log_Level = DEBUG"));
+    }
+
+    #[test]
+    fn test_normal_logging_config() {
+        let config = NfsServerConfig::new(12050);
+        let ganesha_config = config.generate_ganesha_config();
+
+        assert!(ganesha_config.contains("Default_Log_Level = EVENT"));
+    }
+}
+
+mod version_compatibility_tests {
+    use avocado_cli::utils::remote::is_version_compatible;
+
+    #[test]
+    fn test_equal_versions() {
+        assert!(is_version_compatible("0.20.0", "0.20.0"));
+        assert!(is_version_compatible("1.0.0", "1.0.0"));
+        assert!(is_version_compatible("2.5.10", "2.5.10"));
+    }
+
+    #[test]
+    fn test_remote_newer_patch() {
+        assert!(is_version_compatible("0.20.0", "0.20.1"));
+        assert!(is_version_compatible("0.20.0", "0.20.99"));
+    }
+
+    #[test]
+    fn test_remote_newer_minor() {
+        assert!(is_version_compatible("0.20.0", "0.21.0"));
+        assert!(is_version_compatible("0.20.5", "0.25.0"));
+    }
+
+    #[test]
+    fn test_remote_newer_major() {
+        assert!(is_version_compatible("0.20.0", "1.0.0"));
+        assert!(is_version_compatible("1.5.3", "2.0.0"));
+    }
+
+    #[test]
+    fn test_remote_older_patch() {
+        assert!(!is_version_compatible("0.20.1", "0.20.0"));
+        assert!(!is_version_compatible("0.20.5", "0.20.4"));
+    }
+
+    #[test]
+    fn test_remote_older_minor() {
+        assert!(!is_version_compatible("0.21.0", "0.20.0"));
+        assert!(!is_version_compatible("0.25.0", "0.20.5"));
+    }
+
+    #[test]
+    fn test_remote_older_major() {
+        assert!(!is_version_compatible("1.0.0", "0.20.0"));
+        assert!(!is_version_compatible("2.0.0", "1.5.3"));
+    }
+
+    #[test]
+    fn test_prerelease_versions() {
+        // Pre-release suffix should be stripped for comparison
+        assert!(is_version_compatible("0.20.0-beta", "0.20.0"));
+        assert!(is_version_compatible("0.20.0", "0.20.1-rc1"));
+    }
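+
+    // Net contract exercised above: is_version_compatible(local, remote) holds
+    // exactly when remote >= local under semantic-version ordering, with any
+    // pre-release suffix ignored during the comparison.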
+}
+
+mod remote_host_tests {
+    use avocado_cli::utils::remote::RemoteHost;
+
+    #[test]
+    fn test_parse_standard_format() {
+        let host = RemoteHost::parse("jschneck@riptide.local").unwrap();
+        assert_eq!(host.user, Some("jschneck".to_string()));
+        assert_eq!(host.host, "riptide.local");
+        assert_eq!(host.ssh_target(), "jschneck@riptide.local");
+    }
+
+    #[test]
+    fn test_parse_ip_address() {
+        let host = RemoteHost::parse("root@192.168.1.100").unwrap();
+        assert_eq!(host.user, Some("root".to_string()));
+        assert_eq!(host.host, "192.168.1.100");
+    }
+
+    #[test]
+    fn test_parse_ipv6_address() {
+        let host = RemoteHost::parse("user@::1").unwrap();
+        assert_eq!(host.user, Some("user".to_string()));
+        assert_eq!(host.host, "::1");
+    }
+
+    #[test]
+    fn test_parse_hostname_with_domain() {
+        let host = RemoteHost::parse("admin@server.example.com").unwrap();
+        assert_eq!(host.user, Some("admin".to_string()));
+        assert_eq!(host.host, "server.example.com");
+    }
+
+    #[test]
+    fn test_parse_hostname_only() {
+        // SSH can infer user when only hostname is provided
+        let host = RemoteHost::parse("hostname-only").unwrap();
+        assert_eq!(host.user, None);
+        assert_eq!(host.host, "hostname-only");
+        assert_eq!(host.ssh_target(), "hostname-only");
+    }
+
+    #[test]
+    fn test_parse_empty_username() {
+        let result = RemoteHost::parse("@hostname");
+        assert!(result.is_err());
+        let err = result.unwrap_err().to_string();
+        assert!(err.contains("Username"));
+    }
+
+    #[test]
+    fn test_parse_empty_hostname() {
+        let result = RemoteHost::parse("user@");
+        assert!(result.is_err());
+        let err = result.unwrap_err().to_string();
+        assert!(err.contains("Hostname"));
+    }
+
+    #[test]
+    fn test_parse_multiple_at_symbols() {
+        // Should take first @ as separator
+        let host = RemoteHost::parse("user@host@domain").unwrap();
+        assert_eq!(host.user, Some("user".to_string()));
+        assert_eq!(host.host, "host@domain");
+    }
+}
+
+mod port_selection_tests {
+    use avocado_cli::utils::nfs_server::{find_available_port, is_port_available};
+
+    #[test]
+    fn test_find_port_in_high_range() {
+        // Use high ports that are likely available
+        let port = find_available_port(60000..=60010);
+        assert!(port.is_some());
+        let p = port.unwrap();
+        assert!(p >= 60000 && p <= 60010);
+    }
+
+    #[test]
+    fn test_is_port_available_high_port() {
+        // High port should generally be available
+        assert!(is_port_available(59999));
+    }
+
+    #[test]
+    fn test_port_becomes_unavailable_after_bind() {
+        use std::net::TcpListener;
+
+        // Bind to a port
+        let listener = TcpListener::bind("127.0.0.1:0").unwrap();
+        let port = listener.local_addr().unwrap().port();
+
+        // Port should no longer be available
+        assert!(!is_port_available(port));
+
+        // After dropping, it should become available again
+        drop(listener);
+        // Note: There may be a brief delay before the port is released
+    }
+}
+
+mod shell_escape_tests {
+    // These test the shell_escape function used in runs_on
+
+    fn shell_escape(s: &str) -> String {
+        format!("'{}'", s.replace('\'', "'\\''"))
+    }
+
+    #[test]
+    fn test_simple_string() {
+        assert_eq!(shell_escape("hello"), "'hello'");
+    }
+
+    #[test]
+    fn test_string_with_spaces() {
+        assert_eq!(shell_escape("hello world"), "'hello world'");
+    }
+
+    #[test]
+    fn test_string_with_single_quote() {
+        assert_eq!(shell_escape("it's"), "'it'\\''s'");
+    }
+
+    #[test]
+    fn test_command_injection_attempt() {
+        // Ensure shell metacharacters are safely escaped
+        let dangerous = "$(rm -rf /)";
+        let escaped = shell_escape(dangerous);
+        assert_eq!(escaped, "'$(rm -rf /)'");
+    }
+
+    #[test]
+    fn test_newlines() {
+        let multiline = "line1\nline2";
+        let escaped = shell_escape(multiline);
+        assert!(escaped.starts_with("'"));
+        assert!(escaped.ends_with("'"));
+    }
+}
+
+// =============================================================================
+// CLI Flag Tests (run without network)
+// =============================================================================
+
+mod cli_flag_tests {
+    use crate::common;
+
+    #[test]
+    fn test_runs_on_flag_appears_in_help() {
+        let result = common::run_cli(&["--help"]);
+        assert!(result.success);
+        assert!(result.stdout.contains("--runs-on"));
+        assert!(result.stdout.contains("USER@HOST"));
+    }
+
+    #[test]
+    fn test_nfs_port_flag_appears_in_help() {
+        let result = common::run_cli(&["--help"]);
+        assert!(result.success);
+        assert!(result.stdout.contains("--nfs-port"));
+    }
+
+    #[test]
+    fn test_runs_on_requires_value() {
+        let result = common::run_cli(&["--runs-on"]);
+        assert!(!result.success);
+        // Should error about missing value
+        assert!(
+            result.stderr.contains("value is required")
+                || result.stderr.contains("requires a value")
+                || result.stderr.contains("argument requires"),
+            "Expected error about missing value, got: {}",
+            result.stderr
+        );
+    }
+
+    #[test]
+    fn test_nfs_port_requires_value() {
+        let result = common::run_cli(&["--nfs-port"]);
+        assert!(!result.success);
+        assert!(
+            result.stderr.contains("value is required")
+                || result.stderr.contains("requires a value")
+                || result.stderr.contains("argument requires"),
+            "Expected error about missing value, got: {}",
+            result.stderr
+        );
+    }
+
+    #[test]
+    fn test_nfs_port_requires_number() {
+        let result = common::run_cli(&["--nfs-port", "not-a-number", "--help"]);
+        assert!(!result.success);
+        assert!(result.stderr.contains("invalid") || result.stderr.contains("number"));
+    }
+}
+
+// =============================================================================
+// Integration Tests (require remote host)
+// =============================================================================
+
+// =============================================================================
+// Localhost SSH Tests
+// =============================================================================
+
+#[test]
+#[ignore = "Requires localhost SSH key-based auth"]
+fn test_localhost_ssh_connectivity() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured. Run: ssh-keygen && cat ~/.ssh/id_*.pub >> ~/.ssh/authorized_keys");
+        return;
+    }
+
+    let host = get_test_host();
+
+    // Use a simple command that should work if SSH is configured
+    // Pass command as single string like other tests
+    let result = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            "-o",
+            "ConnectTimeout=5",
+            &host,
+            "echo ok",
+        ])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "SSH connectivity to '{}' failed: {}",
+        host,
+        String::from_utf8_lossy(&result.stderr)
+    );
+
+    let stdout = String::from_utf8_lossy(&result.stdout);
+    assert!(stdout.trim() == "ok", "Expected 'ok', got: {}", stdout);
+}
+
+#[test]
+#[ignore = "Requires localhost SSH and avocado installed"]
+fn test_localhost_cli_version_check() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+
+    // Check if avocado is available on localhost
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, "avocado --version"])
+        .output()
+        .expect("Failed to execute ssh");
+
+    if !result.status.success() {
+        eprintln!("Skipping: avocado CLI not installed on localhost");
+        return;
+    }
+
+    let version_output = String::from_utf8_lossy(&result.stdout);
+    assert!(
+        version_output.contains("avocado"),
+        "Version output should contain 'avocado': {}",
+        version_output
+    );
+
+    // Extract version number
+    let version = version_output
+        .split_whitespace()
+        .last()
+        .unwrap_or("unknown");
+
+    // Should be a valid semver-like version
+    assert!(
+        version.contains('.'),
+        "Version should contain a dot: {}",
+        version
+    );
+}
+
+#[test]
+#[ignore = "Requires localhost SSH and Docker"]
+fn test_localhost_docker_via_ssh() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+
+    let result = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "docker",
+            "info",
+            "--format",
+            "{{.ServerVersion}}",
+        ])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "Docker not available via SSH to '{}': {}",
+        host,
+        String::from_utf8_lossy(&result.stderr)
+    );
+}
+
+#[test]
+#[ignore = "Requires localhost SSH"]
+fn test_localhost_file_transfer_via_ssh() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let test_content = format!("test-content-{}", uuid::Uuid::new_v4());
+    let test_file = temp_dir.join("test.txt");
+
+    // Write a file locally
+    fs::write(&test_file, &test_content).expect("Failed to write test file");
+
+    // Read it back via SSH (simulates NFS-like access pattern)
+    let result = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "cat",
+            test_file.to_str().unwrap(),
+        ])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(result.status.success(), "Failed to read file via SSH");
+
+    let read_content = String::from_utf8_lossy(&result.stdout);
+    assert_eq!(read_content.trim(), test_content, "File content mismatch");
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+#[ignore = "Requires localhost SSH"]
+fn test_localhost_write_file_via_ssh() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    // Use a simple alphanumeric content to avoid escaping issues
+    let test_content = format!("remotewrite{}", &uuid::Uuid::new_v4().to_string()[..8]);
+    let test_file = temp_dir.join("remote-created.txt");
+
+    // Small delay to avoid SSH connection rate limiting
+    std::thread::sleep(std::time::Duration::from_millis(100));
+
+    // Write file via SSH - pass command as a single string
+    // Use -o ServerAliveInterval to keep connection stable
+    let write_cmd = format!("printf '{}' > '{}'", test_content, test_file.display());
+    let result = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            "-o",
+            "ServerAliveInterval=5",
+            &host,
+            &write_cmd,
+        ])
+        .output()
+        .expect("Failed to execute ssh");
+
+    if !result.status.success() {
+        let stderr = String::from_utf8_lossy(&result.stderr);
+        // Connection closed errors are often transient - skip rather than fail
+        if stderr.contains("Connection closed") || stderr.contains("connection reset") {
+            eprintln!("Skipping due to transient SSH error: {}", stderr);
+            fs::remove_dir_all(&temp_dir).ok();
+            return;
+        }
+        panic!("Failed to write file via SSH: {}", stderr);
+    }
+
+    // Small delay to ensure file system sync
+    std::thread::sleep(std::time::Duration::from_millis(100));
+
+    // Read the file locally
+    assert!(
+        test_file.exists(),
+        "File should exist: {}",
+        test_file.display()
+    );
+    let read_content = fs::read_to_string(&test_file).expect("Failed to read file locally");
+    assert_eq!(
+        read_content, test_content,
+        "File content mismatch after remote write"
+    );
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+#[ignore = "Requires localhost SSH"]
+#[cfg(unix)]
+fn test_localhost_permission_preservation() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let test_file = temp_dir.join("perms-test.txt");
+
+    // Create file with specific permissions
+    fs::write(&test_file, "test").expect("Failed to write");
+    fs::set_permissions(&test_file, fs::Permissions::from_mode(0o755))
+        .expect("Failed to set permissions");
+
+    // Check permissions via SSH - pass as single command string
+    let stat_cmd = format!("stat -c '%a' '{}'", test_file.display());
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &stat_cmd])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "stat command failed: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+    let mode = String::from_utf8_lossy(&result.stdout);
+    assert_eq!(mode.trim(), "755", "Permission should be preserved as 755");
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+#[ignore = "Requires localhost SSH"]
+#[cfg(unix)]
+fn test_localhost_ownership_preservation() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let test_file = temp_dir.join("owner-test.txt");
+
+    // Create file
+    fs::write(&test_file, "test").expect("Failed to write");
+
+    // Get current user's UID
+    let local_uid = unsafe { libc::getuid() };
+
+    // Check owner via SSH - pass as single command string
+    let stat_cmd = format!("stat -c '%u' '{}'", test_file.display());
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &stat_cmd])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "stat command failed: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+    let remote_uid: u32 = String::from_utf8_lossy(&result.stdout)
+        .trim()
+        .parse()
+        .expect("Failed to parse UID");
+
+    assert_eq!(
+        remote_uid, local_uid,
+        "Owner UID should be preserved (local: {}, remote: {})",
+        local_uid, remote_uid
+    );
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+#[ignore = "Requires NFS-Ganesha installed"]
+fn test_ganesha_available() {
+    let result = std::process::Command::new("which")
+        .arg("ganesha.nfsd")
+        .output()
+        .expect("Failed to check for ganesha");
+
+    assert!(
+        result.status.success(),
+        "ganesha.nfsd not found. Install with: apt install nfs-ganesha nfs-ganesha-vfs"
+    );
+}
+
+#[test]
+#[ignore = "Requires localhost SSH and Docker"]
+fn test_localhost_docker_volume_create() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let volume_name = format!("avocado-test-{}", &uuid::Uuid::new_v4().to_string()[..8]);
+
+    // Create a simple Docker volume via SSH
+    let result = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "docker",
+            "volume",
+            "create",
+            &volume_name,
+        ])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "Failed to create Docker volume via SSH: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+
+    // Clean up the volume
+    let _ = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "docker",
+            "volume",
+            "rm",
+            "-f",
+            &volume_name,
+        ])
+        .output();
+}
+
+#[test]
+#[ignore = "Requires localhost SSH and Docker"]
+fn test_localhost_container_run_with_volume() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let test_content = format!("volume-test-{}", uuid::Uuid::new_v4());
+    let test_file = temp_dir.join("container-test.txt");
+
+    // Run a container via SSH that writes to a bind-mounted directory
+    // Pass the entire docker command as a single string to SSH to avoid escaping issues
+    let docker_cmd = format!(
+        "docker run --rm -v '{}:/mnt' alpine sh -c 'echo {} > /mnt/container-test.txt'",
+        temp_dir.display(),
+        test_content
+    );
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &docker_cmd])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "Failed to run container via SSH: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+
+    // Verify the file was created locally
+    assert!(test_file.exists(), "Container should have created the file");
+    let read_content = fs::read_to_string(&test_file).expect("Failed to read file");
+    assert_eq!(read_content.trim(), test_content);
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+#[ignore = "Requires localhost SSH"]
+#[cfg(unix)]
+fn test_localhost_signing_socket_tunnel() {
+    use std::os::unix::net::UnixListener;
+
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let local_socket_path = temp_dir.join("local-sign.sock");
+    let remote_socket_path = format!(
+        "/tmp/avocado-test-sign-{}.sock",
+        &uuid::Uuid::new_v4().to_string()[..8]
+    );
+
+    // Create a local Unix socket to forward
+    let _listener = UnixListener::bind(&local_socket_path).expect("Failed to create local socket");
+
+    // Start SSH tunnel in background (forward remote socket to local)
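+    // `ssh -R <remote.sock>:<local.sock>` asks the remote sshd to listen on the
+    // remote Unix socket and relay each connection back to the local listener —
+    // the same mechanism `RunsOnContext::setup_signing_tunnel` builds on.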
+    let mut tunnel = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            "-o",
+            "ExitOnForwardFailure=yes",
+            "-N", // Don't execute command
+            "-R",
+            &format!("{}:{}", remote_socket_path, local_socket_path.display()),
+            &host,
+        ])
+        .spawn()
+        .expect("Failed to start SSH tunnel");
+
+    // Give it time to establish
+    std::thread::sleep(std::time::Duration::from_millis(500));
+
+    // Verify the tunnel process is running
+    assert!(
+        tunnel.try_wait().unwrap().is_none(),
+        "SSH tunnel should still be running"
+    );
+
+    // Check if remote socket exists
+    let check = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "test",
+            "-S",
+            &remote_socket_path,
+        ])
+        .output()
+        .expect("Failed to check remote socket");
+
+    // Clean up
+    let _ = tunnel.kill();
+    let _ = std::process::Command::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            &host,
+            "rm",
+            "-f",
+            &remote_socket_path,
+        ])
+        .output();
+    fs::remove_dir_all(&temp_dir).ok();
+
+    assert!(
+        check.status.success(),
+        "Remote socket should exist at {}",
+        remote_socket_path
+    );
+}
+
+#[test]
+#[ignore = "Requires localhost SSH and Docker"]
+fn test_localhost_container_reads_local_file() {
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let test_content = format!("local-file-{}", uuid::Uuid::new_v4());
+    let test_file = temp_dir.join("local-data.txt");
+
+    // Create file locally first
+    fs::write(&test_file, &test_content).expect("Failed to write local file");
+
+    // Run container via SSH that reads the local file (simulates src_dir access)
+    // Pass the entire docker command as a single string
+    let docker_cmd = format!(
+        "docker run --rm -v '{}:/opt/src:ro' alpine cat /opt/src/local-data.txt",
+        temp_dir.display()
+    );
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &docker_cmd])
+        .output()
+        .expect("Failed to execute ssh");
+
+    assert!(
+        result.status.success(),
+        "Container failed to read local file: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+
+    let output = String::from_utf8_lossy(&result.stdout);
+    assert_eq!(
+        output.trim(),
+        test_content,
+        "Container should read local file content"
+    );
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+#[test]
+fn test_concurrent_port_allocation() {
+    // Test that multiple runs-on sessions can run concurrently by allocating
+    // distinct ports from a shared range (a high 50100-50110 range is used
+    // here so the test does not collide with real services)
+
+    use avocado_cli::utils::nfs_server::{find_available_port, is_port_available};
+    use std::net::TcpListener;
+
+    // Find first available port
+    let port1 = find_available_port(50100..=50110);
+    assert!(port1.is_some(), "Should find first port");
+
+    // Bind to it to simulate it being in use
+    let _listener1 = TcpListener::bind(format!("0.0.0.0:{}", port1.unwrap())).unwrap();
+
+    // Find another port - should get a different one
+    let port2 = find_available_port(50100..=50110);
+    assert!(port2.is_some(), "Should find second port");
+    assert_ne!(
+        port1, port2,
+        "Should find different port when first is in use"
+    );
+
+    // Original port should now be unavailable
+    assert!(
+        !is_port_available(port1.unwrap()),
+        "First port should be in use"
+    );
+}
+
+// =============================================================================
+// Full Workflow Integration Tests
+// =============================================================================
+
+#[test]
+#[ignore = "Requires localhost SSH and Docker"]
+fn test_full_localhost_workflow() {
+    // This is a comprehensive test simulating the runs-on workflow using localhost
+
+    if !localhost_ssh_available() {
+        eprintln!("Skipping: localhost SSH not configured");
+        return;
+    }
+
+    let host = get_test_host();
+    let temp_dir = common::create_temp_dir();
+    let src_dir = temp_dir.join("src");
+    let state_dir = temp_dir.join("state");
+
+    fs::create_dir_all(&src_dir).expect("Failed to create src dir");
+    fs::create_dir_all(&state_dir).expect("Failed to create state dir");
+
+    // Step 1: Create test files in "src" directory
+    let src_content = format!("source-file-{}", uuid::Uuid::new_v4());
+    fs::write(src_dir.join("source.txt"), &src_content).expect("Failed to write source file");
+
+    // Step 2: Run container via SSH that:
+    //   - Reads from /opt/src (simulating src_dir mount)
+    //   - Writes to /opt/_avocado (simulating state mount)
+    // Pass entire docker command as single string to avoid shell escaping issues
+    let state_content = format!("state-file-{}", uuid::Uuid::new_v4());
+    let docker_cmd = format!(
+        "docker run --rm -v '{}:/opt/src:ro' -v '{}:/opt/_avocado:rw' alpine sh -c 'cat /opt/src/source.txt && echo {} > /opt/_avocado/state.txt'",
+        src_dir.display(),
+        state_dir.display(),
+        state_content
+    );
+    let result = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &docker_cmd])
+        .output()
+        .expect("Failed to run container");
+
+    assert!(
+        result.status.success(),
+        "Container failed: {}",
+        String::from_utf8_lossy(&result.stderr)
+    );
+
+    // Step 3: Verify container could read source file
+    let stdout = String::from_utf8_lossy(&result.stdout);
+    assert!(
+        stdout.contains(&src_content),
+        "Container should read src file content"
+    );
+
+    // Step 4: Verify state file was written locally
+    let state_file = state_dir.join("state.txt");
+    assert!(
+        state_file.exists(),
+        "State file should exist after container run"
+    );
+    let read_state = fs::read_to_string(&state_file).expect("Failed to read state file");
+    assert_eq!(
+        read_state.trim(),
+        state_content,
+        "State content should match"
+    );
+
+    // Step 5: Test bidirectional - modify local, verify in container
+    let modified_content = format!("modified-{}", uuid::Uuid::new_v4());
+    fs::write(state_dir.join("modified.txt"), &modified_content).expect("Failed to write");
+
+    let verify_cmd = format!(
+        "docker run --rm -v '{}:/opt/_avocado:ro' alpine cat /opt/_avocado/modified.txt",
+        state_dir.display()
+    );
+    let verify = std::process::Command::new("ssh")
+        .args(["-o", "BatchMode=yes", &host, &verify_cmd])
+        .output()
+        .expect("Failed to verify");
+
+    assert!(verify.status.success());
+    let verify_output = String::from_utf8_lossy(&verify.stdout);
+    assert_eq!(
+        verify_output.trim(),
+        modified_content,
+        "Should read locally modified file"
+    );
+
+    fs::remove_dir_all(&temp_dir).ok();
+}
+
+// =============================================================================
+// Error Handling Tests
+// =============================================================================
+
+mod error_handling_tests {
+    use avocado_cli::utils::remote::RemoteHost;
+
+    #[test]
+    fn test_hostname_only_is_valid() {
+        // SSH can infer the user from the environment when only hostname is provided
+        let result = RemoteHost::parse("hostname");
+        assert!(result.is_ok());
+        let host = result.unwrap();
+        assert_eq!(host.user, None);
+        assert_eq!(host.host, "hostname");
+    }
+
+    #[test]
+    fn test_empty_string_error() {
+        let result = RemoteHost::parse("");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_empty_user_descriptive_error() {
+        let result = RemoteHost::parse("@hostname");
+        let err = result.unwrap_err();
+        let msg = err.to_string();
+
+        // Should provide helpful error message
+        assert!(msg.contains("Username") || msg.contains("empty"));
+    }
+}
+
+// =============================================================================
+// Permission and Security Tests
+// =============================================================================
+
+mod security_tests {
+    #[test]
+    fn test_nfs_config_has_proper_security_settings() {
+        use avocado_cli::utils::nfs_server::NfsServerConfig;
+        use std::path::PathBuf;
+
+        let mut config = NfsServerConfig::new(12050);
+        config.add_export(PathBuf::from("/test"), "/test".to_string());
+
+        let ganesha_config = config.generate_ganesha_config();
+
+        // Verify security-relevant settings
+        assert!(
+            ganesha_config.contains("Squash = No_Root_Squash"),
+            "Should allow root access for proper UID mapping"
+        );
+        assert!(
+            ganesha_config.contains("Anonymous_uid = 0"),
+            "Anonymous should map to root"
+        );
+        assert!(
+            ganesha_config.contains("Anonymous_gid = 0"),
+            "Anonymous should map to root group"
+        );
+        assert!(
+            ganesha_config.contains("Only_Numeric_Owners = true"),
+            "Should use numeric owners for cross-system compatibility"
+        );
+    }
+
+    #[test]
+    fn test_nfs_config_binds_to_all_interfaces() {
+        use avocado_cli::utils::nfs_server::NfsServerConfig;
+
+        let config = NfsServerConfig::new(12050);
+        let ganesha_config = config.generate_ganesha_config();
+
+        assert!(
+            ganesha_config.contains("Bind_addr = 0.0.0.0"),
+            "Should bind to all interfaces for remote access"
+        );
+    }
+}
+
+// =============================================================================
+// Docker Volume Command Tests
+// =============================================================================
+
+mod docker_volume_tests {
+    #[test]
+    fn test_nfs_volume_create_command_format() {
+        let volume_name = "avocado-src-abc123";
+        let nfs_host = "192.168.1.100";
+        let nfs_port = 12050u16;
+        let export_path = "/src";
+
+        let command = format!(
+            "docker volume create \
+            --driver local \
+            --opt type=nfs \
+            --opt o=addr={},rw,nfsvers=4,port={} \
+            --opt device=:{} \
+            {}",
+            nfs_host, nfs_port, export_path, volume_name
+        );
+
+        assert!(command.contains("--driver local"));
+        assert!(command.contains("type=nfs"));
+        assert!(command.contains(&format!("addr={}", nfs_host)));
+        assert!(command.contains(&format!("port={}", nfs_port)));
+        assert!(command.contains(&format!("device=:{}", export_path)));
+        assert!(command.contains(volume_name));
+    }
+
+    #[test]
+    fn test_nfs_volume_remove_command_format() {
+        let volume_name = "avocado-state-def456";
+
+        let command = format!("docker volume rm -f {}", volume_name);
+
+        assert!(command.contains("volume rm"));
+        assert!(command.contains("-f"));
+        assert!(command.contains(volume_name));
+    }
+}
+
+// =============================================================================
+// Container Command Tests
+// =============================================================================
+
+mod container_command_tests {
+    use std::collections::HashMap;
+
+    fn build_container_command(
+        container_tool: &str,
+        src_volume: &str,
+        state_volume: &str,
+        image: &str,
+        command: &str,
+        env_vars: &HashMap<String, String>,
+    ) -> String {
+        let mut cmd = format!(
+            "{} run --rm \
+            -v {}:/opt/src:rw \
+            -v {}:/opt/_avocado:rw \
+            --device /dev/fuse \
+            --cap-add SYS_ADMIN",
+            container_tool, src_volume, state_volume
+        );
+
+        for (key, value) in env_vars {
+            cmd.push_str(&format!(" -e {}={}", key, value));
+        }
+
+        cmd.push_str(&format!(" {} bash -c '{}'", image, command));
+        cmd
+    }
+
+    #[test]
+    fn test_container_command_has_required_mounts() {
+        let cmd = build_container_command(
+            "docker",
+            "avocado-src-123",
+            "avocado-state-123",
+            "ghcr.io/avocado-linux/sdk:latest",
+            "ls -la",
+            &HashMap::new(),
+        );
+
+        assert!(cmd.contains("-v avocado-src-123:/opt/src:rw"));
+        assert!(cmd.contains("-v avocado-state-123:/opt/_avocado:rw"));
+    }
+
+    #[test]
+    fn test_container_command_has_fuse_device() {
+        let cmd =
+            build_container_command("docker", "src", "state", "image", "cmd", &HashMap::new());
+
+        assert!(cmd.contains("--device /dev/fuse"));
+    }
+
+    #[test]
+    fn test_container_command_has_sys_admin_cap() {
+        let cmd =
+            build_container_command("docker", "src", "state", "image", "cmd", &HashMap::new());
+
+        assert!(cmd.contains("--cap-add SYS_ADMIN"));
+    }
+
+    #[test]
+    fn test_container_command_includes_env_vars() {
+        let mut env = HashMap::new();
+        env.insert("AVOCADO_TARGET".to_string(), "qemux86-64".to_string());
+        env.insert("CUSTOM_VAR".to_string(), "custom_value".to_string());
+
+        let cmd = build_container_command("docker", "src", "state", "image", "cmd", &env);
+
+        assert!(
+            cmd.contains("-e AVOCADO_TARGET=qemux86-64")
+                || cmd.contains("-e CUSTOM_VAR=custom_value")
+        );
+    }
+
+    #[test]
+    fn test_container_command_supports_podman() {
+        let cmd =
+            build_container_command("podman", "src", "state", "image", "cmd", &HashMap::new());
+
+        assert!(cmd.starts_with("podman run"));
+    }
+}

From 9679fe5c66ef09e758e9526eef75b7e0b1f0e25c Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 16:06:20 -0500
Subject: [PATCH 03/20] add clean --force

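Adds a `--force`/`-f` flag to `avocado clean`: before the project volume
is removed, any containers still using it are killed and removed, so the
volume removal cannot be blocked by lingering containers.

Example:

    avocado clean --force
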
---
 src/commands/clean.rs | 34 ++++++++++++++++++++++++++++------
 src/main.rs           |  7 ++++++-
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/src/commands/clean.rs b/src/commands/clean.rs
index ee95a3b..40d0d3c 100644
--- a/src/commands/clean.rs
+++ b/src/commands/clean.rs
@@ -26,6 +26,8 @@ pub struct CleanCommand {
     config_path: Option<String>,
     /// Target architecture (needed for --stamps)
     target: Option<String>,
+    /// Force removal by killing and removing containers using the volume
+    force: bool,
 }
 
 impl CleanCommand {
@@ -50,6 +52,7 @@ impl CleanCommand {
             stamps: false,
             config_path: None,
             target: None,
+            force: false,
        }
    }
 
@@ -71,6 +74,12 @@ impl CleanCommand {
         self
     }
 
+    /// Set whether to force removal by killing containers
+    pub fn with_force(mut self, force: bool) -> Self {
+        self.force = force;
+        self
+    }
+
     /// Executes the clean command, removing volumes, state files, and optionally legacy directories.
     ///
     /// # Returns
@@ -174,12 +183,25 @@
 fi
 ");
             }
 
-            volume_manager
-                .remove_volume(&volume_state.volume_name)
-                .await
-                .with_context(|| {
-                    format!("Failed to remove volume: {}", volume_state.volume_name)
-                })?;
+            if self.force {
+                // Force removal: kill and remove all containers using the volume first
+                volume_manager
+                    .force_remove_volume(&volume_state.volume_name)
+                    .await
+                    .with_context(|| {
+                        format!(
+                            "Failed to force remove volume: {}",
+                            volume_state.volume_name
+                        )
+                    })?;
+            } else {
+                volume_manager
+                    .remove_volume(&volume_state.volume_name)
+                    .await
+                    .with_context(|| {
+                        format!("Failed to remove volume: {}", volume_state.volume_name)
+                    })?;
+            }
             print_success(
                 &format!("Removed docker volume: {}", volume_state.volume_name),
diff --git a/src/main.rs b/src/main.rs
index 99bcdb0..d5f519d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -121,6 +121,9 @@ enum Commands {
         /// Target architecture (required when --stamps is used)
         #[arg(long)]
         target: Option<String>,
+        /// Force removal by killing and removing containers using the volume
+        #[arg(short, long)]
+        force: bool,
     },
     /// Install all components (SDK, extensions, and runtime dependencies)
     Install {
@@ -730,12 +733,14 @@ async fn main() -> Result<()> {
             stamps,
             config,
             target,
+            force,
         } => {
             let clean_cmd =
                 CleanCommand::new(directory, !skip_volumes, Some(container_tool), verbose)
                     .with_stamps(stamps)
                     .with_config_path(config)
-                    .with_target(target.or(cli.target.clone()));
+                    .with_target(target.or(cli.target.clone()))
+                    .with_force(force);
             clean_cmd.execute().await?;
             Ok(())
         }

From ec93d43201253b709dbd782f3ac90c0881b96326 Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 16:06:27 -0500
Subject: [PATCH 04/20] clean up warnings

---
 src/commands/ext/build.rs         | 17 +------
 src/commands/runtime/provision.rs |  2 +-
 src/utils/nfs_server.rs           |  4 ++
 src/utils/volume.rs               | 78 +++++++++++++++++++++++++++++++
 4 files changed, 84 insertions(+), 17 deletions(-)

diff --git a/src/commands/ext/build.rs b/src/commands/ext/build.rs
index 8fd8afa..09aad78 100644
--- a/src/commands/ext/build.rs
+++ b/src/commands/ext/build.rs
@@ -23,6 +23,7 @@ enum OverlayMode {
     Opaque, // cp -r (replace directory contents)
 }
 
+#[derive(Default)]
 pub struct ExtBuildCommand {
     pub extension: String,
     pub config_path: String,
@@ -29,28 +30,12 @@ pub struct ExtBuildCommand {
     pub verbose: bool,
     pub target: Option<String>,
     pub container_args: Option<Vec<String>>,
     pub dnf_args: Option<Vec<String>>,
     pub no_stamps: bool,
     pub runs_on: Option<String>,
     pub nfs_port: Option<u16>,
 }
 
-impl Default for ExtBuildCommand {
-    fn default() -> Self {
-        Self {
-            extension: String::new(),
-            config_path: String::new(),
-            verbose: false,
-            target: None,
-            container_args: None,
-            dnf_args: None,
-            no_stamps: false,
-            runs_on: None,
-            nfs_port: None,
-        }
-    }
-}
-
 impl ExtBuildCommand {
     pub fn new(
         extension: String,
diff --git a/src/commands/runtime/provision.rs b/src/commands/runtime/provision.rs
index fe1af3d..15b1c4a 100644
--- a/src/commands/runtime/provision.rs
+++ b/src/commands/runtime/provision.rs
@@ -721,7 +721,7 @@ avocado-provision-{} {}
             .verbose(self.config.verbose);
 
         let success = container_helper
-            .run_simple_command(&container_image, &copy_script, true)
+            .run_simple_command(container_image, &copy_script, true)
             .await?;
 
         if !success {
diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs
index 9f11fcb..eba13b9 100644
--- a/src/utils/nfs_server.rs
+++ b/src/utils/nfs_server.rs
@@ -99,6 +99,7 @@ impl NfsServerConfig {
     }
 
     /// Add an export to the configuration
+    #[allow(dead_code)]
     pub fn add_export(&mut self, local_path: PathBuf, pseudo_path: String) -> &mut Self {
         let export_id = (self.exports.len() + 1) as u32;
         self.exports
@@ -211,6 +212,7 @@ impl NfsServer {
     /// 1. Generate the Ganesha configuration file
     /// 2. Start ganesha.nfsd in foreground mode
     /// 3. Return the running server handle
+    #[allow(dead_code)]
     pub async fn start(config: NfsServerConfig) -> Result<Self> {
         // Verify ganesha.nfsd is available
         let ganesha_check = AsyncCommand::new("which")
@@ -557,10 +559,12 @@ impl Drop for NfsServer {
 }
 
 /// Builder for creating NFS server configurations
+#[allow(dead_code)]
 pub struct NfsServerBuilder {
     config: NfsServerConfig,
 }
 
+#[allow(dead_code)]
 impl NfsServerBuilder {
     /// Create a new builder with auto-selected port
     pub fn new() -> Result<Self> {
diff --git a/src/utils/volume.rs b/src/utils/volume.rs
index f53ad99..23d78d0 100644
--- a/src/utils/volume.rs
+++ b/src/utils/volume.rs
@@ -180,6 +180,84 @@ impl VolumeManager {
         Ok(())
     }
+
+    /// Force remove a docker volume by first stopping and removing all containers using it
+    pub async fn force_remove_volume(&self, volume_name: &str) -> Result<()> {
+        // Get containers using this volume
+        let containers = self.get_containers_using_volume(volume_name).await?;
+
+        if !containers.is_empty() {
+            if self.verbose {
+                print_info(
+                    &format!(
+                        "Found {} container(s) using volume, stopping and removing...",
+                        containers.len()
+                    ),
+                    OutputLevel::Normal,
+                );
+            }
+
+            for container_id in &containers {
+                // Kill the container (faster than stop)
+                let _ = AsyncCommand::new(&self.container_tool)
+                    .args(["kill", container_id])
+                    .output()
+                    .await;
+
+                // Remove the container
+                let output = AsyncCommand::new(&self.container_tool)
+                    .args(["rm", "-f", container_id])
+                    .output()
+                    .await
+                    .with_context(|| format!("Failed to remove container {}", container_id))?;
+
+                if self.verbose && output.status.success() {
+                    print_info(
+                        &format!(
+                            "Removed container: {}",
+                            &container_id[..12.min(container_id.len())]
+                        ),
+                        OutputLevel::Normal,
+                    );
+                }
+            }
+        }
+
+        // Now remove the volume
+        self.remove_volume(volume_name).await
+    }
+
+    /// Get list of container IDs using a specific volume
+    async fn get_containers_using_volume(&self, volume_name: &str) -> Result<Vec<String>> {
+        // Use docker ps with filter to find containers using this volume
+        // This includes both running and stopped containers
+        let output = AsyncCommand::new(&self.container_tool)
+            .args([
+                "ps",
+                "-a",
+                "--filter",
+                &format!("volume={}", volume_name),
+                "--format",
+                "{{.ID}}",
+            ])
+            .output()
+            .await
+            .with_context(|| "Failed to list containers using volume")?;
+
+        if !output.status.success() {
+            // If the command fails, return empty list (volume might not exist)
+            return Ok(Vec::new());
+        }
+
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let containers: Vec<String> = stdout
+            .lines()
+            .filter(|line| !line.is_empty())
+            .map(|s| s.to_string())
+            .collect();
+
+        Ok(containers)
+    }
 }
 
 /// Information about a docker volume

From a52c6544de595cd4c3d9e07ebc1962ca7541791a Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 16:45:38 -0500
Subject: [PATCH 05/20] fix up .stamps with build commands

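Stamp-related fixes for build commands: the SDK prefix now includes the
SDK host architecture ("${AVOCADO_PREFIX}/sdk/$(uname -m)" rather than a
bare "sdk"), and runtime builds now merge config-defined SDK container
args with CLI-provided ones (merge_sdk_container_args) so the extension
version queries run with the same container arguments as the build.
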
---
 src/commands/runtime/build.rs | 22 ++++++++++++++--------
 src/utils/container.rs        |  6 ++++--
 2 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs
index 26d537d..dd49065 100644
--- a/src/commands/runtime/build.rs
+++ b/src/commands/runtime/build.rs
@@ -64,9 +64,8 @@ impl RuntimeBuildCommand {
         let content = std::fs::read_to_string(&self.config_path)?;
         let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?;
 
-        // Process container args with environment variable expansion
-        let processed_container_args =
-            crate::utils::config::Config::process_container_args(self.container_args.as_ref());
+        // Merge container args from config and CLI with environment variable expansion
+        let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref());
 
         // Get repo_url and repo_release from config
         let repo_url = config.get_sdk_repo_url();
@@ -126,7 +125,7 @@ impl RuntimeBuildCommand {
             interactive: false,
             repo_url: repo_url.clone(),
             repo_release: repo_release.clone(),
-            container_args: processed_container_args.clone(),
+            container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
             runs_on: self.runs_on.clone(),
             nfs_port: self.nfs_port,
@@ -163,6 +162,7 @@ impl RuntimeBuildCommand {
                 target_arch.as_str(),
                 &self.config_path,
                 container_image,
+                merged_container_args.clone(),
             )
             .await?;
 
@@ -238,7 +238,7 @@ impl RuntimeBuildCommand {
             interactive: false, // build script runs non-interactively
             repo_url: repo_url.clone(),
             repo_release: repo_release.clone(),
-            container_args: processed_container_args.clone(),
+            container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
             env_vars,
             runs_on: self.runs_on.clone(),
@@ -278,7 +278,7 @@ impl RuntimeBuildCommand {
             interactive: false,
             repo_url: repo_url.clone(),
             repo_release: repo_release.clone(),
-            container_args: processed_container_args.clone(),
+            container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
             runs_on: self.runs_on.clone(),
             nfs_port: self.nfs_port,
@@ -729,6 +729,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
     /// Returns a list of versioned extension names in the format "ext_name-version"
     /// (e.g., "my-ext-1.0.0"). This ensures AVOCADO_EXT_LIST and the build script
     /// use exact versions from the configuration, not wildcards.
+    #[allow(clippy::too_many_arguments)]
     async fn collect_runtime_extensions(
         &self,
         parsed: &serde_yaml::Value,
@@ -737,6 +738,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
         target_arch: &str,
         config_path: &str,
         container_image: &str,
+        container_args: Option<Vec<String>>,
     ) -> Result<Vec<String>> {
         let merged_runtime =
             config.get_merged_runtime_config(runtime_name, target_arch, config_path)?;
@@ -766,6 +768,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
                         dep_spec,
                         container_image,
                         target_arch,
+                        container_args.clone(),
                     )
                     .await?;
                 extensions.push(format!("{ext_name}-{version}"));
@@ -796,6 +799,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
         dep_spec: &serde_yaml::Value,
         container_image: &str,
         target_arch: &str,
+        container_args: Option<Vec<String>>,
     ) -> Result<String> {
         // If version is explicitly specified with vsn field, use it (unless it's a wildcard)
         if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) {
@@ -819,7 +823,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
             }
             // External config but no version found or version is "*" - query RPM database
             return self
-                .query_rpm_version(ext_name, container_image, target_arch)
+                .query_rpm_version(ext_name, container_image, target_arch, container_args)
                 .await;
         }
@@ -839,7 +843,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
 
         // No version found in config - this is likely a package repository extension
         // Query RPM database for the installed version
-        self.query_rpm_version(ext_name, container_image, target_arch)
+        self.query_rpm_version(ext_name, container_image, target_arch, container_args)
             .await
     }
 
@@ -853,6 +857,7 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME
         ext_name: &str,
         container_image: &str,
         target: &str,
+        container_args: Option<Vec<String>>,
     ) -> Result<String> {
         let container_helper = SdkContainer::new();
 
@@ -875,6 +880,7 @@ rpm --root="$AVOCADO_EXT_SYSROOTS/{ext_name}" --dbpath=/var/lib/extension.d/rpm
             interactive: false,
             runs_on: self.runs_on.clone(),
             nfs_port: self.nfs_port,
+            container_args,
             ..Default::default()
         };
 
diff --git a/src/utils/container.rs b/src/utils/container.rs
index c6f3109..b81730b 100644
--- a/src/utils/container.rs
+++ b/src/utils/container.rs
@@ -930,7 +930,8 @@ fi
 if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Using repo release: '$REPO_RELEASE'"; fi
 
 export AVOCADO_PREFIX="/opt/_avocado/${{AVOCADO_TARGET}}"
-export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk"
+export AVOCADO_SDK_ARCH="$(uname -m)"
+export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk/${{AVOCADO_SDK_ARCH}}"
 export AVOCADO_EXT_SYSROOTS="${{AVOCADO_PREFIX}}/extensions"
 export DNF_SDK_HOST_PREFIX="${{AVOCADO_SDK_PREFIX}}"
 export DNF_SDK_TARGET_PREFIX="${{AVOCADO_SDK_PREFIX}}/target-repoconf"
@@ -1111,7 +1112,8 @@ fi
 if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Using repo release: '$REPO_RELEASE'"; fi
 
 export AVOCADO_PREFIX="/opt/_avocado/${{AVOCADO_TARGET}}"
-export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk"
+export AVOCADO_SDK_ARCH="$(uname -m)"
+export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk/${{AVOCADO_SDK_ARCH}}"
 export AVOCADO_EXT_SYSROOTS="${{AVOCADO_PREFIX}}/extensions"
 export DNF_SDK_HOST_PREFIX="${{AVOCADO_SDK_PREFIX}}"
 export DNF_SDK_TARGET_PREFIX="${{AVOCADO_SDK_PREFIX}}/target-repoconf"

From d543c84a41be8420ced84cd6b2829817ed68894c Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 17:47:24 -0500
Subject: [PATCH 06/20] update runs-on for fedora/rhel support

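On Fedora/RHEL hosts, SELinux labeling blocks the FUSE mount that bindfs
performs inside the container; pass --security-opt label=disable on the
remote docker run so the /mnt/src volume can be remapped to /opt/src.
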
---
 src/utils/runs_on.rs         |  3 ++-
 tests/runs_on_integration.rs | 11 ++++++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs
index ee316ed..b5d5495 100644
--- a/src/utils/runs_on.rs
+++ b/src/utils/runs_on.rs
@@ -353,7 +353,8 @@ impl RunsOnContext {
             -v {}:/mnt/src:rw \
             -v {}:/opt/_avocado:rw \
             --device /dev/fuse \
-            --cap-add SYS_ADMIN",
+            --cap-add SYS_ADMIN \
+            --security-opt label=disable",
             self.container_tool, src_volume, state_volume
         );
 
diff --git a/tests/runs_on_integration.rs b/tests/runs_on_integration.rs
index ca0c3b0..dc01529 100644
--- a/tests/runs_on_integration.rs
+++ b/tests/runs_on_integration.rs
@@ -1182,7 +1182,8 @@ mod container_command_tests {
             -v {}:/opt/src:rw \
             -v {}:/opt/_avocado:rw \
             --device /dev/fuse \
-            --cap-add SYS_ADMIN",
+            --cap-add SYS_ADMIN \
+            --security-opt label=disable",
             container_tool, src_volume, state_volume
         );
 
@@ -1225,6 +1226,14 @@ mod container_command_tests {
         assert!(cmd.contains("--cap-add SYS_ADMIN"));
     }
 
+    #[test]
+    fn test_container_command_has_selinux_label_disable() {
+        let cmd =
+            build_container_command("docker", "src", "state", "image", "cmd", &HashMap::new());
+
+        assert!(cmd.contains("--security-opt label=disable"));
+    }
+
     #[test]
     fn test_container_command_includes_env_vars() {
         let mut env = HashMap::new();

From de1afb5e1455ae75bf79511d6df2f7af52e803bf Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 17:47:37 -0500
Subject: [PATCH 07/20] improvements to bindfs error messages

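The old message walked users through adding bindfs to a Dockerfile. The
message now assumes up-to-date SDK images already ship bindfs and points
at the two ways to refresh the image instead: `avocado fetch` or
`docker pull $AVOCADO_SDK_IMAGE` (the image name is now exported into
the container environment for exactly this message).
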
---
 src/utils/container.rs | 68 ++++++++++++++++++++++------------------
 1 file changed, 39 insertions(+), 29 deletions(-)

diff --git a/src/utils/container.rs b/src/utils/container.rs
index b81730b..b5a0443 100644
--- a/src/utils/container.rs
+++ b/src/utils/container.rs
@@ -169,6 +169,11 @@ impl SdkContainer {
         if config.verbose || self.verbose {
             env_vars.insert("AVOCADO_VERBOSE".to_string(), "1".to_string());
         }
+        // Pass SDK image for error messages
+        env_vars.insert(
+            "AVOCADO_SDK_IMAGE".to_string(),
+            config.container_image.clone(),
+        );
 
         // Build the complete command
         let mut full_command = String::new();
@@ -266,6 +271,11 @@ impl SdkContainer {
         let (host_uid, host_gid) = crate::utils::config::resolve_host_uid_gid(None);
         env_vars.insert("AVOCADO_HOST_UID".to_string(), host_uid.to_string());
         env_vars.insert("AVOCADO_HOST_GID".to_string(), host_gid.to_string());
+        // Pass SDK image for error messages
+        env_vars.insert(
+            "AVOCADO_SDK_IMAGE".to_string(),
+            config.container_image.clone(),
+        );
 
         // Build the complete command with entrypoint
         // NFS src volume is mounted to /mnt/src, bindfs remaps to /opt/src with UID translation
@@ -289,6 +299,8 @@ impl SdkContainer {
             "/dev/fuse".to_string(),
             "--cap-add".to_string(),
             "SYS_ADMIN".to_string(),
+            "--security-opt".to_string(),
+            "label=disable".to_string(),
         ];
 
         if let Some(ref args) = config.container_args {
@@ -361,6 +373,9 @@ impl SdkContainer {
         container_cmd.push("/dev/fuse".to_string());
         container_cmd.push("--cap-add".to_string());
        container_cmd.push("SYS_ADMIN".to_string());
+        // Disable SELinux labeling to allow FUSE mounts (required on Fedora/RHEL hosts)
+        container_cmd.push("--security-opt".to_string());
+        container_cmd.push("label=disable".to_string());
 
         // Volume mounts: docker volume for persistent state, bind mount for source
         // Source is mounted to /mnt/src, then bindfs remounts it to /opt/src with permission translation
@@ -503,6 +518,11 @@ impl SdkContainer {
         if config.verbose || self.verbose {
             env_vars.insert("AVOCADO_VERBOSE".to_string(), "1".to_string());
         }
+        // Pass SDK image for error messages
+        env_vars.insert(
+            "AVOCADO_SDK_IMAGE".to_string(),
+            config.container_image.clone(),
+        );
 
         // Build the complete command
         let mut full_command = String::new();
@@ -764,6 +784,9 @@ impl SdkContainer {
         container_cmd.push("/dev/fuse".to_string());
         container_cmd.push("--cap-add".to_string());
         container_cmd.push("SYS_ADMIN".to_string());
+        // Disable SELinux labeling to allow FUSE mounts (required on Fedora/RHEL hosts)
+        container_cmd.push("--security-opt".to_string());
+        container_cmd.push("label=disable".to_string());
 
         // Volume mounts: docker volume for persistent state, bind mount for source
         container_cmd.push("-v".to_string());
@@ -778,6 +801,9 @@ impl SdkContainer {
         container_cmd.push(format!("AVOCADO_HOST_UID={}", host_uid));
         container_cmd.push("-e".to_string());
         container_cmd.push(format!("AVOCADO_HOST_GID={}", host_gid));
+        // Pass SDK image for error messages
+        container_cmd.push("-e".to_string());
+        container_cmd.push(format!("AVOCADO_SDK_IMAGE={}", container_image));
 
         // Add the container image
         container_cmd.push(container_image.to_string());
@@ -798,21 +824,10 @@ impl SdkContainer {
 r#"if ! command -v bindfs >/dev/null 2>&1; then
     echo "[ERROR] bindfs is not installed in this container image."
     echo ""
-    echo "bindfs is required for proper file permission handling between the host and container."
-    echo ""
-    echo "To install bindfs in your container image, add one of the following to your Dockerfile:"
-    echo ""
-    echo "  # For Ubuntu/Debian-based images:"
-    echo "  RUN apt-get update && apt-get install -y bindfs"
+    echo "To resolve this, update the SDK container by running one of the following:"
     echo ""
-    echo "  # For Fedora/RHEL-based images:"
-    echo "  RUN dnf install -y bindfs"
-    echo ""
-    echo "  # For Alpine-based images:"
-    echo "  RUN apk add --no-cache bindfs"
-    echo ""
-    echo "  # For Arch-based images:"
-    echo "  RUN pacman -S --noconfirm bindfs"
+    echo "  avocado fetch"
+    echo "  docker pull $AVOCADO_SDK_IMAGE"
    echo ""
    exit 1
 fi
@@ -884,7 +899,11 @@ mkdir -p /opt/src
 if ! command -v bindfs >/dev/null 2>&1; then
     echo "[ERROR] bindfs is not installed in this container image."
     echo ""
-    echo "bindfs is required for proper file permission handling."
+    echo "To resolve this, update the SDK container by running one of the following:"
+    echo ""
+    echo "  avocado fetch"
+    echo "  docker pull $AVOCADO_SDK_IMAGE"
+    echo ""
     exit 1
 fi
 
@@ -1050,21 +1069,10 @@ mkdir -p /opt/src
 if ! command -v bindfs >/dev/null 2>&1; then
     echo "[ERROR] bindfs is not installed in this container image."
     echo ""
-    echo "bindfs is required for proper file permission handling between the host and container."
-    echo ""
-    echo "To install bindfs in your container image, add one of the following to your Dockerfile:"
-    echo ""
-    echo "  # For Ubuntu/Debian-based images:"
-    echo "  RUN apt-get update && apt-get install -y bindfs"
-    echo ""
-    echo "  # For Fedora/RHEL-based images:"
-    echo "  RUN dnf install -y bindfs"
+    echo "To resolve this, update the SDK container by running one of the following:"
     echo ""
-    echo "  # For Arch-based images:"
-    echo "  RUN pacman -S --noconfirm bindfs"
+    echo "  avocado fetch"
+    echo "  docker pull $AVOCADO_SDK_IMAGE"
     echo ""
     exit 1
 fi
@@ -1481,6 +1489,8 @@ mod tests {
         assert!(cmd.contains(&"/dev/fuse".to_string()));
         assert!(cmd.contains(&"--cap-add".to_string()));
         assert!(cmd.contains(&"SYS_ADMIN".to_string()));
+        assert!(cmd.contains(&"--security-opt".to_string()));
+        assert!(cmd.contains(&"label=disable".to_string()));
         // Verify host UID/GID are passed as env vars
         let has_uid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_UID="));
         let has_gid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_GID="));

From 56e194bfdb3ddcea7046bb834bc6c742f51c10a1 Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Sun, 28 Dec 2025 18:47:13 -0500
Subject: [PATCH 08/20] fix up selinux security opts

+fn is_selinux_enabled() -> bool {
+    // Check if /sys/fs/selinux exists (SELinux is compiled into the kernel)
+    if !Path::new("/sys/fs/selinux").exists() {
+        return false;
+    }
+
+    // Check the enforce file to see if SELinux is actually enabled
+    // If we can't read it, assume SELinux is enabled to be safe
+    match std::fs::read_to_string("/sys/fs/selinux/enforce") {
+        Ok(content) => {
+            // Content is "0" for permissive, "1" for enforcing
+            // Both mean SELinux is active and we need to disable labeling
+            content.trim() == "0" || content.trim() == "1"
+        }
+        Err(_) => {
+            // If we can't read, check if the directory exists as a fallback
+            Path::new("/sys/fs/selinux").is_dir()
+        }
+    }
+}
+
+/// Check if AppArmor is enabled on the host system.
+/// Returns true if AppArmor is present and enabled (Ubuntu/Debian systems).
+fn is_apparmor_enabled() -> bool {
+    // Check if AppArmor is enabled via /sys/module/apparmor
+    if Path::new("/sys/module/apparmor").exists() {
+        return true;
+    }
+
+    // Alternative check: /sys/kernel/security/apparmor exists
+    Path::new("/sys/kernel/security/apparmor").exists()
+}
+
+/// Add security options to container command based on host security module.
+/// - SELinux (Fedora/RHEL): adds --security-opt label=disable
+/// - AppArmor (Ubuntu/Debian): adds --security-opt apparmor=unconfined
+fn add_security_opts(container_cmd: &mut Vec<String>) {
+    if is_selinux_enabled() {
+        // Disable SELinux labeling to allow FUSE mounts
+        container_cmd.push("--security-opt".to_string());
+        container_cmd.push("label=disable".to_string());
+    }
+
+    if is_apparmor_enabled() {
+        // Disable AppArmor confinement to allow FUSE mounts
+        container_cmd.push("--security-opt".to_string());
+        container_cmd.push("apparmor=unconfined".to_string());
+    }
+}
+
 /// Configuration for running commands in containers
 #[derive(Debug, Clone)]
 pub struct RunConfig {
@@ -373,9 +425,8 @@ impl SdkContainer {
         container_cmd.push("/dev/fuse".to_string());
         container_cmd.push("--cap-add".to_string());
         container_cmd.push("SYS_ADMIN".to_string());
-        // Disable SELinux labeling to allow FUSE mounts (required on Fedora/RHEL hosts)
-        container_cmd.push("--security-opt".to_string());
-        container_cmd.push("label=disable".to_string());
+        // Add security options based on host security module (SELinux/AppArmor)
+        add_security_opts(&mut container_cmd);
 
         // Volume mounts: docker volume for persistent state, bind mount for source
         // Source is mounted to /mnt/src, then bindfs remounts it to /opt/src with permission translation
@@ -577,11 +628,20 @@ impl SdkContainer {
             Ok(Some(stdout))
         } else {
             let stderr = String::from_utf8_lossy(&output.stderr);
+            let stdout = String::from_utf8_lossy(&output.stdout);
+            // Always log the error for debugging - this helps diagnose container failures
+            // that might otherwise be silently converted to "missing stamps" errors
             if config.verbose || self.verbose {
                 print_error(
                     &format!("Container execution failed: {stderr}"),
                     OutputLevel::Normal,
                 );
+            } else if !stderr.is_empty() || !stdout.is_empty() {
+                // Even in non-verbose mode, print a hint about the failure
+                print_error(
+                    "Container command failed. 
Run with --verbose to see details.", + OutputLevel::Normal, + ); } Ok(None) } @@ -784,9 +844,8 @@ impl SdkContainer { container_cmd.push("/dev/fuse".to_string()); container_cmd.push("--cap-add".to_string()); container_cmd.push("SYS_ADMIN".to_string()); - // Disable SELinux labeling to allow FUSE mounts (required on Fedora/RHEL hosts) - container_cmd.push("--security-opt".to_string()); - container_cmd.push("label=disable".to_string()); + // Add security options based on host security module (SELinux/AppArmor) + add_security_opts(&mut container_cmd); // Volume mounts: docker volume for persistent state, bind mount for source container_cmd.push("-v".to_string()); @@ -1489,8 +1548,16 @@ mod tests { assert!(cmd.contains(&"/dev/fuse".to_string())); assert!(cmd.contains(&"--cap-add".to_string())); assert!(cmd.contains(&"SYS_ADMIN".to_string())); - assert!(cmd.contains(&"--security-opt".to_string())); - assert!(cmd.contains(&"label=disable".to_string())); + // Security options are added based on host security module + if is_selinux_enabled() || is_apparmor_enabled() { + assert!(cmd.contains(&"--security-opt".to_string())); + } + if is_selinux_enabled() { + assert!(cmd.contains(&"label=disable".to_string())); + } + if is_apparmor_enabled() { + assert!(cmd.contains(&"apparmor=unconfined".to_string())); + } // Verify host UID/GID are passed as env vars let has_uid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_UID=")); let has_gid_env = cmd.iter().any(|s| s.starts_with("AVOCADO_HOST_GID=")); From b8f6d9da8a81af6eab4021a74719b19487d15d9a Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Sun, 28 Dec 2025 19:33:59 -0500 Subject: [PATCH 09/20] update provision to support remote signing --- src/commands/sdk/run.rs | 168 ++++++++++++++++++++++++++++++++++- src/commands/sign.rs | 64 +++++++++---- src/main.rs | 3 +- src/utils/config.rs | 104 ++++++++++++++++++++++ src/utils/container.rs | 15 ++++ src/utils/runs_on.rs | 53 ++++++++++- src/utils/signing_service.rs | 13 +-- 7 files changed, 390 insertions(+), 30 deletions(-) diff --git a/src/commands/sdk/run.rs b/src/commands/sdk/run.rs index 92601e0..43daa93 100644 --- a/src/commands/sdk/run.rs +++ b/src/commands/sdk/run.rs @@ -1,11 +1,15 @@ //! SDK run command implementation. 
+#[cfg(unix)]
+use crate::utils::signing_service::{generate_helper_script, SigningService, SigningServiceConfig};
 use anyhow::{Context, Result};
+#[cfg(unix)]
+use std::path::PathBuf;
 
 use crate::utils::{
     config::Config,
     container::{RunConfig, SdkContainer},
-    output::{print_success, OutputLevel},
+    output::{print_info, print_success, OutputLevel},
     target::validate_and_log_target,
 };
 
@@ -39,6 +43,13 @@ pub struct SdkRunCommand {
     pub dnf_args: Option<Vec<String>>,
     /// Skip SDK bootstrap initialization
    pub no_bootstrap: bool,
+    /// Remote host to run on (format: user@host)
+    pub runs_on: Option<String>,
+    /// NFS port for remote execution
+    pub nfs_port: Option<u16>,
+    /// Signing service handle (Unix only)
+    #[cfg(unix)]
+    signing_service: Option<SigningService>,
 }
 
 impl SdkRunCommand {
@@ -75,11 +86,142 @@ impl SdkRunCommand {
             container_args,
             dnf_args,
             no_bootstrap,
+            runs_on: None,
+            nfs_port: None,
+            #[cfg(unix)]
+            signing_service: None,
         }
     }
 
+    /// Set remote execution options
+    pub fn with_runs_on(mut self, runs_on: Option<String>, nfs_port: Option<u16>) -> Self {
+        self.runs_on = runs_on;
+        self.nfs_port = nfs_port;
+        self
+    }
+
+    /// Setup signing service for runtime if signing is configured
+    #[cfg(unix)]
+    async fn setup_signing_service(
+        &mut self,
+        config: &Config,
+        runtime_name: &str,
+    ) -> Result<Option<(PathBuf, PathBuf, String, String)>> {
+        // Check if runtime has signing configuration
+        let signing_key_name = match config.get_runtime_signing_key(runtime_name) {
+            Some(keyid) => {
+                // Get the key name from signing_keys mapping
+                let signing_keys = config.get_signing_keys();
+                signing_keys
+                    .and_then(|keys| {
+                        keys.iter()
+                            .find(|(_, v)| *v == &keyid)
+                            .map(|(k, _)| k.clone())
+                    })
+                    .context("Signing key ID not found in signing_keys mapping")?
+            }
+            None => {
+                // No signing configured for this runtime
+                if self.verbose {
+                    print_info(
+                        "No signing key configured for runtime. Signing service will not be started.",
Signing service will not be started.", + OutputLevel::Verbose, + ); + } + return Ok(None); + } + }; + + let keyid = config + .get_runtime_signing_key(runtime_name) + .context("Failed to get signing key ID")?; + + // Get checksum algorithm (defaults to sha256) + let checksum_str = config + .runtime + .as_ref() + .and_then(|r| r.get(runtime_name)) + .and_then(|rc| rc.signing.as_ref()) + .map(|s| s.checksum_algorithm.as_str()) + .unwrap_or("sha256"); + + // Create temporary directory for socket and helper script + let temp_dir = tempfile::tempdir().context("Failed to create temp directory")?; + let socket_path = temp_dir.path().join("sign.sock"); + let helper_script_path = temp_dir.path().join("avocado-sign-request"); + + // Write helper script + let helper_script = generate_helper_script(); + std::fs::write(&helper_script_path, helper_script) + .context("Failed to write helper script")?; + + // Make helper script executable + { + use std::os::unix::fs::PermissionsExt; + let perms = std::fs::Permissions::from_mode(0o755); + std::fs::set_permissions(&helper_script_path, perms) + .context("Failed to set helper script permissions")?; + } + + if self.verbose { + print_info( + &format!( + "Starting signing service with key '{}' using {} checksums", + signing_key_name, checksum_str + ), + OutputLevel::Verbose, + ); + } + + // Start signing service + let service_config = SigningServiceConfig { + socket_path: socket_path.clone(), + runtime_name: runtime_name.to_string(), + key_name: signing_key_name.clone(), + keyid, + verbose: self.verbose, + }; + + let service = SigningService::start(service_config, temp_dir).await?; + + // Store the service handle for cleanup + self.signing_service = Some(service); + + Ok(Some(( + socket_path, + helper_script_path, + signing_key_name, + checksum_str.to_string(), + ))) + } + + /// Setup signing service stub for non-Unix platforms + #[cfg(not(unix))] + async fn setup_signing_service( + &mut self, + _config: &Config, + _runtime_name: &str, + ) -> Result> { + Ok(None) + } + + /// Cleanup signing service resources + #[cfg(unix)] + async fn cleanup_signing_service(&mut self) -> Result<()> { + if let Some(service) = self.signing_service.take() { + service.shutdown().await?; + } + Ok(()) + } + + /// Cleanup signing service stub for non-Unix platforms + #[cfg(not(unix))] + async fn cleanup_signing_service(&mut self) -> Result<()> { + Ok(()) + } + /// Execute the sdk run command - pub async fn execute(&self) -> Result<()> { + pub async fn execute(mut self) -> Result<()> { // Validate arguments if self.interactive && self.detach { return Err(anyhow::anyhow!( @@ -154,12 +296,19 @@ impl SdkRunCommand { "bash".to_string() }; + // Setup signing service if a runtime is specified + let signing_config = if let Some(runtime_name) = self.runtime.clone() { + self.setup_signing_service(&config, &runtime_name).await? 
+ } else { + None + }; + // Use the container helper to run the command let container_helper = SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); // Create RunConfig - detach mode is now handled by the shared run_in_container - let run_config = RunConfig { + let mut run_config = RunConfig { container_image: container_image.to_string(), target: target.clone(), command: command.clone(), @@ -176,12 +325,25 @@ impl SdkRunCommand { extension_sysroot: self.extension.clone(), runtime_sysroot: self.runtime.clone(), no_bootstrap: self.no_bootstrap, + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; + // Add signing configuration to run_config if available + if let Some((socket_path, helper_script_path, key_name, checksum_algo)) = signing_config { + run_config.signing_socket_path = Some(socket_path); + run_config.signing_helper_script_path = Some(helper_script_path); + run_config.signing_key_name = Some(key_name); + run_config.signing_checksum_algorithm = Some(checksum_algo); + } + // Use shared run_in_container for both detached and non-detached modes let success = container_helper.run_in_container(run_config).await?; + // Cleanup signing service + self.cleanup_signing_service().await?; + if success { print_success("SDK command completed successfully.", OutputLevel::Normal); } diff --git a/src/commands/sign.rs b/src/commands/sign.rs index c0a3b6c..d76af02 100644 --- a/src/commands/sign.rs +++ b/src/commands/sign.rs @@ -102,36 +102,62 @@ impl SignCommand { // Collect runtimes that have signing configuration let mut runtimes_to_sign = Vec::new(); + let mut runtimes_with_unresolved_keys = Vec::new(); for runtime_name_val in runtime_section.keys() { if let Some(runtime_name) = runtime_name_val.as_str() { - // Check if this runtime has signing configuration - if config.get_runtime_signing_key(runtime_name).is_some() { - // Check target compatibility - let merged_runtime = config.get_merged_runtime_config( - runtime_name, - target, - &self.config_path, - )?; - - if let Some(merged_value) = merged_runtime { - // Check if runtime has explicit target - if let Some(runtime_target) = - merged_value.get("target").and_then(|t| t.as_str()) - { - // Runtime has explicit target - only include if it matches - if runtime_target == target { + // Check if this runtime declares a signing key + if let Some(declared_key) = config.get_runtime_signing_key_name(runtime_name) { + // Check if the signing key can be resolved + if config.get_runtime_signing_key(runtime_name).is_some() { + // Check target compatibility + let merged_runtime = config.get_merged_runtime_config( + runtime_name, + target, + &self.config_path, + )?; + + if let Some(merged_value) = merged_runtime { + // Check if runtime has explicit target + if let Some(runtime_target) = + merged_value.get("target").and_then(|t| t.as_str()) + { + // Runtime has explicit target - only include if it matches + if runtime_target == target { + runtimes_to_sign.push(runtime_name.to_string()); + } + } else { + // Runtime has no target specified - include for all targets runtimes_to_sign.push(runtime_name.to_string()); } - } else { - // Runtime has no target specified - include for all targets - runtimes_to_sign.push(runtime_name.to_string()); } + } else { + // Runtime declares a signing key but it can't be resolved + runtimes_with_unresolved_keys + .push((runtime_name.to_string(), declared_key)); } } } } + // If any runtimes have unresolved signing keys, return an error + if !runtimes_with_unresolved_keys.is_empty() 
{
+            let runtime_details: Vec<String> = runtimes_with_unresolved_keys
+                .iter()
+                .map(|(runtime, key)| format!("  - runtime '{}' references key '{}'", runtime, key))
+                .collect();
+
+            anyhow::bail!(
+                "The following runtimes have signing configuration with keys that could not be resolved:\n\
+                 {}\n\n\
+                 Please check that:\n\
+                 1. A top-level `signing_keys` section exists in your config (note: underscore, not hyphen)\n\
+                 2. The referenced keys are defined in the `signing_keys` section\n\
+                 3. The keys are available on this host (check with: avocado signing-keys list)",
+                runtime_details.join("\n")
+            );
+        }
+
         if runtimes_to_sign.is_empty() {
             print_info(
                 "No runtimes with signing configuration found.",
diff --git a/src/main.rs b/src/main.rs
index d5f519d..558355d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1340,7 +1340,8 @@ async fn main() -> Result<()> {
                 container_args,
                 dnf_args,
                 no_bootstrap,
-            );
+            )
+            .with_runs_on(cli.runs_on.clone(), cli.nfs_port);
             run_cmd.execute().await?;
             Ok(())
         }
diff --git a/src/utils/config.rs b/src/utils/config.rs
index 2ba9c2c..2978665 100644
--- a/src/utils/config.rs
+++ b/src/utils/config.rs
@@ -1245,6 +1245,16 @@ impl Config {
         None
     }
 
+    /// Get the declared signing key name for a runtime (without resolving it).
+    ///
+    /// Returns Some(key_name) if the runtime has a signing configuration declared,
+    /// None if the runtime doesn't exist or has no signing section.
+    #[allow(dead_code)] // Public API for future use
+    pub fn get_runtime_signing_key_name(&self, runtime_name: &str) -> Option<String> {
+        let runtime_config = self.runtime.as_ref()?.get(runtime_name)?;
+        Some(runtime_config.signing.as_ref()?.key.clone())
+    }
+
     /// Get signing key for a specific runtime
     ///
     /// The signing key reference in the config can be either:
@@ -5514,6 +5524,100 @@ sdk:
         assert!(key_names.is_empty());
     }
 
+    #[test]
+    fn test_get_runtime_signing_key_name() {
+        // Test that get_runtime_signing_key_name returns the declared key name
+        // even when the key cannot be resolved (e.g., signing_keys section missing)
+        let config_content = r#"
+default_target: qemux86-64
+
+sdk:
+  image: ghcr.io/avocado-framework/avocado-sdk:latest
+
+runtime:
+  dev:
+    signing:
+      key: my-key
+  prod:
+    signing:
+      key: production-key
+  no-signing:
+    dependencies:
+      some-package: '*'
+"#;
+
+        let config = Config::load_from_yaml_str(config_content).unwrap();
+
+        // Test that we can get the declared key name for runtimes with signing config
+        assert_eq!(
+            config.get_runtime_signing_key_name("dev"),
+            Some("my-key".to_string())
+        );
+        assert_eq!(
+            config.get_runtime_signing_key_name("prod"),
+            Some("production-key".to_string())
+        );
+
+        // Test that runtimes without signing config return None
+        assert_eq!(config.get_runtime_signing_key_name("no-signing"), None);
+
+        // Test that non-existent runtimes return None
+        assert_eq!(config.get_runtime_signing_key_name("nonexistent"), None);
+
+        // Since signing_keys section is missing, get_runtime_signing_key should return None
+        // while get_runtime_signing_key_name still returns the declared key name
+        assert!(config.get_runtime_signing_key("dev").is_none());
+        assert!(config.get_runtime_signing_key("prod").is_none());
+    }
+
+    #[test]
+    fn test_runtime_signing_key_declared_but_not_in_signing_keys() {
+        // Test scenario where runtime references a key that exists in signing_keys
+        // but uses a different name
+        let keyid = "abc123def456abc123def456abc123def456abc123def456abc123def456abc1";
+
+        let config_content = format!(
+            r#"
+default_target: qemux86-64
+
+sdk:
+  image: ghcr.io/avocado-framework/avocado-sdk:latest
+
+signing_keys:
+  - existing-key: {keyid}
+
+runtime:
+  dev:
+    signing:
+      key: missing-key
+  prod:
+    signing:
+      key: existing-key
+"#
+        );
+
+        let config = Config::load_from_yaml_str(&config_content).unwrap();
+
+        // dev references 'missing-key' which is not in signing_keys
+        assert_eq!(
+            config.get_runtime_signing_key_name("dev"),
+            Some("missing-key".to_string())
+        );
+        // get_runtime_signing_key returns None because 'missing-key' is not resolvable
+        assert!(config.get_runtime_signing_key("dev").is_none());
+
+        // prod references 'existing-key' which is in signing_keys
+        assert_eq!(
+            config.get_runtime_signing_key_name("prod"),
+            Some("existing-key".to_string())
+        );
+        // get_runtime_signing_key returns the keyid because 'existing-key' is resolvable
+        assert_eq!(
+            config.get_runtime_signing_key("prod"),
+            Some(keyid.to_string())
+        );
+    }
+
     #[test]
     fn test_discover_external_config_refs_from_runtime() {
         let config_content = r#"
diff --git a/src/utils/container.rs b/src/utils/container.rs
index 34b6261..f1e1d7b 100644
--- a/src/utils/container.rs
+++ b/src/utils/container.rs
@@ -329,6 +329,21 @@ impl SdkContainer {
             config.container_image.clone(),
         );
 
+        // Add signing-related environment variables for remote execution
+        // (AVOCADO_SIGNING_SOCKET is set separately in runs_on.rs when the tunnel is active)
+        if config.signing_socket_path.is_some() {
+            env_vars.insert("AVOCADO_SIGNING_ENABLED".to_string(), "1".to_string());
+        }
+        if let Some(ref key_name) = config.signing_key_name {
+            env_vars.insert("AVOCADO_SIGNING_KEY_NAME".to_string(), key_name.clone());
+        }
+        if let Some(ref checksum_algo) = config.signing_checksum_algorithm {
+            env_vars.insert(
+                "AVOCADO_SIGNING_CHECKSUM".to_string(),
+                checksum_algo.clone(),
+            );
+        }
+
         // Build the complete command with entrypoint
         // NFS src volume is mounted to /mnt/src, bindfs remaps to /opt/src with UID translation
         let mut full_command = String::new();
diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs
index b5d5495..a581519 100644
--- a/src/utils/runs_on.rs
+++ b/src/utils/runs_on.rs
@@ -48,6 +48,9 @@ pub struct RunsOnContext {
     /// SSH tunnel for signing
     #[cfg(unix)]
     signing_tunnel: Option<SshTunnel>,
+    /// Remote path to the signing helper script
+    #[cfg(unix)]
+    remote_helper_script: Option<String>,
     /// Enable verbose output
     verbose: bool,
 }
@@ -250,6 +253,8 @@ impl RunsOnContext {
             remote_state_volume: Some(state_volume_name),
             #[cfg(unix)]
             signing_tunnel: None,
+            #[cfg(unix)]
+            remote_helper_script: None,
             verbose,
         })
     }
@@ -281,10 +286,14 @@ impl RunsOnContext {
     /// Setup SSH tunnel for signing
     ///
     /// This creates an SSH tunnel that forwards signing requests from the remote
-    /// back to the local signing service.
+    /// back to the local signing service. It also creates the helper script on
+    /// the remote host.
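+    ///
+    /// A sketch of the expected call order (`ctx` is a hypothetical
+    /// `RunsOnContext`; the local socket path is an example value):
+    ///
+    /// ```ignore
+    /// let remote_socket = ctx.setup_signing_tunnel(Path::new("/tmp/sign.sock")).await?;
+    /// // remote_socket is "/tmp/avocado-sign-<session-id>.sock" on the remote, and
+    /// // /tmp/avocado-sign-request-<session-id> now exists there and is executable.
+    /// ```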
     #[cfg(unix)]
     pub async fn setup_signing_tunnel(&mut self, local_socket: &Path) -> Result<String> {
+        use crate::utils::signing_service::generate_helper_script;
+
         let remote_socket = format!("/tmp/avocado-sign-{}.sock", self.session_id);
+        let remote_helper_path = format!("/tmp/avocado-sign-request-{}", self.session_id);
 
         if self.verbose {
             print_info(
@@ -297,12 +306,35 @@ impl RunsOnContext {
             );
         }
 
+        // Create the signing tunnel
         let tunnel = SshTunnel::create(&self.remote_host, local_socket, &remote_socket)
             .await
             .context("Failed to create SSH tunnel for signing")?;
 
+        // Create the helper script on the remote host
+        let helper_script = generate_helper_script();
+        // Escape the script content for shell
+        let escaped_script = helper_script.replace("'", "'\\''");
+        let create_script_cmd = format!(
+            "printf '%s' '{}' > {} && chmod +x {}",
+            escaped_script, remote_helper_path, remote_helper_path
+        );
+
+        self.ssh
+            .run_command(&create_script_cmd)
+            .await
+            .context("Failed to create signing helper script on remote")?;
+
+        if self.verbose {
+            print_info(
+                &format!("Created signing helper script at {}", remote_helper_path),
+                OutputLevel::Normal,
+            );
+        }
+
         let socket_path = tunnel.remote_socket().to_string();
         self.signing_tunnel = Some(tunnel);
+        self.remote_helper_script = Some(remote_helper_path);
 
         Ok(socket_path)
     }
@@ -363,7 +395,7 @@ impl RunsOnContext {
             docker_cmd.push_str(&format!(" -e {}={}", key, shell_escape(value)));
         }
 
-        // Add signing socket if tunnel is active
+        // Add signing socket and helper script if tunnel is active
         #[cfg(unix)]
         if let Some(ref tunnel) = self.signing_tunnel {
             docker_cmd.push_str(&format!(
@@ -372,6 +404,14 @@ impl RunsOnContext {
                 tunnel.remote_socket(),
                 tunnel.remote_socket()
             ));
+
+            // Mount the helper script if it exists
+            if let Some(ref helper_path) = self.remote_helper_script {
+                docker_cmd.push_str(&format!(
+                    " -v {}:/usr/local/bin/avocado-sign-request:ro",
+                    helper_path
+                ));
+            }
         }
 
         // Add extra Docker arguments
@@ -409,6 +449,15 @@ impl RunsOnContext {
             let _ = tunnel.close().await;
         }
 
+        // Remove remote helper script
+        #[cfg(unix)]
+        if let Some(ref helper_path) = self.remote_helper_script {
+            let _ = self
+                .ssh
+                .run_command(&format!("rm -f {}", helper_path))
+                .await;
+        }
+
         // Remove remote volumes
         let remote_vm = RemoteVolumeManager::new(
             SshClient::new(self.remote_host.clone()).with_verbose(self.verbose),
diff --git a/src/utils/signing_service.rs b/src/utils/signing_service.rs
index 2e7aff1..cbe8454 100644
--- a/src/utils/signing_service.rs
+++ b/src/utils/signing_service.rs
@@ -329,9 +329,12 @@ RETRY_DELAY=1
 # Timeout for waiting on response (signing is fast since we only send the hash)
 SOCKET_TIMEOUT=30
 
+# Socket path from environment or default
+SIGNING_SOCKET="${AVOCADO_SIGNING_SOCKET:-/run/avocado/sign.sock}"
+
 # Check if signing socket is available
-if [ ! -S "/run/avocado/sign.sock" ]; then
-    echo "Error: Signing socket not available" >&2
+if [ ! 
-S "$SIGNING_SOCKET" ]; then + echo "Error: Signing socket not available at $SIGNING_SOCKET" >&2 exit 2 # Signing unavailable fi @@ -406,13 +409,13 @@ send_signing_request() { # Send request to signing service via Unix socket # The -t option for socat sets the timeout for half-close situations if command -v socat &> /dev/null; then - response=$(echo "$REQUEST" | socat -t${SOCKET_TIMEOUT} -T${SOCKET_TIMEOUT} - UNIX-CONNECT:/run/avocado/sign.sock 2>/dev/null) || true + response=$(echo "$REQUEST" | socat -t${SOCKET_TIMEOUT} -T${SOCKET_TIMEOUT} - UNIX-CONNECT:"$SIGNING_SOCKET" 2>/dev/null) || true elif command -v nc &> /dev/null; then # Try with -q option first (GNU netcat), fall back to -w only if nc -h 2>&1 | grep -q '\-q'; then - response=$(echo "$REQUEST" | nc -w ${SOCKET_TIMEOUT} -q ${SOCKET_TIMEOUT} -U /run/avocado/sign.sock 2>/dev/null) || true + response=$(echo "$REQUEST" | nc -w ${SOCKET_TIMEOUT} -q ${SOCKET_TIMEOUT} -U "$SIGNING_SOCKET" 2>/dev/null) || true else - response=$(echo "$REQUEST" | nc -w ${SOCKET_TIMEOUT} -U /run/avocado/sign.sock 2>/dev/null) || true + response=$(echo "$REQUEST" | nc -w ${SOCKET_TIMEOUT} -U "$SIGNING_SOCKET" 2>/dev/null) || true fi else echo "Error: Neither socat nor nc available for socket communication" >&2 From 4add7ed3a2d59ba49da43a142305d59b266e74a2 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Sun, 28 Dec 2025 20:14:23 -0500 Subject: [PATCH 10/20] Update sdk install stamp to track arch --- src/commands/ext/checkout.rs | 45 ++-- src/commands/ext/package.rs | 59 +++-- src/commands/hitl/server.rs | 54 +++-- src/commands/runtime/clean.rs | 26 ++- src/commands/runtime/provision.rs | 38 +++- src/commands/sdk/compile.rs | 34 +-- src/commands/sdk/install.rs | 10 +- src/utils/remote.rs | 52 +++++ src/utils/stamps.rs | 366 +++++++++++++++++++++++++----- 9 files changed, 546 insertions(+), 138 deletions(-) diff --git a/src/commands/ext/checkout.rs b/src/commands/ext/checkout.rs index cafcbea..f5e7bf6 100644 --- a/src/commands/ext/checkout.rs +++ b/src/commands/ext/checkout.rs @@ -555,6 +555,8 @@ mod tests { #[test] fn test_checkout_stamp_requirements() { + use crate::utils::stamps::get_local_arch; + // ext checkout requires: SDK install + ext install (NOT build) // Checkout is for extracting files from the installed sysroot let requirements = [ @@ -562,8 +564,11 @@ mod tests { StampRequirement::ext_install("config-files"), ]; - // Verify correct stamp paths - assert_eq!(requirements[0].relative_path(), "sdk/install.stamp"); + // Verify correct stamp paths (SDK path includes local architecture) + assert_eq!( + requirements[0].relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); assert_eq!( requirements[1].relative_path(), "ext/config-files/install.stamp" @@ -579,7 +584,9 @@ mod tests { #[test] fn test_checkout_does_not_require_build_stamp() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; // Checkout only needs SDK install and ext install - NOT ext build let requirements = vec![ @@ -589,7 +596,7 @@ mod tests { // Provide only SDK and ext install stamps (no build) let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -604,8 +611,10 @@ mod tests { let install_json = serde_json::to_string(&ext_install).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}", - sdk_json, 
install_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}", + get_local_arch(), + sdk_json, + install_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -617,7 +626,9 @@ mod tests { #[test] fn test_checkout_fails_without_ext_install() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let requirements = vec![ StampRequirement::sdk_install(), @@ -626,14 +637,15 @@ mod tests { // Only SDK installed, not the extension let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::null", + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::null", + get_local_arch(), sdk_json ); @@ -649,7 +661,9 @@ mod tests { #[test] fn test_checkout_clean_lifecycle() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let requirements = vec![ StampRequirement::sdk_install(), @@ -658,7 +672,7 @@ mod tests { // Before clean: both stamps present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -673,8 +687,10 @@ mod tests { let install_json = serde_json::to_string(&ext_install).unwrap(); let output_before = format!( - "sdk/install.stamp:::{}\next/app-config/install.stamp:::{}", - sdk_json, install_json + "sdk/{}/install.stamp:::{}\next/app-config/install.stamp:::{}", + get_local_arch(), + sdk_json, + install_json ); let result_before = validate_stamps_batch(&requirements, &output_before, None); @@ -682,7 +698,8 @@ mod tests { // After ext clean: SDK still there, ext stamp gone let output_after = format!( - "sdk/install.stamp:::{}\next/app-config/install.stamp:::null", + "sdk/{}/install.stamp:::{}\next/app-config/install.stamp:::null", + get_local_arch(), sdk_json ); diff --git a/src/commands/ext/package.rs b/src/commands/ext/package.rs index b84566a..f67fd81 100644 --- a/src/commands/ext/package.rs +++ b/src/commands/ext/package.rs @@ -1218,6 +1218,8 @@ ext: #[test] fn test_package_stamp_requirements() { + use crate::utils::stamps::get_local_arch; + // ext package requires: SDK install + ext install + ext build // Verify the stamp requirements are correct let requirements = [ @@ -1226,8 +1228,11 @@ ext: StampRequirement::ext_build("my-ext"), ]; - // Verify correct stamp paths - assert_eq!(requirements[0].relative_path(), "sdk/install.stamp"); + // Verify correct stamp paths (SDK path includes local architecture) + assert_eq!( + requirements[0].relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); assert_eq!(requirements[1].relative_path(), "ext/my-ext/install.stamp"); assert_eq!(requirements[2].relative_path(), "ext/my-ext/build.stamp"); @@ -1262,7 +1267,7 @@ ext: #[test] fn test_package_fails_without_sdk_install() { - use crate::utils::stamps::validate_stamps_batch; + use crate::utils::stamps::{get_local_arch, validate_stamps_batch}; let requirements = vec![ StampRequirement::sdk_install(), @@ -1271,8 +1276,11 @@ ext: ]; // All stamps missing - let output = "sdk/install.stamp:::null\next/my-ext/install.stamp:::null\next/my-ext/build.stamp:::null"; - let result 
= validate_stamps_batch(&requirements, output, None); + let output = format!( + "sdk/{}/install.stamp:::null\next/my-ext/install.stamp:::null\next/my-ext/build.stamp:::null", + get_local_arch() + ); + let result = validate_stamps_batch(&requirements, &output, None); assert!(!result.is_satisfied()); assert_eq!(result.missing.len(), 3); @@ -1280,7 +1288,9 @@ ext: #[test] fn test_package_fails_without_ext_build() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let requirements = vec![ StampRequirement::sdk_install(), @@ -1290,7 +1300,7 @@ ext: // SDK and ext install present, but build missing let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -1305,8 +1315,10 @@ ext: let ext_json = serde_json::to_string(&ext_install_stamp).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::null", - sdk_json, ext_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::null", + get_local_arch(), + sdk_json, + ext_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -1318,7 +1330,9 @@ ext: #[test] fn test_package_succeeds_with_all_stamps() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let requirements = vec![ StampRequirement::sdk_install(), @@ -1328,7 +1342,7 @@ ext: // All stamps present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -1350,8 +1364,11 @@ ext: let ext_build_json = serde_json::to_string(&ext_build_stamp).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::{}", - sdk_json, ext_install_json, ext_build_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::{}", + get_local_arch(), + sdk_json, + ext_install_json, + ext_build_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -1362,7 +1379,9 @@ ext: #[test] fn test_package_clean_lifecycle() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let requirements = vec![ StampRequirement::sdk_install(), @@ -1372,7 +1391,7 @@ ext: // Before clean: all stamps present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -1394,8 +1413,11 @@ ext: let build_json = serde_json::to_string(&ext_build).unwrap(); let output_before = format!( - "sdk/install.stamp:::{}\next/gpu-driver/install.stamp:::{}\next/gpu-driver/build.stamp:::{}", - sdk_json, install_json, build_json + "sdk/{}/install.stamp:::{}\next/gpu-driver/install.stamp:::{}\next/gpu-driver/build.stamp:::{}", + get_local_arch(), + sdk_json, + install_json, + build_json ); let result_before = validate_stamps_batch(&requirements, &output_before, None); @@ -1406,7 +1428,8 @@ ext: // After ext clean: SDK still there, ext stamps gone (simulating rm -rf .stamps/ext/gpu-driver) let output_after = format!( - 
"sdk/install.stamp:::{}\next/gpu-driver/install.stamp:::null\next/gpu-driver/build.stamp:::null", + "sdk/{}/install.stamp:::{}\next/gpu-driver/install.stamp:::null\next/gpu-driver/build.stamp:::null", + get_local_arch(), sdk_json ); diff --git a/src/commands/hitl/server.rs b/src/commands/hitl/server.rs index fe704a7..aa8c662 100644 --- a/src/commands/hitl/server.rs +++ b/src/commands/hitl/server.rs @@ -424,7 +424,9 @@ mod tests { #[test] fn test_hitl_server_stamp_validation_all_present() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let extensions = vec!["gpu-driver".to_string()]; @@ -436,7 +438,7 @@ mod tests { // All stamps present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -458,8 +460,11 @@ mod tests { let build_json = serde_json::to_string(&ext_build).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/gpu-driver/install.stamp:::{}\next/gpu-driver/build.stamp:::{}", - sdk_json, install_json, build_json + "sdk/{}/install.stamp:::{}\next/gpu-driver/install.stamp:::{}\next/gpu-driver/build.stamp:::{}", + get_local_arch(), + sdk_json, + install_json, + build_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -468,7 +473,9 @@ mod tests { #[test] fn test_hitl_server_stamp_validation_missing_build() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let extensions = vec!["app".to_string()]; @@ -480,7 +487,7 @@ mod tests { // SDK and install present, build missing let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -495,8 +502,10 @@ mod tests { let install_json = serde_json::to_string(&ext_install).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/app/install.stamp:::{}\next/app/build.stamp:::null", - sdk_json, install_json + "sdk/{}/install.stamp:::{}\next/app/install.stamp:::{}\next/app/build.stamp:::null", + get_local_arch(), + sdk_json, + install_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -507,7 +516,9 @@ mod tests { #[test] fn test_hitl_server_clean_lifecycle() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let extensions = vec!["network-driver".to_string()]; @@ -519,7 +530,7 @@ mod tests { // All stamps present before clean let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -541,8 +552,11 @@ mod tests { let build_json = serde_json::to_string(&ext_build).unwrap(); let output_before = format!( - "sdk/install.stamp:::{}\next/network-driver/install.stamp:::{}\next/network-driver/build.stamp:::{}", - sdk_json, install_json, build_json + "sdk/{}/install.stamp:::{}\next/network-driver/install.stamp:::{}\next/network-driver/build.stamp:::{}", + get_local_arch(), + sdk_json, + install_json, + build_json ); let result_before = validate_stamps_batch(&requirements, &output_before, None); @@ -550,7 +564,8 @@ mod tests { // After ext clean network-driver: SDK still there, ext stamps gone let 
output_after = format!( - "sdk/install.stamp:::{}\next/network-driver/install.stamp:::null\next/network-driver/build.stamp:::null", + "sdk/{}/install.stamp:::{}\next/network-driver/install.stamp:::null\next/network-driver/build.stamp:::null", + get_local_arch(), sdk_json ); @@ -565,7 +580,9 @@ mod tests { #[test] fn test_hitl_server_multiple_extensions_partial_clean() { - use crate::utils::stamps::{validate_stamps_batch, Stamp, StampInputs, StampOutputs}; + use crate::utils::stamps::{ + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + }; let extensions = vec!["ext-a".to_string(), "ext-b".to_string()]; @@ -577,7 +594,7 @@ mod tests { // All stamps present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -615,8 +632,11 @@ mod tests { // After cleaning only ext-a: ext-a stamps return null, ext-b stamps still present let output_partial = format!( - "sdk/install.stamp:::{}\next/ext-a/install.stamp:::null\next/ext-a/build.stamp:::null\next/ext-b/install.stamp:::{}\next/ext-b/build.stamp:::{}", - sdk_json, ext_b_install_json, ext_b_build_json + "sdk/{}/install.stamp:::{}\next/ext-a/install.stamp:::null\next/ext-a/build.stamp:::null\next/ext-b/install.stamp:::{}\next/ext-b/build.stamp:::{}", + get_local_arch(), + sdk_json, + ext_b_install_json, + ext_b_build_json ); let result = validate_stamps_batch(&requirements, &output_partial, None); diff --git a/src/commands/runtime/clean.rs b/src/commands/runtime/clean.rs index cf50b98..e089af6 100644 --- a/src/commands/runtime/clean.rs +++ b/src/commands/runtime/clean.rs @@ -300,7 +300,8 @@ mod tests { #[test] fn test_clean_then_build_requires_reinstall() { use crate::utils::stamps::{ - validate_stamps_batch, Stamp, StampInputs, StampOutputs, StampRequirement, + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + StampRequirement, }; // Runtime build requirements after cleaning @@ -311,7 +312,7 @@ mod tests { // Before clean: all satisfied let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -326,8 +327,10 @@ mod tests { let rt_json = serde_json::to_string(&rt_install).unwrap(); let output_before = format!( - "sdk/install.stamp:::{}\nruntime/my-runtime/install.stamp:::{}", - sdk_json, rt_json + "sdk/{}/install.stamp:::{}\nruntime/my-runtime/install.stamp:::{}", + get_local_arch(), + sdk_json, + rt_json ); let result_before = validate_stamps_batch(&requirements, &output_before, None); @@ -335,7 +338,8 @@ mod tests { // After runtime clean: SDK still there, runtime stamps gone let output_after = format!( - "sdk/install.stamp:::{}\nruntime/my-runtime/install.stamp:::null", + "sdk/{}/install.stamp:::{}\nruntime/my-runtime/install.stamp:::null", + get_local_arch(), sdk_json ); @@ -351,7 +355,8 @@ mod tests { #[test] fn test_runtime_clean_preserves_sdk_and_ext_stamps() { use crate::utils::stamps::{ - validate_stamps_batch, Stamp, StampInputs, StampOutputs, StampRequirement, + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + StampRequirement, }; // Requirements that span SDK, extensions, and runtime @@ -364,7 +369,7 @@ mod tests { // All present before clean let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); @@ -396,8 +401,11 @@ mod tests { // After runtime clean: only runtime stamp is gone // SDK and ext stamps should 
remain let output_after = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::{}\nruntime/my-runtime/install.stamp:::null", - sdk_json, ext_install_json, ext_build_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::{}\nruntime/my-runtime/install.stamp:::null", + get_local_arch(), + sdk_json, + ext_install_json, + ext_build_json ); let result = validate_stamps_batch(&requirements, &output_after, None); diff --git a/src/commands/runtime/provision.rs b/src/commands/runtime/provision.rs index 519dd3c..bb46edf 100644 --- a/src/commands/runtime/provision.rs +++ b/src/commands/runtime/provision.rs @@ -4,9 +4,11 @@ use crate::utils::{ config::load_config, container::{RunConfig, SdkContainer}, output::{print_info, print_success, OutputLevel}, + remote::{RemoteHost, SshClient}, stamps::{ - generate_batch_read_stamps_script, generate_write_stamp_script, resolve_required_stamps, - validate_stamps_batch, Stamp, StampCommand, StampComponent, StampInputs, StampOutputs, + generate_batch_read_stamps_script, generate_write_stamp_script, + resolve_required_stamps_for_arch, validate_stamps_batch, Stamp, StampCommand, + StampComponent, StampInputs, StampOutputs, }, target::resolve_target_required, volume::VolumeManager, @@ -87,17 +89,38 @@ impl RuntimeProvisionCommand { // Resolve target architecture let target_arch = resolve_target_required(self.config.target.as_deref(), &config)?; + // Detect remote host architecture if using --runs-on + // This is needed to check if the SDK is installed for the remote's architecture + let remote_arch = if let Some(ref runs_on) = self.config.runs_on { + let remote_host = RemoteHost::parse(runs_on)?; + let ssh = SshClient::new(remote_host).with_verbose(self.config.verbose); + let arch = ssh.get_architecture().await.with_context(|| { + format!("Failed to detect architecture of remote host '{}'", runs_on) + })?; + if self.config.verbose { + print_info( + &format!("Remote host architecture: {}", arch), + OutputLevel::Normal, + ); + } + Some(arch) + } else { + None + }; + // Validate stamps before proceeding (unless --no-stamps) if !self.config.no_stamps { let container_helper = SdkContainer::from_config(&self.config.config_path, &config)? 
.verbose(self.config.verbose); // Provision requires runtime build stamp - let required = resolve_required_stamps( + // When using --runs-on, check for SDK stamp matching remote's architecture + let required = resolve_required_stamps_for_arch( StampCommand::Provision, StampComponent::Runtime, Some(&self.config.runtime_name), &[], + remote_arch.as_deref(), ); // Batch all stamp reads into a single container invocation for performance @@ -134,10 +157,11 @@ impl RuntimeProvisionCommand { let validation = validate_stamps_batch(&required, output_str, None); if !validation.is_satisfied() { - let error = validation.into_error(&format!( - "Cannot provision runtime '{}'", - self.config.runtime_name - )); + // Include the --runs-on target in error message for SDK install hints + let error = validation.into_error_with_runs_on( + &format!("Cannot provision runtime '{}'", self.config.runtime_name), + self.config.runs_on.as_deref(), + ); return Err(error.into()); } } diff --git a/src/commands/sdk/compile.rs b/src/commands/sdk/compile.rs index d10af36..73ba11f 100644 --- a/src/commands/sdk/compile.rs +++ b/src/commands/sdk/compile.rs @@ -427,13 +427,16 @@ dependencies = { gcc = "*" } #[test] fn test_compile_stamp_requirements() { - use crate::utils::stamps::StampRequirement; + use crate::utils::stamps::{get_local_arch, StampRequirement}; // sdk compile requires only: SDK install let requirements = [StampRequirement::sdk_install()]; - // Verify correct stamp path - assert_eq!(requirements[0].relative_path(), "sdk/install.stamp"); + // Verify correct stamp path (now includes local architecture) + assert_eq!( + requirements[0].relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); // Verify fix command is correct assert_eq!(requirements[0].fix_command(), "avocado sdk install"); @@ -454,13 +457,13 @@ dependencies = { gcc = "*" } #[test] fn test_compile_fails_without_sdk_install() { - use crate::utils::stamps::{validate_stamps_batch, StampRequirement}; + use crate::utils::stamps::{get_local_arch, validate_stamps_batch, StampRequirement}; let requirements = vec![StampRequirement::sdk_install()]; // SDK stamp missing - let output = "sdk/install.stamp:::null"; - let result = validate_stamps_batch(&requirements, output, None); + let output = format!("sdk/{}/install.stamp:::null", get_local_arch()); + let result = validate_stamps_batch(&requirements, &output, None); assert!(!result.is_satisfied()); assert_eq!(result.missing.len(), 1); @@ -470,19 +473,21 @@ dependencies = { gcc = "*" } #[test] fn test_compile_succeeds_with_sdk_install() { use crate::utils::stamps::{ - validate_stamps_batch, Stamp, StampInputs, StampOutputs, StampRequirement, + get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, + StampRequirement, }; let requirements = vec![StampRequirement::sdk_install()]; + // SDK stamp now uses host architecture let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); - let output = format!("sdk/install.stamp:::{}", sdk_json); + let output = format!("sdk/{}/install.stamp:::{}", get_local_arch(), sdk_json); let result = validate_stamps_batch(&requirements, &output, None); assert!(result.is_satisfied()); @@ -492,26 +497,27 @@ dependencies = { gcc = "*" } #[test] fn test_compile_clean_lifecycle() { use crate::utils::stamps::{ - validate_stamps_batch, Stamp, StampInputs, StampOutputs, StampRequirement, + get_local_arch, 
validate_stamps_batch, Stamp, StampInputs, StampOutputs, + StampRequirement, }; let requirements = vec![StampRequirement::sdk_install()]; // Before clean: SDK stamp present let sdk_stamp = Stamp::sdk_install( - "qemux86-64", + get_local_arch(), StampInputs::new("hash1".to_string()), StampOutputs::default(), ); let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); - let output_before = format!("sdk/install.stamp:::{}", sdk_json); + let output_before = format!("sdk/{}/install.stamp:::{}", get_local_arch(), sdk_json); let result_before = validate_stamps_batch(&requirements, &output_before, None); assert!(result_before.is_satisfied(), "Should pass before clean"); // After clean --stamps: SDK stamp gone (simulating rm -rf .stamps/) - let output_after = "sdk/install.stamp:::null"; - let result_after = validate_stamps_batch(&requirements, output_after, None); + let output_after = format!("sdk/{}/install.stamp:::null", get_local_arch()); + let result_after = validate_stamps_batch(&requirements, &output_after, None); assert!( !result_after.is_satisfied(), "Should fail after clean --stamps" diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index d2d187f..24d6eab 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -9,7 +9,9 @@ use crate::utils::{ container::{RunConfig, SdkContainer}, lockfile::{build_package_spec_with_lock, LockFile, SysrootType}, output::{print_info, print_success, OutputLevel}, - stamps::{compute_sdk_input_hash, generate_write_stamp_script, Stamp, StampOutputs}, + stamps::{ + compute_sdk_input_hash, generate_write_stamp_script, get_local_arch, Stamp, StampOutputs, + }, target::validate_and_log_target, }; @@ -844,10 +846,14 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ } // Write SDK install stamp (unless --no-stamps) + // The stamp uses the host architecture (CPU arch where SDK runs) rather than + // the target architecture (what you're building for). This allows --runs-on + // to detect if the SDK is installed for the remote's architecture. if !self.no_stamps { let inputs = compute_sdk_input_hash(&composed.merged_value)?; let outputs = StampOutputs::default(); - let stamp = Stamp::sdk_install(&target, inputs, outputs); + let host_arch = get_local_arch(); + let stamp = Stamp::sdk_install(host_arch, inputs, outputs); let stamp_script = generate_write_stamp_script(&stamp)?; let run_config = RunConfig { diff --git a/src/utils/remote.rs b/src/utils/remote.rs index 0cd6acd..4024e00 100644 --- a/src/utils/remote.rs +++ b/src/utils/remote.rs @@ -301,6 +301,58 @@ impl SshClient { pub fn remote(&self) -> &RemoteHost { &self.remote } + + /// Get the CPU architecture of the remote host + /// + /// Returns the architecture string from `uname -m` (e.g., "x86_64", "aarch64"). + /// This is used to verify that the SDK installed locally is compatible with the + /// remote host's architecture when using `--runs-on`. 
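+    ///
+    /// A small illustrative sketch (`ssh` is an already-constructed
+    /// `SshClient`; the arch values are examples, not an exhaustive list):
+    ///
+    /// ```ignore
+    /// let arch = ssh.get_architecture().await?;
+    /// // e.g. "x86_64" on most PCs, "aarch64" on ARM boards -- whatever
+    /// // `uname -m` prints on the remote.
+    /// ```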
+    pub async fn get_architecture(&self) -> Result<String> {
+        if self.verbose {
+            print_info(
+                &format!(
+                    "Checking CPU architecture on {}...",
+                    self.remote.ssh_target()
+                ),
+                OutputLevel::Normal,
+            );
+        }
+
+        let output = AsyncCommand::new("ssh")
+            .args([
+                "-o",
+                "BatchMode=yes",
+                "-o",
+                "ConnectTimeout=10",
+                "-o",
+                "StrictHostKeyChecking=accept-new",
+                &self.remote.ssh_target(),
+                "uname -m",
+            ])
+            .output()
+            .await
+            .context("Failed to get remote architecture")?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            anyhow::bail!(
+                "Failed to get architecture from '{}': {}",
+                self.remote.ssh_target(),
+                stderr.trim()
+            );
+        }
+
+        let arch = String::from_utf8_lossy(&output.stdout).trim().to_string();
+
+        if self.verbose {
+            print_info(
+                &format!("Remote architecture: {}", arch),
+                OutputLevel::Normal,
+            );
+        }
+
+        Ok(arch)
+    }
 }
 
 /// Manager for creating and removing NFS-backed Docker volumes on remote hosts
diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs
index 443f154..4d21a37 100644
--- a/src/utils/stamps.rs
+++ b/src/utils/stamps.rs
@@ -13,6 +13,25 @@ use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
 use std::fmt;
 
+/// Get the local machine's CPU architecture
+///
+/// Returns the architecture string (e.g., "x86_64", "aarch64") for the current machine.
+/// This is used to track which host architecture the SDK was installed for.
+pub fn get_local_arch() -> &'static str {
+    #[cfg(target_arch = "x86_64")]
+    {
+        "x86_64"
+    }
+    #[cfg(target_arch = "aarch64")]
+    {
+        "aarch64"
+    }
+    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
+    {
+        std::env::consts::ARCH
+    }
+}
+
 /// Current stamp format version
 pub const STAMP_VERSION: u32 = 1;
 
@@ -270,9 +289,12 @@ impl Stamp {
     }
 
     /// Get the stamp file path relative to $AVOCADO_PREFIX/.stamps/
+    ///
+    /// For SDK stamps, the path includes the target architecture (which represents
+    /// the host architecture where the SDK runs) to support --runs-on with different architectures.
     pub fn relative_path(&self) -> String {
         match (&self.component, &self.component_name) {
-            (StampComponent::Sdk, _) => format!("sdk/{}.stamp", self.command),
+            (StampComponent::Sdk, _) => format!("sdk/{}/{}.stamp", self.target, self.command),
             (StampComponent::Extension, Some(name)) => {
                 format!("ext/{}/{}.stamp", name, self.command)
             }
@@ -320,6 +342,11 @@ pub struct StampRequirement {
     pub command: StampCommand,
     pub component: StampComponent,
     pub component_name: Option<String>,
+    /// Host architecture for SDK stamps (e.g., "x86_64", "aarch64").
+    /// This tracks the CPU architecture of the machine running the SDK container,
+    /// which is different from the target architecture (what you're building FOR).
+    /// Required for SDK stamps to support --runs-on with different architectures.
+    pub host_arch: Option<String>,
 }
 
 impl StampRequirement {
@@ -328,12 +355,26 @@ impl StampRequirement {
             command,
             component,
             component_name: name.map(|s| s.to_string()),
+            host_arch: None,
         }
     }
 
-    /// SDK install requirement
+    /// SDK install requirement for the local host architecture
     pub fn sdk_install() -> Self {
-        Self::new(StampCommand::Install, StampComponent::Sdk, None)
+        Self::sdk_install_for_arch(get_local_arch())
+    }
+
+    /// SDK install requirement for a specific host architecture
+    ///
+    /// Use this when checking SDK stamps for --runs-on with a remote host
+    /// that may have a different architecture than the local machine.
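+    ///
+    /// Illustrative sketch of the resulting stamp path:
+    ///
+    /// ```ignore
+    /// let req = StampRequirement::sdk_install_for_arch("aarch64");
+    /// assert_eq!(req.relative_path(), "sdk/aarch64/install.stamp");
+    /// ```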
+ pub fn sdk_install_for_arch(arch: &str) -> Self { + Self { + command: StampCommand::Install, + component: StampComponent::Sdk, + component_name: None, + host_arch: Some(arch.to_string()), + } } /// Extension install requirement @@ -374,13 +415,22 @@ impl StampRequirement { } /// Get the stamp file path relative to $AVOCADO_PREFIX/.stamps/ + /// + /// For SDK stamps, the path includes the host architecture to support + /// running on remotes with different CPU architectures via --runs-on. pub fn relative_path(&self) -> String { - match (&self.component, &self.component_name) { - (StampComponent::Sdk, _) => format!("sdk/{}.stamp", self.command), - (StampComponent::Extension, Some(name)) => { + match (&self.component, &self.component_name, &self.host_arch) { + (StampComponent::Sdk, _, Some(arch)) => { + format!("sdk/{}/{}.stamp", arch, self.command) + } + (StampComponent::Sdk, _, None) => { + // Fallback for SDK without explicit arch (use local arch) + format!("sdk/{}/{}.stamp", get_local_arch(), self.command) + } + (StampComponent::Extension, Some(name), _) => { format!("ext/{}/{}.stamp", name, self.command) } - (StampComponent::Runtime, Some(name)) => { + (StampComponent::Runtime, Some(name), _) => { format!("runtime/{}/{}.stamp", name, self.command) } _ => panic!("Component name required for Extension and Runtime"), @@ -389,12 +439,15 @@ impl StampRequirement { /// Human-readable description pub fn description(&self) -> String { - match (&self.component, &self.component_name) { - (StampComponent::Sdk, _) => format!("SDK {}", self.command), - (StampComponent::Extension, Some(name)) => { + match (&self.component, &self.component_name, &self.host_arch) { + (StampComponent::Sdk, _, Some(arch)) => { + format!("SDK {} ({})", self.command, arch) + } + (StampComponent::Sdk, _, None) => format!("SDK {}", self.command), + (StampComponent::Extension, Some(name), _) => { format!("extension '{}' {}", name, self.command) } - (StampComponent::Runtime, Some(name)) => { + (StampComponent::Runtime, Some(name), _) => { format!("runtime '{}' {}", name, self.command) } _ => format!("{} {}", self.component, self.command), @@ -402,9 +455,21 @@ impl StampRequirement { } /// Suggested fix command + /// + /// For SDK stamps with a specific host architecture (from --runs-on), the fix + /// command will suggest running on the remote to install the SDK for that arch. 
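+    ///
+    /// Illustrative sketch ("user@build-box" is a hypothetical remote):
+    ///
+    /// ```ignore
+    /// let req = StampRequirement::sdk_install_for_arch("aarch64");
+    /// assert_eq!(
+    ///     req.fix_command_with_remote(Some("user@build-box")),
+    ///     "avocado sdk install --runs-on user@build-box"
+    /// );
+    /// ```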
+    #[allow(dead_code)]
     pub fn fix_command(&self) -> String {
+        self.fix_command_with_remote(None)
+    }
+
+    /// Suggested fix command with optional remote host for --runs-on
+    pub fn fix_command_with_remote(&self, runs_on: Option<&str>) -> String {
         match (&self.component, &self.component_name, &self.command) {
-            (StampComponent::Sdk, _, StampCommand::Install) => "avocado sdk install".to_string(),
+            (StampComponent::Sdk, _, StampCommand::Install) => match runs_on {
+                Some(remote) => format!("avocado sdk install --runs-on {}", remote),
+                None => "avocado sdk install".to_string(),
+            },
             (StampComponent::Extension, Some(name), StampCommand::Install) => {
                 format!("avocado ext install -e {}", name)
             }
@@ -490,12 +555,23 @@ impl StampValidationResult {
         self.stale.push((req, reason));
     }
 
+    /// Convenience wrapper over [Self::into_error_with_runs_on] with no remote host.
     /// Convert to an error with actionable messages
     pub fn into_error(self, context: &str) -> StampValidationError {
+        self.into_error_with_runs_on(context, None)
+    }
+
+    /// Convert to an error with actionable messages, including --runs-on hint
+    pub fn into_error_with_runs_on(
+        self,
+        context: &str,
+        runs_on: Option<&str>,
+    ) -> StampValidationError {
         StampValidationError {
             context: context.to_string(),
             missing: self.missing,
             stale: self.stale,
+            runs_on: runs_on.map(|s| s.to_string()),
         }
     }
 }
@@ -506,6 +582,8 @@ pub struct StampValidationError {
     pub context: String,
     pub missing: Vec<StampRequirement>,
     pub stale: Vec<(StampRequirement, String)>,
+    /// Remote host if using --runs-on (for fix command suggestions)
+    pub runs_on: Option<String>,
 }
 
 impl std::error::Error for StampValidationError {}
@@ -538,12 +616,13 @@ impl fmt::Display for StampValidationError {
 
         writeln!(f, "To fix:")?;
 
-        // Collect unique fix commands
+        // Collect unique fix commands, using runs_on hint for SDK install commands
+        let runs_on_ref = self.runs_on.as_deref();
         let mut fixes: Vec<String> = self
             .missing
             .iter()
             .chain(self.stale.iter().map(|(req, _)| req))
-            .map(|req| req.fix_command())
+            .map(|req| req.fix_command_with_remote(runs_on_ref))
             .collect();
         fixes.sort();
         fixes.dedup();
@@ -773,34 +852,54 @@ pub fn resolve_required_stamps(
     component_name: Option<&str>,
     ext_dependencies: &[String],
 ) -> Vec<StampRequirement> {
+    resolve_required_stamps_for_arch(cmd, component, component_name, ext_dependencies, None)
+}
+
+/// Resolve required stamps with a specific host architecture for SDK stamps
+///
+/// Use this when using `--runs-on` with a remote host that may have a different
+/// CPU architecture than the local machine. The `host_arch` parameter specifies
+/// the architecture of the remote host (e.g., "aarch64", "x86_64").
+///
+/// When `host_arch` is None, the local machine's architecture is used.
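+///
+/// Illustrative sketch ("my-ext" is a hypothetical extension name):
+///
+/// ```ignore
+/// let reqs = resolve_required_stamps_for_arch(
+///     StampCommand::Build,
+///     StampComponent::Extension,
+///     Some("my-ext"),
+///     &[],
+///     Some("aarch64"),
+/// );
+/// // -> [SDK install (aarch64), extension 'my-ext' install]
+/// ```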
+pub fn resolve_required_stamps_for_arch( + cmd: StampCommand, + component: StampComponent, + component_name: Option<&str>, + ext_dependencies: &[String], + host_arch: Option<&str>, +) -> Vec { + // Helper to create SDK install requirement with the correct arch + let sdk_install = || match host_arch { + Some(arch) => StampRequirement::sdk_install_for_arch(arch), + None => StampRequirement::sdk_install(), + }; + match (cmd, component) { // SDK install has no dependencies (StampCommand::Install, StampComponent::Sdk) => vec![], // Extension install requires SDK install (StampCommand::Install, StampComponent::Extension) => { - vec![StampRequirement::sdk_install()] + vec![sdk_install()] } // Runtime install requires SDK install (StampCommand::Install, StampComponent::Runtime) => { - vec![StampRequirement::sdk_install()] + vec![sdk_install()] } // Extension build requires SDK install + own extension install (StampCommand::Build, StampComponent::Extension) => { let ext_name = component_name.expect("Extension name required"); - vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install(ext_name), - ] + vec![sdk_install(), StampRequirement::ext_install(ext_name)] } // Extension image requires SDK install + own extension install + own extension build (StampCommand::Image, StampComponent::Extension) => { let ext_name = component_name.expect("Extension name required"); vec![ - StampRequirement::sdk_install(), + sdk_install(), StampRequirement::ext_install(ext_name), StampRequirement::ext_build(ext_name), ] @@ -811,7 +910,7 @@ pub fn resolve_required_stamps( (StampCommand::Build, StampComponent::Runtime) => { let runtime_name = component_name.expect("Runtime name required"); let mut reqs = vec![ - StampRequirement::sdk_install(), + sdk_install(), StampRequirement::runtime_install(runtime_name), ]; @@ -853,10 +952,24 @@ pub fn resolve_required_stamps_for_runtime_build( runtime_name: &str, ext_dependencies: &[RuntimeExtDep], ) -> Vec { - let mut reqs = vec![ - StampRequirement::sdk_install(), - StampRequirement::runtime_install(runtime_name), - ]; + resolve_required_stamps_for_runtime_build_with_arch(runtime_name, ext_dependencies, None) +} + +/// Resolve required stamps for runtime build with a specific host architecture +/// +/// Use this when using `--runs-on` with a remote host that may have a different +/// CPU architecture than the local machine. 
+pub fn resolve_required_stamps_for_runtime_build_with_arch( + runtime_name: &str, + ext_dependencies: &[RuntimeExtDep], + host_arch: Option<&str>, +) -> Vec { + let sdk_install = match host_arch { + Some(arch) => StampRequirement::sdk_install_for_arch(arch), + None => StampRequirement::sdk_install(), + }; + + let mut reqs = vec![sdk_install, StampRequirement::runtime_install(runtime_name)]; for ext_dep in ext_dependencies { let ext_name = ext_dep.name(); @@ -967,8 +1080,12 @@ mod tests { let inputs = StampInputs::new("sha256:abc123".to_string()); let outputs = StampOutputs::default(); - let sdk_stamp = Stamp::sdk_install("qemux86-64", inputs.clone(), outputs.clone()); - assert_eq!(sdk_stamp.relative_path(), "sdk/install.stamp"); + // SDK stamps now include the host architecture in the path + let sdk_stamp = Stamp::sdk_install("x86_64", inputs.clone(), outputs.clone()); + assert_eq!(sdk_stamp.relative_path(), "sdk/x86_64/install.stamp"); + + let sdk_stamp_arm = Stamp::sdk_install("aarch64", inputs.clone(), outputs.clone()); + assert_eq!(sdk_stamp_arm.relative_path(), "sdk/aarch64/install.stamp"); let ext_stamp = Stamp::ext_install("my-ext", "qemux86-64", inputs.clone(), outputs.clone()); assert_eq!(ext_stamp.relative_path(), "ext/my-ext/install.stamp"); @@ -983,7 +1100,11 @@ mod tests { #[test] fn test_stamp_requirement_description() { let req = StampRequirement::sdk_install(); - assert_eq!(req.description(), "SDK install"); + // SDK description now includes architecture + assert_eq!( + req.description(), + format!("SDK install ({})", get_local_arch()) + ); assert_eq!(req.fix_command(), "avocado sdk install"); let req = StampRequirement::ext_install("gpu-driver"); @@ -1184,7 +1305,8 @@ mod tests { // Check error message contains key elements assert!(error_str.contains("Cannot build runtime 'my-runtime'")); assert!(error_str.contains("Missing steps:")); - assert!(error_str.contains("sdk/install.stamp")); + // SDK stamp path now includes local architecture + assert!(error_str.contains(&format!("sdk/{}/install.stamp", get_local_arch()))); assert!(error_str.contains("ext/gpu-driver/install.stamp")); assert!(error_str.contains("Stale steps")); assert!(error_str.contains("config changed")); @@ -1494,8 +1616,8 @@ mod tests { let script = generate_batch_read_stamps_script(&requirements); - // Should contain all three stamp paths - assert!(script.contains("sdk/install.stamp")); + // Should contain all three stamp paths (SDK path includes local arch) + assert!(script.contains(&format!("sdk/{}/install.stamp", get_local_arch()))); assert!(script.contains("ext/my-ext/install.stamp")); assert!(script.contains("ext/my-ext/build.stamp")); @@ -1509,14 +1631,21 @@ mod tests { #[test] fn test_parse_batch_stamps_output() { - let output = r#"sdk/install.stamp:::{"version":"1.0.0","command":"install","component":"sdk"} -ext/my-ext/install.stamp:::{"version":"1.0.0","command":"install","component":"ext"} -ext/my-ext/build.stamp:::null"#; + let arch = get_local_arch(); + let output = format!( + r#"sdk/{}/install.stamp:::{{"version":"1.0.0","command":"install","component":"sdk"}} +ext/my-ext/install.stamp:::{{"version":"1.0.0","command":"install","component":"ext"}} +ext/my-ext/build.stamp:::null"#, + arch + ); - let result = parse_batch_stamps_output(output); + let result = parse_batch_stamps_output(&output); assert_eq!(result.len(), 3); - assert!(result.get("sdk/install.stamp").unwrap().is_some()); + assert!(result + .get(&format!("sdk/{}/install.stamp", arch)) + .unwrap() + .is_some()); 
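+        // Editor's note (not part of the original patch): each record in the
+        // batch output is `RELATIVE_PATH:::JSON` on its own line, and a missing
+        // stamp is encoded as the literal token `null` after the `:::`.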
assert!(result.get("ext/my-ext/install.stamp").unwrap().is_some()); assert!(result.get("ext/my-ext/build.stamp").unwrap().is_none()); } @@ -1546,8 +1675,10 @@ ext/my-ext/build.stamp:::null"#; let ext_json = serde_json::to_string(&ext_stamp).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}", - sdk_json, ext_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}", + get_local_arch(), + sdk_json, + ext_json ); let result = validate_stamps_batch(&requirements, &output, None); @@ -1575,7 +1706,8 @@ ext/my-ext/build.stamp:::null"#; let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::null\next/my-ext/build.stamp:::null", + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::null\next/my-ext/build.stamp:::null", + get_local_arch(), sdk_json ); @@ -1621,8 +1753,11 @@ ext/my-ext/build.stamp:::null"#; assert_eq!(reqs[1].fix_command(), "avocado ext install -e my-ext"); assert_eq!(reqs[2].fix_command(), "avocado ext build -e my-ext"); - // Verify descriptions are helpful - assert_eq!(reqs[0].description(), "SDK install"); + // Verify descriptions are helpful (SDK now includes architecture) + assert_eq!( + reqs[0].description(), + format!("SDK install ({})", get_local_arch()) + ); assert_eq!(reqs[1].description(), "extension 'my-ext' install"); assert_eq!(reqs[2].description(), "extension 'my-ext' build"); } @@ -1649,7 +1784,10 @@ ext/my-ext/build.stamp:::null"#; assert_eq!(reqs.len(), 1); assert_eq!(reqs[0].fix_command(), "avocado sdk install"); - assert_eq!(reqs[0].relative_path(), "sdk/install.stamp"); + assert_eq!( + reqs[0].relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); } #[test] @@ -1665,8 +1803,11 @@ ext/my-ext/build.stamp:::null"#; // Total: 1 SDK + 2 per extension = 5 assert_eq!(reqs.len(), 5); - // Verify all paths are correct - assert_eq!(reqs[0].relative_path(), "sdk/install.stamp"); + // Verify all paths are correct (SDK path includes local arch) + assert_eq!( + reqs[0].relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); assert_eq!(reqs[1].relative_path(), "ext/ext-a/install.stamp"); assert_eq!(reqs[2].relative_path(), "ext/ext-a/build.stamp"); assert_eq!(reqs[3].relative_path(), "ext/ext-b/install.stamp"); @@ -1754,19 +1895,22 @@ ext/my-ext/build.stamp:::null"#; #[test] fn test_sdk_clean_stamp_path_matches_sdk_install() { - // SDK clean should remove stamps at sdk/ + // SDK clean should remove stamps at sdk/{arch}/ let install_stamp = StampRequirement::sdk_install(); - assert_eq!(install_stamp.relative_path(), "sdk/install.stamp"); + assert_eq!( + install_stamp.relative_path(), + format!("sdk/{}/install.stamp", get_local_arch()) + ); - // Clean removes: rm -rf "$AVOCADO_PREFIX/.stamps/sdk" + // Clean removes: rm -rf "$AVOCADO_PREFIX/.stamps/sdk/{arch}" let path = install_stamp.relative_path(); let parent = std::path::Path::new(&path) .parent() .unwrap() .to_str() .unwrap(); - assert_eq!(parent, "sdk"); + assert_eq!(parent, format!("sdk/{}", get_local_arch())); } #[test] @@ -1797,15 +1941,18 @@ ext/my-ext/build.stamp:::null"#; // Before clean: all satisfied let output_before = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}", - sdk_json, ext_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}", + get_local_arch(), + sdk_json, + ext_json ); let result_before = validate_stamps_batch(&requirements, &output_before, None); assert!(result_before.is_satisfied()); // After ext clean: 
SDK still there, ext stamps gone let output_after_ext_clean = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::null", + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::null", + get_local_arch(), sdk_json ); let result_after = validate_stamps_batch(&requirements, &output_after_ext_clean, None); @@ -1829,13 +1976,16 @@ ext/my-ext/build.stamp:::null"#; ]; // After clean --stamps: all stamps return null - let output = r#"sdk/install.stamp:::null + let output = format!( + r#"sdk/{}/install.stamp:::null ext/ext-a/install.stamp:::null ext/ext-a/build.stamp:::null runtime/my-runtime/install.stamp:::null -runtime/my-runtime/build.stamp:::null"#; +runtime/my-runtime/build.stamp:::null"#, + get_local_arch() + ); - let result = validate_stamps_batch(&requirements, output, None); + let result = validate_stamps_batch(&requirements, &output, None); assert!(!result.is_satisfied()); assert!(result.satisfied.is_empty()); @@ -1899,8 +2049,10 @@ runtime/my-runtime/build.stamp:::null"#; ]; let output = format!( - "sdk/install.stamp:::{}\next/my-ext/install.stamp:::{}", - sdk_json, ext_json + "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}", + get_local_arch(), + sdk_json, + ext_json ); // With changed inputs (simulating config change) @@ -1949,4 +2101,104 @@ runtime/my-runtime/build.stamp:::null"#; assert!(msg.contains("Stale steps")); assert!(msg.contains("config hash changed")); } + + // ======================================================================== + // Architecture-Specific SDK Stamp Tests + // ======================================================================== + + #[test] + fn test_sdk_install_stamp_uses_host_architecture() { + // SDK stamps now use the host architecture in the path + let local_arch = get_local_arch(); + + let req = StampRequirement::sdk_install(); + assert_eq!(req.host_arch, Some(local_arch.to_string())); + assert_eq!( + req.relative_path(), + format!("sdk/{}/install.stamp", local_arch) + ); + } + + #[test] + fn test_sdk_install_for_specific_architecture() { + // Test creating SDK stamp requirement for a specific architecture + let req_x86 = StampRequirement::sdk_install_for_arch("x86_64"); + assert_eq!(req_x86.host_arch, Some("x86_64".to_string())); + assert_eq!(req_x86.relative_path(), "sdk/x86_64/install.stamp"); + + let req_arm = StampRequirement::sdk_install_for_arch("aarch64"); + assert_eq!(req_arm.host_arch, Some("aarch64".to_string())); + assert_eq!(req_arm.relative_path(), "sdk/aarch64/install.stamp"); + } + + #[test] + fn test_sdk_stamps_for_different_architectures_are_distinct() { + // Stamps for different architectures should have different paths + let req_x86 = StampRequirement::sdk_install_for_arch("x86_64"); + let req_arm = StampRequirement::sdk_install_for_arch("aarch64"); + + assert_ne!(req_x86.relative_path(), req_arm.relative_path()); + assert_ne!(req_x86, req_arm); + } + + #[test] + fn test_resolve_required_stamps_for_arch() { + // Resolving stamps for a specific architecture + // Runtime build (which provision depends on) requires SDK install + let reqs = resolve_required_stamps_for_arch( + StampCommand::Build, + StampComponent::Runtime, + Some("my-runtime"), + &[], + Some("aarch64"), + ); + + // Should include SDK stamp for aarch64 (runtime build requires SDK) + assert!(reqs + .iter() + .any(|r| r.relative_path() == "sdk/aarch64/install.stamp")); + } + + #[test] + fn test_sdk_description_includes_architecture() { + let req = StampRequirement::sdk_install_for_arch("aarch64"); + 
assert!(req.description().contains("aarch64")); + } + + #[test] + fn test_fix_command_with_runs_on() { + let req = StampRequirement::sdk_install_for_arch("aarch64"); + + // Without runs-on, should suggest regular install + assert_eq!(req.fix_command(), "avocado sdk install"); + + // With runs-on, should suggest install on the remote + assert_eq!( + req.fix_command_with_remote(Some("user@remote")), + "avocado sdk install --runs-on user@remote" + ); + } + + #[test] + fn test_validation_error_includes_runs_on_hint() { + let mut result = StampValidationResult::new(); + result.add_missing(StampRequirement::sdk_install_for_arch("aarch64")); + + // Without runs_on, fix should be regular install + let error = result.into_error("Cannot provision"); + let msg = error.to_string(); + assert!(msg.contains("avocado sdk install")); + assert!(!msg.contains("--runs-on")); + } + + #[test] + fn test_validation_error_with_runs_on_includes_remote_in_fix() { + let mut result = StampValidationResult::new(); + result.add_missing(StampRequirement::sdk_install_for_arch("aarch64")); + + // With runs_on, fix should include the remote + let error = result.into_error_with_runs_on("Cannot provision", Some("user@remote")); + let msg = error.to_string(); + assert!(msg.contains("avocado sdk install --runs-on user@remote")); + } } From 2936ccf7deba384e2313ce38c4663cc48e88125b Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Mon, 29 Dec 2025 08:30:35 -0500 Subject: [PATCH 11/20] update hitl directory paths --- src/commands/hitl/server.rs | 31 +++++++++++++++---------------- src/main.rs | 4 ++-- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/commands/hitl/server.rs b/src/commands/hitl/server.rs index aa8c662..6906a10 100644 --- a/src/commands/hitl/server.rs +++ b/src/commands/hitl/server.rs @@ -140,7 +140,7 @@ impl HitlServerCommand { } // Generate NFS export setup commands - let export_setup = self.generate_export_setup_commands(&target); + let export_setup = self.generate_export_setup_commands(); // Create the command to set up netconfig symlink, ganesha symlink, exports, and start HITL server let setup_command = format!( @@ -186,22 +186,21 @@ impl HitlServerCommand { } /// Generate shell commands to create NFS export configuration files - fn generate_export_setup_commands(&self, target: &str) -> String { + fn generate_export_setup_commands(&self) -> String { let mut commands = vec![ "mkdir -p ${AVOCADO_SDK_PREFIX}/etc/avocado/exports.d".to_string(), "mkdir -p ${AVOCADO_SDK_PREFIX}/etc/avocado".to_string(), ]; // Add/update the hitl-nfs.conf file with the exports.d directory directive - let exports_dir_line = format!("%dir /opt/_avocado/{target}/sdk/etc/avocado/exports.d"); - let config_file = "${AVOCADO_SDK_PREFIX}/etc/avocado/hitl-nfs.conf".to_string(); + let config_file = "${AVOCADO_SDK_PREFIX}/etc/avocado/hitl-nfs.conf"; - // Check if the line exists, if not add it + // Remove any existing %dir line (may have old unexpanded variable) and add correct one + // Use double quotes so shell expands ${AVOCADO_SDK_PREFIX} when writing let update_config_cmd = format!( "touch {config_file} && \ - if ! 
grep -q '^%dir /opt/_avocado/{target}/sdk/etc/avocado/exports.d$' {config_file}; then \ - echo '{exports_dir_line}' >> {config_file}; \ - fi" + sed -i '/^%dir .*\\/etc\\/avocado\\/exports\\.d$/d' {config_file} && \ + echo \"%dir ${{AVOCADO_SDK_PREFIX}}/etc/avocado/exports.d\" >> {config_file}" ); commands.push(update_config_cmd); @@ -227,7 +226,7 @@ impl HitlServerCommand { // Use shared NfsExport to generate export configurations for (index, extension) in self.extensions.iter().enumerate() { let export_id = (index + 1) as u32; - let extensions_path = format!("/opt/_avocado/{target}/extensions/{extension}"); + let extensions_path = format!("${{AVOCADO_EXT_SYSROOTS}}/{extension}"); let pseudo_path = format!("/{extension}"); // Create NfsExport using the shared type @@ -290,11 +289,11 @@ mod tests { no_stamps: false, }; - let commands = cmd.generate_export_setup_commands("x86_64"); + let commands = cmd.generate_export_setup_commands(); // Should create directories and exports.d directive but no port update assert!(commands.contains("mkdir -p ${AVOCADO_SDK_PREFIX}/etc/avocado/exports.d")); - assert!(commands.contains("%dir /opt/_avocado/x86_64/sdk/etc/avocado/exports.d")); + assert!(commands.contains("echo \"%dir ${AVOCADO_SDK_PREFIX}/etc/avocado/exports.d\"")); assert!(!commands.contains("NFS_Port =")); } @@ -311,7 +310,7 @@ mod tests { no_stamps: false, }; - let commands = cmd.generate_export_setup_commands("x86_64"); + let commands = cmd.generate_export_setup_commands(); // Should include port update commands that search within NFS_Core_Param block assert!(commands.contains("NFS_Port = 2049")); @@ -331,7 +330,7 @@ mod tests { no_stamps: false, }; - let commands = cmd.generate_export_setup_commands("x86_64"); + let commands = cmd.generate_export_setup_commands(); // Should include port update commands and debug message assert!(commands.contains("NFS_Port = 3049")); @@ -352,14 +351,14 @@ mod tests { no_stamps: false, }; - let commands = cmd.generate_export_setup_commands("aarch64"); + let commands = cmd.generate_export_setup_commands(); // Should include both port update and extension configurations assert!(commands.contains("NFS_Port = 4049")); assert!(commands.contains("Export_Id = 1")); assert!(commands.contains("Export_Id = 2")); - assert!(commands.contains("/opt/_avocado/aarch64/extensions/ext1")); - assert!(commands.contains("/opt/_avocado/aarch64/extensions/ext2")); + assert!(commands.contains("${AVOCADO_EXT_SYSROOTS}/ext1")); + assert!(commands.contains("${AVOCADO_EXT_SYSROOTS}/ext2")); } #[test] diff --git a/src/main.rs b/src/main.rs index 558355d..d0cec3c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -342,8 +342,8 @@ enum SdkCommands { /// Run container in background and print container ID #[arg(short, long)] detach: bool, - /// Automatically remove the container when it exits - #[arg(long)] + /// Automatically remove the container when it exits (default: true) + #[arg(long, default_value = "true", action = clap::ArgAction::Set)] rm: bool, /// Drop into interactive shell in container #[arg(short, long)] From 78283a1fb1b0ed342f6e7a037de11578ad471333 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Mon, 29 Dec 2025 10:14:59 -0500 Subject: [PATCH 12/20] Update runs-on to support docker-desktop --- src/commands/hitl/server.rs | 59 +++++++++++++++++-------- src/commands/sdk/install.rs | 27 +++++++++--- src/utils/container.rs | 10 +++++ src/utils/nfs_server.rs | 65 +++++++++++++++++++-------- src/utils/remote.rs | 74 +++++++++++++++++++++++++++---- src/utils/runs_on.rs | 87 
++++++++++++++++++++++++------------- src/utils/stamps.rs | 41 +++++++++++++++++ 7 files changed, 283 insertions(+), 80 deletions(-) diff --git a/src/commands/hitl/server.rs b/src/commands/hitl/server.rs index 6906a10..ddae204 100644 --- a/src/commands/hitl/server.rs +++ b/src/commands/hitl/server.rs @@ -1,5 +1,5 @@ use crate::utils::config::Config; -use crate::utils::container::{RunConfig, SdkContainer}; +use crate::utils::container::{is_docker_desktop, RunConfig, SdkContainer}; use crate::utils::nfs_server::{NfsExport, HITL_DEFAULT_PORT}; use crate::utils::output::{print_debug, print_info, OutputLevel}; use crate::utils::stamps::{ @@ -123,13 +123,34 @@ impl HitlServerCommand { } } + // Get the NFS port (used for both Ganesha and port publishing) + let nfs_port = self.port.unwrap_or(HITL_DEFAULT_PORT); + // Build container arguments with HITL-specific defaults - let mut container_args = vec![ - "--net=host".to_string(), - "--cap-add".to_string(), - "DAC_READ_SEARCH".to_string(), - "--init".to_string(), - ]; + // On Docker Desktop (macOS/Windows), --network=host doesn't expose ports to the + // actual host network (only to the Linux VM), so we use explicit port publishing. + let mut container_args = if is_docker_desktop() { + if self.verbose { + print_debug( + "Docker Desktop detected: using port publishing instead of host networking", + OutputLevel::Normal, + ); + } + vec![ + "-p".to_string(), + format!("0.0.0.0:{}:{}", nfs_port, nfs_port), + "--cap-add".to_string(), + "DAC_READ_SEARCH".to_string(), + "--init".to_string(), + ] + } else { + vec![ + "--net=host".to_string(), + "--cap-add".to_string(), + "DAC_READ_SEARCH".to_string(), + "--init".to_string(), + ] + }; // Add any additional container arguments with environment variable expansion if let Some(ref additional_args) = self.container_args { @@ -206,17 +227,16 @@ impl HitlServerCommand { // Update NFS_Port if a port is specified (it's nested inside NFS_Core_Param block) let port = self.port.unwrap_or(HITL_DEFAULT_PORT); - if self.port.is_some() { - let port_update_cmd = format!( - "sed -i '/NFS_Core_Param {{/,/}}/s/NFS_Port = [0-9]\\+;/NFS_Port = {port};/' {config_file}" - ); - commands.push(port_update_cmd); + // Always update the port to ensure consistency, especially on Docker Desktop + let port_update_cmd = format!( + "sed -i '/NFS_Core_Param {{/,/}}/s/NFS_Port = [0-9]\\+;/NFS_Port = {port};/' {config_file}" + ); + commands.push(port_update_cmd); - if self.verbose { - commands.push(format!( - "echo \"[DEBUG] Updated NFS_Port to {port} in NFS_Core_Param block in hitl-nfs.conf\"" - )); - } + if self.verbose && self.port.is_some() { + commands.push(format!( + "echo \"[DEBUG] Updated NFS_Port to {port} in NFS_Core_Param block in hitl-nfs.conf\"" + )); } if self.extensions.is_empty() { @@ -291,10 +311,11 @@ mod tests { let commands = cmd.generate_export_setup_commands(); - // Should create directories and exports.d directive but no port update + // Should create directories and exports.d directive assert!(commands.contains("mkdir -p ${AVOCADO_SDK_PREFIX}/etc/avocado/exports.d")); assert!(commands.contains("echo \"%dir ${AVOCADO_SDK_PREFIX}/etc/avocado/exports.d\"")); - assert!(!commands.contains("NFS_Port =")); + // Port is now always set to ensure consistency (uses default 12049) + assert!(commands.contains("NFS_Port = 12049")); } #[test] diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index 24d6eab..d4f3e7e 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -10,7 +10,8 @@ use 
crate::utils::{ lockfile::{build_package_spec_with_lock, LockFile, SysrootType}, output::{print_info, print_success, OutputLevel}, stamps::{ - compute_sdk_input_hash, generate_write_stamp_script, get_local_arch, Stamp, StampOutputs, + compute_sdk_input_hash, generate_write_sdk_stamp_script_dynamic_arch, + generate_write_stamp_script, get_local_arch, Stamp, StampOutputs, }, target::validate_and_log_target, }; @@ -583,6 +584,8 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; let install_success = container_helper.run_in_container(run_config).await?; @@ -796,6 +799,8 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; @@ -851,10 +856,20 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ // to detect if the SDK is installed for the remote's architecture. if !self.no_stamps { let inputs = compute_sdk_input_hash(&composed.merged_value)?; - let outputs = StampOutputs::default(); - let host_arch = get_local_arch(); - let stamp = Stamp::sdk_install(host_arch, inputs, outputs); - let stamp_script = generate_write_stamp_script(&stamp)?; + + // When using --runs-on, we need to detect the remote architecture dynamically + // since the remote host may have a different CPU arch than the local machine. + // Otherwise, use the local architecture. + let stamp_script = if self.runs_on.is_some() { + // Use dynamic arch detection for remote execution + generate_write_sdk_stamp_script_dynamic_arch(inputs) + } else { + // Use local architecture for local execution + let outputs = StampOutputs::default(); + let host_arch = get_local_arch(); + let stamp = Stamp::sdk_install(host_arch, inputs, outputs); + generate_write_stamp_script(&stamp)? + }; let run_config = RunConfig { container_image: container_image.to_string(), @@ -868,6 +883,8 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + runs_on: self.runs_on.clone(), + nfs_port: self.nfs_port, ..Default::default() }; diff --git a/src/utils/container.rs b/src/utils/container.rs index f1e1d7b..04c51ad 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -45,6 +45,16 @@ fn is_apparmor_enabled() -> bool { Path::new("/sys/kernel/security/apparmor").exists() } +/// Check if we're running on Docker Desktop (macOS or Windows). +/// +/// Docker Desktop doesn't support `--network=host` properly - it only gives +/// access to the Linux VM's network, not the actual host network. For services +/// that need to be accessible from other machines, use `-p 0.0.0.0:PORT:PORT` +/// instead. +pub fn is_docker_desktop() -> bool { + cfg!(target_os = "macos") || cfg!(target_os = "windows") +} + /// Add security options to container command based on host security module. 
/// - SELinux (Fedora/RHEL): adds --security-opt label=disable
/// - AppArmor (Ubuntu/Debian): adds --security-opt apparmor=unconfined
diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs
index eba13b9..1871a78 100644
--- a/src/utils/nfs_server.rs
+++ b/src/utils/nfs_server.rs
@@ -10,6 +10,7 @@ use std::path::{Path, PathBuf};
 use std::process::Stdio;
 use tokio::process::{Child, Command as AsyncCommand};
 
+use crate::utils::container::is_docker_desktop;
 use crate::utils::output::{print_info, OutputLevel};
 
 /// Default port range for NFS server auto-selection
@@ -44,11 +45,18 @@ impl NfsExport {
         format!(
             r#"EXPORT {{
     Export_Id = {};
-    Path = {};
-    Pseudo = {};
+    Path = "{}";
+    Pseudo = "{}";
+    SecType = sys,none;
+    Access_Type = RW;
+    Squash = No_Root_Squash;
     FSAL {{
         name = VFS;
     }}
+    CLIENT {{
+        Clients = *;
+        Access_Type = RW;
+    }}
 }}
 "#,
             self.export_id,
@@ -344,6 +352,8 @@ impl NfsServer {
         }
 
         // Build container command
+        // On Docker Desktop (macOS/Windows), --network=host doesn't expose ports to the
+        // actual host network (only to the Linux VM), so we use explicit port publishing.
         let mut args: Vec<String> = vec![
             "run".to_string(),
             "--rm".to_string(),
             "-d".to_string(),
             "--name".to_string(),
             container_name.clone(),
             "--privileged".to_string(), // Required for NFS
-            "--network".to_string(),
-            "host".to_string(), // Use host networking for NFS port
         ];
 
+        if is_docker_desktop() {
+            if config.verbose {
+                print_info(
+                    "Docker Desktop detected: using port publishing instead of host networking",
+                    OutputLevel::Normal,
+                );
+            }
+            args.push("-p".to_string());
+            args.push(format!("0.0.0.0:{}:{}", config.port, config.port));
+        } else {
+            args.push("--network".to_string());
+            args.push("host".to_string());
+        }
+
         // Mount the config file
         args.push("-v".to_string());
         args.push(format!(
@@ -367,19 +389,24 @@
         args.push(format!("{}:/var/run/ganesha", temp_dir.path().display()));
 
         // Add volume mounts for exported paths
+        // Keep track of container paths we've already mounted to avoid duplicates
+        let mut mounted_paths: std::collections::HashSet<String> = std::collections::HashSet::new();
+
         for (host_path, container_path) in &volume_mounts {
             args.push("-v".to_string());
             args.push(format!("{}:{}", host_path, container_path));
+            mounted_paths.insert(container_path.clone());
         }
 
-        // Also mount the exported paths from config
+        // Also mount the exported paths from config (if not already mounted)
         for export in &config.exports {
-            args.push("-v".to_string());
-            args.push(format!(
-                "{}:{}",
-                export.local_path.display(),
-                export.local_path.display()
-            ));
+            let export_path = export.local_path.to_string_lossy().to_string();
+            // Skip if this path is already mounted (e.g., on Docker Desktop where we mount by volume name)
+            if !mounted_paths.contains(&export_path) {
+                args.push("-v".to_string());
+                args.push(format!("{}:{}", export_path, export_path));
+                mounted_paths.insert(export_path);
+            }
         }
 
         // Container image and command
@@ -412,8 +439,9 @@
             );
         }
 
-        // Give it a moment to start
-        tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
+        // Give Ganesha time to fully initialize and load exports
+        // 2 seconds is more reliable, especially on slower systems or with many exports
+        tokio::time::sleep(tokio::time::Duration::from_millis(2000)).await;
 
         // Verify container is running
         let check_output = AsyncCommand::new(container_tool)
@@ -670,8 +698,11 @@ mod tests {
         let config = export.to_ganesha_config();
 
         assert!(config.contains("Export_Id = 1"));
-        assert!(config.contains("Path = /home/user/project"));
-        assert!(config.contains("Pseudo = /src"));
+        assert!(config.contains(r#"Path = "/home/user/project""#));
+        assert!(config.contains(r#"Pseudo = "/src""#));
+        assert!(config.contains("SecType = sys,none"));
+        assert!(config.contains("Access_Type = RW"));
+        assert!(config.contains("Clients = *"));
         assert!(config.contains("FSAL {"));
         assert!(config.contains("name = VFS"));
     }
@@ -690,8 +721,8 @@
         assert!(ganesha_config.contains("NFS_Port = 12050"));
         assert!(ganesha_config.contains("Export_Id = 1"));
         assert!(ganesha_config.contains("Export_Id = 2"));
-        assert!(ganesha_config.contains("Pseudo = /src"));
-        assert!(ganesha_config.contains("Pseudo = /state"));
+        assert!(ganesha_config.contains(r#"Pseudo = "/src""#));
+        assert!(ganesha_config.contains(r#"Pseudo = "/state""#));
         assert!(ganesha_config.contains("Protocols = 4"));
         assert!(ganesha_config.contains("Squash = No_Root_Squash"));
     }
diff --git a/src/utils/remote.rs b/src/utils/remote.rs
index 4024e00..46920c1 100644
--- a/src/utils/remote.rs
+++ b/src/utils/remote.rs
@@ -170,6 +170,9 @@ impl SshClient {
         }
 
         // Try to get the remote avocado version
+        // Note: We need to source profile files because non-interactive SSH sessions
+        // don't load .bashrc/.profile, so avocado might not be in PATH if it's in
+        // ~/.cargo/bin, ~/.local/bin, or other user-specific locations.
         let output = AsyncCommand::new("ssh")
             .args([
                 "-o",
                 "BatchMode=yes",
                 "-o",
                 "ConnectTimeout=10",
                 "-o",
                 "StrictHostKeyChecking=accept-new",
                 &self.remote.ssh_target(),
-                "avocado --version 2>/dev/null || echo 'not-installed'",
+                "source ~/.profile 2>/dev/null; source ~/.bashrc 2>/dev/null; avocado --version 2>/dev/null || echo 'not-installed'",
             ])
             .output()
             .await
@@ -579,7 +582,7 @@ pub async fn get_local_ip_for_remote(remote_host: &str) -> Result<IpAddr> {
     // (no actual connection is made for UDP, but the OS figures out which
     // local interface would be used)
 
-    use std::net::UdpSocket;
+    use std::net::{SocketAddr, UdpSocket};
 
     // First, try to resolve the remote host
     let remote_addrs: Vec<_> = tokio::net::lookup_host(format!("{}:22", remote_host))
         .await
@@ -591,16 +594,69 @@
         anyhow::bail!("Could not resolve remote host '{}'", remote_host);
     }
 
-    // Create a UDP socket and "connect" to the remote to determine local interface
-    let socket = UdpSocket::bind("0.0.0.0:0").context("Failed to create UDP socket")?;
+    // Try each resolved address, preferring IPv4
+    // Sort to try IPv4 first (more likely to work on typical local networks)
+    let mut sorted_addrs = remote_addrs.clone();
+    sorted_addrs.sort_by_key(|addr| if addr.is_ipv4() { 0 } else { 1 });
 
-    socket
-        .connect(remote_addrs[0])
-        .context("Failed to determine route to remote host")?;
+    let mut last_error = None;
+    for remote_addr in sorted_addrs {
+        // Create a socket matching the address family
+        let bind_addr: SocketAddr = if remote_addr.is_ipv4() {
+            "0.0.0.0:0".parse().unwrap()
+        } else {
+            "[::]:0".parse().unwrap()
+        };
+
+        let socket = match UdpSocket::bind(bind_addr) {
+            Ok(s) => s,
+            Err(e) => {
+                last_error = Some(e);
+                continue;
+            }
+        };
+
+        if let Err(e) = socket.connect(remote_addr) {
+            last_error = Some(e);
+            continue;
+        }
+
+        match socket.local_addr() {
+            Ok(local_addr) => return Ok(local_addr.ip()),
+            Err(e) => {
+                last_error = Some(e);
+                continue;
+            }
+        }
+    }
 
-    let local_addr = socket.local_addr().context("Failed to get local address")?;
+    // If UDP method fails, try asking SSH for the connection info
+    // This is a fallback that works on macOS and other systems where
+    // the UDP trick might fail
+    if let Ok(output) = AsyncCommand::new("ssh")
+        .args([
+            "-o",
+            "BatchMode=yes",
+            "-o",
+            "ConnectTimeout=5",
+            remote_host,
+            "echo $SSH_CLIENT | cut -d' ' -f1",
+        ])
+        .output()
+        .await
+    {
+        if output.status.success() {
+            let ip_str = String::from_utf8_lossy(&output.stdout).trim().to_string();
+            if let Ok(ip) = ip_str.parse::<IpAddr>() {
+                return Ok(ip);
+            }
+        }
+    }
 
-    Ok(local_addr.ip())
+    // Return the last error we got
+    Err(last_error
+        .map(|e| anyhow::anyhow!("Failed to determine route to remote host: {}", e))
+        .unwrap_or_else(|| anyhow::anyhow!("No valid addresses found for remote host")))
 }
 
 /// Check if a remote version is compatible with the local version
diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs
index a581519..7de2d58 100644
--- a/src/utils/runs_on.rs
+++ b/src/utils/runs_on.rs
@@ -5,9 +5,10 @@
 use anyhow::{Context, Result};
 use std::collections::HashMap;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use uuid::Uuid;
 
+use crate::utils::container::is_docker_desktop;
 use crate::utils::nfs_server::{
     find_available_port, get_docker_volume_mountpoint, is_port_available, NfsExport, NfsServer,
     NfsServerConfig, DEFAULT_NFS_PORT_RANGE,
@@ -138,22 +139,60 @@ impl RunsOnContext {
             );
         }
 
-        // Get the mountpoint of the local Docker volume
-        let volume_mountpoint = get_docker_volume_mountpoint(container_tool, local_volume_name)
-            .await
-            .with_context(|| {
-                format!(
-                    "Failed to get mountpoint for volume '{}'",
-                    local_volume_name
-                )
-            })?;
+        // On Docker Desktop (macOS/Windows), the volume mountpoint returned by Docker
+        // is inside the Docker Desktop VM and not accessible from the host filesystem.
+        // We need to mount by volume name instead of host path.
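+        // Editor's illustration (not part of the original patch) of the two
+        // resulting mount styles, assuming a state volume named "avocado-state":
+        //   native Linux:    -v /var/lib/docker/volumes/avocado-state/_data:/var/lib/docker/volumes/avocado-state/_data
+        //   Docker Desktop:  -v avocado-state:/opt/nfs-state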
+ let (state_export_path, volume_mounts) = if is_docker_desktop() { + if verbose { + print_info( + "Docker Desktop detected: mounting volume by name", + OutputLevel::Normal, + ); + } + // Use a fixed container path for the state volume + let container_state_path = PathBuf::from("/opt/nfs-state"); + let mounts = vec![ + ( + src_dir.to_string_lossy().to_string(), + src_dir.to_string_lossy().to_string(), + ), + // Mount Docker volume by name to a container path + ( + local_volume_name.to_string(), + container_state_path.to_string_lossy().to_string(), + ), + ]; + (container_state_path, mounts) + } else { + // On native Docker (Linux), we can use the host volume mountpoint directly + let volume_mountpoint = get_docker_volume_mountpoint(container_tool, local_volume_name) + .await + .with_context(|| { + format!( + "Failed to get mountpoint for volume '{}'", + local_volume_name + ) + })?; + + if verbose { + print_info( + &format!("Local volume mountpoint: {}", volume_mountpoint.display()), + OutputLevel::Normal, + ); + } - if verbose { - print_info( - &format!("Local volume mountpoint: {}", volume_mountpoint.display()), - OutputLevel::Normal, - ); - } + let mounts = vec![ + ( + src_dir.to_string_lossy().to_string(), + src_dir.to_string_lossy().to_string(), + ), + ( + volume_mountpoint.to_string_lossy().to_string(), + volume_mountpoint.to_string_lossy().to_string(), + ), + ]; + (volume_mountpoint, mounts) + }; // Create and start NFS server inside the SDK container // The container has ganesha.nfsd installed @@ -161,24 +200,12 @@ impl RunsOnContext { port, exports: vec![ NfsExport::new(1, src_dir.to_path_buf(), "/src".to_string()), - NfsExport::new(2, volume_mountpoint.clone(), "/state".to_string()), + NfsExport::new(2, state_export_path.clone(), "/state".to_string()), ], verbose, bind_addr: "0.0.0.0".to_string(), }; - // Volume mounts for the container to access the paths - let volume_mounts = vec![ - ( - src_dir.to_string_lossy().to_string(), - src_dir.to_string_lossy().to_string(), - ), - ( - volume_mountpoint.to_string_lossy().to_string(), - volume_mountpoint.to_string_lossy().to_string(), - ), - ]; - let nfs_server = NfsServer::start_in_container(config, container_tool, container_image, volume_mounts) .await @@ -235,7 +262,7 @@ impl RunsOnContext { print_info( &format!( "📂 _avocado: {} → remote:/opt/_avocado", - volume_mountpoint.display() + state_export_path.display() ), OutputLevel::Normal, ); diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs index 4d21a37..55044e9 100644 --- a/src/utils/stamps.rs +++ b/src/utils/stamps.rs @@ -758,6 +758,47 @@ STAMP_EOF )) } +/// Generate shell script to write an SDK install stamp with dynamic architecture detection. +/// +/// This is used when running with --runs-on where the remote host may have a different +/// architecture than the local machine. The arch is determined at runtime using `uname -m` +/// or the AVOCADO_SDK_ARCH environment variable (set by the entrypoint). 
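+/// A short sketch (editor's illustration, not part of the original patch) of
+/// how the generated script resolves the stamp path on an aarch64 remote:
+///
+/// ```ignore
+/// let script = generate_write_sdk_stamp_script_dynamic_arch(inputs);
+/// // Inside the container the script expands roughly to:
+/// //   SDK_ARCH="${AVOCADO_SDK_ARCH:-$(uname -m)}"   # -> "aarch64"
+/// //   cat > "$AVOCADO_PREFIX/.stamps/sdk/aarch64/install.stamp" ...
+/// ```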
+pub fn generate_write_sdk_stamp_script_dynamic_arch(inputs: StampInputs) -> String {
+    let timestamp = chrono::Utc::now().to_rfc3339();
+    let cli_version = env!("CARGO_PKG_VERSION");
+
+    // Build the stamp JSON with shell variable substitution for the arch
+    // Note: We use double quotes for the heredoc to allow $SDK_ARCH substitution
+    format!(
+        r#"
+# Write SDK install stamp with dynamic architecture detection
+SDK_ARCH="${{AVOCADO_SDK_ARCH:-$(uname -m)}}"
+mkdir -p "$AVOCADO_PREFIX/.stamps/sdk/$SDK_ARCH"
+cat > "$AVOCADO_PREFIX/.stamps/sdk/$SDK_ARCH/install.stamp" << STAMP_EOF
+{{
+  "version": {version},
+  "command": "install",
+  "component": "sdk",
+  "component_name": null,
+  "target": "$SDK_ARCH",
+  "timestamp": "{timestamp}",
+  "success": true,
+  "inputs": {{
+    "config_hash": "{config_hash}"
+  }},
+  "outputs": {{}},
+  "cli_version": "{cli_version}"
+}}
+STAMP_EOF
+# SDK stamp written for architecture: $SDK_ARCH
+"#,
+        version = STAMP_VERSION,
+        timestamp = timestamp,
+        config_hash = inputs.config_hash,
+        cli_version = cli_version
+    )
+}
+
 /// Generate shell script to read a stamp file
 /// Generate a single shell script that reads multiple stamps and outputs them in a parseable format.
 /// Each stamp is output as: `STAMP_PATH:::JSON_CONTENT` (or `STAMP_PATH:::null` if missing)

From dbffc5818d19fd0e0d16b12997dc49c02578b4bf Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Mon, 29 Dec 2025 11:00:34 -0500
Subject: [PATCH 13/20] Maintain runs-on context for all commands in the run

---
 src/commands/ext/install.rs     | 115 ++++++++++++---
 src/commands/runtime/build.rs   | 124 ++++++++++++----
 src/commands/runtime/install.rs | 114 ++++++++++++---
 src/commands/sdk/install.rs     | 247 +++++++++++++++++++++-----------
 src/utils/container.rs          | 118 +++++++++++++--
 src/utils/nfs_server.rs         |  21 ++-
 src/utils/runs_on.rs            |  33 ++++-
 7 files changed, 589 insertions(+), 183 deletions(-)

diff --git a/src/commands/ext/install.rs b/src/commands/ext/install.rs
index f010cfe..505f441 100644
--- a/src/commands/ext/install.rs
+++ b/src/commands/ext/install.rs
@@ -5,6 +5,7 @@ use crate::utils::config::{Config, ExtensionLocation};
 use crate::utils::container::{RunConfig, SdkContainer};
 use crate::utils::lockfile::{build_package_spec_with_lock, LockFile, SysrootType};
 use crate::utils::output::{print_debug, print_error, print_info, print_success, OutputLevel};
+use crate::utils::runs_on::RunsOnContext;
 use crate::utils::stamps::{
     compute_ext_input_hash, generate_write_stamp_script, Stamp, StampOutputs,
 };
@@ -185,6 +186,62 @@ impl ExtInstallCommand {
 
         // Use the container helper to run the setup commands
         let container_helper = SdkContainer::new().verbose(self.verbose);
+
+        // Create shared RunsOnContext if running on remote host
+        let mut runs_on_context: Option<RunsOnContext> = if let Some(ref runs_on) = self.runs_on {
+            Some(
+                container_helper
+                    .create_runs_on_context(runs_on, self.nfs_port, container_image, self.verbose)
+                    .await?,
+            )
+        } else {
+            None
+        };
+
+        // Execute the installation and ensure cleanup
+        let result = self
+            .execute_install_internal(
+                config,
+                parsed,
+                &extensions_to_install,
+                &container_helper,
+                container_image,
+                &target,
+                repo_url.as_ref(),
+                repo_release.as_ref(),
+                &merged_container_args,
+                runs_on_context.as_ref(),
+            )
+            .await;
+
+        // Always teardown the context if it was created
+        if let Some(ref mut context) = runs_on_context {
+            if let Err(e) = context.teardown().await {
+                print_error(
+                    &format!("Warning: Failed to cleanup remote resources: {}", e),
+                    OutputLevel::Normal,
+                );
+            }
+        }
+
+        result
+    }
+
+    /// Internal implementation of the install logic
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_install_internal(
+        &self,
+        config: &Config,
+        parsed: &serde_yaml::Value,
+        extensions_to_install: &[(String, ExtensionLocation)],
+        container_helper: &SdkContainer,
+        container_image: &str,
+        target: &str,
+        repo_url: Option<&String>,
+        repo_release: Option<&String>,
+        merged_container_args: &Option<Vec<String>>,
+        runs_on_context: Option<&RunsOnContext>,
+    ) -> Result<()> {
         let total = extensions_to_install.len();
 
         // Load lock file for reproducible builds
@@ -234,15 +291,16 @@
                 config,
                 ext_name,
                 &ext_config_path,
-                &container_helper,
+                container_helper,
                 container_image,
-                &target,
-                repo_url.as_ref(),
-                repo_release.as_ref(),
-                &merged_container_args,
+                target,
+                repo_url,
+                repo_release,
+                merged_container_args,
                 config.get_sdk_disable_weak_dependencies(),
                 &mut lock_file,
                 &src_dir,
+                runs_on_context,
             )
            .await?
         {
@@ -253,26 +311,25 @@
             if !self.no_stamps {
                 let inputs = compute_ext_input_hash(parsed, ext_name)?;
                 let outputs = StampOutputs::default();
-                let stamp = Stamp::ext_install(ext_name, &target, inputs, outputs);
+                let stamp = Stamp::ext_install(ext_name, target, inputs, outputs);
                 let stamp_script = generate_write_stamp_script(&stamp)?;
 
                 let run_config = RunConfig {
                     container_image: container_image.to_string(),
-                    target: target.clone(),
+                    target: target.to_string(),
                     command: stamp_script,
                     verbose: self.verbose,
                     source_environment: true,
                     interactive: false,
-                    repo_url: repo_url.clone(),
-                    repo_release: repo_release.clone(),
+                    repo_url: repo_url.cloned(),
+                    repo_release: repo_release.cloned(),
                     container_args: merged_container_args.clone(),
                     dnf_args: self.dnf_args.clone(),
-                    runs_on: self.runs_on.clone(),
-                    nfs_port: self.nfs_port,
+                    // runs_on handled by shared context
                     ..Default::default()
                 };
 
-                container_helper.run_in_container(run_config).await?;
+                run_container_command(container_helper, run_config, runs_on_context).await?;
 
                 if self.verbose {
                     print_info(
@@ -308,6 +365,7 @@
         disable_weak_dependencies: bool,
         lock_file: &mut LockFile,
         src_dir: &Path,
+        runs_on_context: Option<&RunsOnContext>,
     ) -> Result<bool> {
         // Create the commands to check and set up the directory structure
         let check_command = format!("[ -d $AVOCADO_EXT_SYSROOTS/{extension} ]");
@@ -327,11 +385,11 @@
             repo_release: repo_release.cloned(),
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let sysroot_exists = container_helper.run_in_container(run_config).await?;
+        let sysroot_exists =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if !sysroot_exists {
             // Create the sysroot
@@ -346,11 +404,11 @@
             repo_release: repo_release.cloned(),
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let success = container_helper.run_in_container(run_config).await?;
+        let success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if success {
             print_success(
@@ -552,11 +610,11 @@ $DNF_SDK_HOST \
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
             disable_weak_dependencies,
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let install_success = container_helper.run_in_container(run_config).await?;
+        let install_success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if !install_success {
             print_error(
@@ -608,3 +666,18 @@
         Ok(true)
     }
 }
+
+/// Helper function to run a container command, using shared context if available
+async fn run_container_command(
+    container_helper: &SdkContainer,
+    config: RunConfig,
+    runs_on_context: Option<&RunsOnContext>,
+) -> Result<bool> {
+    if let Some(context) = runs_on_context {
+        container_helper
+            .run_in_container_with_context(&config, context)
+            .await
+    } else {
+        container_helper.run_in_container(config).await
+    }
+}
diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs
index dd49065..9ab2b0f 100644
--- a/src/commands/runtime/build.rs
+++ b/src/commands/runtime/build.rs
@@ -1,7 +1,8 @@
 use crate::utils::{
     config::load_config,
     container::{RunConfig, SdkContainer},
-    output::{print_info, print_success, OutputLevel},
+    output::{print_error, print_info, print_success, OutputLevel},
+    runs_on::RunsOnContext,
     stamps::{
         compute_runtime_input_hash, generate_batch_read_stamps_script, generate_write_stamp_script,
         resolve_required_stamps_for_runtime_build, validate_stamps_batch, Stamp, StampOutputs,
@@ -99,13 +100,66 @@ impl RuntimeBuildCommand {
         let container_helper =
             SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose);
 
+        // Create shared RunsOnContext if running on remote host
+        let mut runs_on_context: Option<RunsOnContext> = if let Some(ref runs_on) = self.runs_on {
+            Some(
+                container_helper
+                    .create_runs_on_context(runs_on, self.nfs_port, container_image, self.verbose)
+                    .await?,
+            )
+        } else {
+            None
+        };
+
+        // Execute the build and ensure cleanup
+        let result = self
+            .execute_build_internal(
+                &config,
+                &parsed,
+                container_image,
+                &target_arch,
+                &merged_container_args,
+                repo_url.as_ref(),
+                repo_release.as_ref(),
+                &container_helper,
+                runs_on_context.as_ref(),
+            )
+            .await;
+
+        // Always teardown the context if it was created
+        if let Some(ref mut context) = runs_on_context {
+            if let Err(e) = context.teardown().await {
+                print_error(
+                    &format!("Warning: Failed to cleanup remote resources: {}", e),
+                    OutputLevel::Normal,
+                );
+            }
+        }
+
+        result
+    }
+
+    /// Internal implementation of the build logic
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_build_internal(
+        &self,
+        config: &crate::utils::config::Config,
+        parsed: &serde_yaml::Value,
+        container_image: &str,
+        target_arch: &str,
+        merged_container_args: &Option<Vec<String>>,
+        repo_url: Option<&String>,
+        repo_release: Option<&String>,
+        container_helper: &SdkContainer,
+        runs_on_context: Option<&RunsOnContext>,
+    ) -> Result<()> {
         // Validate stamps before proceeding (unless --no-stamps)
         if !self.no_stamps {
             // Get detailed extension dependencies for this runtime
             // This distinguishes between local, external, and versioned extensions
             let ext_deps = config.get_runtime_extension_dependencies_detailed(
                 &self.runtime_name,
-                &target_arch,
+                target_arch,
                 &self.config_path,
             )?;
@@ -118,17 +172,16 @@
             let batch_script = generate_batch_read_stamps_script(&required);
             let run_config = RunConfig {
                 container_image: container_image.to_string(),
-                target: target_arch.clone(),
+                target: target_arch.to_string(),
                 command: batch_script,
                 verbose: false,
                 source_environment: true,
                 interactive: false,
-                repo_url: repo_url.clone(),
-                repo_release: 
repo_release.clone(), + repo_url: repo_url.cloned(), + repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; @@ -156,10 +209,10 @@ impl RuntimeBuildCommand { // This ensures the build scripts know exactly which extension versions to use let resolved_extensions = self .collect_runtime_extensions( - &parsed, - &config, + parsed, + config, &self.runtime_name, - target_arch.as_str(), + target_arch, &self.config_path, container_image, merged_container_args.clone(), @@ -167,7 +220,7 @@ impl RuntimeBuildCommand { .await?; // Build var image - let build_script = self.create_build_script(&parsed, &target_arch, &resolved_extensions)?; + let build_script = self.create_build_script(parsed, target_arch, &resolved_extensions)?; if self.verbose { print_info( @@ -194,7 +247,7 @@ impl RuntimeBuildCommand { if let Some(stone_paths) = config.get_stone_include_paths_for_runtime( &self.runtime_name, - &target_arch, + target_arch, &self.config_path, )? { env_vars.insert("AVOCADO_STONE_INCLUDE_PATHS".to_string(), stone_paths); @@ -203,7 +256,7 @@ impl RuntimeBuildCommand { // Get stone manifest if configured if let Some(stone_manifest) = config.get_stone_manifest_for_runtime( &self.runtime_name, - &target_arch, + target_arch, &self.config_path, )? { env_vars.insert("AVOCADO_STONE_MANIFEST".to_string(), stone_manifest); @@ -231,22 +284,20 @@ impl RuntimeBuildCommand { let run_config = RunConfig { container_image: container_image.to_string(), - target: target_arch.clone(), + target: target_arch.to_string(), command: build_script, verbose: self.verbose, source_environment: true, // need environment for build interactive: false, // build script runs non-interactively - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), + repo_url: repo_url.cloned(), + repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), env_vars, - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - let complete_result = container_helper - .run_in_container(run_config) + let complete_result = run_container_command(container_helper, run_config, runs_on_context) .await .context("Failed to build complete image")?; @@ -262,30 +313,29 @@ impl RuntimeBuildCommand { // Write runtime build stamp (unless --no-stamps) if !self.no_stamps { let merged_runtime = config - .get_merged_runtime_config(&self.runtime_name, &target_arch, &self.config_path)? + .get_merged_runtime_config(&self.runtime_name, target_arch, &self.config_path)? 
                .unwrap_or_default();
 
             let inputs = compute_runtime_input_hash(&merged_runtime, &self.runtime_name)?;
             let outputs = StampOutputs::default();
-            let stamp = Stamp::runtime_build(&self.runtime_name, &target_arch, inputs, outputs);
+            let stamp = Stamp::runtime_build(&self.runtime_name, target_arch, inputs, outputs);
             let stamp_script = generate_write_stamp_script(&stamp)?;
 
             let run_config = RunConfig {
                 container_image: container_image.to_string(),
-                target: target_arch.clone(),
+                target: target_arch.to_string(),
                 command: stamp_script,
                 verbose: self.verbose,
                 source_environment: true,
                 interactive: false,
-                repo_url: repo_url.clone(),
-                repo_release: repo_release.clone(),
+                repo_url: repo_url.cloned(),
+                repo_release: repo_release.cloned(),
                 container_args: merged_container_args.clone(),
                 dnf_args: self.dnf_args.clone(),
-                runs_on: self.runs_on.clone(),
-                nfs_port: self.nfs_port,
+                // runs_on handled by shared context
                 ..Default::default()
             };
 
-            container_helper.run_in_container(run_config).await?;
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
             if self.verbose {
                 print_info(
@@ -878,8 +928,7 @@
                 verbose: self.verbose,
                 source_environment: true,
                 interactive: false,
-                runs_on: self.runs_on.clone(),
-                nfs_port: self.nfs_port,
+                // runs_on handled by shared context
                 container_args,
                 ..Default::default()
             };
@@ -912,6 +961,21 @@
     }
 }
 
+/// Helper function to run a container command, using shared context if available
+async fn run_container_command(
+    container_helper: &SdkContainer,
+    config: RunConfig,
+    runs_on_context: Option<&RunsOnContext>,
+) -> Result<bool> {
+    if let Some(context) = runs_on_context {
+        container_helper
+            .run_in_container_with_context(&config, context)
+            .await
+    } else {
+        container_helper.run_in_container(config).await
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/src/commands/runtime/install.rs b/src/commands/runtime/install.rs
index ed7bef1..e388cc5 100644
--- a/src/commands/runtime/install.rs
+++ b/src/commands/runtime/install.rs
@@ -5,6 +5,7 @@ use crate::utils::config::Config;
 use crate::utils::container::{RunConfig, SdkContainer};
 use crate::utils::lockfile::{build_package_spec_with_lock, LockFile, SysrootType};
 use crate::utils::output::{print_debug, print_error, print_info, print_success, OutputLevel};
+use crate::utils::runs_on::RunsOnContext;
 use crate::utils::stamps::{
     compute_runtime_input_hash, generate_write_stamp_script, Stamp, StampOutputs,
 };
@@ -135,6 +136,59 @@ impl RuntimeInstallCommand {
         // Initialize container helper
         let container_helper = SdkContainer::new().verbose(self.verbose);
 
+        // Create shared RunsOnContext if running on remote host
+        let mut runs_on_context: Option<RunsOnContext> = if let Some(ref runs_on) = self.runs_on {
+            Some(
+                container_helper
+                    .create_runs_on_context(runs_on, self.nfs_port, container_image, self.verbose)
+                    .await?,
+            )
+        } else {
+            None
+        };
+
+        // Execute installation and ensure cleanup
+        let result = self
+            .execute_install_internal(
+                &parsed,
+                &config,
+                &runtimes_to_install,
+                &container_helper,
+                container_image,
+                repo_url.as_ref(),
+                repo_release.as_ref(),
+                &merged_container_args,
+                runs_on_context.as_ref(),
+            )
+            .await;
+
+        // Always teardown the context if it was created
+        if let Some(ref mut context) = runs_on_context {
+            if let Err(e) = context.teardown().await {
+                print_error(
+                    &format!("Warning: Failed to cleanup remote resources: {}", e),
+                    OutputLevel::Normal,
+                );
+            }
+        }
+
+        result
+    }
+
+    /// Internal implementation of the install logic
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_install_internal(
+        &self,
+        parsed: &serde_yaml::Value,
+        config: &Config,
+        runtimes_to_install: &[String],
+        container_helper: &SdkContainer,
+        container_image: &str,
+        repo_url: Option<&String>,
+        repo_release: Option<&String>,
+        merged_container_args: &Option<Vec<String>>,
+        runs_on_context: Option<&RunsOnContext>,
+    ) -> Result<()> {
         // Load lock file for reproducible builds
         let src_dir = config
             .get_resolved_src_dir(&self.config_path)
@@ -154,7 +208,7 @@
         }
 
         // Install dependencies for each runtime
-        for runtime_name in &runtimes_to_install {
+        for runtime_name in runtimes_to_install {
             print_info(
                 &format!("Installing dependencies for runtime '{runtime_name}'"),
                 OutputLevel::Normal,
@@ -162,16 +216,17 @@
 
             let success = self
                 .install_single_runtime(
-                    &parsed,
-                    &config,
+                    parsed,
+                    config,
                     runtime_name,
-                    &container_helper,
+                    container_helper,
                     container_image,
-                    repo_url.as_ref(),
-                    repo_release.as_ref(),
-                    &merged_container_args,
+                    repo_url,
+                    repo_release,
+                    merged_container_args,
                     &mut lock_file,
                     &src_dir,
+                    runs_on_context,
                 )
                 .await?;
@@ -186,7 +241,7 @@
             // Write runtime install stamp (unless --no-stamps)
             if !self.no_stamps {
                 // Get merged runtime config for stamp input hash
-                let target_arch = resolve_target_required(self.target.as_deref(), &config)?;
+                let target_arch = resolve_target_required(self.target.as_deref(), config)?;
                 if let Some(merged_runtime) = config.get_merged_runtime_config(
                     runtime_name,
                     &target_arch,
                         verbose: self.verbose,
                         source_environment: true,
                         interactive: false,
-                        repo_url: repo_url.clone(),
-                        repo_release: repo_release.clone(),
+                        repo_url: repo_url.cloned(),
+                        repo_release: repo_release.cloned(),
                         container_args: merged_container_args.clone(),
                         dnf_args: self.dnf_args.clone(),
-                        runs_on: self.runs_on.clone(),
-                        nfs_port: self.nfs_port,
+                        // runs_on handled by shared context
                         ..Default::default()
                     };
 
-                    container_helper.run_in_container(run_config).await?;
+                    run_container_command(container_helper, run_config, runs_on_context).await?;
 
                     if self.verbose {
                         print_info(
@@ -249,6 +303,7 @@
         merged_container_args: &Option<Vec<String>>,
         lock_file: &mut LockFile,
         src_dir: &Path,
+        runs_on_context: Option<&RunsOnContext>,
     ) -> Result<bool> {
         // Get runtime configuration
         let runtime_config = config_toml["runtime"][runtime].clone();
@@ -281,11 +336,11 @@
             repo_release: repo_release.cloned(),
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let installroot_exists = container_helper.run_in_container(run_config).await?;
+        let installroot_exists =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if !installroot_exists {
             // Create the installroot
@@ -300,11 +355,11 @@
             repo_release: repo_release.cloned(),
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let success = container_helper.run_in_container(run_config).await?;
+        let success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
             if success {
                 print_success(
@@ -442,11 +497,11 @@ $DNF_SDK_HOST \
             container_args: merged_container_args.clone(),
             dnf_args: self.dnf_args.clone(),
             disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
-        let success = container_helper.run_in_container(run_config).await?;
+        let success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if !success {
             print_error(
@@ -510,6 +565,21 @@
     }
 }
 
+/// Helper function to run a container command, using shared context if available
+async fn run_container_command(
+    container_helper: &SdkContainer,
+    config: RunConfig,
+    runs_on_context: Option<&RunsOnContext>,
+) -> Result<bool> {
+    if let Some(context) = runs_on_context {
+        container_helper
+            .run_in_container_with_context(&config, context)
+            .await
+    } else {
+        container_helper.run_in_container(config).await
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs
index d4f3e7e..8b8a896 100644
--- a/src/commands/sdk/install.rs
+++ b/src/commands/sdk/install.rs
@@ -8,7 +8,8 @@ use crate::utils::{
     config::Config,
     container::{RunConfig, SdkContainer},
     lockfile::{build_package_spec_with_lock, LockFile, SysrootType},
-    output::{print_info, print_success, OutputLevel},
+    output::{print_error, print_info, print_success, OutputLevel},
+    runs_on::RunsOnContext,
     stamps::{
         compute_sdk_input_hash, generate_write_sdk_stamp_script_dynamic_arch,
         generate_write_stamp_script, get_local_arch, Stamp, StampOutputs,
     },
     target::validate_and_log_target,
 };
@@ -123,6 +124,64 @@ impl SdkInstallCommand {
         let container_helper =
             SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose);
 
+        // Create shared RunsOnContext if running on remote host
+        // This allows reusing the NFS server and volumes for all container runs
+        let mut runs_on_context: Option<RunsOnContext> = if let Some(ref runs_on) = self.runs_on {
+            Some(
+                container_helper
+                    .create_runs_on_context(runs_on, self.nfs_port, container_image, self.verbose)
+                    .await?,
+            )
+        } else {
+            None
+        };
+
+        // Execute the main installation logic, ensuring cleanup on error
+        let result = self
+            .execute_install(
+                config,
+                &composed,
+                &target,
+                container_image,
+                &sdk_dependencies,
+                &extension_sdk_dependencies,
+                repo_url.as_deref(),
+                repo_release.as_deref(),
+                &container_helper,
+                merged_container_args.as_ref(),
+                runs_on_context.as_ref(),
+            )
+            .await;
+
+        // Always teardown the context if it was created
+        if let Some(ref mut context) = runs_on_context {
+            if let Err(e) = context.teardown().await {
+                print_error(
+                    &format!("Warning: Failed to cleanup remote resources: {}", e),
+                    OutputLevel::Normal,
+                );
+            }
+        }
+
+        result
+    }
+
+    /// Internal implementation of the install logic
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_install(
+        &self,
+        config: &Config,
+        composed: &crate::utils::config::ComposedConfig,
+        target: &str,
+        container_image: &str,
+        sdk_dependencies: &Option<Vec<String>>,
+        extension_sdk_dependencies: &HashMap<String, Vec<String>>,
+        repo_url: Option<&str>,
+        repo_release: Option<&str>,
+        container_helper: &SdkContainer,
+        merged_container_args: Option<&Vec<String>>,
+        runs_on_context: Option<&RunsOnContext>,
+    ) -> Result<()> {
         // Load lock file for reproducible builds
         let src_dir = config
             .get_resolved_src_dir(&self.config_path)
@@ -293,22 +352,22 @@ MACROS_EOF
 
         let run_config = RunConfig {
             container_image: container_image.to_string(),
-            target: target.clone(),
+            target: target.to_string(),
             command: sdk_init_command.to_string(),
             verbose: self.verbose,
             source_environment: true,
             interactive: false,
-            repo_url: repo_url.clone(),
-            repo_release: repo_release.clone(),
-            container_args: merged_container_args.clone(),
+            repo_url: repo_url.map(|s| s.to_string()),
+            repo_release: repo_release.map(|s| s.to_string()),
+            container_args: merged_container_args.cloned(),
             dnf_args: self.dnf_args.clone(),
             disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
 
-        let init_success = container_helper.run_in_container(run_config).await?;
+        let init_success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         if init_success {
             print_success("Initialized SDK environment.", OutputLevel::Normal);
@@ -330,7 +389,7 @@
             .unwrap_or("*");
         let sdk_target_pkg = build_package_spec_with_lock(
             &lock_file,
-            &target,
+            target,
             &SysrootType::Sdk,
             &sdk_target_pkg_name,
             sdk_target_config_version,
@@ -352,22 +411,22 @@
$DNF_SDK_HOST $DNF_NO_SCRIPTS \

         let run_config = RunConfig {
             container_image: container_image.to_string(),
-            target: target.clone(),
+            target: target.to_string(),
             command: sdk_target_command,
             verbose: self.verbose,
             source_environment: true,
             interactive: false,
-            repo_url: repo_url.clone(),
-            repo_release: repo_release.clone(),
-            container_args: merged_container_args.clone(),
+            repo_url: repo_url.map(|s| s.to_string()),
+            repo_release: repo_release.map(|s| s.to_string()),
+            container_args: merged_container_args.cloned(),
             dnf_args: self.dnf_args.clone(),
             disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
 
-        let sdk_target_success = container_helper.run_in_container(run_config).await?;
+        let sdk_target_success =
+            run_container_command(container_helper, run_config, runs_on_context).await?;
 
         // Track all SDK packages installed for lock file update at the end
         let mut all_sdk_package_names: Vec<String> = Vec::new();
@@ -398,22 +457,21 @@
$DNF_SDK_HOST \

         let run_config = RunConfig {
             container_image: container_image.to_string(),
-            target: target.clone(),
+            target: target.to_string(),
             command: check_update_command.to_string(),
             verbose: self.verbose,
             source_environment: true,
             interactive: false,
-            repo_url: repo_url.clone(),
-            repo_release: repo_release.clone(),
-            container_args: merged_container_args.clone(),
+            repo_url: repo_url.map(|s| s.to_string()),
+            repo_release: repo_release.map(|s| s.to_string()),
+            container_args: merged_container_args.cloned(),
             dnf_args: self.dnf_args.clone(),
             disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(),
-            runs_on: self.runs_on.clone(),
-            nfs_port: self.nfs_port,
+            // runs_on handled by shared context
             ..Default::default()
         };
 
-        container_helper.run_in_container(run_config).await?;
+        run_container_command(container_helper, run_config, runs_on_context).await?;
 
         // Install avocado-sdk-bootstrap with version from distro.version
         print_info("Installing SDK bootstrap.", OutputLevel::Normal);
@@ -425,7 +483,7 @@
             .unwrap_or("*");
         let bootstrap_pkg = build_package_spec_with_lock(
             &lock_file,
-            &target,
+            target,
             &SysrootType::Sdk,
             bootstrap_pkg_name,
             bootstrap_config_version,
@@ -447,22 +505,22 @@
$DNF_SDK_HOST $DNF_NO_SCRIPTS \

         let run_config = RunConfig {
             container_image: container_image.to_string(),
target: target.clone(), + target: target.to_string(), command: bootstrap_command, verbose: self.verbose, source_environment: true, interactive: false, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - let bootstrap_success = container_helper.run_in_container(run_config).await?; + let bootstrap_success = + run_container_command(container_helper, run_config, runs_on_context).await?; if bootstrap_success { print_success("Installed SDK bootstrap.", OutputLevel::Normal); @@ -499,22 +557,21 @@ fi let run_config = RunConfig { container_image: container_image.to_string(), - target: target.clone(), + target: target.to_string(), command: env_setup_command.to_string(), verbose: self.verbose, source_environment: true, interactive: false, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - container_helper.run_in_container(run_config).await?; + run_container_command(container_helper, run_config, runs_on_context).await?; // Install SDK dependencies (into SDK) let mut sdk_packages = Vec::new(); @@ -525,14 +582,14 @@ fi sdk_packages.extend(self.build_package_list_with_lock( dependencies, &lock_file, - &target, + target, &SysrootType::Sdk, )); sdk_package_names.extend(self.extract_package_names(dependencies)); } // Add extension SDK dependencies to the package list - for (ext_name, ext_deps) in &extension_sdk_dependencies { + for (ext_name, ext_deps) in extension_sdk_dependencies { if self.verbose { print_info( &format!("Adding SDK dependencies from extension '{ext_name}'"), @@ -540,7 +597,7 @@ fi ); } let ext_packages = - self.build_package_list_with_lock(ext_deps, &lock_file, &target, &SysrootType::Sdk); + self.build_package_list_with_lock(ext_deps, &lock_file, target, &SysrootType::Sdk); sdk_packages.extend(ext_packages); sdk_package_names.extend(self.extract_package_names(ext_deps)); } @@ -574,21 +631,21 @@ $DNF_SDK_HOST \ // Use the container helper's run_in_container method let run_config = RunConfig { container_image: container_image.to_string(), - target: target.clone(), + target: target.to_string(), command, verbose: self.verbose, source_environment: true, interactive: !self.force, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - let install_success = 
container_helper.run_in_container(run_config).await?; + let install_success = + run_container_command(container_helper, run_config, runs_on_context).await?; if install_success { print_success("Installed SDK dependencies.", OutputLevel::Normal); @@ -609,15 +666,15 @@ $DNF_SDK_HOST \ &SysrootType::Sdk, &all_sdk_package_names, container_image, - &target, - repo_url.clone(), - repo_release.clone(), - merged_container_args.clone(), + target, + repo_url.map(|s| s.to_string()), + repo_release.map(|s| s.to_string()), + merged_container_args.cloned(), ) .await?; if !installed_versions.is_empty() { - lock_file.update_sysroot_versions(&target, &SysrootType::Sdk, installed_versions); + lock_file.update_sysroot_versions(target, &SysrootType::Sdk, installed_versions); if self.verbose { print_info( &format!( @@ -642,7 +699,7 @@ $DNF_SDK_HOST \ .unwrap_or("*"); let rootfs_pkg = build_package_spec_with_lock( &lock_file, - &target, + target, &SysrootType::Rootfs, rootfs_base_pkg, rootfs_config_version, @@ -666,22 +723,22 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ let run_config = RunConfig { container_image: container_image.to_string(), - target: target.clone(), + target: target.to_string(), command: rootfs_command, verbose: self.verbose, source_environment: false, interactive: !self.force, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - let rootfs_success = container_helper.run_in_container(run_config).await?; + let rootfs_success = + run_container_command(container_helper, run_config, runs_on_context).await?; if rootfs_success { print_success("Installed rootfs sysroot.", OutputLevel::Normal); @@ -692,16 +749,16 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ &SysrootType::Rootfs, &[rootfs_base_pkg.to_string()], container_image, - &target, - repo_url.clone(), - repo_release.clone(), - merged_container_args.clone(), + target, + repo_url.map(|s| s.to_string()), + repo_release.map(|s| s.to_string()), + merged_container_args.cloned(), ) .await?; if !installed_versions.is_empty() { lock_file.update_sysroot_versions( - &target, + target, &SysrootType::Rootfs, installed_versions, ); @@ -729,7 +786,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ let packages = self.build_package_list_with_lock( dependencies, &lock_file, - &target, + target, &SysrootType::TargetSysroot, ); all_compile_packages.extend(packages); @@ -765,7 +822,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ .unwrap_or("*"); let target_sysroot_pkg = build_package_spec_with_lock( &lock_file, - &target, + target, &SysrootType::TargetSysroot, target_sysroot_base_pkg, target_sysroot_config_version, @@ -789,22 +846,22 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ let run_config = RunConfig { container_image: container_image.to_string(), - target: target.clone(), + target: target.to_string(), command, verbose: self.verbose, source_environment: false, // Don't source environment - matches rootfs install behavior interactive: !self.force, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: 
merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - let install_success = container_helper.run_in_container(run_config).await?; + let install_success = + run_container_command(container_helper, run_config, runs_on_context).await?; if install_success { print_success( @@ -821,16 +878,16 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ &SysrootType::TargetSysroot, &packages_to_query, container_image, - &target, - repo_url.clone(), - repo_release.clone(), - merged_container_args.clone(), + target, + repo_url.map(|s| s.to_string()), + repo_release.map(|s| s.to_string()), + merged_container_args.cloned(), ) .await?; if !installed_versions.is_empty() { lock_file.update_sysroot_versions( - &target, + target, &SysrootType::TargetSysroot, installed_versions, ); @@ -873,22 +930,21 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ let run_config = RunConfig { container_image: container_image.to_string(), - target: target.clone(), + target: target.to_string(), command: stamp_script, verbose: self.verbose, source_environment: true, interactive: false, - repo_url: repo_url.clone(), - repo_release: repo_release.clone(), - container_args: merged_container_args.clone(), + repo_url: repo_url.map(|s| s.to_string()), + repo_release: repo_release.map(|s| s.to_string()), + container_args: merged_container_args.cloned(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), - runs_on: self.runs_on.clone(), - nfs_port: self.nfs_port, + // runs_on handled by shared context ..Default::default() }; - container_helper.run_in_container(run_config).await?; + run_container_command(container_helper, run_config, runs_on_context).await?; if self.verbose { print_info("Wrote SDK install stamp.", OutputLevel::Normal); @@ -937,6 +993,23 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ } } +/// Helper function to run a container command, using shared context if available +async fn run_container_command( + container_helper: &SdkContainer, + config: RunConfig, + runs_on_context: Option<&RunsOnContext>, +) -> Result { + if let Some(context) = runs_on_context { + // Use the shared context - don't set runs_on in config as we're handling it + container_helper + .run_in_container_with_context(&config, context) + .await + } else { + // No shared context - use regular execution (may create its own context if runs_on is set) + container_helper.run_in_container(config).await + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/utils/container.rs b/src/utils/container.rs index 04c51ad..71e9ace 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -190,6 +190,55 @@ impl SdkContainer { Ok(Self::new().with_src_dir(src_dir)) } + /// Create a shared RunsOnContext for running multiple commands on a remote host + /// + /// This sets up the NFS server and remote volumes once, which can then be reused + /// for multiple container invocations via `run_in_container_with_context()`. + /// + /// The caller is responsible for calling `context.teardown()` when done. 
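+    ///
+    /// A minimal usage sketch (hypothetical caller; `helper`, `image`, and
+    /// `run_config` stand in for values the caller already has):
+    ///
+    /// ```ignore
+    /// let mut ctx = helper
+    ///     .create_runs_on_context("user@host", None, &image, false)
+    ///     .await?;
+    /// // Reuse the same NFS server and remote volumes for several commands:
+    /// let ok = helper.run_in_container_with_context(&run_config, &ctx).await?;
+    /// ctx.teardown().await?;
+    /// ```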
+    ///
+    /// # Arguments
+    /// * `runs_on` - Remote host specification (user@host)
+    /// * `nfs_port` - Optional specific NFS port (None = auto-select)
+    /// * `container_image` - SDK container image to use
+    /// * `verbose` - Enable verbose output
+    ///
+    /// # Returns
+    /// A `RunsOnContext` that can be reused for multiple container commands
+    pub async fn create_runs_on_context(
+        &self,
+        runs_on: &str,
+        nfs_port: Option<u16>,
+        container_image: &str,
+        verbose: bool,
+    ) -> Result<RunsOnContext> {
+        use crate::utils::runs_on::RunsOnContext;
+
+        // Get or create local docker volume (we need this to export via NFS)
+        let volume_manager = VolumeManager::new(self.container_tool.clone(), self.verbose);
+        let volume_state = volume_manager.get_or_create_volume(&self.cwd).await?;
+
+        let src_dir = self.src_dir.as_ref().unwrap_or(&self.cwd);
+
+        print_info(
+            &format!("Setting up remote execution on {}...", runs_on),
+            OutputLevel::Normal,
+        );
+
+        // Setup remote execution context
+        RunsOnContext::setup(
+            runs_on,
+            nfs_port,
+            src_dir,
+            &volume_state.volume_name,
+            &self.container_tool,
+            container_image,
+            verbose || self.verbose,
+        )
+        .await
+        .context("Failed to setup remote execution context")
+    }
+
     /// Run a command in the container
     pub async fn run_in_container(&self, config: RunConfig) -> Result<bool> {
         // Check if we should run on a remote host
@@ -271,6 +320,9 @@ impl SdkContainer {
     }
 
     /// Run a command in a container on a remote host via NFS
+    ///
+    /// This creates a new RunsOnContext for each call. For running multiple
+    /// commands with a shared context, use `run_in_container_with_context()`.
     async fn run_in_container_remote(&self, config: &RunConfig, runs_on: &str) -> Result<bool> {
         use crate::utils::runs_on::RunsOnContext;
 
@@ -304,6 +356,53 @@ impl SdkContainer {
             let _ = context.setup_signing_tunnel(socket_path).await;
         }
 
+        // Run the command using the shared implementation
+        let result = self
+            .execute_remote_command(&context, config)
+            .await
+            .context("Remote container execution failed");
+
+        // Always cleanup, even on error
+        if let Err(e) = context.teardown().await {
+            print_error(
+                &format!("Warning: Failed to cleanup remote resources: {}", e),
+                OutputLevel::Normal,
+            );
+        }
+
+        result
+    }
+
+    /// Run a command in a container on a remote host using a shared RunsOnContext
+    ///
+    /// This method allows reusing the same NFS server and remote volumes across
+    /// multiple container invocations, which is significantly faster than creating
+    /// a new context for each call.
+    ///
+    /// The caller is responsible for calling `context.teardown()` when done.
+    pub async fn run_in_container_with_context(
+        &self,
+        config: &RunConfig,
+        context: &crate::utils::runs_on::RunsOnContext,
+    ) -> Result<bool> {
+        if !context.is_active() {
+            anyhow::bail!("RunsOnContext is not active (already torn down)");
+        }
+
+        self.execute_remote_command(context, config)
+            .await
+            .context("Remote container execution failed")
+    }
+
+    /// Execute a command on a remote host using an existing RunsOnContext
+    ///
+    /// This is the shared implementation used by both `run_in_container_remote`
+    /// and `run_in_container_with_context`.
+    async fn execute_remote_command(
+        &self,
+        context: &crate::utils::runs_on::RunsOnContext,
+        config: &RunConfig,
+    ) -> Result<bool> {
         // Build environment variables
         let mut env_vars = config.env_vars.clone().unwrap_or_default();
 
@@ -391,12 +490,15 @@ impl SdkContainer {
         let extra_args_refs: Vec<&str> = extra_args.iter().map(|s| s.as_str()).collect();
 
         print_info(
-            &format!("Running command on remote host {}...", runs_on),
+            &format!(
+                "Running command on remote host {}...",
+                context.remote_host().ssh_target()
+            ),
             OutputLevel::Normal,
         );
 
         // Run the container on the remote
-        let result = context
+        context
             .run_container_command(
                 &config.container_image,
                 &full_command,
@@ -406,17 +508,7 @@ impl SdkContainer {
                     .map(|s| s.to_string())
                    .collect::<Vec<String>>(),
             )
-            .await;
-
-        // Always cleanup, even on error
-        if let Err(e) = context.teardown().await {
-            print_error(
-                &format!("Warning: Failed to cleanup remote resources: {}", e),
-                OutputLevel::Normal,
-            );
-        }
-
-        result.context("Remote container execution failed")
+            .await
     }
 
     /// Build the complete container command
diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs
index 1871a78..324ba98 100644
--- a/src/utils/nfs_server.rs
+++ b/src/utils/nfs_server.rs
@@ -746,20 +746,33 @@ mod tests {
     #[test]
     fn test_find_available_port_in_range() {
         // This test may be flaky depending on what ports are in use
-        // but it should generally find at least one available port
-        let port = find_available_port(50000..=50010);
+        // Use a wider range to increase chances of finding an available port
+        let port = find_available_port(50000..=50100);
+        // Skip test if no ports are available (environment issue, not a code bug)
+        if port.is_none() {
+            eprintln!("Skipping test: no available ports in range 50000-50100");
+            return;
+        }
         assert!(port.is_some());
     }
 
     #[test]
     fn test_nfs_server_builder() {
-        let config = NfsServerBuilder::with_port(50099)
+        // Find an available port dynamically instead of hardcoding
+        let available_port = find_available_port(50050..=50150);
+        // Skip test if no ports are available
+        let Some(port) = available_port else {
+            eprintln!("Skipping test: no available ports for NFS server builder test");
+            return;
+        };
+
+        let config = NfsServerBuilder::with_port(port)
             .expect("Port should be available")
             .verbose(true)
             .add_export("/tmp/test", "/test")
             .build();
 
-        assert_eq!(config.port, 50099);
+        assert_eq!(config.port, port);
         assert!(config.verbose);
         assert_eq!(config.exports.len(), 1);
         assert_eq!(config.exports[0].pseudo_path, "/test");
diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs
index 7de2d58..9ad56b3 100644
--- a/src/utils/runs_on.rs
+++ b/src/utils/runs_on.rs
@@ -460,13 +460,34 @@ impl RunsOnContext {
         self.ssh.run_command_interactive(&docker_cmd).await
     }
 
+    /// Check if the context is still active (not yet torn down)
+    pub fn is_active(&self) -> bool {
+        self.nfs_server.is_some()
+    }
+
+    /// Get the remote host
+    pub fn remote_host(&self) -> &RemoteHost {
+        &self.remote_host
+    }
+
     /// Clean up all resources
     ///
     /// This will:
     /// - Remove NFS-backed volumes from remote
     /// - Close SSH tunnel (if any)
     /// - Stop NFS server
-    pub async fn teardown(mut self) -> Result<()> {
+    ///
+    /// After calling this method, the context should not be used for running commands.
+    /// This method can be called multiple times safely (subsequent calls are no-ops).
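+    ///
+    /// Sketch of the intended call pattern (hypothetical caller; `ctx` comes
+    /// from `RunsOnContext::setup`):
+    ///
+    /// ```ignore
+    /// ctx.teardown().await?; // stops the NFS server and removes remote volumes
+    /// ctx.teardown().await?; // safe: second call is a no-op
+    /// ```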
+ pub async fn teardown(&mut self) -> Result<()> { + // If already torn down, return early + if !self.is_active() + && self.remote_src_volume.is_none() + && self.remote_state_volume.is_none() + { + return Ok(()); + } + println!(); print_info("🧹 Cleaning up remote resources...", OutputLevel::Normal); @@ -478,7 +499,7 @@ impl RunsOnContext { // Remove remote helper script #[cfg(unix)] - if let Some(ref helper_path) = self.remote_helper_script { + if let Some(helper_path) = self.remote_helper_script.take() { let _ = self .ssh .run_command(&format!("rm -f {}", helper_path)) @@ -493,26 +514,26 @@ impl RunsOnContext { let mut cleanup_errors = Vec::new(); - if let Some(ref volume) = self.remote_src_volume { + if let Some(volume) = self.remote_src_volume.take() { if self.verbose { print_info( &format!("Removing remote volume: {}", volume), OutputLevel::Normal, ); } - if let Err(e) = remote_vm.remove_volume(volume).await { + if let Err(e) = remote_vm.remove_volume(&volume).await { cleanup_errors.push(format!("Failed to remove {}: {}", volume, e)); } } - if let Some(ref volume) = self.remote_state_volume { + if let Some(volume) = self.remote_state_volume.take() { if self.verbose { print_info( &format!("Removing remote volume: {}", volume), OutputLevel::Normal, ); } - if let Err(e) = remote_vm.remove_volume(volume).await { + if let Err(e) = remote_vm.remove_volume(&volume).await { cleanup_errors.push(format!("Failed to remove {}: {}", volume, e)); } } From c916f5026cae91b242c8d03cd973f3a21cf4903c Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Mon, 29 Dec 2025 13:22:06 -0500 Subject: [PATCH 14/20] cross sdk arch runs-on checking --- src/commands/fetch.rs | 4 +-- src/commands/sdk/install.rs | 8 ++---- src/utils/container.rs | 8 +++--- src/utils/lockfile.rs | 4 +-- src/utils/nfs_server.rs | 50 ++++++++++++++++++++++++++++++++----- 5 files changed, 54 insertions(+), 20 deletions(-) diff --git a/src/commands/fetch.rs b/src/commands/fetch.rs index 67109d5..3255534 100644 --- a/src/commands/fetch.rs +++ b/src/commands/fetch.rs @@ -559,7 +559,7 @@ $DNF_SDK_HOST \ print_info("Fetching SDK target sysroot metadata", OutputLevel::Normal); // Check if SDK target sysroot exists - let check_command = "[ -d $AVOCADO_SDK_PREFIX/target-sysroot ]"; + let check_command = "[ -d $AVOCADO_PREFIX/sdk/target-sysroot ]"; let run_config = RunConfig { container_image: container_config.image.to_string(), target: container_config.target_arch.to_string(), @@ -595,7 +595,7 @@ RPM_ETCCONFIGDIR=$DNF_SDK_TARGET_PREFIX \ $DNF_SDK_HOST \ $DNF_SDK_TARGET_REPO_CONF \ --setopt=sslcacert=${{SSL_CERT_FILE}} \ - --installroot=$AVOCADO_SDK_PREFIX/target-sysroot \ + --installroot=$AVOCADO_PREFIX/sdk/target-sysroot \ {dnf_args_str} \ makecache "# diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index 8b8a896..f123847 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -757,11 +757,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ .await?; if !installed_versions.is_empty() { - lock_file.update_sysroot_versions( - target, - &SysrootType::Rootfs, - installed_versions, - ); + lock_file.update_sysroot_versions(target, &SysrootType::Rootfs, installed_versions); if self.verbose { print_info( "Updated lock file with rootfs package version.", @@ -835,7 +831,7 @@ unset RPM_CONFIGDIR RPM_ETCCONFIGDIR="$DNF_SDK_TARGET_PREFIX" \ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ --disablerepo=${{AVOCADO_TARGET}}-target-ext \ - {} {} --installroot ${{AVOCADO_SDK_PREFIX}}/target-sysroot \ 
+ {} {} --installroot ${{AVOCADO_PREFIX}}/sdk/target-sysroot \ install {} {} "#, dnf_args_str, diff --git a/src/utils/container.rs b/src/utils/container.rs index 71e9ace..64de531 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -1129,7 +1129,7 @@ export AVOCADO_SDK_ARCH="$(uname -m)" export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk/${{AVOCADO_SDK_ARCH}}" export AVOCADO_EXT_SYSROOTS="${{AVOCADO_PREFIX}}/extensions" export DNF_SDK_HOST_PREFIX="${{AVOCADO_SDK_PREFIX}}" -export DNF_SDK_TARGET_PREFIX="${{AVOCADO_SDK_PREFIX}}/target-repoconf" +export DNF_SDK_TARGET_PREFIX="${{AVOCADO_PREFIX}}/sdk/target-repoconf" export DNF_SDK_HOST="\ dnf \ --releasever="$REPO_RELEASE" \ @@ -1164,7 +1164,7 @@ export DNF_SDK_TARGET_REPO_CONF="\ mkdir -p /etc/dnf/vars mkdir -p ${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars -mkdir -p ${{AVOCADO_SDK_PREFIX}}/target-repoconf/etc/dnf/vars +mkdir -p ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars echo "${{REPO_URL}}" > /etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars/repo_url @@ -1300,7 +1300,7 @@ export AVOCADO_SDK_ARCH="$(uname -m)" export AVOCADO_SDK_PREFIX="${{AVOCADO_PREFIX}}/sdk/${{AVOCADO_SDK_ARCH}}" export AVOCADO_EXT_SYSROOTS="${{AVOCADO_PREFIX}}/extensions" export DNF_SDK_HOST_PREFIX="${{AVOCADO_SDK_PREFIX}}" -export DNF_SDK_TARGET_PREFIX="${{AVOCADO_SDK_PREFIX}}/target-repoconf" +export DNF_SDK_TARGET_PREFIX="${{AVOCADO_PREFIX}}/sdk/target-repoconf" export DNF_SDK_HOST="\ dnf \ --releasever="$REPO_RELEASE" \ @@ -1335,7 +1335,7 @@ export DNF_SDK_TARGET_REPO_CONF="\ mkdir -p /etc/dnf/vars mkdir -p ${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars -mkdir -p ${{AVOCADO_SDK_PREFIX}}/target-repoconf/etc/dnf/vars +mkdir -p ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars echo "${{REPO_URL}}" > /etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars/repo_url diff --git a/src/utils/lockfile.rs b/src/utils/lockfile.rs index 03361bb..adeccdb 100644 --- a/src/utils/lockfile.rs +++ b/src/utils/lockfile.rs @@ -96,7 +96,7 @@ impl SysrootType { // Target-sysroot: same approach as rootfs - unset config and use --root rpm_etcconfigdir: None, rpm_configdir: None, - root_path: Some("$AVOCADO_SDK_PREFIX/target-sysroot".to_string()), + root_path: Some("$AVOCADO_PREFIX/sdk/target-sysroot".to_string()), }, SysrootType::Extension(name) => RpmQueryConfig { // Local/external extensions use ext-rpm-config-scripts @@ -993,7 +993,7 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk assert!(target_config.rpm_configdir.is_none()); assert_eq!( target_config.root_path, - Some("$AVOCADO_SDK_PREFIX/target-sysroot".to_string()) + Some("$AVOCADO_PREFIX/sdk/target-sysroot".to_string()) ); } diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs index 324ba98..a016d14 100644 --- a/src/utils/nfs_server.rs +++ b/src/utils/nfs_server.rs @@ -135,13 +135,18 @@ NFS_Core_Param {{ Nb_Max_Fd = 65536; Max_Open_Files = 10000; DRC_Max_Size = 32768; - Attr_Expiration_Time = 60; - Nb_Worker = 256; + # Reduce attribute cache time for fresher file metadata during builds + Attr_Expiration_Time = 10; + # Single-client use case doesn't need many workers + Nb_Worker = 32; Bind_addr = {}; }} NFSV4 {{ - Graceless = false; + # Skip grace period - we're a fresh server with no prior client state to recover + Graceless = true; + # Shorter lease for faster client issue detection + Lease_Lifetime = 30; Allow_Numeric_Owners = true; Only_Numeric_Owners = true; }} @@ -439,9 +444,42 @@ impl NfsServer { ); } - // Give Ganesha time to fully initialize and load exports - // 2 
seconds is more reliable, especially on slower systems or with many exports
-        tokio::time::sleep(tokio::time::Duration::from_millis(2000)).await;
+        // Wait for Ganesha to be ready by polling the port
+        // This is faster than a static sleep on systems where ganesha starts quickly
+        let max_wait = tokio::time::Duration::from_millis(2000);
+        let poll_interval = tokio::time::Duration::from_millis(100);
+        let start = std::time::Instant::now();
+
+        loop {
+            // Try to connect to the NFS port to check if ganesha is listening
+            match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", config.port)).await {
+                Ok(_) => {
+                    if config.verbose {
+                        print_info(
+                            &format!(
+                                "NFS server ready after {}ms",
+                                start.elapsed().as_millis()
+                            ),
+                            OutputLevel::Verbose,
+                        );
+                    }
+                    break;
+                }
+                Err(_) => {
+                    if start.elapsed() >= max_wait {
+                        // Timeout - continue anyway and let the container check handle failures
+                        if config.verbose {
+                            print_info(
+                                "NFS server port check timed out, continuing...",
+                                OutputLevel::Verbose,
+                            );
+                        }
+                        break;
+                    }
+                    tokio::time::sleep(poll_interval).await;
+                }
+            }
+        }
 
         // Verify container is running
         let check_output = AsyncCommand::new(container_tool)

From 6543f7dc887d7bb916587af410ecb41ed6e77a1b Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Mon, 29 Dec 2025 14:05:03 -0500
Subject: [PATCH 15/20] add avocado prune to clean up abandoned state

---
 src/commands/mod.rs                 |   1 +
 src/commands/prune.rs               | 460 ++++++++++++++++++++++++++++
 src/commands/signing_keys/create.rs |   6 +-
 src/main.rs                         |  22 ++
 src/utils/image_signing.rs          |   6 +-
 src/utils/lockfile.rs               |   2 +-
 src/utils/nfs_server.rs             |   5 +-
 src/utils/pkcs11_devices.rs         |   6 +-
 src/utils/signing_keys.rs           |   6 +-
 tests/runs_on_integration.rs        |  16 +-
 10 files changed, 513 insertions(+), 17 deletions(-)
 create mode 100644 src/commands/prune.rs

diff --git a/src/commands/mod.rs b/src/commands/mod.rs
index c9649bb..4e9c432 100644
--- a/src/commands/mod.rs
+++ b/src/commands/mod.rs
@@ -6,6 +6,7 @@ pub mod hitl;
 pub mod init;
 pub mod install;
 pub mod provision;
+pub mod prune;
 pub mod runtime;
 pub mod sdk;
 pub mod sign;
diff --git a/src/commands/prune.rs b/src/commands/prune.rs
new file mode 100644
index 0000000..e9103b1
--- /dev/null
+++ b/src/commands/prune.rs
@@ -0,0 +1,460 @@
+//! Prune command to clean up abandoned Docker volumes.
+//!
+//! This module identifies and removes Docker volumes that are no longer associated
+//! with active Avocado configurations or containers.
+
+use anyhow::{Context, Result};
+use serde::Deserialize;
+use std::collections::HashMap;
+use std::path::Path;
+use tokio::process::Command as AsyncCommand;
+
+use crate::utils::output::{print_error, print_info, print_success, print_warning, OutputLevel};
+use crate::utils::volume::VolumeState;
+
+/// Information about a Docker volume from `docker volume inspect`
+#[derive(Debug, Clone, Deserialize)]
+struct VolumeInspectInfo {
+    #[serde(rename = "Name")]
+    #[allow(dead_code)]
+    name: String,
+    #[serde(rename = "Labels")]
+    labels: Option<HashMap<String, String>>,
+}
+
+/// Classification of a volume's status
+#[derive(Debug, Clone, PartialEq)]
+enum VolumeStatus {
+    /// Volume is actively linked to an existing config
+    Active,
+    /// Volume is abandoned and can be removed
+    Abandoned(String), // Reason for abandonment
+}
+
+/// Command to prune abandoned Docker volumes.
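+///
+/// Typical invocations (sketch; flags as registered in `main.rs`):
+///
+/// ```text
+/// avocado prune --dry-run   # report abandoned volumes without removing anything
+/// avocado prune --verbose   # remove them, logging each classification decision
+/// ```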
+///
+/// This command identifies and removes volumes that are no longer needed:
+/// - `avo-` volumes: state volumes for avocado configs
+/// - `avocado-src-*` and `avocado-state-*` volumes: container volumes
+pub struct PruneCommand {
+    /// Container tool to use (docker/podman)
+    container_tool: String,
+    /// Enable verbose output
+    verbose: bool,
+    /// Perform dry run without actually removing volumes
+    dry_run: bool,
+}
+
+impl PruneCommand {
+    /// Creates a new PruneCommand instance.
+    ///
+    /// # Arguments
+    /// * `container_tool` - Container tool to use (docker/podman)
+    /// * `verbose` - Enable verbose output
+    /// * `dry_run` - If true, only show what would be removed
+    pub fn new(container_tool: Option<String>, verbose: bool, dry_run: bool) -> Self {
+        Self {
+            container_tool: container_tool.unwrap_or_else(|| "docker".to_string()),
+            verbose,
+            dry_run,
+        }
+    }
+
+    /// Executes the prune command.
+    ///
+    /// # Returns
+    /// * `Ok(())` if the pruning was successful
+    /// * `Err` if there was an error during pruning
+    pub async fn execute(&self) -> Result<()> {
+        print_info(
+            "Scanning for abandoned Docker volumes...",
+            OutputLevel::Normal,
+        );
+
+        // Get all avocado-related volumes
+        let volumes = self.list_avocado_volumes().await?;
+
+        if volumes.is_empty() {
+            print_info("No Avocado-related volumes found.", OutputLevel::Normal);
+            return Ok(());
+        }
+
+        if self.verbose {
+            print_info(
+                &format!("Found {} Avocado-related volume(s)", volumes.len()),
+                OutputLevel::Normal,
+            );
+        }
+
+        let mut abandoned_count = 0;
+        let mut active_count = 0;
+        let mut removed_count = 0;
+        let mut failed_count = 0;
+
+        for volume_name in &volumes {
+            let status = self.classify_volume(volume_name).await?;
+
+            match status {
+                VolumeStatus::Active => {
+                    active_count += 1;
+                    if self.verbose {
+                        print_info(
+                            &format!("Volume '{}' is active, skipping", volume_name),
+                            OutputLevel::Normal,
+                        );
+                    }
+                }
+                VolumeStatus::Abandoned(reason) => {
+                    abandoned_count += 1;
+
+                    if self.dry_run {
+                        print_warning(
+                            &format!("[DRY RUN] Would remove '{}': {}", volume_name, reason),
+                            OutputLevel::Normal,
+                        );
+                    } else {
+                        print_info(
+                            &format!("Removing '{}': {}", volume_name, reason),
+                            OutputLevel::Normal,
+                        );
+
+                        match self.remove_volume_with_containers(volume_name).await {
+                            Ok(()) => {
+                                removed_count += 1;
+                                print_success(
+                                    &format!("Removed volume '{}'", volume_name),
+                                    OutputLevel::Normal,
+                                );
+                            }
+                            Err(e) => {
+                                failed_count += 1;
+                                print_error(
+                                    &format!("Failed to remove '{}': {}", volume_name, e),
+                                    OutputLevel::Normal,
+                                );
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        // Print summary
+        println!();
+        if self.dry_run {
+            print_info(
+                &format!(
+                    "Dry run complete: {} active, {} would be removed",
+                    active_count, abandoned_count
+                ),
+                OutputLevel::Normal,
+            );
+        } else {
+            let mut summary = format!(
+                "Prune complete: {} active, {} removed",
+                active_count, removed_count
+            );
+            if failed_count > 0 {
+                summary.push_str(&format!(", {} failed", failed_count));
+            }
+            print_success(&summary, OutputLevel::Normal);
+        }
+
+        Ok(())
+    }
+
+    /// List all Avocado-related volumes
+    async fn list_avocado_volumes(&self) -> Result<Vec<String>> {
+        let output = AsyncCommand::new(&self.container_tool)
+            .args(["volume", "ls", "--format", "{{.Name}}"])
+            .output()
+            .await
+            .context("Failed to list Docker volumes")?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            anyhow::bail!("Failed to list volumes: {}", stderr);
+        }
+
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let volumes: Vec<String> = stdout
+            .lines()
+            .filter(|line| {
+                line.starts_with("avo-")
+                    || line.starts_with("avocado-src-")
+                    || line.starts_with("avocado-state-")
+            })
+            .map(|s| s.to_string())
+            .collect();
+
+        Ok(volumes)
+    }
+
+    /// Classify a volume as active or abandoned
+    async fn classify_volume(&self, volume_name: &str) -> Result<VolumeStatus> {
+        if volume_name.starts_with("avo-") {
+            self.classify_avo_volume(volume_name).await
+        } else if volume_name.starts_with("avocado-src-")
+            || volume_name.starts_with("avocado-state-")
+        {
+            self.classify_container_volume(volume_name).await
+        } else {
+            Ok(VolumeStatus::Active) // Unknown volume type, don't touch
+        }
+    }
+
+    /// Classify an avo- volume
+    ///
+    /// These are state volumes for avocado configs. Check:
+    /// 1. If source_path directory exists
+    /// 2. If .avocado-state file exists in that directory
+    /// 3. If the state file links back to this volume's UUID
+    async fn classify_avo_volume(&self, volume_name: &str) -> Result<VolumeStatus> {
+        // Get volume info including labels
+        let volume_info = self.inspect_volume(volume_name).await?;
+
+        // Get the source_path label
+        let source_path = match &volume_info.labels {
+            Some(labels) => labels.get("avocado.source_path"),
+            None => None,
+        };
+
+        let source_path = match source_path {
+            Some(path) => path,
+            None => {
+                return Ok(VolumeStatus::Abandoned(
+                    "no source_path label found".to_string(),
+                ));
+            }
+        };
+
+        // Check if the source directory exists
+        let source_dir = Path::new(source_path);
+        if !source_dir.exists() {
+            return Ok(VolumeStatus::Abandoned(format!(
+                "source directory '{}' does not exist",
+                source_path
+            )));
+        }
+
+        // Check for .avocado-state file
+        let state_file = source_dir.join(".avocado-state");
+        if !state_file.exists() {
+            return Ok(VolumeStatus::Abandoned(format!(
+                "no .avocado-state file in '{}'",
+                source_path
+            )));
+        }
+
+        // Load and verify the state file links to this volume
+        match VolumeState::load_from_dir(source_dir) {
+            Ok(Some(state)) => {
+                if state.volume_name == volume_name {
+                    Ok(VolumeStatus::Active)
+                } else {
+                    Ok(VolumeStatus::Abandoned(format!(
+                        ".avocado-state links to '{}', not this volume",
+                        state.volume_name
+                    )))
+                }
+            }
+            Ok(None) => Ok(VolumeStatus::Abandoned(format!(
+                "could not read .avocado-state in '{}'",
+                source_path
+            ))),
+            Err(e) => Ok(VolumeStatus::Abandoned(format!(
+                "error reading .avocado-state: {}",
+                e
+            ))),
+        }
+    }
+
+    /// Classify an avocado-src-* or avocado-state-* volume
+    ///
+    /// These volumes are abandoned if:
+    /// - Not associated with any containers, OR
+    /// - Only associated with stopped containers
+    async fn classify_container_volume(&self, volume_name: &str) -> Result<VolumeStatus> {
+        // Get containers using this volume
+        let containers = self.get_containers_using_volume(volume_name).await?;
+
+        if containers.is_empty() {
+            return Ok(VolumeStatus::Abandoned(
+                "not associated with any containers".to_string(),
+            ));
+        }
+
+        // Check if any container is running
+        for container_id in &containers {
+            if self.is_container_running(container_id).await? {
+                return Ok(VolumeStatus::Active);
+            }
+        }
+
+        // All associated containers are stopped
+        Ok(VolumeStatus::Abandoned(format!(
+            "only associated with {} stopped container(s)",
+            containers.len()
+        )))
+    }
+
+    /// Inspect a volume and return its info
+    async fn inspect_volume(&self, volume_name: &str) -> Result<VolumeInspectInfo> {
+        let output = AsyncCommand::new(&self.container_tool)
+            .args(["volume", "inspect", volume_name])
+            .output()
+            .await
+            .context("Failed to inspect volume")?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            anyhow::bail!("Failed to inspect volume {}: {}", volume_name, stderr);
+        }
+
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let infos: Vec<VolumeInspectInfo> =
+            serde_json::from_str(&stdout).context("Failed to parse volume inspect output")?;
+
+        infos
+            .into_iter()
+            .next()
+            .ok_or_else(|| anyhow::anyhow!("No volume info returned"))
+    }
+
+    /// Get list of container IDs using a specific volume
+    async fn get_containers_using_volume(&self, volume_name: &str) -> Result<Vec<String>> {
+        let output = AsyncCommand::new(&self.container_tool)
+            .args([
+                "ps",
+                "-a",
+                "--filter",
+                &format!("volume={}", volume_name),
+                "--format",
+                "{{.ID}}",
+            ])
+            .output()
+            .await
+            .context("Failed to list containers using volume")?;
+
+        if !output.status.success() {
+            return Ok(Vec::new());
+        }
+
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let containers: Vec<String> = stdout
+            .lines()
+            .filter(|line| !line.is_empty())
+            .map(|s| s.to_string())
+            .collect();
+
+        Ok(containers)
+    }
+
+    /// Check if a container is running
+    async fn is_container_running(&self, container_id: &str) -> Result<bool> {
+        let output = AsyncCommand::new(&self.container_tool)
+            .args(["inspect", "--format", "{{.State.Running}}", container_id])
+            .output()
+            .await
+            .context("Failed to inspect container")?;
+
+        if !output.status.success() {
+            return Ok(false);
+        }
+
+        let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
+        Ok(stdout == "true")
+    }
+
+    /// Remove a volume, including stopping/removing any associated containers
+    async fn remove_volume_with_containers(&self, volume_name: &str) -> Result<()> {
+        // Get containers using this volume
+        let containers = self.get_containers_using_volume(volume_name).await?;
+
+        // Remove associated containers
+        for container_id in &containers {
+            if self.verbose {
+                print_info(
+                    &format!(
+                        "Removing container: {}",
+                        &container_id[..12.min(container_id.len())]
+                    ),
+                    OutputLevel::Normal,
+                );
+            }
+
+            // Kill the container (in case it's running)
+            let _ = AsyncCommand::new(&self.container_tool)
+                .args(["kill", container_id])
+                .output()
+                .await;
+
+            // Remove the container
+            let output = AsyncCommand::new(&self.container_tool)
+                .args(["rm", "-f", container_id])
+                .output()
+                .await
+                .with_context(|| format!("Failed to remove container {}", container_id))?;
+
+            if !output.status.success() {
+                let stderr = String::from_utf8_lossy(&output.stderr);
+                print_warning(
+                    &format!(
+                        "Warning: could not remove container {}: {}",
+                        &container_id[..12.min(container_id.len())],
+                        stderr.trim()
+                    ),
+                    OutputLevel::Normal,
+                );
+            }
+        }
+
+        // Remove the volume
+        let output = AsyncCommand::new(&self.container_tool)
+            .args(["volume", "rm", volume_name])
+            .output()
+            .await
+            .context("Failed to remove volume")?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            anyhow::bail!("Failed to remove volume: {}", stderr.trim());
+        }
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
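+    // Additional check: the abandonment reason is part of the value, so two
+    // different reasons compare unequal (follows from the PartialEq derive).
+    #[test]
+    fn test_volume_status_different_reasons() {
+        assert_ne!(
+            VolumeStatus::Abandoned("no containers".to_string()),
+            VolumeStatus::Abandoned("source directory missing".to_string())
+        );
+    }
+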
#[test] + fn test_prune_command_creation() { + let cmd = PruneCommand::new(None, false, false); + assert_eq!(cmd.container_tool, "docker"); + assert!(!cmd.verbose); + assert!(!cmd.dry_run); + } + + #[test] + fn test_prune_command_with_podman() { + let cmd = PruneCommand::new(Some("podman".to_string()), true, true); + assert_eq!(cmd.container_tool, "podman"); + assert!(cmd.verbose); + assert!(cmd.dry_run); + } + + #[test] + fn test_volume_status_equality() { + assert_eq!(VolumeStatus::Active, VolumeStatus::Active); + assert_eq!( + VolumeStatus::Abandoned("test".to_string()), + VolumeStatus::Abandoned("test".to_string()) + ); + assert_ne!( + VolumeStatus::Active, + VolumeStatus::Abandoned("test".to_string()) + ); + } +} diff --git a/src/commands/signing_keys/create.rs b/src/commands/signing_keys/create.rs index cc09ce1..ba08fa5 100644 --- a/src/commands/signing_keys/create.rs +++ b/src/commands/signing_keys/create.rs @@ -181,7 +181,11 @@ fn generate_keyid_from_uri(uri: &str) -> String { } fn hex_encode(bytes: &[u8]) -> String { - bytes.iter().map(|b| format!("{:02x}", b)).collect() + use std::fmt::Write; + bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = write!(acc, "{:02x}", b); + acc + }) } #[cfg(test)] diff --git a/src/main.rs b/src/main.rs index d0cec3c..10a150e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -16,6 +16,7 @@ use commands::hitl::HitlServerCommand; use commands::init::InitCommand; use commands::install::InstallCommand; use commands::provision::ProvisionCommand; +use commands::prune::PruneCommand; use commands::runtime::{ RuntimeBuildCommand, RuntimeCleanCommand, RuntimeDeployCommand, RuntimeDepsCommand, RuntimeDnfCommand, RuntimeInstallCommand, RuntimeListCommand, RuntimeProvisionCommand, @@ -287,6 +288,18 @@ enum Commands { #[arg(long = "dnf-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] dnf_args: Option>, }, + /// Remove abandoned Docker volumes no longer associated with active configs + Prune { + /// Container tool to use (docker/podman) + #[arg(long, default_value = "docker")] + container_tool: String, + /// Enable verbose output + #[arg(short, long)] + verbose: bool, + /// Perform a dry run without actually removing volumes + #[arg(long)] + dry_run: bool, + }, } #[derive(Subcommand)] @@ -921,6 +934,15 @@ async fn main() -> Result<()> { sign_cmd.execute().await?; Ok(()) } + Commands::Prune { + container_tool, + verbose, + dry_run, + } => { + let prune_cmd = PruneCommand::new(Some(container_tool), verbose, dry_run); + prune_cmd.execute().await?; + Ok(()) + } Commands::Runtime { command } => match command { RuntimeCommands::Install { runtime, diff --git a/src/utils/image_signing.rs b/src/utils/image_signing.rs index 9497e21..182c891 100644 --- a/src/utils/image_signing.rs +++ b/src/utils/image_signing.rs @@ -432,7 +432,11 @@ fn sign_with_pkcs11(uri: &str, hash: &[u8]) -> Result> { } fn hex_encode(bytes: &[u8]) -> String { - bytes.iter().map(|b| format!("{:02x}", b)).collect() + use std::fmt::Write; + bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = write!(acc, "{:02x}", b); + acc + }) } fn hex_decode(hex: &str) -> Result> { diff --git a/src/utils/lockfile.rs b/src/utils/lockfile.rs index adeccdb..6a51512 100644 --- a/src/utils/lockfile.rs +++ b/src/utils/lockfile.rs @@ -25,7 +25,7 @@ pub enum SysrootType { Sdk, /// Rootfs sysroot ($AVOCADO_PREFIX/rootfs) Rootfs, - /// Target sysroot ($AVOCADO_SDK_PREFIX/target-sysroot) + /// Target sysroot 
($AVOCADO_PREFIX/sdk/target-sysroot) TargetSysroot, /// Local/external extension sysroot ($AVOCADO_EXT_SYSROOTS/{name}) /// Uses ext-rpm-config-scripts for RPM database diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs index a016d14..6702a60 100644 --- a/src/utils/nfs_server.rs +++ b/src/utils/nfs_server.rs @@ -456,10 +456,7 @@ impl NfsServer { Ok(_) => { if config.verbose { print_info( - &format!( - "NFS server ready after {}ms", - start.elapsed().as_millis() - ), + &format!("NFS server ready after {}ms", start.elapsed().as_millis()), OutputLevel::Verbose, ); } diff --git a/src/utils/pkcs11_devices.rs b/src/utils/pkcs11_devices.rs index 7351de3..3d96bef 100644 --- a/src/utils/pkcs11_devices.rs +++ b/src/utils/pkcs11_devices.rs @@ -645,7 +645,11 @@ fn uri_decode(s: &str) -> Result { /// Hex encode bytes fn hex_encode(bytes: &[u8]) -> String { - bytes.iter().map(|b| format!("{:02x}", b)).collect() + use std::fmt::Write; + bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = write!(acc, "{:02x}", b); + acc + }) } /// Initialize PKCS#11 and open session diff --git a/src/utils/signing_keys.rs b/src/utils/signing_keys.rs index 7eff8fe..cceeb9e 100644 --- a/src/utils/signing_keys.rs +++ b/src/utils/signing_keys.rs @@ -314,7 +314,11 @@ pub fn get_key_entries(key_names: &[String]) -> Result> // Add hex encoding since we need it for keyid generation mod hex { pub fn encode(bytes: &[u8]) -> String { - bytes.iter().map(|b| format!("{:02x}", b)).collect() + use std::fmt::Write; + bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = write!(acc, "{:02x}", b); + acc + }) } } diff --git a/tests/runs_on_integration.rs b/tests/runs_on_integration.rs index dc01529..d2c0577 100644 --- a/tests/runs_on_integration.rs +++ b/tests/runs_on_integration.rs @@ -74,8 +74,8 @@ mod nfs_config_tests { assert!(ganesha_config.contains("NFS_Port = 12050")); assert!(ganesha_config.contains("Export_Id = 1")); - assert!(ganesha_config.contains("Path = /home/user/src")); - assert!(ganesha_config.contains("Pseudo = /src")); + assert!(ganesha_config.contains(r#"Path = "/home/user/src""#)); + assert!(ganesha_config.contains(r#"Pseudo = "/src""#)); } #[test] @@ -92,8 +92,8 @@ mod nfs_config_tests { // Verify both exports are present assert!(ganesha_config.contains("Export_Id = 1")); assert!(ganesha_config.contains("Export_Id = 2")); - assert!(ganesha_config.contains("Pseudo = /src")); - assert!(ganesha_config.contains("Pseudo = /state")); + assert!(ganesha_config.contains(r#"Pseudo = "/src""#)); + assert!(ganesha_config.contains(r#"Pseudo = "/state""#)); // Verify security settings for remote access assert!(ganesha_config.contains("Squash = No_Root_Squash")); @@ -126,8 +126,8 @@ mod nfs_config_tests { assert!(block.contains("EXPORT {")); assert!(block.contains("Export_Id = 42")); - assert!(block.contains("Path = /var/lib/docker/volumes/test/_data")); - assert!(block.contains("Pseudo = /state")); + assert!(block.contains(r#"Path = "/var/lib/docker/volumes/test/_data""#)); + assert!(block.contains(r#"Pseudo = "/state""#)); assert!(block.contains("FSAL {")); assert!(block.contains("name = VFS")); } @@ -291,8 +291,8 @@ mod port_selection_tests { fn test_port_becomes_unavailable_after_bind() { use std::net::TcpListener; - // Bind to a port - let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + // Bind to a port on 0.0.0.0 (same as is_port_available checks) + let listener = TcpListener::bind("0.0.0.0:0").unwrap(); let port = 
listener.local_addr().unwrap().port(); // Port should no longer be available From 78d01bee0a97c8082e9b2ba8276d8035531524f9 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Mon, 29 Dec 2025 14:43:40 -0500 Subject: [PATCH 16/20] tune nfs server settings --- src/utils/nfs_server.rs | 6 ++++-- src/utils/remote.rs | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs index 6702a60..fd66d3f 100644 --- a/src/utils/nfs_server.rs +++ b/src/utils/nfs_server.rs @@ -135,8 +135,8 @@ NFS_Core_Param {{ Nb_Max_Fd = 65536; Max_Open_Files = 10000; DRC_Max_Size = 32768; - # Reduce attribute cache time for fresher file metadata during builds - Attr_Expiration_Time = 10; + # Short attribute cache for fresher file metadata during builds + Attr_Expiration_Time = 3; # Single-client use case doesn't need many workers Nb_Worker = 32; Bind_addr = {}; @@ -149,6 +149,8 @@ NFSV4 {{ Lease_Lifetime = 30; Allow_Numeric_Owners = true; Only_Numeric_Owners = true; + # Disable delegations to prevent stale handle issues with VirtioFS + Delegations = false; }} # Defaults that all EXPORT{{}} blocks inherit unless they override diff --git a/src/utils/remote.rs b/src/utils/remote.rs index 46920c1..0dbcd38 100644 --- a/src/utils/remote.rs +++ b/src/utils/remote.rs @@ -387,11 +387,16 @@ impl RemoteVolumeManager { nfs_port: u16, export_path: &str, ) -> Result<()> { + // Mount options: + // - actimeo=3: Short attribute cache timeout (3 seconds) for fresher metadata + // - lookupcache=positive: Only cache successful lookups, not failures + // These help with stale handle issues from Docker Desktop's VirtioFS + // while maintaining reasonable performance let command = format!( "{} volume create \ --driver local \ --opt type=nfs \ - --opt o=addr={},rw,nfsvers=4,port={} \ + --opt o=addr={},rw,nfsvers=4,port={},actimeo=3,lookupcache=positive \ --opt device=:{} \ {}", self.container_tool, nfs_host, nfs_port, export_path, volume_name From cf81b7a155c61fe6afd20c3d2aaf20eb0dc23bbf Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 30 Dec 2025 20:20:06 -0500 Subject: [PATCH 17/20] install target-sysroot for any sdk.compile Signed-off-by: Justin Schneck --- src/commands/sdk/install.rs | 10 +++-- src/commands/signing_keys/create.rs | 10 +++-- src/utils/config.rs | 61 +++++++++++++++++++++++++++++ src/utils/image_signing.rs | 10 +++-- src/utils/pkcs11_devices.rs | 10 +++-- src/utils/signing_keys.rs | 10 +++-- 6 files changed, 91 insertions(+), 20 deletions(-) diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index f123847..c6414c5 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -771,11 +771,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ return Err(anyhow::anyhow!("Failed to install rootfs sysroot.")); } - // Install target-sysroot if there are any sdk.compile dependencies - // This aggregates all dependencies from all compile sections (main config + external extensions) - let compile_dependencies = config.get_compile_dependencies(); - if !compile_dependencies.is_empty() { + // Install target-sysroot if there are any sdk.compile sections defined + // (regardless of whether they have dependencies). + // This is needed for cross-compilation support. + // The composed config already has external extension compile sections merged in. 
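+        // For example, a config with a compile section but no dependencies
+        // still triggers the target-sysroot install:
+        //
+        //   [sdk.compile.app]
+        //   compile = "make"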
+ if config.has_compile_sections() { // Aggregate all compile dependencies into a single list (with lock file support) + let compile_dependencies = config.get_compile_dependencies(); let mut all_compile_packages: Vec = Vec::new(); let mut all_compile_package_names: Vec = Vec::new(); for dependencies in compile_dependencies.values() { diff --git a/src/commands/signing_keys/create.rs b/src/commands/signing_keys/create.rs index ba08fa5..1c96609 100644 --- a/src/commands/signing_keys/create.rs +++ b/src/commands/signing_keys/create.rs @@ -182,10 +182,12 @@ fn generate_keyid_from_uri(uri: &str) -> String { fn hex_encode(bytes: &[u8]) -> String { use std::fmt::Write; - bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { - let _ = write!(acc, "{:02x}", b); - acc - }) + bytes + .iter() + .fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = write!(acc, "{:02x}", b); + acc + }) } #[cfg(test)] diff --git a/src/utils/config.rs b/src/utils/config.rs index 2978665..6726874 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -1997,6 +1997,20 @@ impl Config { compile_deps } + /// Check if there are any compile sections defined (regardless of whether they have dependencies) + /// + /// This is used to determine if the target-sysroot should be installed. + /// The target-sysroot is needed whenever there's any sdk.compile. section, + /// even if it doesn't define any dependencies. + pub fn has_compile_sections(&self) -> bool { + if let Some(sdk) = &self.sdk { + if let Some(compile) = &sdk.compile { + return !compile.is_empty(); + } + } + false + } + /// Get extension SDK dependencies from configuration /// Returns a HashMap where keys are extension names and values are their SDK dependencies pub fn get_extension_sdk_dependencies( @@ -4160,6 +4174,53 @@ libfoo-dev-arm64 = "*" std::fs::remove_file(temp_file).ok(); } + #[test] + fn test_has_compile_sections() { + // Test with compile sections defined + let config_with_compile = r#" +default_target = "qemux86-64" + +[sdk.compile.app] +compile = "make" + +[sdk.compile.app.dependencies] +libfoo = "*" +"#; + + let config = Config::load_from_str(config_with_compile).unwrap(); + assert!(config.has_compile_sections()); + + // Test with compile sections but no dependencies + let config_no_deps = r#" +default_target = "qemux86-64" + +[sdk.compile.app] +compile = "make" +"#; + + let config = Config::load_from_str(config_no_deps).unwrap(); + assert!(config.has_compile_sections()); + + // Test with no compile sections + let config_no_compile = r#" +default_target = "qemux86-64" + +[sdk] +image = "my-sdk-image" +"#; + + let config = Config::load_from_str(config_no_compile).unwrap(); + assert!(!config.has_compile_sections()); + + // Test with empty config (minimal) + let config_minimal = r#" +default_target = "qemux86-64" +"#; + + let config = Config::load_from_str(config_minimal).unwrap(); + assert!(!config.has_compile_sections()); + } + #[test] fn test_comprehensive_runtime_section() { let config_content = r#" diff --git a/src/utils/image_signing.rs b/src/utils/image_signing.rs index 182c891..654ba3b 100644 --- a/src/utils/image_signing.rs +++ b/src/utils/image_signing.rs @@ -433,10 +433,12 @@ fn sign_with_pkcs11(uri: &str, hash: &[u8]) -> Result> { fn hex_encode(bytes: &[u8]) -> String { use std::fmt::Write; - bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { - let _ = write!(acc, "{:02x}", b); - acc - }) + bytes + .iter() + .fold(String::with_capacity(bytes.len() * 2), |mut acc, b| { + let _ = 
From ff164e1a0a2c9725713c55507978948aa559a112 Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Thu, 1 Jan 2026 23:08:00 -0500
Subject: [PATCH 18/20] update remote to support /bin/sh

---
 src/utils/remote.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/utils/remote.rs b/src/utils/remote.rs
index 0dbcd38..d60266a 100644
--- a/src/utils/remote.rs
+++ b/src/utils/remote.rs
@@ -173,6 +173,8 @@ impl SshClient {
         // Note: We need to source profile files because non-interactive SSH sessions
         // don't load .bashrc/.profile, so avocado might not be in PATH if it's in
         // ~/.cargo/bin, ~/.local/bin, or other user-specific locations.
+        // We use POSIX-compatible syntax (test -f && . instead of source) because
+        // some embedded systems use /bin/sh which doesn't support bash-specific commands.
         let output = AsyncCommand::new("ssh")
             .args([
                 "-o",
                 "BatchMode=yes",
                 "-o",
                 "ConnectTimeout=10",
                 "-o",
                 "StrictHostKeyChecking=accept-new",
                 &self.remote.ssh_target(),
-                "source ~/.profile 2>/dev/null; source ~/.bashrc 2>/dev/null; avocado --version 2>/dev/null || echo 'not-installed'",
+                "test -f ~/.profile && . ~/.profile; test -f ~/.bashrc && . ~/.bashrc; avocado --version 2>/dev/null || echo 'not-installed'",
             ])
             .output()
             .await
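
Note: the probe stays portable because it never relies on the bash-only source builtin. A sketch of the same string assembled in Rust -- the helper name version_probe_command is illustrative, not part of the patch:

    // Each step is separated by ';' so a missing profile file (the `test -f`
    // guard failing) never aborts the final version check.
    fn version_probe_command() -> String {
        [
            "test -f ~/.profile && . ~/.profile",
            "test -f ~/.bashrc && . ~/.bashrc",
            "avocado --version 2>/dev/null || echo 'not-installed'",
        ]
        .join("; ")
    }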
From 5801659070102c15f7305ec553decdecd4dbe78d Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Fri, 2 Jan 2026 06:23:11 -0500
Subject: [PATCH 19/20] Fix codeql warnings

---
 .github/workflows/security-reusable.yml | 2 +-
 src/utils/config.rs                     | 5 +----
 tests/runs_on_integration.rs            | 8 ++------
 3 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/security-reusable.yml b/.github/workflows/security-reusable.yml
index 8cb6add..a46bc53 100644
--- a/.github/workflows/security-reusable.yml
+++ b/.github/workflows/security-reusable.yml
@@ -29,7 +29,7 @@ jobs:
           key: security-cargo-${{ hashFiles('**/Cargo.lock') }}
 
       - name: Install cargo-audit
-        run: cargo install cargo-audit --locked
+        run: cargo install cargo-audit --locked --force
 
       - name: Security audit
         run: cargo audit
diff --git a/src/utils/config.rs b/src/utils/config.rs
index 6726874..8187117 100644
--- a/src/utils/config.rs
+++ b/src/utils/config.rs
@@ -2616,10 +2616,7 @@ pub fn resolve_host_uid_gid(config: Option<&SdkConfig>) -> (u32, u32) {
     // Resolve UID: env var > config > libc
     let uid = if let Ok(env_uid) = env::var("AVOCADO_HOST_UID") {
         env_uid.parse::<u32>().unwrap_or_else(|_| {
-            eprintln!(
-                "Warning: Invalid AVOCADO_HOST_UID '{}', using fallback",
-                env_uid
-            );
+            eprintln!("Warning: Invalid AVOCADO_HOST_UID value, using fallback");
             fallback_uid
         })
     } else if let Some(cfg) = config {
diff --git a/tests/runs_on_integration.rs b/tests/runs_on_integration.rs
index d2c0577..dec3f43 100644
--- a/tests/runs_on_integration.rs
+++ b/tests/runs_on_integration.rs
@@ -278,7 +278,7 @@ mod port_selection_tests {
         let port = find_available_port(60000..=60010);
         assert!(port.is_some());
         let p = port.unwrap();
-        assert!(p >= 60000 && p <= 60010);
+        assert!((60000..=60010).contains(&p));
     }
 
     #[test]
@@ -685,11 +685,7 @@ fn test_localhost_ownership_preservation() {
         .parse()
         .expect("Failed to parse UID");
 
-    assert_eq!(
-        remote_uid, local_uid,
-        "Owner UID should be preserved (local: {}, remote: {})",
-        local_uid, remote_uid
-    );
+    assert_eq!(remote_uid, local_uid, "Owner UID should be preserved");
 
     fs::remove_dir_all(&temp_dir).ok();
 }

From d5a23a774977dd1048291f7ee69e5599bd9ee9cd Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Thu, 1 Jan 2026 23:09:05 -0500
Subject: [PATCH 20/20] 0.21.0 release

---
 Cargo.lock | 2 +-
 Cargo.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d26ef4e..f357d4b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -130,7 +130,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
 
 [[package]]
 name = "avocado-cli"
-version = "0.20.0"
+version = "0.21.0"
 dependencies = [
  "anyhow",
  "base64",
diff --git a/Cargo.toml b/Cargo.toml
index 467045e..814cc68 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "avocado-cli"
-version = "0.20.0"
+version = "0.21.0"
 edition = "2021"
 description = "Command line interface for Avocado."
 authors = ["Avocado"]
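
Note: the resolve_host_uid_gid change in patch 19 keeps the env var > config > libc precedence while dropping the raw environment value from the warning message. A condensed sketch of the UID leg, assuming fallback_uid comes from libc::getuid() and with config_uid standing in for the config field (not shown in the hunk):

    // Precedence: AVOCADO_HOST_UID, then the config value, then the calling
    // process's UID; an unparseable env var logs a sanitized warning and
    // falls back instead of failing.
    fn resolve_uid(config_uid: Option<u32>) -> u32 {
        let fallback_uid = unsafe { libc::getuid() };
        if let Ok(env_uid) = std::env::var("AVOCADO_HOST_UID") {
            env_uid.parse::<u32>().unwrap_or_else(|_| {
                eprintln!("Warning: Invalid AVOCADO_HOST_UID value, using fallback");
                fallback_uid
            })
        } else {
            config_uid.unwrap_or(fallback_uid)
        }
    }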