#!/bin/bash
################################################################################
#
#   WARNING: This file is automatically generated
#   DO NOT modify this file directly as it will be overwritten
#
#   apt-layer Tool
#   Generated on: 2025-07-15 18:39:00
#
################################################################################

set -euo pipefail

# apt-layer Tool - Self-contained version
# This script contains all components merged into a single file
# Enhanced version with container support, multiple package managers, and LIVE SYSTEM LAYERING
# Inspired by Vanilla OS Apx approach, ParticleOS apt-layer, and rpm-ostree live layering
# Version: 25.07.15

# apt-layer Tool
# Enhanced with Container Support and LIVE SYSTEM LAYERING

# Fallback logging functions (always defined first)

# Color definitions
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
PURPLE='\033[0;35m'
NC='\033[0m'

log_info() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${BLUE}[INFO]${NC} [$script_name] $message"
}

log_debug() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${YELLOW}[DEBUG]${NC} [$script_name] $message"
}

log_error() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${RED}[ERROR]${NC} [$script_name] $message" >&2
}

log_warning() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${YELLOW}[WARNING]${NC} [$script_name] $message" >&2
}

log_success() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${GREEN}[SUCCESS]${NC} [$script_name] $message"
}

log_layer() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${PURPLE}[LAYER]${NC} [$script_name] $message"
}

log_transaction() {
    local message="$1"
    local script_name="${2:-apt-layer}"
    echo -e "${CYAN}[TRANSACTION]${NC} [$script_name] $message"
}

# Source apt-layer configuration (if available, skip for help commands)
# Skip configuration loading for help commands to avoid permission issues
if [[ "${1:-}" != "--help" && "${1:-}" != "-h" && "${1:-}" != "--help-full" && "${1:-}" != "--examples" ]]; then
    if [[ -f "/usr/local/etc/apt-layer-config.sh" ]]; then
        source "/usr/local/etc/apt-layer-config.sh"
        log_info "Loaded apt-layer configuration" "apt-layer"
    else
        log_warning "apt-layer configuration not found, using defaults" "apt-layer"
    fi
else
    log_info "Skipping configuration loading for help command" "apt-layer"
fi

# Embedded dependencies.json
APT_LAYER_DEPENDENCIES_JSON=$(cat << 'EOF'
{
    "core": ["chroot", "apt-get", "dpkg", "jq", "mount", "umount", "findmnt", "numfmt"],
    "container": ["podman", "docker"],
    "oci": ["skopeo"],
    "composefs": ["mkcomposefs", "composefs-info", "mount.composefs", "mksquashfs", "unsquashfs"],
    "composefs_packages": ["composefs", "libcomposefs1"],
    "bootloader": ["efibootmgr", "grub-install", "update-grub", "bootctl"],
    "security": ["curl", "wget", "gpg"],
    "package_install_commands": {
        "debian": {
            "composefs": "apt install -y composefs libcomposefs1",
            "container": "apt install -y podman docker.io",
            "oci": "apt install -y skopeo",
            "bootloader": "apt install -y efibootmgr grub-common systemd-boot",
            "core": "apt install -y squashfs-tools jq coreutils util-linux"
        },
        "fedora": {
            "composefs": "dnf install -y composefs composefs-libs",
            "container": "dnf install -y podman docker",
            "oci": "dnf install -y skopeo",
            "bootloader": "dnf install -y efibootmgr grub2-tools systemd-boot",
            "core": "dnf install -y squashfs-tools jq coreutils util-linux"
        }
    }
}
EOF
)

# ============================================================================
# Header and Shared Functions
# ============================================================================
# Utility functions for Particle-OS apt-layer Tool
# These functions provide system introspection and core utilities

# Fallback logging functions (in case particle-config.sh is not available)
if ! declare -F log_info >/dev/null 2>&1; then
    log_info() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[INFO] $message"
    }
fi
if ! declare -F log_warning >/dev/null 2>&1; then
    log_warning() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[WARNING] $message"
    }
fi
if ! declare -F log_error >/dev/null 2>&1; then
    log_error() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[ERROR] $message" >&2
    }
fi
if ! declare -F log_success >/dev/null 2>&1; then
    log_success() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[SUCCESS] $message"
    }
fi
if ! declare -F log_debug >/dev/null 2>&1; then
    log_debug() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[DEBUG] $message"
    }
fi
if ! declare -F log_transaction >/dev/null 2>&1; then
    log_transaction() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[TRANSACTION] $message"
    }
fi
if ! declare -F log_layer >/dev/null 2>&1; then
    log_layer() {
        local message="$1"
        local script_name="${2:-apt-layer}"
        echo "[LAYER] $message"
    }
fi

# Global variables for cleanup
CLEANUP_DIRS=()
CLEANUP_MOUNTS=()
CLEANUP_FILES=()

# Workspace and directory variables
WORKSPACE="/var/lib/particle-os"
BUILD_DIR="/var/lib/particle-os/build"
LIVE_OVERLAY_DIR="/var/lib/particle-os/live-overlay"
COMPOSEFS_DIR="/var/lib/particle-os/composefs"
COMPOSEFS_SCRIPT="/usr/local/bin/composefs-alternative.sh"

# Container runtime will be detected dynamically based on configuration
CONTAINER_RUNTIME=""

# Transaction state variables
TRANSACTION_ID=""
TRANSACTION_PHASE=""
TRANSACTION_TARGET=""
TRANSACTION_BACKUP=""
TRANSACTION_TEMP_DIRS=()
TRANSACTION_STATE="/var/lib/particle-os/transaction-state"
TRANSACTION_LOG="/var/lib/particle-os/transaction.log"

# Trap for cleanup on exit
cleanup_on_exit() {
    local exit_code=$?
    if [[ -n "$TRANSACTION_ID" ]]; then
        log_transaction "Cleaning up transaction $TRANSACTION_ID (exit code: $exit_code)" "apt-layer"
        # Clean up temporary directories
        for temp_dir in "${TRANSACTION_TEMP_DIRS[@]}"; do
            if [[ -d "$temp_dir" ]]; then
                log_debug "Cleaning up temporary directory: $temp_dir" "apt-layer"
                rm -rf "$temp_dir" 2>/dev/null || true
            fi
        done
        # If transaction failed, attempt rollback
        if [[ $exit_code -ne 0 ]] && [[ -n "$TRANSACTION_BACKUP" ]]; then
            log_warning "Transaction failed, attempting rollback..." "apt-layer"
            rollback_transaction
        fi
        # Clear transaction state
        clear_transaction_state
    fi
    # Clean up any remaining mounts
    cleanup_mounts
    exit $exit_code
}
trap cleanup_on_exit EXIT INT TERM

# SECURITY: Validate and sanitize input paths
validate_path() {
    local path="$1"
    local type="$2"
    # Check for null or empty paths
    if [[ -z "$path" ]]; then
        log_error "Empty $type path provided" "apt-layer"
        exit 1
    fi
    # Check for path traversal attempts
    if [[ "$path" =~ \.\. ]]; then
        log_error "Path traversal attempt detected in $type: $path" "apt-layer"
        exit 1
    fi
    # Check for absolute paths only (for source directories and mount points)
    if [[ "$type" == "source_dir" || "$type" == "mount_point" ]]; then
        if [[ !
"$path" =~ ^/ ]]; then log_error "$type must be an absolute path: $path" "apt-layer" exit 1 fi fi # Validate characters (alphanumeric, hyphens, underscores, slashes, dots) if [[ ! "$path" =~ ^[a-zA-Z0-9/._-]+$ ]]; then log_error "Invalid characters in $type: $path" "apt-layer" exit 1 fi echo "$path" } # SECURITY: Validate image name (alphanumeric, hyphens, underscores only) validate_image_name() { local name="$1" if [[ -z "$name" ]]; then log_error "Empty image name provided" "apt-layer" exit 1 fi if [[ ! "$name" =~ ^[a-zA-Z0-9/_-]+$ ]]; then log_error "Invalid image name: $name (only alphanumeric, hyphens, underscores, and slashes allowed)" "apt-layer" exit 1 fi echo "$name" } # Check if running as root check_root() { if [[ $EUID -ne 0 ]]; then log_error "This script must be run as root" "apt-layer" exit 1 fi } # Require root privileges for specific operations require_root() { local operation="${1:-this operation}" if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for: $operation" "apt-layer" log_info "Please run with sudo" "apt-layer" exit 1 fi } # Check if system needs initialization check_initialization_needed() { local needs_init=false local missing_items=() # Check for configuration file if [[ ! -f "/usr/local/etc/particle-config.sh" ]]; then needs_init=true missing_items+=("configuration file") fi # Check for workspace directory if [[ ! -d "$WORKSPACE" ]]; then needs_init=true missing_items+=("workspace directory") fi # Check for log directory if [[ ! -d "/var/log/particle-os" ]]; then needs_init=true missing_items+=("log directory") fi # Check for cache directory if [[ ! -d "/var/cache/particle-os" ]]; then needs_init=true missing_items+=("cache directory") fi if [[ "$needs_init" == "true" ]]; then log_error "Particle-OS system not initialized. Missing: ${missing_items[*]}" "apt-layer" log_info "Run 'sudo $0 --init' to initialize the system" "apt-layer" exit 1 fi } # Initialize required directories and files with proper error handling initialize_directories() { log_info "Initializing Particle-OS directories and files..." "apt-layer" # Create main directories with proper error handling local dirs=( "$WORKSPACE" "$BUILD_DIR" "$LIVE_OVERLAY_DIR" "$COMPOSEFS_DIR" "/var/log/particle-os" "/var/cache/particle-os" "/usr/local/etc/particle-os" ) for dir in "${dirs[@]}"; do if ! mkdir -p "$dir" 2>/dev/null; then log_warning "Failed to create directory $dir, attempting with sudo..." "apt-layer" if ! sudo mkdir -p "$dir" 2>/dev/null; then log_error "Failed to create directory: $dir" "apt-layer" return 1 fi fi # Set proper permissions even if directory already exists if [[ -d "$dir" ]]; then sudo chown root:root "$dir" 2>/dev/null || true sudo chmod 755 "$dir" 2>/dev/null || true fi done # Create required files with proper error handling local files=( "$WORKSPACE/current-deployment" "$WORKSPACE/pending-deployment" "$WORKSPACE/deployments.json" "$TRANSACTION_STATE" "$TRANSACTION_LOG" ) for file in "${files[@]}"; do if ! touch "$file" 2>/dev/null; then log_warning "Failed to create file $file, attempting with sudo..." "apt-layer" if ! sudo touch "$file" 2>/dev/null; then log_error "Failed to create file: $file" "apt-layer" return 1 fi fi # Set proper permissions even if file already exists if [[ -f "$file" ]]; then sudo chown root:root "$file" 2>/dev/null || true sudo chmod 644 "$file" 2>/dev/null || true fi done # Initialize deployment database if it doesn't exist or is empty if [[ ! -f "$WORKSPACE/deployments.json" ]] || [[ ! -s "$WORKSPACE/deployments.json" ]]; then if ! 
cat > "$WORKSPACE/deployments.json" << 'EOF' { "deployments": {}, "current_deployment": null, "pending_deployment": null, "deployment_counter": 0, "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" } EOF then log_warning "Failed to create deployment database, attempting with sudo..." "apt-layer" if ! sudo tee "$WORKSPACE/deployments.json" > /dev/null << 'EOF' { "deployments": {}, "current_deployment": null, "pending_deployment": null, "deployment_counter": 0, "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" } EOF then log_error "Failed to create deployment database" "apt-layer" return 1 fi fi # Set proper permissions for deployment database sudo chown root:root "$WORKSPACE/deployments.json" 2>/dev/null || true sudo chmod 644 "$WORKSPACE/deployments.json" 2>/dev/null || true log_success "Deployment database initialized" "apt-layer" fi log_success "Particle-OS directories and files initialized successfully" "apt-layer" return 0 } # Initialize workspace init_workspace() { log_info "Initializing Particle-OS workspace..." "apt-layer" mkdir -p "$WORKSPACE" mkdir -p "$BUILD_DIR" mkdir -p "$LIVE_OVERLAY_DIR" # Ensure ComposeFS directory exists if [[ ! -d "$COMPOSEFS_DIR" ]]; then log_info "ComposeFS directory not found, initializing..." "apt-layer" if [[ -f "$COMPOSEFS_SCRIPT" ]]; then # Run composefs-alternative.sh status to initialize directories "$COMPOSEFS_SCRIPT" status >/dev/null 2>&1 || true fi fi log_success "Workspace initialized: $WORKSPACE" "apt-layer" } # ComposeFS helper functions composefs_create() { local image_name="$1" local source_dir="$2" log_debug "Creating ComposeFS image: $image_name from $source_dir" "apt-layer" # Try real mkcomposefs binary first if command -v mkcomposefs >/dev/null 2>&1; then # Create object store directory (same directory as image) local object_store_dir=$(dirname "$image_name") mkdir -p "$object_store_dir" if ! mkcomposefs "$source_dir" "$image_name" --digest-store="$object_store_dir"; then log_error "Failed to create ComposeFS image with mkcomposefs: $image_name" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" create "$image_name" "$source_dir"; then log_error "Failed to create ComposeFS image: $image_name" "apt-layer" return 1 fi fi log_success "ComposeFS image created: $image_name" "apt-layer" return 0 } composefs_mount() { local image_name="$1" local mount_point="$2" log_debug "Mounting ComposeFS image: $image_name to $mount_point" "apt-layer" # Try real mount with composefs filesystem if command -v mkcomposefs >/dev/null 2>&1; then # Create mount point mkdir -p "$mount_point" # Determine object store directory (same directory as image for now) local object_store_dir=$(dirname "$image_name") # Mount using composefs filesystem type if ! mount -t composefs -o "basedir=$object_store_dir" "$image_name" "$mount_point"; then log_error "Failed to mount ComposeFS image with mount: $image_name to $mount_point" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" mount "$image_name" "$mount_point"; then log_error "Failed to mount ComposeFS image: $image_name to $mount_point" "apt-layer" return 1 fi fi log_success "ComposeFS image mounted: $image_name to $mount_point" "apt-layer" return 0 } composefs_unmount() { local mount_point="$1" log_debug "Unmounting ComposeFS image from: $mount_point" "apt-layer" # Try real umount if command -v mkcomposefs >/dev/null 2>&1; then if ! 
umount "$mount_point"; then log_error "Failed to unmount ComposeFS image with umount: $mount_point" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" unmount "$mount_point"; then log_error "Failed to unmount ComposeFS image from: $mount_point" "apt-layer" return 1 fi fi log_success "ComposeFS image unmounted from: $mount_point" "apt-layer" return 0 } composefs_list_images() { log_debug "Listing ComposeFS images" "apt-layer" # Try to list images from workspace directory if command -v mkcomposefs >/dev/null 2>&1; then # List .composefs files in the workspace find "$WORKSPACE/images" -name "*.composefs" -type f 2>/dev/null | sed 's|.*/||' | sed 's|\.composefs$||' || true else # Fallback to composefs-alternative.sh "$COMPOSEFS_SCRIPT" list-images fi } composefs_image_exists() { local image_name="$1" # Check if image exists by looking for the .composefs file if command -v mkcomposefs >/dev/null 2>&1; then if [[ -f "$WORKSPACE/images/$image_name.composefs" ]]; then return 0 else return 1 fi else # Fallback to composefs-alternative.sh if "$COMPOSEFS_SCRIPT" list-images | grep -q "^$image_name$"; then return 0 else return 1 fi fi } composefs_remove_image() { local image_name="$1" log_debug "Removing ComposeFS image: $image_name" "apt-layer" # Try real file removal if command -v mkcomposefs >/dev/null 2>&1; then if ! rm -f "$WORKSPACE/images/$image_name.composefs"; then log_error "Failed to remove ComposeFS image file: $image_name" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" remove "$image_name"; then log_error "Failed to remove ComposeFS image: $image_name" "apt-layer" return 1 fi fi log_success "ComposeFS image removed: $image_name" "apt-layer" return 0 } # List all available branches/images list_branches() { log_info "Listing available ComposeFS images/branches..." "apt-layer" if ! composefs_list_images; then log_error "Failed to list ComposeFS images" "apt-layer" return 1 fi return 0 } # Show information about a specific branch/image show_branch_info() { local image_name="$1" log_info "Showing information for image: $image_name" "apt-layer" if ! composefs_image_exists "$image_name"; then log_error "Image not found: $image_name" "apt-layer" return 1 fi # Get basic image information echo "Image: $image_name" echo "Status: Available" # Try to get more detailed information if available if [[ -f "$COMPOSEFS_DIR/$image_name/info.json" ]]; then echo "Details:" jq -r '.' "$COMPOSEFS_DIR/$image_name/info.json" 2>/dev/null || echo " (Unable to parse info.json)" fi return 0 } # Remove an image (alias for composefs_remove_image) remove_image() { local image_name="$1" log_info "Removing image: $image_name" "apt-layer" if ! composefs_remove_image "$image_name"; then log_error "Failed to remove image: $image_name" "apt-layer" return 1 fi return 0 } composefs_get_status() { log_debug "Getting ComposeFS status" "apt-layer" "$COMPOSEFS_SCRIPT" status } # Atomic directory operations atomic_directory_swap() { local source="$1" local target="$2" local backup="$3" log_debug "Performing atomic directory swap: $source -> $target (backup: $backup)" "apt-layer" # Create backup if specified if [[ -n "$backup" ]] && [[ -d "$target" ]]; then if ! mv "$target" "$backup"; then log_error "Failed to create backup: $target -> $backup" "apt-layer" return 1 fi log_debug "Backup created: $target -> $backup" "apt-layer" fi # Move source to target if ! 
mv "$source" "$target"; then log_error "Failed to move source to target: $source -> $target" "apt-layer" # Restore backup if it exists if [[ -n "$backup" ]] && [[ -d "$backup" ]]; then log_warning "Restoring backup after failed move" "apt-layer" mv "$backup" "$target" 2>/dev/null || true fi return 1 fi log_debug "Atomic directory swap completed: $source -> $target" "apt-layer" return 0 } # Cleanup mounts cleanup_mounts() { log_debug "Cleaning up mounts" "apt-layer" for mount in "${CLEANUP_MOUNTS[@]}"; do if mountpoint -q "$mount" 2>/dev/null; then log_debug "Unmounting: $mount" "apt-layer" umount "$mount" 2>/dev/null || log_warning "Failed to unmount: $mount" "apt-layer" fi done } # Get system information get_system_info() { echo "Kernel: $(uname -r)" echo "Architecture: $(uname -m)" echo "Available modules:" if modprobe -n squashfs >/dev/null 2>&1; then echo " � squashfs module available" else echo " � squashfs module not available" fi if modprobe -n overlay >/dev/null 2>&1; then echo " � overlay module available" else echo " � overlay module not available" fi } # Calculate disk usage calculate_disk_usage() { local path="$1" local size size=$(du -sb "$path" 2>/dev/null | cut -f1 || echo "0") local size_mb=$((size / 1024 / 1024)) echo "$size_mb" } # Get available space get_available_space() { local path="$1" local available_space available_space=$(df "$path" | tail -1 | awk '{print $4}') local available_space_mb=$((available_space * 1024 / 1024 / 1024)) echo "$available_space_mb" } # --- END OF SCRIPTLET: 00-header.sh --- # ============================================================================ # Dependency Checking and Validation # ============================================================================ # Enhanced dependency checking and validation for Particle-OS apt-layer Tool # --- BEGIN DEPENDENCY JSON LOADING --- # The dependencies JSON will be embedded as APT_LAYER_DEPENDENCIES_JSON in the compiled script. # If not present, fallback to a default minimal set. 
if [[ -z "${APT_LAYER_DEPENDENCIES_JSON:-}" ]]; then
    APT_LAYER_DEPENDENCIES_JSON='{
        "core": ["chroot", "apt-get", "dpkg", "jq", "mount", "umount", "findmnt", "numfmt"],
        "container": ["podman", "docker"],
        "oci": ["skopeo"],
        "composefs": ["mkcomposefs", "composefs-info", "mount.composefs", "mksquashfs", "unsquashfs"],
        "composefs_packages": ["composefs", "libcomposefs1"],
        "bootloader": ["efibootmgr", "grub-install", "update-grub", "bootctl"],
        "security": ["curl", "wget", "gpg"],
        "package_install_commands": {
            "debian": {
                "composefs": "apt install -y composefs libcomposefs1",
                "container": "apt install -y podman docker.io",
                "oci": "apt install -y skopeo",
                "bootloader": "apt install -y efibootmgr grub-common systemd-boot",
                "core": "apt install -y squashfs-tools jq coreutils util-linux"
            },
            "fedora": {
                "composefs": "dnf install -y composefs composefs-libs",
                "container": "dnf install -y podman docker",
                "oci": "dnf install -y skopeo",
                "bootloader": "dnf install -y efibootmgr grub2-tools systemd-boot",
                "core": "dnf install -y squashfs-tools jq coreutils util-linux"
            }
        }
    }'
fi

get_deps_for_type() {
    local type="$1"
    local json="$APT_LAYER_DEPENDENCIES_JSON"
    case "$type" in
        --container|container)
            echo "$json" | jq -r '.core[], .container[]'
            ;;
        --oci|oci)
            echo "$json" | jq -r '.core[], .oci[]'
            ;;
        --composefs|composefs)
            echo "$json" | jq -r '.core[], .composefs[]'
            ;;
        --bootloader|bootloader)
            echo "$json" | jq -r '.core[], .bootloader[]'
            ;;
        --scan|security)
            echo "$json" | jq -r '.core[], .security[]'
            ;;
        *)
            echo "$json" | jq -r '.core[]'
            ;;
    esac
}

print_install_instructions() {
    local json="$APT_LAYER_DEPENDENCIES_JSON"
    # Detect distribution
    local distro="debian"
    if [[ -f /etc/fedora-release ]] || [[ -f /etc/redhat-release ]]; then
        distro="fedora"
    fi
    echo " Quick fix for common dependencies:"
    local core_cmd=$(echo "$json" | jq -r ".package_install_commands.$distro.core")
    echo "   sudo $core_cmd"
    echo ""
    echo " For ComposeFS support:"
    local composefs_cmd=$(echo "$json" | jq -r ".package_install_commands.$distro.composefs")
    echo "   sudo $composefs_cmd"
    echo ""
    echo " For container support:"
    local container_cmd=$(echo "$json" | jq -r ".package_install_commands.$distro.container")
    echo "   sudo $container_cmd"
    echo ""
    echo " For bootloader support:"
    local bootloader_cmd=$(echo "$json" | jq -r ".package_install_commands.$distro.bootloader")
    echo "   sudo $bootloader_cmd"
    echo ""
    echo " For more information, run: apt-layer --help"
    echo ""
}

check_dependencies() {
    local command_type="${1:-}"
    local packages=("${@:2}")
    log_info "Checking dependencies for command: ${command_type:-general}" "apt-layer"
    local missing_deps=()
    local missing_tools=()
    local missing_scripts=()
    local missing_modules=()
    # Use JSON to get the relevant dependencies
    local deps=( $(get_deps_for_type "$command_type") )
    for dep in "${deps[@]}"; do
        if ! command -v "$dep" >/dev/null 2>&1; then
            missing_deps+=("$dep")
            missing_tools+=("$dep")
        fi
    done
    # Check for required scripts (unchanged)
    local required_scripts=(
        "composefs-alternative.sh:/usr/local/bin/composefs-alternative.sh"
        "bootc-alternative.sh:/usr/local/bin/bootc-alternative.sh"
        "bootupd-alternative.sh:/usr/local/bin/bootupd-alternative.sh"
    )
    for script_info in "${required_scripts[@]}"; do
        local script_name="${script_info%%:*}"
        local script_path="${script_info##*:}"
        if [[ ! -f "$script_path" ]]; then
            missing_deps+=("$script_name")
            missing_scripts+=("$script_name")
        elif [[ !
-x "$script_path" ]]; then missing_deps+=("$script_name (not executable)") missing_scripts+=("$script_name (needs chmod +x)") fi done # Check for kernel modules (unchanged) check_kernel_modules # Validate package names if provided (unchanged) if [[ ${#packages[@]} -gt 0 ]]; then if ! validate_package_names "${packages[@]}"; then return 1 fi fi # Report missing dependencies with specific installation instructions if [[ ${#missing_deps[@]} -gt 0 ]]; then echo "" log_error "Missing dependencies detected!" "apt-layer" echo "" if [[ ${#missing_tools[@]} -gt 0 ]]; then echo " Missing system packages:" for tool in "${missing_tools[@]}"; do echo " $tool" done echo "" echo " Install with: sudo apt install -y ${missing_tools[*]}" echo "" fi if [[ ${#missing_scripts[@]} -gt 0 ]]; then echo " Missing or non-executable scripts:" for script in "${missing_scripts[@]}"; do echo " $script" done echo "" echo " Ensure scripts are installed and executable:" echo " sudo chmod +x /usr/local/bin/*-alternative.sh" echo "" fi if [[ ${#missing_modules[@]} -gt 0 ]]; then echo " Missing kernel modules:" for module in "${missing_modules[@]}"; do echo " $module" done echo "" echo " Load modules with: sudo modprobe ${missing_modules[*]}" echo " Or install with: sudo apt install linux-modules-extra-\$(uname -r)" echo "" fi print_install_instructions exit 1 fi log_success "All dependencies found and validated" "apt-layer" } # Check kernel modules check_kernel_modules() { log_debug "Checking kernel modules..." "apt-layer" local missing_modules=() local required_modules=("squashfs" "overlay" "fuse") for module in "${required_modules[@]}"; do if ! modprobe -n "$module" >/dev/null 2>&1; then missing_modules+=("$module") fi done if [[ ${#missing_modules[@]} -gt 0 ]]; then log_warning "Missing kernel modules: ${missing_modules[*]}" "apt-layer" log_info "Load modules with: sudo modprobe ${missing_modules[*]}" "apt-layer" log_info "Or install with: sudo apt install linux-modules-extra-$(uname -r)" "apt-layer" # Store missing modules for the main error report missing_modules_global=("${missing_modules[@]}") else log_debug "All required kernel modules available" "apt-layer" fi } # Check for OCI integration script check_oci_integration() { local oci_script="/usr/local/bin/oci-integration.sh" if [[ -f "$oci_script" ]] && [[ -x "$oci_script" ]]; then log_debug "OCI integration script found: $oci_script" "apt-layer" return 0 else log_warning "OCI integration script not found or not executable: $oci_script" "apt-layer" log_info "OCI export/import features will not be available" "apt-layer" return 1 fi } # Check for bootloader integration script check_bootloader_integration() { local bootloader_script="/usr/local/bin/bootloader-integration.sh" if [[ -f "$bootloader_script" ]] && [[ -x "$bootloader_script" ]]; then log_debug "Bootloader integration script found: $bootloader_script" "apt-layer" return 0 else log_warning "Bootloader integration script not found or not executable: $bootloader_script" "apt-layer" log_info "Automatic bootloader integration will not be available" "apt-layer" return 1 fi } # Validate package names validate_package_names() { local packages=("$@") local invalid_packages=() for package in "${packages[@]}"; do # Check for basic package name format if [[ ! 
"$package" =~ ^[a-zA-Z0-9][a-zA-Z0-9+.-]*$ ]]; then invalid_packages+=("$package") fi done if [ ${#invalid_packages[@]} -ne 0 ]; then log_error "Invalid package names: ${invalid_packages[*]}" "apt-layer" log_info "Package names must contain only alphanumeric characters, +, -, and ." "apt-layer" return 1 fi return 0 } # Check available disk space check_disk_space() { local required_space_mb="$1" local target_dir="${2:-$WORKSPACE}" local available_space_mb available_space_mb=$(get_available_space "$target_dir") if [[ $available_space_mb -lt $required_space_mb ]]; then log_error "Insufficient disk space: ${available_space_mb}MB available, need ${required_space_mb}MB" "apt-layer" return 1 fi log_debug "Disk space check passed: ${available_space_mb}MB available" "apt-layer" return 0 } # Check if system is in a bootable state check_system_state() { log_debug "Checking system state..." "apt-layer" # Check if running from a live system if [[ -f "/run/ostree-booted" ]]; then log_info "System is running from OSTree/ComposeFS" "apt-layer" return 0 fi # Check if running from a traditional system if [[ -f "/etc/os-release" ]]; then log_info "System is running from traditional filesystem" "apt-layer" return 0 fi log_warning "Unable to determine system state" "apt-layer" return 1 } # Enhanced error reporting with actionable messages show_actionable_error() { local error_type="$1" local error_message="$2" local command="${3:-}" local packages="${4:-}" echo "" log_error "$error_message" "apt-layer" echo "" case "$error_type" in "missing_dependencies") echo "� To fix this issue:" echo " 1. Install missing dependencies:" echo " sudo apt update" echo " sudo apt install -y $packages" echo "" echo " 2. Ensure scripts are executable:" echo " sudo chmod +x /usr/local/bin/*-alternative.sh" echo "" echo " 3. Load required kernel modules:" echo " sudo modprobe squashfs overlay fuse" echo "" ;; "permission_denied") echo "� Permission issue detected:" echo " This command requires root privileges." echo "" echo " Run with sudo:" echo " sudo apt-layer $command" echo "" ;; "invalid_arguments") echo "� Invalid arguments provided:" echo " Check the command syntax and try again." echo "" echo " For help, run:" echo " apt-layer --help" echo " apt-layer $command --help" echo "" ;; "system_not_initialized") echo "� System not initialized:" echo " Particle-OS needs to be initialized first." echo "" echo " Run initialization:" echo " sudo apt-layer --init" echo "" ;; "disk_space") echo "� Insufficient disk space:" echo " Free up space or use a different location." echo "" echo " Check available space:" echo " df -h" echo "" ;; *) echo "� Unknown error occurred." echo " Please check the error message above." echo "" echo " For help, run: apt-layer --help" echo "" ;; esac echo "� For more information:" echo " � apt-layer --help" echo " � apt-layer --help-full" echo " � apt-layer --examples" echo "" } # Pre-flight validation before any command pre_flight_check() { local command_type="$1" local packages=("${@:2}") log_info "Running pre-flight checks..." 
"apt-layer" # Check if running as root for privileged operations if [[ "$command_type" =~ ^(install|upgrade|rebase|rollback|init|live-overlay)$ ]]; then if [[ $EUID -ne 0 ]]; then show_actionable_error "permission_denied" "This command requires root privileges" "$command_type" exit 1 fi fi # Check system initialization (skip for help commands) if [[ "$command_type" != "--init" && "$command_type" != "init" && "$command_type" != "--help" && "$command_type" != "-h" && "$command_type" != "--help-full" && "$command_type" != "--examples" ]]; then if [[ ! -f "/usr/local/etc/particle-config.sh" ]]; then show_actionable_error "system_not_initialized" "Particle-OS system not initialized" "$command_type" exit 1 fi fi # Check dependencies if ! check_dependencies "$command_type" "${packages[@]}"; then exit 1 fi # Check disk space for operations that create files if [[ "$command_type" =~ ^(create|build|install|upgrade)$ ]]; then if ! check_disk_space 1000; then show_actionable_error "disk_space" "Insufficient disk space for operation" "$command_type" exit 1 fi fi log_success "Pre-flight checks passed" "apt-layer" } # --- END OF SCRIPTLET: 01-dependencies.sh --- # ============================================================================ # Transaction Management # ============================================================================ # Transaction management for apt-layer Tool # Provides atomic operations with automatic rollback and recovery # System initialization functions initialize_apt_layer_system() { log_info "Initializing apt-layer system..." "apt-layer" # Use the new system initialization function if available if command -v init_apt_layer_system >/dev/null 2>&1; then init_apt_layer_system else # Fallback to basic initialization log_info "Using fallback initialization..." "apt-layer" # Create configuration directory mkdir -p "/usr/local/etc/apt-layer" # Create workspace directory mkdir -p "/var/lib/apt-layer" # Create log directory mkdir -p "/var/log/apt-layer" # Create cache directory mkdir -p "/var/cache/apt-layer" # Create configuration file if it doesn't exist if [[ ! -f "/usr/local/etc/apt-layer-config.sh" ]]; then create_default_configuration fi # Set proper permissions chmod 755 "/var/lib/apt-layer" chmod 755 "/var/log/apt-layer" chmod 755 "/var/cache/apt-layer" chmod 644 "/usr/local/etc/apt-layer-config.sh" log_success "apt-layer system initialized successfully (fallback)" "apt-layer" fi } create_default_configuration() { log_info "Creating default configuration..." 
"apt-layer" cat > "/usr/local/etc/apt-layer-config.sh" << 'EOF' #!/bin/bash # apt-layer Configuration File # Generated automatically on $(date) # Core paths export APT_LAYER_WORKSPACE="/var/lib/apt-layer" export APT_LAYER_CONFIG_DIR="/usr/local/etc/apt-layer" export APT_LAYER_LOG_DIR="/var/log/apt-layer" export APT_LAYER_CACHE_DIR="/var/cache/apt-layer" # Build and temporary directories export APT_LAYER_BUILD_DIR="$APT_LAYER_WORKSPACE/build" export APT_LAYER_TEMP_DIR="$APT_LAYER_WORKSPACE/temp" export APT_LAYER_BACKUP_DIR="$APT_LAYER_WORKSPACE/backup" # Layer management export APT_LAYER_LAYERS_DIR="$APT_LAYER_WORKSPACE/layers" export APT_LAYER_IMAGES_DIR="$APT_LAYER_WORKSPACE/images" export APT_LAYER_MOUNTS_DIR="$APT_LAYER_WORKSPACE/mounts" # ComposeFS integration export APT_LAYER_COMPOSEFS_DIR="$APT_LAYER_WORKSPACE/composefs" export APT_LAYER_COMPOSEFS_SCRIPT="/usr/local/bin/composefs-alternative.sh" # Boot management export APT_LAYER_BOOTC_SCRIPT="/usr/local/bin/bootc-alternative.sh" export APT_LAYER_BOOTUPD_SCRIPT="/usr/local/bin/bootupd-alternative.sh" # Transaction management export APT_LAYER_TRANSACTION_LOG="$APT_LAYER_LOG_DIR/transactions.log" export APT_LAYER_TRANSACTION_STATE="$APT_LAYER_CACHE_DIR/transaction.state" # Logging configuration export APT_LAYER_LOG_LEVEL="INFO" export APT_LAYER_LOG_FILE="$APT_LAYER_LOG_DIR/apt-layer.log" # Security settings export APT_LAYER_SIGNING_ENABLED="false" export APT_LAYER_VERIFY_SIGNATURES="false" # Container settings - will be set dynamically export APT_LAYER_CONTAINER_RUNTIME="" export APT_LAYER_CHROOT_ENABLED="true" # Default package sources export APT_LAYER_DEFAULT_SOURCES="main restricted universe multiverse" # Performance settings export APT_LAYER_PARALLEL_JOBS="4" export APT_LAYER_CACHE_ENABLED="true" # Load configuration if it exists if [[ -f "$APT_LAYER_CONFIG_DIR/apt-layer-config.sh" ]]; then source "$APT_LAYER_CONFIG_DIR/apt-layer-config.sh" fi EOF log_success "Default configuration created: /usr/local/etc/apt-layer-config.sh" "apt-layer" } reset_apt_layer_system() { log_warning "Resetting apt-layer system..." 
"apt-layer" # Backup existing configuration if [[ -f "/usr/local/etc/apt-layer-config.sh" ]]; then cp "/usr/local/etc/apt-layer-config.sh" "/usr/local/etc/apt-layer-config.sh.backup.$(date +%Y%m%d_%H%M%S)" log_info "Existing configuration backed up" "apt-layer" fi # Remove existing directories rm -rf "/var/lib/apt-layer" rm -rf "/var/log/apt-layer" rm -rf "/var/cache/apt-layer" # Reinitialize system initialize_apt_layer_system log_success "apt-layer system reset successfully" "apt-layer" } # Transaction management functions start_transaction() { local operation="$1" local target="$2" TRANSACTION_ID=$(date +%Y%m%d_%H%M%S)_$$ TRANSACTION_PHASE="started" TRANSACTION_TARGET="$target" log_transaction "Starting transaction $TRANSACTION_ID: $operation -> $target" "apt-layer" # Save transaction state save_transaction_state # Log transaction start echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - START - $TRANSACTION_ID - $operation - $target" >> "$TRANSACTION_LOG" } update_transaction_phase() { local phase="$1" TRANSACTION_PHASE="$phase" log_transaction "Transaction $TRANSACTION_ID phase: $phase" "apt-layer" # Update transaction state save_transaction_state # Log phase update echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - PHASE - $TRANSACTION_ID - $phase" >> "$TRANSACTION_LOG" } commit_transaction() { log_transaction "Committing transaction $TRANSACTION_ID" "apt-layer" # Log successful completion echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - COMMIT - $TRANSACTION_ID - SUCCESS" >> "$TRANSACTION_LOG" # Clear transaction state clear_transaction_state log_success "Transaction $TRANSACTION_ID completed successfully" "apt-layer" } rollback_transaction() { log_transaction "Rolling back transaction $TRANSACTION_ID" "apt-layer" if [[ -n "$TRANSACTION_BACKUP" ]] && [[ -d "$TRANSACTION_BACKUP" ]]; then log_info "Restoring from backup: $TRANSACTION_BACKUP" "apt-layer" # Restore from backup if atomic_directory_swap "$TRANSACTION_BACKUP" "$TRANSACTION_TARGET" ""; then log_success "Rollback completed successfully" "apt-layer" else log_error "Rollback failed - manual intervention may be required" "apt-layer" fi else log_warning "No backup available for rollback" "apt-layer" fi # Log rollback echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - ROLLBACK - $TRANSACTION_ID - $TRANSACTION_PHASE" >> "$TRANSACTION_LOG" # Clear transaction state clear_transaction_state } save_transaction_state() { if [[ -n "$TRANSACTION_ID" ]]; then cat > "$TRANSACTION_STATE" << EOF TRANSACTION_ID="$TRANSACTION_ID" TRANSACTION_PHASE="$TRANSACTION_PHASE" TRANSACTION_TARGET="$TRANSACTION_TARGET" TRANSACTION_BACKUP="$TRANSACTION_BACKUP" TRANSACTION_TEMP_DIRS=(${TRANSACTION_TEMP_DIRS[*]}) EOF fi } clear_transaction_state() { TRANSACTION_ID="" TRANSACTION_PHASE="" TRANSACTION_TARGET="" TRANSACTION_BACKUP="" TRANSACTION_TEMP_DIRS=() # Remove state file rm -f "$TRANSACTION_STATE" } load_transaction_state() { if [[ -f "$TRANSACTION_STATE" ]]; then source "$TRANSACTION_STATE" return 0 else return 1 fi } check_incomplete_transactions() { log_info "Checking for incomplete transactions..." "apt-layer" if load_transaction_state; then log_warning "Found incomplete transaction: $TRANSACTION_ID (phase: $TRANSACTION_PHASE)" "apt-layer" log_info "Target: $TRANSACTION_TARGET" "apt-layer" if [[ -n "$TRANSACTION_BACKUP" ]] && [[ -d "$TRANSACTION_BACKUP" ]]; then log_info "Backup available: $TRANSACTION_BACKUP" "apt-layer" fi # Ask user what to do echo echo "Incomplete transaction detected. 
Choose an action:" echo "1) Attempt rollback (recommended)" echo "2) Continue transaction (risky)" echo "3) Clear transaction state (manual cleanup required)" echo "4) Exit" echo read -p "Enter choice (1-4): " choice case "$choice" in 1) log_info "Attempting rollback..." "apt-layer" rollback_transaction ;; 2) log_warning "Continuing incomplete transaction..." "apt-layer" log_info "Transaction will resume from phase: $TRANSACTION_PHASE" "apt-layer" ;; 3) log_warning "Clearing transaction state..." "apt-layer" clear_transaction_state ;; 4) log_info "Exiting..." "apt-layer" exit 0 ;; *) log_error "Invalid choice, exiting..." "apt-layer" exit 1 ;; esac else log_info "No incomplete transactions found" "apt-layer" fi } # Dry run functionality for package installation dry_run_apt_install() { local packages=("$@") local chroot_dir="${1:-}" log_info "Performing dry run for packages: ${packages[*]}" "apt-layer" local apt_cmd if [[ -n "$chroot_dir" ]]; then apt_cmd="chroot '$chroot_dir' apt-get install --simulate" else apt_cmd="apt-get install --simulate" fi # Add packages to command apt_cmd+=" ${packages[*]}" log_debug "Running: $apt_cmd" "apt-layer" # Execute dry run if eval "$apt_cmd" >/dev/null 2>&1; then log_success "Dry run completed successfully - no conflicts detected" "apt-layer" return 0 else log_error "Dry run failed - potential conflicts detected" "apt-layer" log_info "Run the command manually to see detailed output:" "apt-layer" log_info "$apt_cmd" "apt-layer" return 1 fi } # Transaction logging utilities log_transaction_event() { local event="$1" local details="$2" echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - $event - $TRANSACTION_ID - $details" >> "$TRANSACTION_LOG" } # Transaction validation validate_transaction_state() { if [[ -z "$TRANSACTION_ID" ]]; then log_error "No active transaction" "apt-layer" return 1 fi if [[ -z "$TRANSACTION_TARGET" ]]; then log_error "Transaction target not set" "apt-layer" return 1 fi return 0 } # Transaction cleanup utilities add_temp_directory() { local temp_dir="$1" TRANSACTION_TEMP_DIRS+=("$temp_dir") save_transaction_state } add_backup_path() { local backup_path="$1" TRANSACTION_BACKUP="$backup_path" save_transaction_state } # --- END OF SCRIPTLET: 02-transactions.sh --- # ============================================================================ # Traditional Layer Creation # ============================================================================ # Traditional layer creation for Ubuntu uBlue apt-layer Tool # Provides chroot-based package installation for layer creation # Create traditional layer create_layer() { local base_image="$1" local new_image="$2" shift 2 local packages=("$@") log_layer "Creating traditional layer: $new_image" "apt-layer" log_info "Base image: $base_image" "apt-layer" log_info "Packages to install: ${packages[*]}" "apt-layer" # Start transaction start_transaction "create_layer" "$new_image" # Check if base image exists if ! 
composefs_image_exists "$base_image"; then log_error "Base image '$base_image' not found" "apt-layer" log_info "Available images:" "apt-layer" composefs_list_images exit 1 fi # Prepare temp_layer_dir local temp_layer_dir="$BUILD_DIR/temp-layer-$(basename "$new_image")-${TRANSACTION_ID}" local final_layer_dir="$BUILD_DIR/layer-$(basename "$new_image")" local backup_dir="$BUILD_DIR/backup-layer-$(basename "$new_image")-${TRANSACTION_ID}" add_temp_directory "$temp_layer_dir" add_temp_directory "$backup_dir" rm -rf "$temp_layer_dir" 2>/dev/null || true mkdir -p "$temp_layer_dir" update_transaction_phase "checkout_base" # Mount base image to temp_layer_dir log_info "Mounting base image..." "apt-layer" if ! composefs_mount "$base_image" "$temp_layer_dir"; then log_error "Failed to mount base image" "apt-layer" exit 1 fi update_transaction_phase "setup_chroot" # Set up chroot environment log_info "Setting up chroot environment..." "apt-layer" mount --bind /proc "$temp_layer_dir/proc" mount --bind /sys "$temp_layer_dir/sys" mount --bind /dev "$temp_layer_dir/dev" # Copy host's resolv.conf for internet access cp /etc/resolv.conf "$temp_layer_dir/etc/resolv.conf" 2>/dev/null || true # Ensure /run exists and is writable mkdir -p "$temp_layer_dir/run" chmod 755 "$temp_layer_dir/run" # Set non-interactive environment for apt export DEBIAN_FRONTEND=noninteractive update_transaction_phase "dry_run_check" # Perform dry run to check for conflicts log_info "Performing dry run to check for package conflicts..." "apt-layer" if ! dry_run_apt_install "$temp_layer_dir" "${packages[@]}"; then log_error "Dry run failed - package conflicts detected" "apt-layer" log_info "Please resolve conflicts before proceeding" "apt-layer" exit 1 fi update_transaction_phase "install_packages" # Install packages in chroot log_info "Installing packages in chroot..." "apt-layer" if ! chroot "$temp_layer_dir" apt-get update; then log_error "Failed to update package lists in chroot" "apt-layer" exit 1 fi if ! chroot "$temp_layer_dir" apt-get install -y "${packages[@]}"; then log_error "Failed to install packages in chroot" "apt-layer" exit 1 fi # Clean up package cache chroot "$temp_layer_dir" apt-get clean chroot "$temp_layer_dir" apt-get autoremove -y update_transaction_phase "cleanup_mounts" # Clean up mounts umount "$temp_layer_dir/proc" 2>/dev/null || true umount "$temp_layer_dir/sys" 2>/dev/null || true umount "$temp_layer_dir/dev" 2>/dev/null || true # Remove temporary resolv.conf rm -f "$temp_layer_dir/etc/resolv.conf" update_transaction_phase "atomic_swap" # Perform atomic directory swap if [[ -d "$final_layer_dir" ]]; then log_debug "Backing up existing layer directory" "apt-layer" if ! atomic_directory_swap "$final_layer_dir" "$backup_dir" ""; then log_error "Failed to backup existing layer directory" "apt-layer" exit 1 fi add_backup_path "$backup_dir" fi # Move temporary directory to final location if ! atomic_directory_swap "$temp_layer_dir" "$final_layer_dir" ""; then log_error "Failed to perform atomic directory swap" "apt-layer" exit 1 fi update_transaction_phase "create_commit" # Create ComposeFS image from the final layer directory log_info "Creating ComposeFS image..." "apt-layer" if ! 
composefs_create "$new_image" "$final_layer_dir"; then log_error "Failed to create ComposeFS image" "apt-layer" exit 1 fi # Commit transaction commit_transaction log_success "Traditional layer created successfully: $new_image" "apt-layer" } # Setup chroot environment for package installation setup_chroot_environment() { local chroot_dir="$1" log_debug "Setting up chroot environment: $chroot_dir" "apt-layer" # Create necessary directories mkdir -p "$chroot_dir"/{proc,sys,dev,run} # Mount essential filesystems mount --bind /proc "$chroot_dir/proc" mount --bind /sys "$chroot_dir/sys" mount --bind /dev "$chroot_dir/dev" # Copy DNS configuration cp /etc/resolv.conf "$chroot_dir/etc/resolv.conf" 2>/dev/null || true # Set proper permissions chmod 755 "$chroot_dir/run" # Set environment variables export DEBIAN_FRONTEND=noninteractive log_debug "Chroot environment setup completed" "apt-layer" } # Cleanup chroot environment cleanup_chroot_environment() { local chroot_dir="$1" log_debug "Cleaning up chroot environment: $chroot_dir" "apt-layer" # Unmount filesystems umount "$chroot_dir/proc" 2>/dev/null || true umount "$chroot_dir/sys" 2>/dev/null || true umount "$chroot_dir/dev" 2>/dev/null || true # Remove temporary files rm -f "$chroot_dir/etc/resolv.conf" log_debug "Chroot environment cleanup completed" "apt-layer" } # Install packages in chroot with error handling install_packages_in_chroot() { local chroot_dir="$1" shift local packages=("$@") log_info "Installing packages in chroot: ${packages[*]}" "apt-layer" # Update package lists if ! chroot "$chroot_dir" apt-get update; then log_error "Failed to update package lists in chroot" "apt-layer" return 1 fi # Install packages if ! chroot "$chroot_dir" apt-get install -y "${packages[@]}"; then log_error "Failed to install packages in chroot" "apt-layer" return 1 fi # Clean up chroot "$chroot_dir" apt-get clean chroot "$chroot_dir" apt-get autoremove -y log_success "Packages installed successfully in chroot" "apt-layer" return 0 } # Validate chroot environment validate_chroot_environment() { local chroot_dir="$1" log_debug "Validating chroot environment: $chroot_dir" "apt-layer" # Check if chroot directory exists if [[ ! -d "$chroot_dir" ]]; then log_error "Chroot directory does not exist: $chroot_dir" "apt-layer" return 1 fi # Check if essential directories exist for dir in bin lib usr etc; do if [[ ! -d "$chroot_dir/$dir" ]]; then log_error "Essential directory missing in chroot: $dir" "apt-layer" return 1 fi done # Check if apt is available if [[ ! 
-x "$chroot_dir/usr/bin/apt-get" ]]; then log_error "apt-get not found in chroot environment" "apt-layer" return 1 fi log_debug "Chroot environment validation passed" "apt-layer" return 0 } # --- END OF SCRIPTLET: 03-traditional.sh --- # ============================================================================ # Container-based Layer Creation (Apx-style) # ============================================================================ # Container-based layer creation for Ubuntu uBlue apt-layer Tool # Provides Apx-style isolated container installation with ComposeFS backend # Container runtime detection and configuration detect_container_runtime() { log_info "Detecting container runtime" "apt-layer" # Get configured default runtime local configured_runtime if [[ -f "$CONFIG_DIR/apt-layer-settings.json" ]]; then configured_runtime=$(jq -r '.default_container_runtime // empty' "$CONFIG_DIR/apt-layer-settings.json" 2>/dev/null) fi # If configured runtime is specified and available, use it if [[ -n "$configured_runtime" ]]; then case "$configured_runtime" in podman) if command -v podman &> /dev/null; then CONTAINER_RUNTIME="podman" log_info "Using configured container runtime: podman" "apt-layer" return 0 else log_warning "Configured runtime 'podman' not found, falling back to detection" "apt-layer" fi ;; docker) if command -v docker &> /dev/null; then CONTAINER_RUNTIME="docker" log_info "Using configured container runtime: docker" "apt-layer" return 0 else log_warning "Configured runtime 'docker' not found, falling back to detection" "apt-layer" fi ;; esac fi # Auto-detection fallback (in order of preference) log_info "Auto-detecting container runtime" "apt-layer" # Check for podman (preferred for rootless) if command -v podman &> /dev/null; then CONTAINER_RUNTIME="podman" log_info "Auto-detected podman as container runtime" "apt-layer" return 0 fi # Fallback to docker if command -v docker &> /dev/null; then CONTAINER_RUNTIME="docker" log_info "Auto-detected docker as container runtime" "apt-layer" return 0 fi log_error "No supported container runtime found (podman or docker required)" "apt-layer" return 1 } # Enhanced container runtime detection with validation init_container_system() { log_info "Initializing container system" "apt-layer" # Detect container runtime if ! detect_container_runtime; then return 1 fi # Validate container runtime if ! 
validate_container_runtime "$CONTAINER_RUNTIME"; then return 1 fi # Set global container runtime variables set_global_container_runtime_vars # Ensure workspace directories exist mkdir -p "$WORKSPACE"/{images,temp,containers} log_success "Container system initialized with runtime: $CONTAINER_RUNTIME" "apt-layer" return 0 } # Set global container runtime variables set_global_container_runtime_vars() { log_debug "Setting global container runtime variables" "apt-layer" # Set variables used throughout the system export APT_LAYER_CONTAINER_RUNTIME="$CONTAINER_RUNTIME" export PARTICLE_CONTAINER_RUNTIME="$CONTAINER_RUNTIME" # Set runtime-specific variables case "$CONTAINER_RUNTIME" in podman) export CONTAINER_RUNTIME_TYPE="general" export CONTAINER_RUNTIME_DESCRIPTION="Rootless container runtime" ;; docker) export CONTAINER_RUNTIME_TYPE="general" export CONTAINER_RUNTIME_DESCRIPTION="Traditional container runtime" ;; esac log_debug "Container runtime variables set: $CONTAINER_RUNTIME ($CONTAINER_RUNTIME_TYPE)" "apt-layer" } # Validate container runtime capabilities validate_container_runtime() { local runtime="$1" log_info "Validating container runtime: $runtime" "apt-layer" case "$runtime" in podman) if ! podman info &> /dev/null; then log_error "podman is not properly configured" "apt-layer" return 1 fi ;; docker) if ! docker info &> /dev/null; then log_error "docker is not properly configured" "apt-layer" return 1 fi ;; *) log_error "Unsupported container runtime: $runtime" "apt-layer" return 1 ;; esac log_success "Container runtime validation passed" "apt-layer" return 0 } # Determine if base image is ComposeFS or OCI is_composefs_image() { local base_image="$1" # Check if it's a ComposeFS image path if [[ "$base_image" == *"/"* ]] && [[ -d "$WORKSPACE/images/$base_image" ]]; then return 0 # True - it's a ComposeFS image fi return 1 # False - it's likely an OCI image } # Export ComposeFS image to OCI format for container use export_composefs_to_oci() { local composefs_image="$1" local temp_oci_dir="$2" log_info "Exporting ComposeFS image to OCI format: $composefs_image" "apt-layer" # Create temporary OCI directory structure mkdir -p "$temp_oci_dir"/{blobs,refs} # Use ComposeFS backend to export (placeholder for now) # This will be fully implemented when 06-oci-integration.sh is complete if [[ -f "$COMPOSEFS_SCRIPT" ]]; then # Temporary: mount and copy filesystem local mount_point="$temp_oci_dir/mount" mkdir -p "$mount_point" if mount_composefs_image "$composefs_image" "$mount_point"; then # Create a simple OCI-like structure mkdir -p "$temp_oci_dir/rootfs" cp -a "$mount_point"/* "$temp_oci_dir/rootfs/" 2>/dev/null || true umount "$mount_point" 2>/dev/null || true log_success "ComposeFS image exported to OCI format" "apt-layer" return 0 else log_error "Failed to mount ComposeFS image for export" "apt-layer" return 1 fi else log_error "ComposeFS script not found for export" "apt-layer" return 1 fi } # Create base container image for layer creation create_base_container_image() { local base_image="$1" local container_name="$2" log_info "Creating base container image: $base_image" "apt-layer" # Determine if base_image is ComposeFS or OCI if is_composefs_image "$base_image"; then log_info "Base image is ComposeFS image: $base_image" "apt-layer" # Export ComposeFS image to OCI format for container use local temp_oci_dir="$WORKSPACE/temp/oci-export-$$" if ! 
export_composefs_to_oci "$base_image" "$temp_oci_dir"; then log_error "Failed to export ComposeFS image to OCI format" "apt-layer" return 1 fi # Use the exported OCI image log_success "ComposeFS image exported and ready for container use" "apt-layer" return 0 else log_info "Base image is OCI image: $base_image" "apt-layer" # Pull standard OCI image if needed case "$CONTAINER_RUNTIME" in podman) if ! podman image exists "$base_image"; then log_info "Pulling OCI image: $base_image" "apt-layer" podman pull "$base_image" fi ;; docker) if ! docker image ls "$base_image" &> /dev/null; then log_info "Pulling OCI image: $base_image" "apt-layer" docker pull "$base_image" fi ;; esac log_success "OCI base image ready: $base_image" "apt-layer" return 0 fi } # Container-based package installation (removed skopeo-based installation) container_install_packages() { local base_image="$1" local new_image="$2" local packages=("${@:3}") log_info "Container-based package installation: ${packages[*]}" "apt-layer" # Create temporary container name local container_name="apt-layer-$(date +%s)-$$" local temp_dir="$WORKSPACE/temp/$container_name" # Ensure temp directory exists mkdir -p "$temp_dir" # Start transaction start_transaction "container-install-$container_name" # Create base container image if ! create_base_container_image "$base_image" "$container_name"; then rollback_transaction return 1 fi # Run package installation in container case "$CONTAINER_RUNTIME" in podman) if ! run_podman_install "$base_image" "$container_name" "$temp_dir" "${packages[@]}"; then rollback_transaction return 1 fi ;; docker) if ! run_docker_install "$base_image" "$container_name" "$temp_dir" "${packages[@]}"; then rollback_transaction return 1 fi ;; *) log_error "Unsupported container runtime: $CONTAINER_RUNTIME" "apt-layer" rollback_transaction return 1 ;; esac # Create ComposeFS layer from container changes if ! create_composefs_layer "$temp_dir" "$new_image"; then rollback_transaction return 1 fi # Commit transaction commit_transaction # Cleanup cleanup_container_artifacts "$container_name" "$temp_dir" log_success "Container-based package installation completed" "apt-layer" return 0 } # Podman-based package installation run_podman_install() { local base_image="$1" local container_name="$2" local temp_dir="$3" shift 3 local packages=("$@") log_info "Running podman-based installation" "apt-layer" # Create container from base image local container_id if [[ -d "$WORKSPACE/images/$base_image" ]]; then # Use ComposeFS image as base container_id=$(podman create --name "$container_name" \ --mount type=bind,source="$WORKSPACE/images/$base_image",target=/ \ --mount type=bind,source="$temp_dir",target=/output \ ubuntu:24.04 /bin/bash) else # Use standard Ubuntu image container_id=$(podman create --name "$container_name" \ --mount type=bind,source="$temp_dir",target=/output \ ubuntu:24.04 /bin/bash) fi if [[ -z "$container_id" ]]; then log_error "Failed to create podman container" "apt-layer" return 1 fi # Start container and install packages if ! podman start "$container_name"; then log_error "Failed to start podman container" "apt-layer" podman rm "$container_name" 2>/dev/null || true return 1 fi # Install packages local install_cmd="apt-get update && apt-get install -y ${packages[*]} && apt-get clean" if ! 
podman exec "$container_name" /bin/bash -c "$install_cmd"; then log_error "Package installation failed in podman container" "apt-layer" podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true return 1 fi # Export container filesystem if ! podman export "$container_name" | tar -x -C "$temp_dir"; then log_error "Failed to export podman container filesystem" "apt-layer" podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true return 1 fi # Cleanup container podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true log_success "Podman-based installation completed" "apt-layer" return 0 } # Docker-based package installation run_docker_install() { local base_image="$1" local container_name="$2" local temp_dir="$3" shift 3 local packages=("$@") log_info "Running docker-based installation" "apt-layer" # Create container from base image local container_id if [[ -d "$WORKSPACE/images/$base_image" ]]; then # Use ComposeFS image as base container_id=$(docker create --name "$container_name" \ -v "$WORKSPACE/images/$base_image:/" \ -v "$temp_dir:/output" \ ubuntu:24.04 /bin/bash) else # Use standard Ubuntu image container_id=$(docker create --name "$container_name" \ -v "$temp_dir:/output" \ ubuntu:24.04 /bin/bash) fi if [[ -z "$container_id" ]]; then log_error "Failed to create docker container" "apt-layer" return 1 fi # Start container and install packages if ! docker start "$container_name"; then log_error "Failed to start docker container" "apt-layer" docker rm "$container_name" 2>/dev/null || true return 1 fi # Install packages local install_cmd="apt-get update && apt-get install -y ${packages[*]} && apt-get clean" if ! docker exec "$container_name" /bin/bash -c "$install_cmd"; then log_error "Package installation failed in docker container" "apt-layer" docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true return 1 fi # Export container filesystem if ! docker export "$container_name" | tar -x -C "$temp_dir"; then log_error "Failed to export docker container filesystem" "apt-layer" docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true return 1 fi # Cleanup container docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true log_success "Docker-based installation completed" "apt-layer" return 0 } # systemd-nspawn-based package installation run_nspawn_install() { local base_image="$1" local container_name="$2" local temp_dir="$3" shift 3 local packages=("$@") log_info "Running systemd-nspawn-based installation" "apt-layer" # Create container directory local container_dir="$temp_dir/container" mkdir -p "$container_dir" # Set up base filesystem if [[ -d "$WORKSPACE/images/$base_image" ]]; then # Use ComposeFS image as base log_info "Using ComposeFS image as base for nspawn" "apt-layer" # Mount ComposeFS image and copy contents local mount_point="$temp_dir/mount" mkdir -p "$mount_point" if ! mount_composefs_image "$base_image" "$mount_point"; then log_error "Failed to mount ComposeFS image for nspawn" "apt-layer" return 1 fi # Copy filesystem if ! 
cp -a "$mount_point"/* "$container_dir/"; then log_error "Failed to copy filesystem for nspawn" "apt-layer" umount "$mount_point" 2>/dev/null || true return 1 fi umount "$mount_point" 2>/dev/null || true else # Use host filesystem as base log_info "Using host filesystem as base for nspawn" "apt-layer" # Create minimal container structure mkdir -p "$container_dir"/{bin,lib,lib64,usr,etc,var} # Copy essential files from host cp -a /bin/bash "$container_dir/bin/" cp -a /lib/x86_64-linux-gnu "$container_dir/lib/" cp -a /usr/bin/apt-get "$container_dir/usr/bin/" # Add minimal /etc structure echo "deb http://archive.ubuntu.com/ubuntu/ jammy main" > "$container_dir/etc/apt/sources.list" fi # Run package installation in nspawn container local install_cmd="apt-get update && apt-get install -y ${packages[*]} && apt-get clean" if ! systemd-nspawn -D "$container_dir" /bin/bash -c "$install_cmd"; then log_error "Package installation failed in nspawn container" "apt-layer" return 1 fi # Move container contents to temp_dir mv "$container_dir"/* "$temp_dir/" 2>/dev/null || true log_success "systemd-nspawn-based installation completed" "apt-layer" return 0 } # Create ComposeFS layer from container changes create_composefs_layer() { local temp_dir="$1" local new_image="$2" log_info "Creating ComposeFS layer from container changes" "apt-layer" # Ensure new image directory exists local image_dir="$WORKSPACE/images/$new_image" mkdir -p "$image_dir" # Try real mkcomposefs binary first if command -v mkcomposefs >/dev/null 2>&1; then # Create object store directory (same directory as image) local object_store_dir=$(dirname "$new_image") mkdir -p "$object_store_dir" if ! mkcomposefs "$temp_dir" "$new_image" --digest-store="$object_store_dir"; then log_error "Failed to create ComposeFS layer with mkcomposefs" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" create "$new_image" "$temp_dir"; then log_error "Failed to create ComposeFS layer" "apt-layer" return 1 fi fi log_success "ComposeFS layer created: $new_image" "apt-layer" return 0 } # Cleanup container artifacts cleanup_container_artifacts() { local container_name="$1" local temp_dir="$2" log_info "Cleaning up container artifacts" "apt-layer" # Remove temporary directory if [[ -d "$temp_dir" ]]; then rm -rf "$temp_dir" fi # Cleanup any remaining containers (safety) case "$CONTAINER_RUNTIME" in podman) podman rm "$container_name" 2>/dev/null || true ;; docker) docker rm "$container_name" 2>/dev/null || true ;; esac log_success "Container artifacts cleaned up" "apt-layer" } # Container-based layer removal container_remove_layer() { local image_name="$1" log_info "Removing container-based layer: $image_name" "apt-layer" # Try real file removal if command -v mkcomposefs >/dev/null 2>&1; then if ! rm -f "$WORKSPACE/images/$image_name.composefs"; then log_error "Failed to remove ComposeFS layer file: $image_name" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! 
"$COMPOSEFS_SCRIPT" remove "$image_name"; then log_error "Failed to remove ComposeFS layer" "apt-layer" return 1 fi fi log_success "Container-based layer removed: $image_name" "apt-layer" return 0 } # Container-based layer listing container_list_layers() { log_info "Listing container-based layers" "apt-layer" # Try to list images from workspace directory if command -v mkcomposefs >/dev/null 2>&1; then # List .composefs files in the workspace find "$WORKSPACE/images" -name "*.composefs" -type f 2>/dev/null | sed 's|.*/||' | sed 's|\.composefs$||' || true else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" list-images; then log_error "Failed to list ComposeFS layers" "apt-layer" return 1 fi fi return 0 } # Container-based layer information container_layer_info() { local image_name="$1" log_info "Getting container-based layer info: $image_name" "apt-layer" # Try real composefs-info binary first if command -v composefs-info >/dev/null 2>&1; then if ! composefs-info ls "$WORKSPACE/images/$image_name.composefs"; then log_error "Failed to get ComposeFS layer info with composefs-info" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" info "$image_name"; then log_error "Failed to get ComposeFS layer info" "apt-layer" return 1 fi fi return 0 } # Container-based layer mounting container_mount_layer() { local image_name="$1" local mount_point="$2" log_info "Mounting container-based layer: $image_name at $mount_point" "apt-layer" # Try real mount with composefs filesystem if command -v mkcomposefs >/dev/null 2>&1; then # Create mount point mkdir -p "$mount_point" # Determine object store directory (same directory as image) local object_store_dir=$(dirname "$image_name") # Mount using composefs filesystem type if ! mount -t composefs -o "basedir=$object_store_dir" "$image_name" "$mount_point"; then log_error "Failed to mount ComposeFS layer with mount: $image_name to $mount_point" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! "$COMPOSEFS_SCRIPT" mount "$image_name" "$mount_point"; then log_error "Failed to mount ComposeFS layer" "apt-layer" return 1 fi fi log_success "Container-based layer mounted: $image_name at $mount_point" "apt-layer" return 0 } # Container-based layer unmounting container_unmount_layer() { local mount_point="$1" log_info "Unmounting container-based layer at: $mount_point" "apt-layer" # Try real umount if command -v mkcomposefs >/dev/null 2>&1; then if ! umount "$mount_point"; then log_error "Failed to unmount ComposeFS layer with umount: $mount_point" "apt-layer" return 1 fi else # Fallback to composefs-alternative.sh if ! 
"$COMPOSEFS_SCRIPT" unmount "$mount_point"; then log_error "Failed to unmount ComposeFS layer" "apt-layer" return 1 fi fi log_success "Container-based layer unmounted: $mount_point" "apt-layer" return 0 } # Container runtime status check container_status() { log_info "Checking container runtime status" "apt-layer" echo "=== Container Runtime Status ===" echo "Runtime: $CONTAINER_RUNTIME" case "$CONTAINER_RUNTIME" in podman) echo "Podman version: $(podman --version 2>/dev/null || echo 'Not available')" echo "Podman info: $(podman info --format json 2>/dev/null | jq -r '.host.arch // "Unknown"' 2>/dev/null || echo 'Unknown')" ;; docker) echo "Docker version: $(docker --version 2>/dev/null || echo 'Not available')" echo "Docker info: $(docker info --format '{{.Architecture}}' 2>/dev/null || echo 'Unknown')" ;; esac echo "" echo "=== ComposeFS Backend Status ===" if [[ -f "$COMPOSEFS_SCRIPT" ]]; then echo "ComposeFS script: $COMPOSEFS_SCRIPT" echo "ComposeFS version: $("$COMPOSEFS_SCRIPT" --version 2>/dev/null || echo 'Version info not available')" else echo "ComposeFS script: Not found at $COMPOSEFS_SCRIPT" fi echo "" echo "=== Available Container Images ===" container_list_layers } # --- END OF SCRIPTLET: 04-container.sh --- # ============================================================================ # Live Overlay System (rpm-ostree style) # ============================================================================ # Ubuntu uBlue apt-layer Live Overlay System # Implements live system layering similar to rpm-ostree # Uses overlayfs for live package installation and management # ============================================================================= # LIVE OVERLAY SYSTEM FUNCTIONS # ============================================================================= # Live overlay state file (with fallbacks for when load_path_config() is not available) # These will be overridden by load_path_config() when available LIVE_OVERLAY_STATE_FILE="${LIVE_OVERLAY_STATE_FILE:-/var/lib/apt-layer/live-overlay.state}" LIVE_OVERLAY_MOUNT_POINT="${LIVE_OVERLAY_MOUNT_POINT:-/var/lib/apt-layer/live-overlay/mount}" LIVE_OVERLAY_PACKAGE_LOG="${LIVE_OVERLAY_PACKAGE_LOG:-/var/log/apt-layer/live-overlay-packages.log}" LIVE_OVERLAY_DIR="${LIVE_OVERLAY_DIR:-/var/lib/apt-layer/live-overlay}" LIVE_OVERLAY_UPPER_DIR="${LIVE_OVERLAY_UPPER_DIR:-/var/lib/apt-layer/live-overlay/upper}" LIVE_OVERLAY_WORK_DIR="${LIVE_OVERLAY_WORK_DIR:-/var/lib/apt-layer/live-overlay/work}" # Initialize live overlay system init_live_overlay_system() { log_info "Initializing live overlay system" "apt-layer" # Load system paths if available if command -v load_path_config >/dev/null 2>&1; then load_path_config fi # Create live overlay directories mkdir -p "$LIVE_OVERLAY_DIR" "$LIVE_OVERLAY_UPPER_DIR" "$LIVE_OVERLAY_WORK_DIR" mkdir -p "$LIVE_OVERLAY_MOUNT_POINT" # Set proper permissions (use sudo if needed) if [[ $EUID -eq 0 ]]; then # Running as root, use chmod directly chmod 755 "$LIVE_OVERLAY_DIR" 2>/dev/null || true chmod 700 "$LIVE_OVERLAY_UPPER_DIR" "$LIVE_OVERLAY_WORK_DIR" 2>/dev/null || true else # Running as regular user, use sudo sudo chmod 755 "$LIVE_OVERLAY_DIR" 2>/dev/null || true sudo chmod 700 "$LIVE_OVERLAY_UPPER_DIR" "$LIVE_OVERLAY_WORK_DIR" 2>/dev/null || true fi # Initialize package log if it doesn't exist if [[ ! 
-f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then touch "$LIVE_OVERLAY_PACKAGE_LOG" if [[ $EUID -eq 0 ]]; then chmod 644 "$LIVE_OVERLAY_PACKAGE_LOG" 2>/dev/null || true else sudo chmod 644 "$LIVE_OVERLAY_PACKAGE_LOG" 2>/dev/null || true fi fi # Conditional DNS fix for chroot overlay (WSL, etc) if [[ -d "$LIVE_OVERLAY_MOUNT_POINT" ]]; then if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" getent hosts archive.ubuntu.com >/dev/null 2>&1; then log_warning "DNS resolution failed in overlay. Injecting public DNS servers..." "apt-layer" # Backup original resolv.conf if present if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" ]]; then cp "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" fi echo "nameserver 8.8.8.8" > "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" echo "nameserver 1.1.1.1" >> "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" chmod 644 "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" touch "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" log_success "DNS configuration applied to overlay" "apt-layer" else log_info "DNS resolution in overlay is working. No changes made." "apt-layer" fi fi log_success "Live overlay system initialized" "apt-layer" } # Check if live overlay is active is_live_overlay_active() { if [[ -f "$LIVE_OVERLAY_STATE_FILE" ]]; then local state state=$(cat "$LIVE_OVERLAY_STATE_FILE" 2>/dev/null || echo "") [[ "$state" == "active" ]] else false fi } # Check if system supports live overlay check_live_overlay_support() { local errors=0 local test_dir="/tmp/overlay-test-$$" local test_lower="$test_dir/lower" local test_upper="$test_dir/upper" local test_work="$test_dir/work" local test_mount="$test_dir/mount" # Check for overlay module if ! modprobe -n overlay >/dev/null 2>&1; then log_error "Overlay module not available" "apt-layer" errors=$((errors + 1)) fi # Create test directories mkdir -p "$test_lower" "$test_upper" "$test_work" "$test_mount" 2>/dev/null # Check for overlayfs mount support if ! mount -t overlay overlay -o "lowerdir=$test_lower,upperdir=$test_upper,workdir=$test_work" "$test_mount" 2>/dev/null; then log_error "Overlayfs mount not supported" "apt-layer" errors=$((errors + 1)) else umount "$test_mount" 2>/dev/null fi # Cleanup test directories rm -rf "$test_dir" 2>/dev/null # Check for read-only root filesystem if ! is_root_readonly; then log_warning "Root filesystem is not read-only - live overlay may not be necessary" "apt-layer" fi if [[ $errors -gt 0 ]]; then return 1 fi return 0 } # Check if root filesystem is read-only is_root_readonly() { local root_mount root_mount=$(findmnt -n -o OPTIONS / | grep -o "ro" || echo "") [[ -n "$root_mount" ]] } # Start live overlay start_live_overlay() { log_info "Starting live overlay system" "apt-layer" # Check if already active if is_live_overlay_active; then log_warning "Live overlay is already active" "apt-layer" return 0 fi # Check system support if ! 
check_live_overlay_support; then
        log_error "System does not support live overlay" "apt-layer"
        return 1
    fi

    # Initialize system
    init_live_overlay_system

    # Create overlay mount
    log_info "Creating overlay mount" "apt-layer"
    if mount -t overlay overlay -o "lowerdir=/,upperdir=$LIVE_OVERLAY_UPPER_DIR,workdir=$LIVE_OVERLAY_WORK_DIR" "$LIVE_OVERLAY_MOUNT_POINT"; then
        log_success "Overlay mount created successfully" "apt-layer"

        # Mark overlay as active
        echo "active" > "$LIVE_OVERLAY_STATE_FILE"

        log_success "Live overlay started successfully" "apt-layer"
        log_info "Changes will be applied to overlay and can be committed or rolled back" "apt-layer"
        return 0
    else
        log_error "Failed to create overlay mount" "apt-layer"
        return 1
    fi
}

# Stop live overlay
stop_live_overlay() {
    log_info "Stopping live overlay system" "apt-layer"

    # Check if overlay is active
    if ! is_live_overlay_active; then
        log_warning "Live overlay is not active" "apt-layer"
        return 0
    fi

    # Check for active processes
    if check_active_processes; then
        log_warning "Active processes detected - overlay will persist until processes complete" "apt-layer"
        return 0
    fi

    # Undo DNS fix if we applied it
    if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" ]]; then
        if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" ]]; then
            mv "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
        else
            rm -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
        fi
        rm -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer"
        log_info "DNS fix by apt-layer undone on overlay stop" "apt-layer"
    fi

    # Unmount overlay
    log_info "Unmounting overlay" "apt-layer"
    if umount "$LIVE_OVERLAY_MOUNT_POINT"; then
        log_success "Overlay unmounted successfully" "apt-layer"

        # Remove state file
        rm -f "$LIVE_OVERLAY_STATE_FILE"

        log_success "Live overlay stopped successfully" "apt-layer"
        return 0
    else
        log_error "Failed to unmount overlay" "apt-layer"
        return 1
    fi
}

# Check for active processes that might prevent unmounting
check_active_processes() {
    # Check for package manager processes (match exact process names so that
    # the apt-layer script itself does not trigger a false positive)
    if pgrep -x "apt|apt-get|dpkg" >/dev/null 2>&1; then
        return 0
    fi

    # Check for processes using the overlay mount
    if lsof "$LIVE_OVERLAY_MOUNT_POINT" >/dev/null 2>&1; then
        return 0
    fi

    return 1
}

# Get live overlay status
get_live_overlay_status() {
    echo "=== Live Overlay Status ==="

    if is_live_overlay_active; then
        log_success "Live overlay is ACTIVE" "apt-layer"

        # Show mount details
        if mountpoint -q "$LIVE_OVERLAY_MOUNT_POINT"; then
            log_info "Overlay mount point: $LIVE_OVERLAY_MOUNT_POINT" "apt-layer"

            # Show overlay usage
            if [[ -d "$LIVE_OVERLAY_UPPER_DIR" ]]; then
                local usage=$(du -sh "$LIVE_OVERLAY_UPPER_DIR" 2>/dev/null | cut -f1 || echo "unknown")
                log_info "Overlay usage: $usage" "apt-layer"
            fi

            # Show installed packages
            if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then
                local package_count=$(wc -l < "$LIVE_OVERLAY_PACKAGE_LOG" 2>/dev/null || echo "0")
                log_info "Packages installed in overlay: $package_count" "apt-layer"
            fi
        else
            log_warning "Overlay mount point not mounted" "apt-layer"
        fi

        # Check for active processes
        if check_active_processes; then
            log_warning "Active processes detected - overlay cannot be stopped" "apt-layer"
        fi
    else
        log_info "Live overlay is not active" "apt-layer"

        # Check if system supports live overlay
        if check_live_overlay_support >/dev/null 2>&1; then
            log_info "System supports live overlay" "apt-layer"
            log_info "Use '--live-overlay start' to start live overlay" "apt-layer"
        else
            log_warning "System does not support live overlay" 
"apt-layer" fi fi echo "" } # Install packages in live overlay live_install() { local packages=("$@") log_info "Installing packages in live overlay: ${packages[*]}" "apt-layer" # Check if overlay is active if ! is_live_overlay_active; then log_error "Live overlay is not active" "apt-layer" log_info "Use '--live-overlay start' to start live overlay first" "apt-layer" return 1 fi # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for live installation" "apt-layer" return 1 fi # Update package lists in overlay log_info "Updating package lists in overlay" "apt-layer" if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get update; then log_error "Failed to update package lists" "apt-layer" log_warning "Network or DNS error? For offline or WSL overlays, use: apt-layer --live-dpkg <.deb files>" "apt-layer" return 1 fi # Install packages in overlay log_info "Installing packages in overlay" "apt-layer" if chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get install -y "${packages[@]}"; then log_success "Packages installed successfully in overlay" "apt-layer" # Log installed packages for package in "${packages[@]}"; do echo "$(date '+%Y-%m-%d %H:%M:%S') - INSTALLED: $package" >> "$LIVE_OVERLAY_PACKAGE_LOG" done log_info "Changes are applied to overlay and can be committed or rolled back" "apt-layer" return 0 else log_error "Failed to install packages in overlay" "apt-layer" log_warning "If this is a network or DNS issue, try: apt-layer --live-dpkg <.deb files>" "apt-layer" return 1 fi } # Manage live overlay manage_live_overlay() { local action="$1" shift local options=("$@") case "$action" in "start") start_live_overlay ;; "stop") stop_live_overlay ;; "status") get_live_overlay_status ;; "commit") local message="${options[0]:-Live overlay changes}" commit_live_overlay "$message" ;; "rollback") rollback_live_overlay ;; "list") list_live_overlay_packages ;; "clean") clean_live_overlay ;; *) log_error "Unknown live overlay action: $action" "apt-layer" log_info "Valid actions: start, stop, status, commit, rollback, list, clean" "apt-layer" return 1 ;; esac } # Commit live overlay changes commit_live_overlay() { local message="$1" log_info "Committing live overlay changes: $message" "apt-layer" # Check if overlay is active if ! is_live_overlay_active; then log_error "Live overlay is not active" "apt-layer" return 1 fi # Check if there are changes to commit if ! 
has_overlay_changes; then log_warning "No changes to commit" "apt-layer" return 0 fi # Create new ComposeFS layer from overlay changes local timestamp=$(date '+%Y%m%d_%H%M%S') local layer_name="live-overlay-commit-${timestamp}" log_info "Creating new layer: $layer_name" "apt-layer" # Create layer from overlay changes if create_layer_from_overlay "$layer_name" "$message"; then log_success "Live overlay changes committed as layer: $layer_name" "apt-layer" # Clean up overlay clean_live_overlay return 0 else log_error "Failed to commit live overlay changes" "apt-layer" return 1 fi } # Check if overlay has changes has_overlay_changes() { if [[ -d "$LIVE_OVERLAY_UPPER_DIR" ]]; then # Check if upper directory has any content if [[ -n "$(find "$LIVE_OVERLAY_UPPER_DIR" -mindepth 1 -maxdepth 1 2>/dev/null)" ]]; then return 0 fi fi return 1 } # Create layer from overlay changes create_layer_from_overlay() { local layer_name="$1" local message="$2" # Create temporary directory for layer local temp_layer_dir="${TEMP_DIR:-/tmp/apt-layer}/live-layer-${layer_name}" mkdir -p "$temp_layer_dir" # Copy overlay changes to temporary directory log_info "Copying overlay changes to temporary layer" "apt-layer" if ! cp -a "$LIVE_OVERLAY_UPPER_DIR"/* "$temp_layer_dir/" 2>/dev/null; then log_error "Failed to copy overlay changes" "apt-layer" rm -rf "$temp_layer_dir" return 1 fi # Create ComposeFS layer log_info "Creating ComposeFS layer from overlay changes" "apt-layer" if ! create_composefs_layer "$temp_layer_dir" "$layer_name" "$message"; then log_error "Failed to create ComposeFS layer" "apt-layer" rm -rf "$temp_layer_dir" return 1 fi # Clean up temporary directory rm -rf "$temp_layer_dir" return 0 } # Create ComposeFS layer from directory create_composefs_layer() { local source_dir="$1" local layer_name="$2" local message="$3" # Try real mkcomposefs binary first if command -v mkcomposefs >/dev/null 2>&1; then # Create object store directory (same directory as layer) local object_store_dir=$(dirname "$layer_name") mkdir -p "$object_store_dir" if mkcomposefs "$source_dir" "$layer_name" --digest-store="$object_store_dir"; then log_success "ComposeFS layer created using mkcomposefs" "apt-layer" return 0 fi fi # Fallback to composefs-alternative if command -v composefs-alternative >/dev/null 2>&1; then if composefs-alternative create-layer "$source_dir" "$layer_name" "$message"; then return 0 fi fi # Fallback: create simple squashfs layer local layer_file="${BUILD_DIR:-/var/lib/apt-layer/build}/${layer_name}.squashfs" mkdir -p "$(dirname "$layer_file")" if mksquashfs "$source_dir" "$layer_file" -comp "${SQUASHFS_COMPRESSION:-xz}" -b "${SQUASHFS_BLOCK_SIZE:-1M}"; then log_success "Created squashfs layer: $layer_file" "apt-layer" return 0 else log_error "Failed to create squashfs layer" "apt-layer" return 1 fi } # Rollback live overlay changes rollback_live_overlay() { log_info "Rolling back live overlay changes" "apt-layer" # Check if overlay is active if ! 
is_live_overlay_active; then
        log_error "Live overlay is not active" "apt-layer"
        return 1
    fi

    # Stop overlay (this will discard changes)
    if stop_live_overlay; then
        log_success "Live overlay changes rolled back successfully" "apt-layer"
        return 0
    else
        log_error "Failed to rollback live overlay changes" "apt-layer"
        return 1
    fi
}

# List packages installed in live overlay
list_live_overlay_packages() {
    log_info "Listing packages installed in live overlay" "apt-layer"

    if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then
        if [[ -s "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then
            echo "=== Packages Installed in Live Overlay ==="
            cat "$LIVE_OVERLAY_PACKAGE_LOG"
            echo ""
        else
            log_info "No packages installed in live overlay" "apt-layer"
        fi
    else
        log_info "No package log found" "apt-layer"
    fi
}

# Clean live overlay
clean_live_overlay() {
    log_info "Cleaning live overlay" "apt-layer"

    # Stop overlay if active
    if is_live_overlay_active; then
        stop_live_overlay
    fi

    # Clean up overlay directories
    rm -rf "$LIVE_OVERLAY_UPPER_DIR"/* "$LIVE_OVERLAY_WORK_DIR"/* 2>/dev/null

    # Clean up package log
    rm -f "$LIVE_OVERLAY_PACKAGE_LOG"

    # Remove state file
    rm -f "$LIVE_OVERLAY_STATE_FILE"

    log_success "Live overlay cleaned successfully" "apt-layer"
}

# =============================================================================
# INTEGRATION FUNCTIONS
# =============================================================================

# Initialize live overlay system on script startup
init_live_overlay_on_startup() {
    # Only initialize if not already done
    if [[ ! -d "$LIVE_OVERLAY_DIR" ]]; then
        init_live_overlay_system
    fi
}

# Cleanup live overlay on script exit
cleanup_live_overlay_on_exit() {
    # Only cleanup if overlay is active and no processes are using it
    if is_live_overlay_active && ! check_active_processes; then
        log_info "Cleaning up live overlay on exit" "apt-layer"
        stop_live_overlay
    fi
}

# Register cleanup function
trap cleanup_live_overlay_on_exit EXIT

# --- END OF SCRIPTLET: 05-live-overlay.sh ---

# ============================================================================
# OCI Export/Import Integration
# ============================================================================
# OCI Integration for Particle-OS apt-layer Tool
# Provides ComposeFS <-> OCI export/import functionality for container-based layer creation

# OCI registry configuration
declare -A OCI_REGISTRY_CONFIG
OCI_REGISTRY_CONFIG["default_registry"]="docker.io"
OCI_REGISTRY_CONFIG["auth_file"]="$HOME/.docker/config.json"
OCI_REGISTRY_CONFIG["insecure_registries"]=""
OCI_REGISTRY_CONFIG["registry_mirrors"]=""

# OCI image format validation
validate_oci_image_name() {
    local image_name="$1"

    log_debug "Validating OCI image name: $image_name" "apt-layer"

    # Check for empty name
    if [[ -z "$image_name" ]]; then
        log_error "Empty OCI image name provided" "apt-layer"
        return 1
    fi

    # Validate OCI image name format (registry/repository:tag)
    if [[ ! "$image_name" =~ ^[a-zA-Z0-9][a-zA-Z0-9._-]*/[a-zA-Z0-9][a-zA-Z0-9._-]*(:[a-zA-Z0-9._-]*)?$ ]] && \
       [[ ! 
"$image_name" =~ ^[a-zA-Z0-9][a-zA-Z0-9._-]*(:[a-zA-Z0-9._-]*)?$ ]]; then log_error "Invalid OCI image name format: $image_name" "apt-layer" log_error "Expected format: [registry/]repository[:tag]" "apt-layer" return 1 fi log_success "OCI image name validated: $image_name" "apt-layer" return 0 } # Initialize OCI integration system init_oci_system() { log_info "Initializing OCI integration system" "apt-layer" # Ensure OCI workspace directories exist local oci_workspace="${OCI_WORKSPACE_DIR:-$WORKSPACE/oci}" local oci_temp="${OCI_TEMP_DIR:-$oci_workspace/temp}" local oci_cache="${OCI_CACHE_DIR:-$oci_workspace/cache}" local oci_export="${OCI_EXPORT_DIR:-$oci_workspace/export}" local oci_import="${OCI_IMPORT_DIR:-$oci_workspace/import}" mkdir -p "$oci_workspace" mkdir -p "$oci_temp" mkdir -p "$oci_cache" mkdir -p "$oci_export" mkdir -p "$oci_import" # Check for OCI tools local missing_tools=() # Check for skopeo (preferred for OCI operations) if ! command -v skopeo &> /dev/null; then missing_tools+=("skopeo") fi # Check for podman (fallback for OCI operations) if ! command -v podman &> /dev/null; then missing_tools+=("podman") fi # Check for docker (alternative fallback) if ! command -v docker &> /dev/null; then missing_tools+=("docker") fi if [[ ${#missing_tools[@]} -eq 3 ]]; then log_error "No OCI tools found (skopeo, podman, or docker required)" "apt-layer" return 1 fi # Set preferred OCI tool if command -v skopeo &> /dev/null; then OCI_TOOL="skopeo" log_info "Using skopeo for OCI operations" "apt-layer" elif command -v podman &> /dev/null; then OCI_TOOL="podman" log_info "Using podman for OCI operations" "apt-layer" else OCI_TOOL="docker" log_info "Using docker for OCI operations" "apt-layer" fi log_success "OCI integration system initialized with $OCI_TOOL" "apt-layer" return 0 } # Export ComposeFS image to OCI format export_oci_image() { local composefs_image="$1" local oci_image_name="$2" local temp_dir="${3:-$WORKSPACE/oci/export/$(date +%s)-$$}" log_info "Exporting ComposeFS image to OCI: $composefs_image -> $oci_image_name" "apt-layer" # Validate inputs if [[ -z "$composefs_image" ]] || [[ -z "$oci_image_name" ]]; then log_error "Missing required arguments for export_oci_image" "apt-layer" return 1 fi if ! validate_oci_image_name "$oci_image_name"; then return 1 fi # Check if ComposeFS image exists if command -v composefs-info >/dev/null 2>&1; then if ! composefs-info ls "$composefs_image" >/dev/null 2>&1; then log_error "ComposeFS image not found: $composefs_image" "apt-layer" return 1 fi else if ! "$COMPOSEFS_SCRIPT" info "$composefs_image" >/dev/null 2>&1; then log_error "ComposeFS image not found: $composefs_image" "apt-layer" return 1 fi fi # Create temporary directory mkdir -p "$temp_dir" local cleanup_temp=1 # Start transaction start_transaction "export-oci-$composefs_image" # Mount ComposeFS image local mount_point="$temp_dir/mount" mkdir -p "$mount_point" update_transaction_phase "mounting_composefs_image" if command -v mkcomposefs >/dev/null 2>&1; then # Determine object store directory (same directory as image) local object_store_dir=$(dirname "$composefs_image") if ! mount -t composefs -o "basedir=$object_store_dir" "$composefs_image" "$mount_point"; then log_error "Failed to mount ComposeFS image: $composefs_image" "apt-layer" rollback_transaction return 1 fi else if ! 
"$COMPOSEFS_SCRIPT" mount "$composefs_image" "$mount_point"; then log_error "Failed to mount ComposeFS image: $composefs_image" "apt-layer" rollback_transaction return 1 fi fi # Create OCI image structure local oci_dir="$temp_dir/oci" mkdir -p "$oci_dir" update_transaction_phase "creating_oci_structure" if ! create_oci_image_structure "$mount_point" "$oci_dir" "$oci_image_name"; then log_error "Failed to create OCI image structure" "apt-layer" rollback_transaction return 1 fi # Push OCI image to registry update_transaction_phase "pushing_oci_image" if ! push_oci_image "$oci_dir" "$oci_image_name"; then log_error "Failed to push OCI image: $oci_image_name" "apt-layer" rollback_transaction return 1 fi # Unmount ComposeFS image if command -v mkcomposefs >/dev/null 2>&1; then umount "$mount_point" 2>/dev/null || true else "$COMPOSEFS_SCRIPT" unmount "$mount_point" 2>/dev/null || true fi commit_transaction log_success "ComposeFS image exported to OCI: $oci_image_name" "apt-layer" # Cleanup if [[ $cleanup_temp -eq 1 ]]; then rm -rf "$temp_dir" fi return 0 } # Create OCI image structure from filesystem create_oci_image_structure() { local source_dir="$1" local oci_dir="$2" local image_name="$3" log_debug "Creating OCI image structure from: $source_dir" "apt-layer" # Create OCI directory structure mkdir -p "$oci_dir"/{blobs,refs} # Create manifest local manifest_file="$oci_dir/manifest.json" local config_file="$oci_dir/config.json" # Generate image configuration cat > "$config_file" << EOF { "architecture": "amd64", "config": { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": null, "Image": "", "Volumes": null, "WorkingDir": "", "Entrypoint": null, "OnBuild": null, "Labels": { "org.opencontainers.image.title": "$image_name", "org.opencontainers.image.description": "Exported from ComposeFS image", "org.opencontainers.image.created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" } }, "container": "", "container_config": { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": null, "Image": "", "Volumes": null, "WorkingDir": "", "Entrypoint": null, "OnBuild": null, "Labels": null }, "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "docker_version": "20.10.0", "history": [ { "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "created_by": "apt-layer export_oci_image", "comment": "Exported from ComposeFS image" } ], "os": "linux", "rootfs": { "type": "layers", "diff_ids": [] } } EOF # Create layer from source directory local layer_file="$oci_dir/layer.tar" if ! 
tar -cf "$layer_file" -C "$source_dir" .; then log_error "Failed to create layer tarball" "apt-layer" return 1 fi # Calculate layer digest local layer_digest layer_digest=$(sha256sum "$layer_file" | cut -d' ' -f1) local layer_blob="$oci_dir/blobs/sha256/$layer_digest" # Move layer to blobs directory mkdir -p "$(dirname "$layer_blob")" mv "$layer_file" "$layer_blob" # Update config with layer diff_id local diff_id="sha256:$layer_digest" jq ".rootfs.diff_ids = [\"$diff_id\"]" "$config_file" > "$config_file.tmp" && mv "$config_file.tmp" "$config_file" # Calculate config digest local config_digest config_digest=$(sha256sum "$config_file" | cut -d' ' -f1) local config_blob="$oci_dir/blobs/sha256/$config_digest" # Move config to blobs directory mkdir -p "$(dirname "$config_blob")" mv "$config_file" "$config_blob" # Create manifest cat > "$manifest_file" << EOF [ { "Config": "blobs/sha256/$config_digest", "RepoTags": ["$image_name"], "Layers": ["blobs/sha256/$layer_digest"] } ] EOF log_success "OCI image structure created" "apt-layer" return 0 } # Push OCI image to registry push_oci_image() { local oci_dir="$1" local image_name="$2" log_debug "Pushing OCI image: $image_name" "apt-layer" # Validate image name before attempting to push if ! validate_oci_image_name "$image_name"; then return 1 fi # Validate OCI directory structure if [[ ! -f "$oci_dir/manifest.json" ]]; then log_error "Invalid OCI directory structure: missing manifest.json" "apt-layer" return 1 fi case "$OCI_TOOL" in skopeo) # Push image with retry logic local retry_count=0 local max_retries=3 while [[ $retry_count -lt $max_retries ]]; do if skopeo copy "dir:$oci_dir" "docker://$image_name"; then log_success "OCI image pushed successfully: $image_name" "apt-layer" return 0 else retry_count=$((retry_count + 1)) if [[ $retry_count -lt $max_retries ]]; then log_warning "Failed to push image (attempt $retry_count/$max_retries), retrying..." "apt-layer" sleep 2 else log_error "Failed to push image after $max_retries attempts: $image_name" "apt-layer" return 1 fi fi done ;; podman) if ! podman load -i "$oci_dir/manifest.json" && \ ! podman tag "$(podman images --format '{{.ID}}' | head -1)" "$image_name" && \ ! podman push "$image_name"; then log_error "Failed to push image with podman" "apt-layer" return 1 fi ;; docker) if ! docker load -i "$oci_dir/manifest.json" && \ ! docker tag "$(docker images --format '{{.ID}}' | head -1)" "$image_name" && \ ! docker push "$image_name"; then log_error "Failed to push image with docker" "apt-layer" return 1 fi ;; esac log_success "OCI image pushed: $image_name" "apt-layer" return 0 } # Import OCI image as ComposeFS image import_oci_image() { local oci_image_name="$1" local composefs_image="$2" local temp_dir="${3:-$WORKSPACE/oci/import/$(date +%s)-$$}" log_info "Importing OCI image as ComposeFS: $oci_image_name -> $composefs_image" "apt-layer" # Validate inputs if [[ -z "$oci_image_name" ]] || [[ -z "$composefs_image" ]]; then log_error "Missing required arguments for import_oci_image" "apt-layer" return 1 fi if ! validate_oci_image_name "$oci_image_name"; then return 1 fi # Create temporary directory mkdir -p "$temp_dir" local cleanup_temp=1 # Start transaction start_transaction "import-oci-$oci_image_name" # Pull OCI image update_transaction_phase "pulling_oci_image" if ! 
pull_oci_image "$oci_image_name" "$temp_dir"; then log_error "Failed to pull OCI image: $oci_image_name" "apt-layer" rollback_transaction return 1 fi # Extract image filesystem update_transaction_phase "extracting_image_filesystem" local rootfs_dir="$temp_dir/rootfs" if ! extract_oci_filesystem "$temp_dir" "$rootfs_dir"; then log_error "Failed to extract OCI filesystem" "apt-layer" rollback_transaction return 1 fi # Create ComposeFS image from extracted filesystem update_transaction_phase "creating_composefs_image" if command -v mkcomposefs >/dev/null 2>&1; then # Create object store directory (same directory as image) local object_store_dir=$(dirname "$composefs_image") mkdir -p "$object_store_dir" if ! mkcomposefs "$rootfs_dir" "$composefs_image" --digest-store="$object_store_dir"; then log_error "Failed to create ComposeFS image: $composefs_image" "apt-layer" rollback_transaction return 1 fi else if ! "$COMPOSEFS_SCRIPT" create "$composefs_image" "$rootfs_dir"; then log_error "Failed to create ComposeFS image: $composefs_image" "apt-layer" rollback_transaction return 1 fi fi commit_transaction log_success "OCI image imported as ComposeFS: $composefs_image" "apt-layer" # Cleanup if [[ $cleanup_temp -eq 1 ]]; then rm -rf "$temp_dir" fi return 0 } # Pull OCI image from registry pull_oci_image() { local image_name="$1" local temp_dir="$2" log_debug "Pulling OCI image: $image_name" "apt-layer" # Validate image name before attempting to pull if ! validate_oci_image_name "$image_name"; then return 1 fi case "$OCI_TOOL" in skopeo) # Validate image exists before pulling log_debug "Validating image exists: $image_name" "apt-layer" if ! skopeo inspect "docker://$image_name" >/dev/null 2>&1; then log_error "Image not found or not accessible: $image_name" "apt-layer" return 1 fi # Pull image with retry logic local retry_count=0 local max_retries=3 while [[ $retry_count -lt $max_retries ]]; do if skopeo copy "docker://$image_name" "dir:$temp_dir"; then log_success "OCI image pulled successfully: $image_name" "apt-layer" return 0 else retry_count=$((retry_count + 1)) if [[ $retry_count -lt $max_retries ]]; then log_warning "Failed to pull image (attempt $retry_count/$max_retries), retrying..." "apt-layer" sleep 2 else log_error "Failed to pull image after $max_retries attempts: $image_name" "apt-layer" return 1 fi fi done ;; podman) if ! podman pull "$image_name" && \ ! podman save "$image_name" -o "$temp_dir/image.tar"; then log_error "Failed to pull image with podman" "apt-layer" return 1 fi ;; docker) if ! docker pull "$image_name" && \ ! 
docker save "$image_name" -o "$temp_dir/image.tar"; then log_error "Failed to pull image with docker" "apt-layer" return 1 fi ;; esac log_success "OCI image pulled: $image_name" "apt-layer" return 0 } # Extract filesystem from OCI image extract_oci_filesystem() { local oci_dir="$1" local rootfs_dir="$2" log_debug "Extracting OCI filesystem to: $rootfs_dir" "apt-layer" mkdir -p "$rootfs_dir" # Handle different OCI tool outputs if [[ -f "$oci_dir/manifest.json" ]]; then # skopeo output local layer_file layer_file=$(jq -r '.[0].Layers[0]' "$oci_dir/manifest.json") if [[ -f "$oci_dir/$layer_file" ]]; then tar -xf "$oci_dir/$layer_file" -C "$rootfs_dir" else log_error "Layer file not found: $oci_dir/$layer_file" "apt-layer" return 1 fi elif [[ -f "$oci_dir/image.tar" ]]; then # podman/docker output tar -xf "$oci_dir/image.tar" -C "$rootfs_dir" # Find and extract the layer local layer_file layer_file=$(find "$rootfs_dir" -name "*.tar" | head -1) if [[ -n "$layer_file" ]]; then mkdir -p "$rootfs_dir.tmp" tar -xf "$layer_file" -C "$rootfs_dir.tmp" mv "$rootfs_dir.tmp"/* "$rootfs_dir/" rmdir "$rootfs_dir.tmp" fi else log_error "No valid OCI image structure found" "apt-layer" return 1 fi log_success "OCI filesystem extracted" "apt-layer" return 0 } # List available OCI images list_oci_images() { log_info "Listing available OCI images" "apt-layer" case "$OCI_TOOL" in skopeo) # skopeo doesn't have a direct list command, but we can try to list from a registry log_info "Skopeo doesn't support listing local images" "apt-layer" log_info "Use 'skopeo list-tags docker://registry/repository' to list remote tags" "apt-layer" log_info "Or use podman/docker to list local images" "apt-layer" ;; podman) podman images --format "table {{.Repository}}:{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}" ;; docker) docker images --format "table {{.Repository}}:{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}" ;; esac } # Get OCI image information get_oci_image_info() { local image_name="$1" log_info "Getting OCI image info: $image_name" "apt-layer" if ! validate_oci_image_name "$image_name"; then return 1 fi case "$OCI_TOOL" in skopeo) # skopeo inspect provides detailed image information if ! skopeo inspect "docker://$image_name"; then log_error "Failed to inspect image: $image_name" "apt-layer" return 1 fi ;; podman) if ! podman inspect "$image_name"; then log_error "Failed to inspect image: $image_name" "apt-layer" return 1 fi ;; docker) if ! docker inspect "$image_name"; then log_error "Failed to inspect image: $image_name" "apt-layer" return 1 fi ;; esac } # Remove OCI image remove_oci_image() { local image_name="$1" log_info "Removing OCI image: $image_name" "apt-layer" if ! validate_oci_image_name "$image_name"; then return 1 fi case "$OCI_TOOL" in skopeo) # skopeo doesn't support removing images from registries # This would require registry-specific API calls log_warning "Image removal not supported with skopeo" "apt-layer" log_info "Use registry-specific tools or podman/docker to remove images" "apt-layer" return 1 ;; podman) if ! podman rmi "$image_name"; then log_error "Failed to remove image with podman" "apt-layer" return 1 fi ;; docker) if ! 
docker rmi "$image_name"; then log_error "Failed to remove image with docker" "apt-layer" return 1 fi ;; esac log_success "OCI image removed: $image_name" "apt-layer" return 0 } # OCI system status oci_status() { log_info "OCI Integration System Status" "apt-layer" echo "=== OCI Tool Configuration ===" echo "Preferred tool: $OCI_TOOL" echo "Available tools:" command -v skopeo &> /dev/null && echo " ✓ skopeo" command -v podman &> /dev/null && echo " ✓ podman" command -v docker &> /dev/null && echo " ✓ docker" echo "" echo "=== OCI Workspace ===" echo "OCI directory: ${OCI_WORKSPACE_DIR:-$WORKSPACE/oci}" echo "Export directory: ${OCI_EXPORT_DIR:-$WORKSPACE/oci/export}" echo "Import directory: ${OCI_IMPORT_DIR:-$WORKSPACE/oci/import}" echo "Cache directory: ${OCI_CACHE_DIR:-$WORKSPACE/oci/cache}" echo "" echo "=== ComposeFS Backend ===" if [[ -f "$COMPOSEFS_SCRIPT" ]]; then echo "ComposeFS script: $COMPOSEFS_SCRIPT" echo "ComposeFS version: $("$COMPOSEFS_SCRIPT" --version 2>/dev/null || echo 'Version info not available')" else echo "ComposeFS script: Not found at $COMPOSEFS_SCRIPT" fi echo "" echo "=== Available OCI Images ===" list_oci_images } # Skopeo-specific operations skopeo_list_tags() { local registry_repo="$1" log_info "Listing tags for: $registry_repo" "apt-layer" if ! command -v skopeo &> /dev/null; then log_error "skopeo not available" "apt-layer" return 1 fi if ! skopeo list-tags "docker://$registry_repo"; then log_error "Failed to list tags for: $registry_repo" "apt-layer" return 1 fi } skopeo_validate_image() { local image_name="$1" log_debug "Validating OCI image: $image_name" "apt-layer" if ! command -v skopeo &> /dev/null; then log_error "skopeo not available" "apt-layer" return 1 fi if ! validate_oci_image_name "$image_name"; then return 1 fi # Check if image exists and is accessible if ! skopeo inspect "docker://$image_name" >/dev/null 2>&1; then log_error "Image not found or not accessible: $image_name" "apt-layer" return 1 fi log_success "Image validated: $image_name" "apt-layer" return 0 } skopeo_copy_with_auth() { local source="$1" local destination="$2" local auth_file="${3:-}" log_debug "Copying OCI image: $source -> $destination" "apt-layer" if ! command -v skopeo &> /dev/null; then log_error "skopeo not available" "apt-layer" return 1 fi local skopeo_cmd="skopeo copy" # Add authentication if provided if [[ -n "$auth_file" ]] && [[ -f "$auth_file" ]]; then skopeo_cmd="$skopeo_cmd --authfile $auth_file" fi # Add source and destination skopeo_cmd="$skopeo_cmd $source $destination" if ! eval "$skopeo_cmd"; then log_error "Failed to copy image: $source -> $destination" "apt-layer" return 1 fi log_success "Image copied successfully: $source -> $destination" "apt-layer" return 0 } skopeo_inspect_detailed() { local image_name="$1" local output_format="${2:-json}" log_debug "Inspecting OCI image: $image_name" "apt-layer" if ! command -v skopeo &> /dev/null; then log_error "skopeo not available" "apt-layer" return 1 fi if ! 
validate_oci_image_name "$image_name"; then return 1 fi case "$output_format" in json) skopeo inspect "docker://$image_name" ;; raw) skopeo inspect --raw "docker://$image_name" ;; config) skopeo inspect --config "docker://$image_name" ;; *) log_error "Invalid output format: $output_format" "apt-layer" log_info "Valid formats: json, raw, config" "apt-layer" return 1 ;; esac } # --- END OF SCRIPTLET: 06-oci-integration.sh --- # ============================================================================ # Bootloader Integration (UEFI/GRUB/systemd-boot) # ============================================================================ # Ubuntu uBlue apt-layer Bootloader Integration # Provides comprehensive bootloader management for immutable deployments # Supports UEFI, GRUB, systemd-boot, and kernel argument management # ============================================================================= # BOOTLOADER SYSTEM FUNCTIONS # ============================================================================= # Bootloader configuration (with fallbacks for when particle-config.sh is not loaded) BOOTLOADER_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/particle-os}/bootloader" BOOTLOADER_STATE_DIR="${UBLUE_ROOT:-/var/lib/particle-os}/bootloader" BOOTLOADER_ENTRIES_DIR="$BOOTLOADER_STATE_DIR/entries" BOOTLOADER_BACKUP_DIR="$BOOTLOADER_STATE_DIR/backups" KARGS_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/particle-os}/kargs" KARGS_STATE_FILE="$BOOTLOADER_STATE_DIR/kargs.json" # Initialize bootloader system init_bootloader_system() { log_info "Initializing bootloader system" "apt-layer" # Create bootloader directories mkdir -p "$BOOTLOADER_CONFIG_DIR" "$BOOTLOADER_STATE_DIR" "$BOOTLOADER_ENTRIES_DIR" "$BOOTLOADER_BACKUP_DIR" mkdir -p "$KARGS_CONFIG_DIR" # Set proper permissions chmod 755 "$BOOTLOADER_CONFIG_DIR" "$BOOTLOADER_STATE_DIR" chmod 700 "$BOOTLOADER_ENTRIES_DIR" "$BOOTLOADER_BACKUP_DIR" # Initialize kernel arguments state if it doesn't exist if [[ ! 
-f "$KARGS_STATE_FILE" ]]; then echo '{"current": [], "pending": [], "history": []}' > "$KARGS_STATE_FILE" chmod 644 "$KARGS_STATE_FILE" fi log_success "Bootloader system initialized" "apt-layer" } # Detect bootloader type detect_bootloader_type() { log_debug "Detecting bootloader type" "apt-layer" # Check for UEFI if [[ -d "/sys/firmware/efi" ]]; then log_info "UEFI system detected" "apt-layer" # Check for systemd-boot (preferred for UEFI) if command -v bootctl &>/dev/null && [[ -d "/boot/loader" ]]; then echo "systemd-boot" return 0 fi # Check for GRUB UEFI if command -v grub-install &>/dev/null && [[ -f "/boot/grub/grub.cfg" ]]; then echo "grub-uefi" return 0 fi # Generic UEFI echo "uefi" return 0 fi # Check for legacy BIOS bootloaders if command -v grub-install &>/dev/null && [[ -f "/boot/grub/grub.cfg" ]]; then echo "grub-legacy" return 0 fi if command -v lilo &>/dev/null; then echo "lilo" return 0 fi if command -v syslinux &>/dev/null; then echo "syslinux" return 0 fi log_warning "No supported bootloader detected" "apt-layer" echo "unknown" return 1 } # Check if secure boot is enabled is_secure_boot_enabled() { if [[ -d "/sys/firmware/efi" ]]; then if command -v mokutil &>/dev/null; then if mokutil --sb-state 2>/dev/null | grep -q "SecureBoot enabled"; then return 0 fi fi # Alternative check via efivar if [[ -f "/sys/firmware/efi/efivars/SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c" ]]; then local secure_boot_value secure_boot_value=$(od -An -tu1 /sys/firmware/efi/efivars/SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c 2>/dev/null | tr -d ' ' | tail -c1) if [[ "$secure_boot_value" == "1" ]]; then return 0 fi fi fi return 1 } # Get current kernel arguments get_current_kernel_args() { local kernel_args kernel_args=$(cat /proc/cmdline 2>/dev/null || echo "") echo "$kernel_args" } # Parse kernel arguments into array parse_kernel_args() { local cmdline="$1" local args=() # Split cmdline into individual arguments while IFS= read -r -d '' arg; do if [[ -n "$arg" ]]; then args+=("$arg") fi done < <(echo -n "$cmdline" | tr ' ' '\0') echo "${args[@]}" } # Add kernel argument add_kernel_arg() { local arg="$1" if [[ -z "$arg" ]]; then log_error "No kernel argument provided" "apt-layer" return 1 fi log_info "Adding kernel argument: $arg" "apt-layer" # Read current kernel arguments state local current_args current_args=$(jq -r '.current[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") # Check if argument already exists if echo "$current_args" | grep -q "^$arg$"; then log_warning "Kernel argument already exists: $arg" "apt-layer" return 0 fi # Add to pending arguments local pending_args pending_args=$(jq -r '.pending[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") if echo "$pending_args" | grep -q "^$arg$"; then log_warning "Kernel argument already pending: $arg" "apt-layer" return 0 fi # Update state file jq --arg arg "$arg" '.pending += [$arg]' "$KARGS_STATE_FILE" > "$KARGS_STATE_FILE.tmp" && \ mv "$KARGS_STATE_FILE.tmp" "$KARGS_STATE_FILE" log_success "Kernel argument added to pending: $arg" "apt-layer" return 0 } # Remove kernel argument remove_kernel_arg() { local arg="$1" if [[ -z "$arg" ]]; then log_error "No kernel argument provided" "apt-layer" return 1 fi log_info "Removing kernel argument: $arg" "apt-layer" # Remove from pending arguments jq --arg arg "$arg" '(.pending | map(select(. 
!= $arg))) as $new_pending | .pending = $new_pending' "$KARGS_STATE_FILE" > "$KARGS_STATE_FILE.tmp" && \ mv "$KARGS_STATE_FILE.tmp" "$KARGS_STATE_FILE" log_success "Kernel argument removed from pending: $arg" "apt-layer" return 0 } # List kernel arguments list_kernel_args() { log_info "Listing kernel arguments" "apt-layer" echo "=== Current Kernel Arguments ===" local current_args current_args=$(get_current_kernel_args) if [[ -n "$current_args" ]]; then echo "$current_args" | tr ' ' '\n' | while read -r arg; do if [[ -n "$arg" ]]; then echo " $arg" fi done else log_info "No current kernel arguments found" "apt-layer" fi echo "" echo "=== Pending Kernel Arguments ===" local pending_args pending_args=$(jq -r '.pending[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") if [[ -n "$pending_args" ]]; then echo "$pending_args" | while read -r arg; do if [[ -n "$arg" ]]; then echo " $arg (pending)" fi done else log_info "No pending kernel arguments" "apt-layer" fi echo "" } # Clear pending kernel arguments clear_pending_kargs() { log_info "Clearing pending kernel arguments" "apt-layer" jq '.pending = []' "$KARGS_STATE_FILE" > "$KARGS_STATE_FILE.tmp" && \ mv "$KARGS_STATE_FILE.tmp" "$KARGS_STATE_FILE" log_success "Pending kernel arguments cleared" "apt-layer" } # Apply kernel arguments to deployment apply_kernel_args_to_deployment() { local deployment_id="$1" if [[ -z "$deployment_id" ]]; then log_error "No deployment ID provided" "apt-layer" return 1 fi log_info "Applying kernel arguments to deployment: $deployment_id" "apt-layer" # Get pending kernel arguments local pending_args pending_args=$(jq -r '.pending[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") if [[ -z "$pending_args" ]]; then log_info "No pending kernel arguments to apply" "apt-layer" return 0 fi # Create kernel arguments configuration for deployment local kargs_config="$BOOTLOADER_ENTRIES_DIR/${deployment_id}.kargs" echo "# Kernel arguments for deployment: $deployment_id" > "$kargs_config" echo "# Generated on: $(date)" >> "$kargs_config" echo "" >> "$kargs_config" echo "$pending_args" | while read -r arg; do if [[ -n "$arg" ]]; then echo "$arg" >> "$kargs_config" fi done # Move pending arguments to current and clear pending local current_args current_args=$(jq -r '.current[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") # Combine current and pending arguments local all_args=() while IFS= read -r arg; do if [[ -n "$arg" ]]; then all_args+=("$arg") fi done < <(echo "$current_args") while IFS= read -r arg; do if [[ -n "$arg" ]]; then all_args+=("$arg") fi done < <(echo "$pending_args") # Update state file local args_json args_json=$(printf '%s\n' "${all_args[@]}" | jq -R . | jq -s .) 
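    # Illustrative sketch of the state-file update performed by the jq call
    # below (the argument values are hypothetical, not taken from a real system).
    # Given a state file such as:
    #   {"current": ["quiet"], "pending": ["splash", "mitigations=off"], "history": []}
    # merging current + pending and clearing pending yields:
    #   {"current": ["quiet", "splash", "mitigations=off"], "pending": [], "history": []}
    # The key names follow the layout created by init_bootloader_system.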
jq --argjson current "$args_json" '.current = $current | .pending = []' "$KARGS_STATE_FILE" > "$KARGS_STATE_FILE.tmp" && \ mv "$KARGS_STATE_FILE.tmp" "$KARGS_STATE_FILE" log_success "Kernel arguments applied to deployment: $deployment_id" "apt-layer" return 0 } # Create bootloader entry for deployment create_bootloader_entry() { local deployment_id="$1" local deployment_dir="$2" local title="${3:-Ubuntu uBlue}" if [[ -z "$deployment_id" ]] || [[ -z "$deployment_dir" ]]; then log_error "Deployment ID and directory required" "apt-layer" return 1 fi log_info "Creating bootloader entry for deployment: $deployment_id" "apt-layer" # Detect bootloader type local bootloader_type bootloader_type=$(detect_bootloader_type) case "$bootloader_type" in "systemd-boot") create_systemd_boot_entry "$deployment_id" "$deployment_dir" "$title" ;; "grub-uefi"|"grub-legacy") create_grub_boot_entry "$deployment_id" "$deployment_dir" "$title" ;; "uefi") create_uefi_boot_entry "$deployment_id" "$deployment_dir" "$title" ;; *) log_warning "Unsupported bootloader type: $bootloader_type" "apt-layer" return 1 ;; esac return 0 } # Create systemd-boot entry create_systemd_boot_entry() { local deployment_id="$1" local deployment_dir="$2" local title="$3" log_info "Creating systemd-boot entry" "apt-layer" local entry_file="/boot/loader/entries/${deployment_id}.conf" local kernel_path="$deployment_dir/vmlinuz" local initrd_path="$deployment_dir/initrd.img" # Check if kernel and initrd exist if [[ ! -f "$kernel_path" ]]; then log_error "Kernel not found: $kernel_path" "apt-layer" return 1 fi if [[ ! -f "$initrd_path" ]]; then log_error "Initrd not found: $initrd_path" "apt-layer" return 1 fi # Get kernel arguments local kargs_file="$BOOTLOADER_ENTRIES_DIR/${deployment_id}.kargs" local kargs="" if [[ -f "$kargs_file" ]]; then kargs=$(cat "$kargs_file" | grep -v '^#' | tr '\n' ' ') fi # Create systemd-boot entry cat > "$entry_file" << EOF title $title ($deployment_id) linux $kernel_path initrd $initrd_path options root=UUID=$(get_root_uuid) ro $kargs EOF log_success "systemd-boot entry created: $entry_file" "apt-layer" return 0 } # Create GRUB boot entry create_grub_boot_entry() { local deployment_id="$1" local deployment_dir="$2" local title="$3" log_info "Creating GRUB boot entry" "apt-layer" # This would typically involve updating /etc/default/grub and running update-grub # For now, we'll create a custom GRUB configuration snippet local grub_config_dir="/etc/grub.d" local grub_script="$grub_config_dir/10_${deployment_id}" if [[ ! 
-d "$grub_config_dir" ]]; then log_error "GRUB configuration directory not found: $grub_config_dir" "apt-layer" return 1 fi # Get kernel arguments local kargs_file="$BOOTLOADER_ENTRIES_DIR/${deployment_id}.kargs" local kargs="" if [[ -f "$kargs_file" ]]; then kargs=$(cat "$kargs_file" | grep -v '^#' | tr '\n' ' ') fi # Create GRUB script cat > "$grub_script" << EOF #!/bin/sh exec tail -n +3 \$0 menuentry '$title ($deployment_id)' { linux $deployment_dir/vmlinuz root=UUID=$(get_root_uuid) ro $kargs initrd $deployment_dir/initrd.img } EOF chmod +x "$grub_script" # Update GRUB configuration if command -v update-grub &>/dev/null; then if update-grub; then log_success "GRUB configuration updated" "apt-layer" else log_warning "Failed to update GRUB configuration" "apt-layer" fi fi log_success "GRUB boot entry created: $grub_script" "apt-layer" return 0 } # Create UEFI boot entry create_uefi_boot_entry() { local deployment_id="$1" local deployment_dir="$2" local title="$3" log_info "Creating UEFI boot entry" "apt-layer" if ! command -v efibootmgr &>/dev/null; then log_error "efibootmgr not available" "apt-layer" return 1 fi # Find EFI partition local efi_partition efi_partition=$(find_efi_partition) if [[ -z "$efi_partition" ]]; then log_error "EFI partition not found" "apt-layer" return 1 fi # Get kernel arguments local kargs_file="$BOOTLOADER_ENTRIES_DIR/${deployment_id}.kargs" local kargs="" if [[ -f "$kargs_file" ]]; then kargs=$(cat "$kargs_file" | grep -v '^#' | tr '\n' ' ') fi # Create UEFI boot entry local kernel_path="$deployment_dir/vmlinuz" local boot_args="root=UUID=$(get_root_uuid) ro $kargs" if efibootmgr --create --disk "$efi_partition" --part 1 --label "$title ($deployment_id)" --loader "$kernel_path" --unicode "$boot_args"; then log_success "UEFI boot entry created" "apt-layer" return 0 else log_error "Failed to create UEFI boot entry" "apt-layer" return 1 fi } # Get root device UUID get_root_uuid() { local root_device root_device=$(findmnt -n -o SOURCE /) if [[ -n "$root_device" ]]; then blkid -s UUID -o value "$root_device" 2>/dev/null || echo "unknown" else echo "unknown" fi } # Find EFI partition find_efi_partition() { # Look for EFI partition in /proc/partitions local efi_partition efi_partition=$(lsblk -n -o NAME,MOUNTPOINT,FSTYPE | grep -E '/boot/efi|/efi' | awk '{print $1}' | head -1) if [[ -n "$efi_partition" ]]; then echo "/dev/$efi_partition" else # Fallback: look for EFI partition by filesystem type lsblk -n -o NAME,FSTYPE | grep vfat | awk '{print "/dev/" $1}' | head -1 fi } # Set default boot entry set_default_boot_entry() { local deployment_id="$1" if [[ -z "$deployment_id" ]]; then log_error "Deployment ID required" "apt-layer" return 1 fi log_info "Setting default boot entry: $deployment_id" "apt-layer" # Detect bootloader type local bootloader_type bootloader_type=$(detect_bootloader_type) case "$bootloader_type" in "systemd-boot") set_systemd_boot_default "$deployment_id" ;; "grub-uefi"|"grub-legacy") set_grub_default "$deployment_id" ;; "uefi") set_uefi_default "$deployment_id" ;; *) log_warning "Unsupported bootloader type: $bootloader_type" "apt-layer" return 1 ;; esac return 0 } # Set systemd-boot default set_systemd_boot_default() { local deployment_id="$1" local loader_conf="/boot/loader/loader.conf" local entry_file="/boot/loader/entries/${deployment_id}.conf" if [[ ! 
-f "$entry_file" ]]; then log_error "Boot entry not found: $entry_file" "apt-layer" return 1 fi # Update loader.conf if [[ -f "$loader_conf" ]]; then # Backup original cp "$loader_conf" "$loader_conf.backup" # Update default entry sed -i "s/^default.*/default $deployment_id/" "$loader_conf" 2>/dev/null || \ echo "default $deployment_id" >> "$loader_conf" else # Create loader.conf cat > "$loader_conf" << EOF default $deployment_id timeout 5 editor no EOF fi log_success "systemd-boot default set to: $deployment_id" "apt-layer" return 0 } # Set GRUB default set_grub_default() { local deployment_id="$1" local grub_default="/etc/default/grub" if [[ -f "$grub_default" ]]; then # Backup original cp "$grub_default" "$grub_default.backup" # Update default entry sed -i "s/^GRUB_DEFAULT.*/GRUB_DEFAULT=\"$deployment_id\"/" "$grub_default" 2>/dev/null || \ echo "GRUB_DEFAULT=\"$deployment_id\"" >> "$grub_default" # Update GRUB configuration if command -v update-grub &>/dev/null; then if update-grub; then log_success "GRUB default set to: $deployment_id" "apt-layer" return 0 else log_error "Failed to update GRUB configuration" "apt-layer" return 1 fi fi else log_error "GRUB default configuration not found: $grub_default" "apt-layer" return 1 fi } # Set UEFI default set_uefi_default() { local deployment_id="$1" if ! command -v efibootmgr &>/dev/null; then log_error "efibootmgr not available" "apt-layer" return 1 fi # Find boot entry local boot_entry boot_entry=$(efibootmgr | grep "$deployment_id" | head -1 | sed 's/Boot\([0-9a-fA-F]*\).*/\1/') if [[ -n "$boot_entry" ]]; then if efibootmgr --bootnext "$boot_entry"; then log_success "UEFI default set to: $deployment_id" "apt-layer" return 0 else log_error "Failed to set UEFI default" "apt-layer" return 1 fi else log_error "UEFI boot entry not found: $deployment_id" "apt-layer" return 1 fi } # List boot entries list_boot_entries() { log_info "Listing boot entries" "apt-layer" # Detect bootloader type local bootloader_type bootloader_type=$(detect_bootloader_type) echo "=== Boot Entries ($bootloader_type) ===" case "$bootloader_type" in "systemd-boot") list_systemd_boot_entries ;; "grub-uefi"|"grub-legacy") list_grub_entries ;; "uefi") list_uefi_entries ;; *) log_warning "Unsupported bootloader type: $bootloader_type" "apt-layer" ;; esac echo "" } # List systemd-boot entries list_systemd_boot_entries() { local entries_dir="/boot/loader/entries" if [[ -d "$entries_dir" ]]; then for entry in "$entries_dir"/*.conf; do if [[ -f "$entry" ]]; then local title title=$(grep "^title" "$entry" | cut -d' ' -f2- | head -1) local deployment_id deployment_id=$(basename "$entry" .conf) echo " $deployment_id: $title" fi done else log_info "No systemd-boot entries found" "apt-layer" fi } # List GRUB entries list_grub_entries() { local grub_cfg="/boot/grub/grub.cfg" if [[ -f "$grub_cfg" ]]; then grep -A1 "menuentry" "$grub_cfg" | grep -E "(menuentry|particle-os)" | while read -r line; do if [[ "$line" =~ menuentry ]]; then local title title=$(echo "$line" | sed 's/.*menuentry '\''\([^'\'']*\)'\''.*/\1/') echo " $title" fi done else log_info "No GRUB entries found" "apt-layer" fi } # List UEFI entries list_uefi_entries() { if command -v efibootmgr &>/dev/null; then efibootmgr | grep -E "Boot[0-9a-fA-F]*" | while read -r line; do local boot_id boot_id=$(echo "$line" | sed 's/Boot\([0-9a-fA-F]*\).*/\1/') local title title=$(echo "$line" | sed 's/.*\* \(.*\)/\1/') echo " $boot_id: $title" done else log_info "efibootmgr not available" "apt-layer" fi } # Remove boot entry 
remove_boot_entry() { local deployment_id="$1" if [[ -z "$deployment_id" ]]; then log_error "Deployment ID required" "apt-layer" return 1 fi log_info "Removing boot entry: $deployment_id" "apt-layer" # Detect bootloader type local bootloader_type bootloader_type=$(detect_bootloader_type) case "$bootloader_type" in "systemd-boot") remove_systemd_boot_entry "$deployment_id" ;; "grub-uefi"|"grub-legacy") remove_grub_entry "$deployment_id" ;; "uefi") remove_uefi_entry "$deployment_id" ;; *) log_warning "Unsupported bootloader type: $bootloader_type" "apt-layer" return 1 ;; esac return 0 } # Remove systemd-boot entry remove_systemd_boot_entry() { local deployment_id="$1" local entry_file="/boot/loader/entries/${deployment_id}.conf" if [[ -f "$entry_file" ]]; then if rm "$entry_file"; then log_success "systemd-boot entry removed: $deployment_id" "apt-layer" return 0 else log_error "Failed to remove systemd-boot entry" "apt-layer" return 1 fi else log_warning "systemd-boot entry not found: $deployment_id" "apt-layer" return 0 fi } # Remove GRUB entry remove_grub_entry() { local deployment_id="$1" local grub_script="/etc/grub.d/10_${deployment_id}" if [[ -f "$grub_script" ]]; then if rm "$grub_script"; then log_success "GRUB entry removed: $deployment_id" "apt-layer" # Update GRUB configuration if command -v update-grub &>/dev/null; then update-grub fi return 0 else log_error "Failed to remove GRUB entry" "apt-layer" return 1 fi else log_warning "GRUB entry not found: $deployment_id" "apt-layer" return 0 fi } # Remove UEFI entry remove_uefi_entry() { local deployment_id="$1" if ! command -v efibootmgr &>/dev/null; then log_error "efibootmgr not available" "apt-layer" return 1 fi # Find boot entry local boot_entry boot_entry=$(efibootmgr | grep "$deployment_id" | head -1 | sed 's/Boot\([0-9a-fA-F]*\).*/\1/') if [[ -n "$boot_entry" ]]; then if efibootmgr --bootnum "$boot_entry" --delete-bootnum; then log_success "UEFI entry removed: $deployment_id" "apt-layer" return 0 else log_error "Failed to remove UEFI entry" "apt-layer" return 1 fi else log_warning "UEFI entry not found: $deployment_id" "apt-layer" return 0 fi } # Get bootloader status get_bootloader_status() { log_info "Getting bootloader status" "apt-layer" echo "=== Bootloader Status ===" # Detect bootloader type local bootloader_type bootloader_type=$(detect_bootloader_type) echo "Bootloader Type: $bootloader_type" # Check secure boot status if is_secure_boot_enabled; then echo "Secure Boot: Enabled" else echo "Secure Boot: Disabled" fi # Show current kernel arguments echo "" echo "Current Kernel Arguments:" local current_args current_args=$(get_current_kernel_args) if [[ -n "$current_args" ]]; then echo "$current_args" | tr ' ' '\n' | while read -r arg; do if [[ -n "$arg" ]]; then echo " $arg" fi done else echo " None" fi # Show pending kernel arguments echo "" echo "Pending Kernel Arguments:" local pending_args pending_args=$(jq -r '.pending[]?' "$KARGS_STATE_FILE" 2>/dev/null || echo "") if [[ -n "$pending_args" ]]; then echo "$pending_args" | while read -r arg; do if [[ -n "$arg" ]]; then echo " $arg (pending)" fi done else echo " None" fi echo "" } # ============================================================================= # INTEGRATION FUNCTIONS # ============================================================================= # Initialize bootloader system on script startup init_bootloader_on_startup() { # Only initialize if not already done if [[ ! 
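# Illustrative note (hypothetical label): set_uefi_default and remove_uefi_entry above extract
# the hex boot number from standard efibootmgr output lines such as:
#   Boot0003* Ubuntu uBlue (commit-20250715-120000-4321)
# The sed expression reduces this to "0003", which is then passed to efibootmgr --bootnext or
# --bootnum/--delete-bootnum. Entries whose label does not contain the deployment ID are
# never touched.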
-d "$BOOTLOADER_STATE_DIR" ]]; then init_bootloader_system fi } # Cleanup bootloader on script exit cleanup_bootloader_on_exit() { # Clean up temporary files rm -f "$KARGS_STATE_FILE.tmp" 2>/dev/null || true } # Register cleanup function trap cleanup_bootloader_on_exit EXIT # --- END OF SCRIPTLET: 07-bootloader.sh --- # ============================================================================ # System Initialization and Path Management # ============================================================================ # System Initialization and Path Management for apt-layer # This scriptlet handles system initialization, path management, and directory creation # Load path configuration load_path_config() { local config_file="/usr/local/etc/apt-layer/paths.json" if [[ ! -f "$config_file" ]]; then log_error "Path configuration file not found: $config_file" "apt-layer" return 1 fi # Load configuration using jq if ! command -v jq >/dev/null 2>&1; then log_error "jq is required for path configuration loading" "apt-layer" return 1 fi # Export main directory paths export APT_LAYER_WORKSPACE=$(jq -r '.apt_layer_paths.main_directories.workspace.path' "$config_file") export APT_LAYER_LOG_DIR=$(jq -r '.apt_layer_paths.main_directories.logs.path' "$config_file") export APT_LAYER_CACHE_DIR=$(jq -r '.apt_layer_paths.main_directories.cache.path' "$config_file") # Export workspace subdirectory paths export BUILD_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.build.path' "$config_file") export LIVE_OVERLAY_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.live_overlay.path' "$config_file") export COMPOSEFS_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.composefs.path' "$config_file") export OSTREE_COMMITS_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.ostree_commits.path' "$config_file") export DEPLOYMENTS_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.deployments.path' "$config_file") export HISTORY_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.history.path' "$config_file") export BOOTLOADER_STATE_DIR=$(jq -r '.apt_layer_paths.workspace_subdirectories.bootloader.path' "$config_file") export TRANSACTION_STATE=$(jq -r '.apt_layer_paths.workspace_subdirectories.transaction_state.path' "$config_file") # Export file paths export DEPLOYMENT_DB=$(jq -r '.apt_layer_paths.files.deployment_db.path' "$config_file") export CURRENT_DEPLOYMENT_FILE=$(jq -r '.apt_layer_paths.files.current_deployment.path' "$config_file") export PENDING_DEPLOYMENT_FILE=$(jq -r '.apt_layer_paths.files.pending_deployment.path' "$config_file") export TRANSACTION_LOG=$(jq -r '.apt_layer_paths.files.transaction_log.path' "$config_file") log_debug "Path configuration loaded from: $config_file" "apt-layer" return 0 } # Initialize apt-layer system directories initialize_apt_layer_system() { log_info "Initializing apt-layer system directories..." "apt-layer" # Load path configuration if ! load_path_config; then log_error "Failed to load path configuration" "apt-layer" return 1 fi # Create main directories local main_dirs=("$APT_LAYER_WORKSPACE" "$APT_LAYER_LOG_DIR" "$APT_LAYER_CACHE_DIR") for dir in "${main_dirs[@]}"; do if [[ ! -d "$dir" ]]; then mkdir -p "$dir" chmod 755 "$dir" chown root:root "$dir" log_debug "Created directory: $dir" "apt-layer" fi done # Create workspace subdirectories local subdirs=( "$BUILD_DIR" "$LIVE_OVERLAY_DIR" "$COMPOSEFS_DIR" "$OSTREE_COMMITS_DIR" "$DEPLOYMENTS_DIR" "$HISTORY_DIR" "$BOOTLOADER_STATE_DIR" "$TRANSACTION_STATE" ) for dir in "${subdirs[@]}"; do if [[ ! 
-d "$dir" ]]; then mkdir -p "$dir" chmod 755 "$dir" chown root:root "$dir" log_debug "Created subdirectory: $dir" "apt-layer" fi done # Create live overlay subdirectories local overlay_dirs=( "$LIVE_OVERLAY_DIR/upper" "$LIVE_OVERLAY_DIR/work" "$LIVE_OVERLAY_DIR/mount" ) for dir in "${overlay_dirs[@]}"; do if [[ ! -d "$dir" ]]; then mkdir -p "$dir" chmod 700 "$dir" chown root:root "$dir" log_debug "Created overlay directory: $dir" "apt-layer" fi done # Initialize deployment database if it doesn't exist if [[ ! -f "$DEPLOYMENT_DB" ]]; then echo '{"deployments": {}, "current": null, "history": []}' > "$DEPLOYMENT_DB" chmod 644 "$DEPLOYMENT_DB" chown root:root "$DEPLOYMENT_DB" log_debug "Initialized deployment database: $DEPLOYMENT_DB" "apt-layer" fi log_success "apt-layer system directories initialized" "apt-layer" return 0 } # Reinitialize apt-layer system (force recreation) reinitialize_apt_layer_system() { log_info "Reinitializing apt-layer system (force recreation)..." "apt-layer" # Load path configuration if ! load_path_config; then log_error "Failed to load path configuration" "apt-layer" return 1 fi # Remove existing directories local dirs_to_remove=( "$APT_LAYER_WORKSPACE" "$APT_LAYER_LOG_DIR" "$APT_LAYER_CACHE_DIR" ) for dir in "${dirs_to_remove[@]}"; do if [[ -d "$dir" ]]; then rm -rf "$dir" log_debug "Removed directory: $dir" "apt-layer" fi done # Reinitialize if initialize_apt_layer_system; then log_success "apt-layer system reinitialized successfully" "apt-layer" return 0 else log_error "Failed to reinitialize apt-layer system" "apt-layer" return 1 fi } # Remove apt-layer system (cleanup) remove_apt_layer_system() { log_info "Removing apt-layer system (cleanup)..." "apt-layer" # Load path configuration if ! load_path_config; then log_error "Failed to load path configuration" "apt-layer" return 1 fi # Stop any running live overlay if is_live_overlay_active; then log_warning "Live overlay is active, stopping it first..." "apt-layer" stop_live_overlay fi # Remove all apt-layer directories local dirs_to_remove=( "$APT_LAYER_WORKSPACE" "$APT_LAYER_LOG_DIR" "$APT_LAYER_CACHE_DIR" ) for dir in "${dirs_to_remove[@]}"; do if [[ -d "$dir" ]]; then rm -rf "$dir" log_debug "Removed directory: $dir" "apt-layer" fi done log_success "apt-layer system removed successfully" "apt-layer" return 0 } # Show apt-layer system status show_apt_layer_system_status() { log_info "apt-layer System Status:" "apt-layer" # Load path configuration if ! 
load_path_config; then log_error "Failed to load path configuration" "apt-layer" return 1 fi echo "=== Main Directories ===" local -A main_dirs=( ["Workspace"]="$APT_LAYER_WORKSPACE" ["Logs"]="$APT_LAYER_LOG_DIR" ["Cache"]="$APT_LAYER_CACHE_DIR" ) for name in "${!main_dirs[@]}"; do local dir="${main_dirs[$name]}" if [[ -d "$dir" ]]; then local size=$(du -sh "$dir" 2>/dev/null | cut -f1) local perms=$(stat -c "%a" "$dir" 2>/dev/null) echo "✅ $name: $dir ($size, perms: $perms)" else echo "❌ $name: $dir (not found)" fi done echo "" echo "=== Workspace Subdirectories ===" local -A subdirs=( ["Build"]="$BUILD_DIR" ["Live Overlay"]="$LIVE_OVERLAY_DIR" ["ComposeFS"]="$COMPOSEFS_DIR" ["OSTree Commits"]="$OSTREE_COMMITS_DIR" ["Deployments"]="$DEPLOYMENTS_DIR" ["History"]="$HISTORY_DIR" ["Bootloader"]="$BOOTLOADER_STATE_DIR" ["Transaction State"]="$TRANSACTION_STATE" ) for name in "${!subdirs[@]}"; do local dir="${subdirs[$name]}" if [[ -d "$dir" ]]; then local count=$(find "$dir" -maxdepth 1 -type f 2>/dev/null | wc -l) echo "✅ $name: $dir ($count files)" else echo "❌ $name: $dir (not found)" fi done echo "" echo "=== System Files ===" local -A files=( ["Deployment DB"]="$DEPLOYMENT_DB" ["Current Deployment"]="$CURRENT_DEPLOYMENT_FILE" ["Pending Deployment"]="$PENDING_DEPLOYMENT_FILE" ["Transaction Log"]="$TRANSACTION_LOG" ) for name in "${!files[@]}"; do local file="${files[$name]}" if [[ -f "$file" ]]; then local size=$(stat -c "%s" "$file" 2>/dev/null) echo "✅ $name: $file ($size bytes)" else echo "❌ $name: $file (not found)" fi done echo "" echo "=== Live Overlay Status ===" if is_live_overlay_active; then echo "🟡 Live overlay is ACTIVE" echo " Mount point: $LIVE_OVERLAY_DIR/mount" echo " Upper dir: $LIVE_OVERLAY_DIR/upper" echo " Work dir: $LIVE_OVERLAY_DIR/work" else echo "🟢 Live overlay is INACTIVE" fi return 0 } # Validate path configuration validate_path_config() { log_debug "Validating path configuration..." "apt-layer" local config_file="/usr/local/etc/apt-layer/paths.json" if [[ ! -f "$config_file" ]]; then log_error "Path configuration file not found: $config_file" "apt-layer" return 1 fi # Validate JSON syntax if ! jq empty "$config_file" 2>/dev/null; then log_error "Invalid JSON in path configuration file" "apt-layer" return 1 fi # Validate required paths exist in config local required_paths=( ".apt_layer_paths.main_directories.workspace.path" ".apt_layer_paths.main_directories.logs.path" ".apt_layer_paths.main_directories.cache.path" ) for path in "${required_paths[@]}"; do if ! jq -e "$path" "$config_file" >/dev/null 2>&1; then log_error "Required path not found in config: $path" "apt-layer" return 1 fi done log_debug "Path configuration validation passed" "apt-layer" return 0 } # Handle system initialization commands handle_system_init_commands() { case "$1" in "--init") if initialize_apt_layer_system; then log_success "apt-layer system initialized successfully" "apt-layer" return 0 else log_error "Failed to initialize apt-layer system" "apt-layer" return 1 fi ;; "--reinit") if reinitialize_apt_layer_system; then log_success "apt-layer system reinitialized successfully" "apt-layer" return 0 else log_error "Failed to reinitialize apt-layer system" "apt-layer" return 1 fi ;; "--rm-init") if remove_apt_layer_system; then log_success "apt-layer system removed successfully" "apt-layer" return 0 else log_error "Failed to remove apt-layer system" "apt-layer" return 1 fi ;; "--status") show_apt_layer_system_status return $?
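# Usage sketch for the initialization entry points handled above (flags taken from
# handle_system_init_commands; these operate under /var/lib and /var/log, so they are
# typically run as root):
#   apt-layer --init      # create workspace, log, cache and overlay directories
#   apt-layer --reinit    # wipe and recreate them
#   apt-layer --rm-init   # remove the apt-layer state entirely
#   apt-layer --status    # print the directory/file/overlay report shown above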
;; *) return 1 ;; esac } # --- END OF SCRIPTLET: 08-system-init.sh --- # ============================================================================ # Atomic Deployment System # ============================================================================ # Atomic deployment system for Ubuntu uBlue apt-layer Tool # Implements commit-based state management and true system upgrades (not package upgrades) # Atomic deployment state management DEPLOYMENT_DB="/var/lib/particle-os/deployments.json" CURRENT_DEPLOYMENT_FILE="/var/lib/particle-os/current-deployment" PENDING_DEPLOYMENT_FILE="/var/lib/particle-os/pending-deployment" DEPLOYMENT_HISTORY_DIR="/var/lib/particle-os/history" # Initialize deployment database init_deployment_db() { log_info "Initializing atomic deployment database..." "apt-layer" # Ensure directories exist with proper permissions mkdir -p "$DEPLOYMENT_HISTORY_DIR" 2>/dev/null || { log_error "Failed to create deployment history directory: $DEPLOYMENT_HISTORY_DIR" "apt-layer" return 1 } # Create deployment database if it doesn't exist if [[ ! -f "$DEPLOYMENT_DB" ]]; then cat > "$DEPLOYMENT_DB" << EOF { "deployments": {}, "current_deployment": null, "pending_deployment": null, "deployment_counter": 0, "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" } EOF if [[ $? -eq 0 ]]; then log_success "Deployment database initialized" "apt-layer" else log_error "Failed to create deployment database: $DEPLOYMENT_DB" "apt-layer" return 1 fi fi # Ensure deployment files exist with proper error handling touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { log_warning "Failed to create current deployment file, attempting with sudo..." "apt-layer" sudo touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { log_error "Failed to create current deployment file: $CURRENT_DEPLOYMENT_FILE" "apt-layer" return 1 } } touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { log_warning "Failed to create pending deployment file, attempting with sudo..."
"apt-layer" sudo touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { log_error "Failed to create pending deployment file: $PENDING_DEPLOYMENT_FILE" "apt-layer" return 1 } } log_success "Deployment database initialization completed" "apt-layer" } # Create a new deployment commit create_deployment_commit() { local base_image="$1" local layers=("${@:2}") local commit_message="${COMMIT_MESSAGE:-System update}" local commit_id="commit-$(date +%Y%m%d-%H%M%S)-$$" local commit_data log_info "Creating deployment commit: $commit_id" "apt-layer" # Create commit metadata with proper variable expansion local layers_json="[" for i in "${!layers[@]}"; do if [[ $i -gt 0 ]]; then layers_json+="," fi layers_json+="\"${layers[$i]}\"" done layers_json+="]" commit_data=$(cat << EOF { "commit_id": "$commit_id", "base_image": "$base_image", "layers": $layers_json, "commit_message": "$commit_message", "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "parent_commit": "$(get_current_deployment)", "composefs_image": "${commit_id}.composefs" } EOF ) # Add to deployment database jq --arg commit_id "$commit_id" \ --arg base_image "$base_image" \ --arg layers_json "$layers_json" \ --arg commit_message "$commit_message" \ --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ --arg parent_commit "$(get_current_deployment)" \ --arg composefs_image "${commit_id}.composefs" \ '.deployments[$commit_id] = { "commit_id": $commit_id, "base_image": $base_image, "layers": ($layers_json | fromjson), "commit_message": $commit_message, "created": $created, "parent_commit": $parent_commit, "composefs_image": $composefs_image } | .deployment_counter += 1' \ "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" # Create deployment history file echo "$commit_data" > "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" log_success "Deployment commit created: $commit_id" "apt-layer" echo "$commit_id" } # Get current deployment get_current_deployment() { if [[ -f "$CURRENT_DEPLOYMENT_FILE" ]]; then cat "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || echo "" else echo "" fi } # Get pending deployment get_pending_deployment() { if [[ -f "$PENDING_DEPLOYMENT_FILE" ]]; then cat "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || echo "" else echo "" fi } # Set current deployment set_current_deployment() { local commit_id="$1" echo "$commit_id" > "$CURRENT_DEPLOYMENT_FILE" # Update deployment database jq --arg commit_id "$commit_id" '.current_deployment = $commit_id' \ "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" log_info "Current deployment set to: $commit_id" "apt-layer" } # Set pending deployment set_pending_deployment() { local commit_id="$1" echo "$commit_id" > "$PENDING_DEPLOYMENT_FILE" # Update deployment database jq --arg commit_id "$commit_id" '.pending_deployment = $commit_id' \ "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" log_info "Pending deployment set to: $commit_id" "apt-layer" } # Clear pending deployment clear_pending_deployment() { echo "" > "$PENDING_DEPLOYMENT_FILE" # Update deployment database jq '.pending_deployment = null' \ "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" log_info "Pending deployment cleared" "apt-layer" } # Atomic deployment function atomic_deploy() { local commit_id="$1" local deployment_dir="/var/lib/particle-os/deployments/${commit_id}" local boot_dir="/boot/loader/entries" log_info "Performing atomic deployment: $commit_id" "apt-layer" # Validate commit exists if ! 
jq -e ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then log_error "Commit not found: $commit_id" "apt-layer" return 1 fi # Get commit data local commit_data commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") local composefs_image composefs_image=$(echo "$commit_data" | jq -r '.composefs_image') # Create deployment directory mkdir -p "$deployment_dir" # Mount the ComposeFS image if ! composefs_mount "$composefs_image" "$deployment_dir"; then log_error "Failed to mount ComposeFS image for deployment" "apt-layer" return 1 fi # Apply kernel arguments to deployment apply_kernel_args_to_deployment "$commit_id" # Create bootloader entry create_bootloader_entry "$commit_id" "$deployment_dir" # Set as pending deployment (will activate on next boot) set_pending_deployment "$commit_id" log_success "Atomic deployment prepared: $commit_id" "apt-layer" log_info "Reboot to activate deployment" "apt-layer" return 0 } # True system upgrade (not package upgrade) system_upgrade() { local new_base_image="${1:-}" local current_layers=() log_info "Performing true system upgrade..." "apt-layer" # Get current deployment local current_commit current_commit=$(get_current_deployment) if [[ -n "$current_commit" ]]; then # Get current layers from deployment current_layers=($(jq -r ".deployments[\"$current_commit\"].layers[]" "$DEPLOYMENT_DB" 2>/dev/null || true)) log_info "Current layers: ${current_layers[*]}" "apt-layer" fi # If no new base specified, try to find one if [[ -z "$new_base_image" ]]; then new_base_image=$(find_newer_base_image) if [[ -z "$new_base_image" ]]; then log_info "No newer base image found" "apt-layer" return 0 fi fi log_info "Upgrading to base image: $new_base_image" "apt-layer" # Rebase existing layers on new base local rebased_layers=() for layer in "${current_layers[@]}"; do local new_layer="${layer}-rebased-$(date +%Y%m%d)" log_info "Rebasing layer: $layer -> $new_layer" "apt-layer" if "$0" --rebase "$layer" "$new_base_image" "$new_layer"; then rebased_layers+=("$new_layer") else log_error "Failed to rebase layer: $layer" "apt-layer" return 1 fi done # Create new deployment commit local commit_id commit_id=$(create_deployment_commit "$new_base_image" "${rebased_layers[@]}") # Perform atomic deployment if atomic_deploy "$commit_id"; then log_success "System upgrade completed successfully" "apt-layer" return 0 else log_error "System upgrade failed" "apt-layer" return 1 fi } # Find newer base image find_newer_base_image() { local current_base current_base=$(jq -r ".deployments[\"$(get_current_deployment)\"].base_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") if [[ -z "$current_base" ]]; then log_warning "No current base image found" "apt-layer" return 1 fi # List available base images and find newer ones local available_bases available_bases=($(composefs_list_images | grep "^particle-os/base/" | sort -V)) for base in "${available_bases[@]}"; do if [[ "$base" > "$current_base" ]]; then echo "$base" return 0 fi done return 1 } # Create bootloader entry create_bootloader_entry() { local commit_id="$1" local deployment_dir="$2" log_info "Creating bootloader entry for: $commit_id" "apt-layer" # Initialize bootloader system init_bootloader_on_startup # Create bootloader entry using the comprehensive bootloader system if create_bootloader_entry "$commit_id" "$deployment_dir" "Ubuntu uBlue ($commit_id)"; then log_success "Bootloader entry created for: $commit_id" "apt-layer" return 0 else log_error "Failed to create bootloader entry for: $commit_id" "apt-layer" 
return 1 fi } # Show atomic deployment status atomic_status() { local current_deployment current_deployment=$(get_current_deployment) local pending_deployment pending_deployment=$(get_pending_deployment) echo "=== Atomic Deployment Status ===" echo "Current Deployment: ${current_deployment:-none}" echo "Pending Deployment: ${pending_deployment:-none}" if [[ -n "$current_deployment" ]]; then local commit_data commit_data=$(jq -r ".deployments[\"$current_deployment\"]" "$DEPLOYMENT_DB" 2>/dev/null || echo "{}") if [[ "$commit_data" != "{}" ]]; then echo "Deployment Type: $(echo "$commit_data" | jq -r '.commit_message')" echo "Base Image: $(echo "$commit_data" | jq -r '.base_image')" echo "Created: $(echo "$commit_data" | jq -r '.created')" echo "Layers: $(echo "$commit_data" | jq -r '.layers | join(", ")')" fi fi if [[ -n "$pending_deployment" ]]; then echo "Pending deployment will activate on next boot" fi } # List all deployments list_deployments() { echo "=== Deployment History ===" local deployments deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r)) for commit_id in "${deployments[@]}"; do local commit_data commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") local status="" if [[ "$commit_id" == "$(get_current_deployment)" ]]; then status=" [CURRENT]" elif [[ "$commit_id" == "$(get_pending_deployment)" ]]; then status=" [PENDING]" fi echo "$commit_id$status" echo " Message: $(echo "$commit_data" | jq -r '.commit_message')" echo " Created: $(echo "$commit_data" | jq -r '.created')" echo " Base: $(echo "$commit_data" | jq -r '.base_image')" echo "" done } # Rollback to specific commit commit_rollback() { local target_commit="$1" log_info "Rolling back to commit: $target_commit" "apt-layer" # Validate target commit exists if ! jq -e ".deployments[\"$target_commit\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then log_error "Target commit not found: $target_commit" "apt-layer" return 1 fi # Perform atomic deployment to target commit if atomic_deploy "$target_commit"; then log_success "Rollback prepared to: $target_commit" "apt-layer" log_info "Reboot to activate rollback" "apt-layer" return 0 else log_error "Rollback failed" "apt-layer" return 1 fi } # --- END OF SCRIPTLET: 09-atomic-deployment.sh --- # ============================================================================ # rpm-ostree Compatibility Layer # ============================================================================ # rpm-ostree compatibility layer for Ubuntu uBlue apt-layer Tool # Provides 1:1 command compatibility with rpm-ostree # rpm-ostree install compatibility rpm_ostree_install() { local packages=("$@") log_info "rpm-ostree install compatibility: ${packages[*]}" "apt-layer" # Use live overlay for package installation if ! live_install "${packages[@]}"; then log_error "rpm-ostree install failed" "apt-layer" return 1 fi log_success "rpm-ostree install completed successfully" "apt-layer" return 0 } # rpm-ostree upgrade compatibility rpm_ostree_upgrade() { log_info "rpm-ostree upgrade compatibility" "apt-layer" # Use true system upgrade (not package upgrade) if ! system_upgrade; then log_error "rpm-ostree upgrade failed" "apt-layer" return 1 fi log_success "rpm-ostree upgrade completed successfully" "apt-layer" return 0 } # rpm-ostree rebase compatibility rpm_ostree_rebase() { local new_base="$1" log_info "rpm-ostree rebase compatibility: $new_base" "apt-layer" # Use intelligent rebase with conflict resolution if !
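# Usage sketch (hypothetical commit ID): rolling back to an earlier deployment with the
# helper defined above; the change only takes effect after a reboot, exactly like a
# regular atomic_deploy:
#   commit_rollback "commit-20250710-090000-3321"
#   # -> "Rollback prepared to: commit-20250710-090000-3321", then reboot to activate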
intelligent_rebase "$new_base"; then log_error "rpm-ostree rebase failed" "apt-layer" return 1 fi log_success "rpm-ostree rebase completed successfully" "apt-layer" return 0 } # rpm-ostree rollback compatibility rpm_ostree_rollback() { local target_commit="${1:-}" log_info "rpm-ostree rollback compatibility: ${target_commit:-latest}" "apt-layer" if [[ -z "$target_commit" ]]; then # Rollback to previous deployment target_commit=$(get_previous_deployment) if [[ -z "$target_commit" ]]; then log_error "No previous deployment found for rollback" "apt-layer" return 1 fi fi # Use commit-based rollback if ! commit_rollback "$target_commit"; then log_error "rpm-ostree rollback failed" "apt-layer" return 1 fi log_success "rpm-ostree rollback completed successfully" "apt-layer" return 0 } # rpm-ostree status compatibility rpm_ostree_status() { log_info "rpm-ostree status compatibility" "apt-layer" # Show atomic deployment status atomic_status # Show live overlay status echo "" echo "=== Live Overlay Status ===" get_live_overlay_status # Show package diff if pending deployment local pending_deployment pending_deployment=$(get_pending_deployment) if [[ -n "$pending_deployment" ]]; then echo "" echo "=== Pending Changes ===" show_package_diff "$(get_current_deployment)" "$pending_deployment" fi } # rpm-ostree diff compatibility rpm_ostree_diff() { local from_commit="${1:-}" local to_commit="${2:-}" log_info "rpm-ostree diff compatibility: $from_commit -> $to_commit" "apt-layer" # If no commits specified, compare current to pending if [[ -z "$from_commit" ]]; then from_commit=$(get_current_deployment) fi if [[ -z "$to_commit" ]]; then to_commit=$(get_pending_deployment) if [[ -z "$to_commit" ]]; then log_error "No target commit specified and no pending deployment" "apt-layer" return 1 fi fi # Show package-level diff show_package_diff "$from_commit" "$to_commit" } # rpm-ostree db list compatibility rpm_ostree_db_list() { log_info "rpm-ostree db list compatibility" "apt-layer" # List all deployments list_deployments } # rpm-ostree db diff compatibility rpm_ostree_db_diff() { local from_commit="${1:-}" local to_commit="${2:-}" log_info "rpm-ostree db diff compatibility: $from_commit -> $to_commit" "apt-layer" # If no commits specified, compare current to pending if [[ -z "$from_commit" ]]; then from_commit=$(get_current_deployment) fi if [[ -z "$to_commit" ]]; then to_commit=$(get_pending_deployment) if [[ -z "$to_commit" ]]; then log_error "No target commit specified and no pending deployment" "apt-layer" return 1 fi fi # Show detailed package diff show_detailed_package_diff "$from_commit" "$to_commit" } # rpm-ostree cleanup compatibility rpm_ostree_cleanup() { local purge="${1:-}" log_info "rpm-ostree cleanup compatibility: purge=$purge" "apt-layer" # Clean up old deployments cleanup_old_deployments # Clean up old ComposeFS images cleanup_old_composefs_images if [[ "$purge" == "--purge" ]]; then # Also clean up old bootloader entries cleanup_old_bootloader_entries fi log_success "rpm-ostree cleanup completed successfully" "apt-layer" } # rpm-ostree cancel compatibility rpm_ostree_cancel() { log_info "rpm-ostree cancel compatibility" "apt-layer" # Clear pending deployment clear_pending_deployment # Clean up live overlay stop_live_overlay log_success "rpm-ostree cancel completed successfully" "apt-layer" } # rpm-ostree initramfs compatibility rpm_ostree_initramfs() { local action="${1:-}" log_info "rpm-ostree initramfs compatibility: $action" "apt-layer" case "$action" in --enable) enable_initramfs_rebuild 
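# Summary sketch of the rpm-ostree compatibility shims defined above and the apt-layer
# backends they call (behavior as implemented here, not a claim about rpm-ostree itself):
#   rpm_ostree_install  <pkgs>    -> live_install in the live overlay
#   rpm_ostree_upgrade            -> system_upgrade (rebases layers onto a newer base image)
#   rpm_ostree_rebase   <base>    -> intelligent_rebase
#   rpm_ostree_rollback [commit]  -> commit_rollback (defaults to get_previous_deployment)
#   rpm_ostree_cleanup  [--purge] -> cleanup_old_deployments + cleanup_old_composefs_images
#                                    (--purge also removes stale bootloader entries)
#   rpm_ostree_cancel             -> clear_pending_deployment + stop_live_overlay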
;; --disable) disable_initramfs_rebuild ;; --rebuild) rebuild_initramfs ;; *) log_error "Invalid initramfs action: $action" "apt-layer" return 1 ;; esac } # rpm-ostree kargs compatibility rpm_ostree_kargs() { local action="${1:-}" shift log_info "rpm-ostree kargs compatibility: $action" "apt-layer" case "$action" in --get) get_kernel_args ;; --set) set_kernel_args "$@" ;; --append) append_kernel_args "$@" ;; --delete) delete_kernel_args "$@" ;; --reset) reset_kernel_args ;; *) log_error "Invalid kargs action: $action" "apt-layer" return 1 ;; esac } # rpm-ostree usroverlay compatibility rpm_ostree_usroverlay() { local action="${1:-}" log_info "rpm-ostree usroverlay compatibility: $action" "apt-layer" case "$action" in --mount) mount_usr_overlay ;; --unmount) unmount_usr_overlay ;; --status) usr_overlay_status ;; *) log_error "Invalid usroverlay action: $action" "apt-layer" return 1 ;; esac } # rpm-ostree composefs compatibility rpm_ostree_composefs() { local action="${1:-}" shift log_info "rpm-ostree composefs compatibility: $action" "apt-layer" case "$action" in --mount) composefs_mount "$@" ;; --unmount) composefs_unmount "$@" ;; --list) composefs_list_images ;; --info) composefs_image_info "$@" ;; *) log_error "Invalid composefs action: $action" "apt-layer" return 1 ;; esac } # Helper functions for rpm-ostree compatibility # Get previous deployment get_previous_deployment() { local current_deployment current_deployment=$(get_current_deployment) if [[ -n "$current_deployment" ]]; then local parent_commit parent_commit=$(jq -r ".deployments[\"$current_deployment\"].parent_commit" "$DEPLOYMENT_DB" 2>/dev/null || echo "") echo "$parent_commit" fi } # Show package diff between commits show_package_diff() { local from_commit="$1" local to_commit="$2" log_info "Showing package diff: $from_commit -> $to_commit" "apt-layer" # Get package lists from both commits local from_packages=() local to_packages=() if [[ -n "$from_commit" ]]; then from_packages=($(get_packages_from_commit "$from_commit")) fi if [[ -n "$to_commit" ]]; then to_packages=($(get_packages_from_commit "$to_commit")) fi # Calculate differences local added_packages=() local removed_packages=() local updated_packages=() # Find added packages for pkg in "${to_packages[@]}"; do if [[ ! " ${from_packages[*]} " =~ " ${pkg} " ]]; then added_packages+=("$pkg") fi done # Find removed packages for pkg in "${from_packages[@]}"; do if [[ !
" ${to_packages[*]} " =~ " ${pkg} " ]]; then removed_packages+=("$pkg") fi done # Show results if [[ ${#added_packages[@]} -gt 0 ]]; then echo "Added packages:" printf " %s\n" "${added_packages[@]}" fi if [[ ${#removed_packages[@]} -gt 0 ]]; then echo "Removed packages:" printf " %s\n" "${removed_packages[@]}" fi if [[ ${#added_packages[@]} -eq 0 ]] && [[ ${#removed_packages[@]} -eq 0 ]]; then echo "No package changes detected" fi } # Get packages from commit get_packages_from_commit() { local commit_id="$1" local composefs_image # Get ComposeFS image name composefs_image=$(jq -r ".deployments[\"$commit_id\"].composefs_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") if [[ -z "$composefs_image" ]]; then return 1 fi # Mount and extract package list local temp_mount="/tmp/apt-layer-commit-$$" mkdir -p "$temp_mount" if composefs_mount "$composefs_image" "$temp_mount"; then # Extract package list chroot "$temp_mount" dpkg -l | grep '^ii' | awk '{print $2}' 2>/dev/null || true # Cleanup composefs_unmount "$temp_mount" rmdir "$temp_mount" fi } # Cleanup functions cleanup_old_deployments() { log_info "Cleaning up old deployments..." "apt-layer" # Keep last 5 deployments local deployments deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r | tail -n +6)) for commit_id in "${deployments[@]}"; do log_info "Removing old deployment: $commit_id" "apt-layer" # Remove from database jq --arg commit_id "$commit_id" 'del(.deployments[$commit_id])' \ "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" # Remove history file rm -f "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" # Remove deployment directory rm -rf "/var/lib/particle-os/deployments/$commit_id" done } cleanup_old_composefs_images() { log_info "Cleaning up old ComposeFS images..." "apt-layer" # Get list of images still referenced by deployments local referenced_images referenced_images=($(jq -r '.deployments[].composefs_image' "$DEPLOYMENT_DB" 2>/dev/null || true)) # Get all ComposeFS images local all_images all_images=($(composefs_list_images)) # Remove unreferenced images for image in "${all_images[@]}"; do if [[ ! " ${referenced_images[*]} " =~ " ${image} " ]]; then log_info "Removing unreferenced image: $image" "apt-layer" composefs_remove_image "$image" fi done } cleanup_old_bootloader_entries() { log_info "Cleaning up old bootloader entries..." 
"apt-layer" # Get current and pending deployments local current_deployment current_deployment=$(get_current_deployment) local pending_deployment pending_deployment=$(get_pending_deployment) # Remove old bootloader entries local boot_dir="/boot/loader/entries" for entry in "$boot_dir"/apt-layer-*.conf; do if [[ -f "$entry" ]]; then local commit_id commit_id=$(basename "$entry" .conf | sed 's/apt-layer-//') # Keep current and pending deployments if [[ "$commit_id" != "$current_deployment" ]] && [[ "$commit_id" != "$pending_deployment" ]]; then log_info "Removing old bootloader entry: $entry" "apt-layer" rm -f "$entry" fi fi done } # --- END OF SCRIPTLET: 10-rpm-ostree-compat.sh --- # ============================================================================ # OSTree Atomic Package Management # ============================================================================ # OSTree Atomic Package Management - Implementation for apt-layer ostree_compose_install() { local packages=("$@") # Validate input if [[ ${#packages[@]} -eq 0 ]]; then log_error "No packages specified for installation" "apt-layer" log_info "Usage: apt-layer ostree compose install [...]" "apt-layer" return 1 fi log_info "[OSTree] Installing packages and creating atomic commit: ${packages[*]}" "apt-layer" # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for OSTree compose install" "apt-layer" return 1 fi # Initialize workspace if needed if ! init_workspace; then log_error "Failed to initialize workspace" "apt-layer" return 1 fi # Start live overlay if not active if ! is_live_overlay_active; then log_info "[OSTree] Starting live overlay for package installation" "apt-layer" if ! start_live_overlay; then log_error "Failed to start live overlay" "apt-layer" return 1 fi fi # Determine if .deb files or package names local has_deb_files=false for pkg in "${packages[@]}"; do if [[ "$pkg" == *.deb ]] || [[ "$pkg" == */*.deb ]]; then has_deb_files=true break fi done # Install packages in live overlay log_info "[OSTree] Installing packages in live overlay" "apt-layer" if [[ "$has_deb_files" == "true" ]]; then log_info "[OSTree] Detected .deb files, using live_dpkg_install" "apt-layer" if ! live_dpkg_install "${packages[@]}"; then log_error "Failed to install .deb packages in overlay" "apt-layer" return 1 fi else log_info "[OSTree] Detected package names, using live_install" "apt-layer" if ! 
live_install "${packages[@]}"; then log_error "Failed to install packages in overlay" "apt-layer" return 1 fi fi # Create OSTree-style commit local commit_message="Install packages: ${packages[*]}" local commit_id="ostree-$(date +%Y%m%d-%H%M%S)-$$" log_info "[OSTree] Creating atomic commit: $commit_id" "apt-layer" # Create simple commit metadata (avoid complex JSON escaping) local packages_json="[" for i in "${!packages[@]}"; do if [[ $i -gt 0 ]]; then packages_json+="," fi packages_json+="\"${packages[$i]}\"" done packages_json+="]" local commit_data commit_data=$(cat << EOF { "commit_id": "$commit_id", "type": "ostree_compose", "action": "install", "packages": $packages_json, "parent_commit": "$(get_current_deployment)", "commit_message": "Install packages: $(IFS=' '; echo "${packages[*]}")", "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "composefs_image": "${commit_id}.composefs" } EOF ) # Save commit metadata (for log/history) local commit_log_dir="/var/lib/particle-os/ostree-commits" mkdir -p "$commit_log_dir" echo "$commit_data" > "$commit_log_dir/$commit_id.json" # Commit live overlay changes as new layer log_info "[OSTree] Committing overlay changes as OSTree layer" "apt-layer" if ! commit_live_overlay "$commit_message"; then log_error "Failed to commit overlay changes" "apt-layer" return 1 fi # Get the created layer name (from commit_live_overlay) local layer_name="live-overlay-commit-$(date +%Y%m%d_%H%M%S)" # Create OSTree deployment commit log_info "[OSTree] Creating deployment commit with layer: $layer_name" "apt-layer" local deployment_commit_id deployment_commit_id=$(create_deployment_commit "ostree-base" "$layer_name") # Set as pending deployment (atomic) set_pending_deployment "$deployment_commit_id" log_success "[OSTree] Atomic commit created successfully: $deployment_commit_id" "apt-layer" log_info "[OSTree] Commit includes packages: ${packages[*]}" "apt-layer" log_info "[OSTree] Reboot to activate the new deployment" "apt-layer" return 0 } ostree_compose_remove() { local packages=("$@") # Validate input if [[ ${#packages[@]} -eq 0 ]]; then log_error "No packages specified for removal" "apt-layer" log_info "Usage: apt-layer ostree compose remove [...]" "apt-layer" return 1 fi log_info "[OSTree] Removing packages and creating atomic commit: ${packages[*]}" "apt-layer" # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for OSTree compose remove" "apt-layer" return 1 fi # Initialize workspace if needed if ! init_workspace; then log_error "Failed to initialize workspace" "apt-layer" return 1 fi # Start live overlay if not active if ! is_live_overlay_active; then log_info "[OSTree] Starting live overlay for package removal" "apt-layer" if ! start_live_overlay; then log_error "Failed to start live overlay" "apt-layer" return 1 fi fi # Remove packages in live overlay log_info "[OSTree] Removing packages in live overlay" "apt-layer" if ! 
live_remove "${packages[@]}"; then log_error "Failed to remove packages in overlay" "apt-layer" return 1 fi # Create OSTree-style commit local commit_message="Remove packages: ${packages[*]}" local commit_id="ostree-$(date +%Y%m%d-%H%M%S)-$$" log_info "[OSTree] Creating atomic commit: $commit_id" "apt-layer" # Create simple commit metadata local packages_json="[" for i in "${!packages[@]}"; do if [[ $i -gt 0 ]]; then packages_json+="," fi packages_json+="\"${packages[$i]}\"" done packages_json+="]" local commit_data commit_data=$(cat << EOF { "commit_id": "$commit_id", "type": "ostree_compose", "action": "remove", "packages": $packages_json, "parent_commit": "$(get_current_deployment)", "commit_message": "Remove packages: $(IFS=' '; echo "${packages[*]}")", "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "composefs_image": "${commit_id}.composefs" } EOF ) # Save commit metadata (for log/history) local commit_log_dir="/var/lib/particle-os/ostree-commits" mkdir -p "$commit_log_dir" echo "$commit_data" > "$commit_log_dir/$commit_id.json" # Commit live overlay changes as new layer log_info "[OSTree] Committing overlay changes as OSTree layer" "apt-layer" if ! commit_live_overlay "$commit_message"; then log_error "Failed to commit overlay changes" "apt-layer" return 1 fi # Get the created layer name (from commit_live_overlay) local layer_name="live-overlay-commit-$(date +%Y%m%d_%H%M%S)" # Create OSTree deployment commit log_info "[OSTree] Creating deployment commit with layer: $layer_name" "apt-layer" local deployment_commit_id deployment_commit_id=$(create_deployment_commit "ostree-base" "$layer_name") # Set as pending deployment (atomic) set_pending_deployment "$deployment_commit_id" log_success "[OSTree] Atomic commit created successfully: $deployment_commit_id" "apt-layer" log_info "[OSTree] Commit includes removed packages: ${packages[*]}" "apt-layer" log_info "[OSTree] Reboot to activate the new deployment" "apt-layer" return 0 } ostree_compose_update() { local packages=("$@") # Validate input if [[ ${#packages[@]} -eq 0 ]]; then log_error "No packages specified for update" "apt-layer" log_info "Usage: apt-layer ostree compose update [package1] [...]" "apt-layer" log_info "Note: If no packages specified, updates all packages" "apt-layer" return 1 fi log_info "[OSTree] Updating packages and creating atomic commit: ${packages[*]}" "apt-layer" # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for OSTree compose update" "apt-layer" return 1 fi # Initialize workspace if needed if ! init_workspace; then log_error "Failed to initialize workspace" "apt-layer" return 1 fi # Start live overlay if not active if ! is_live_overlay_active; then log_info "[OSTree] Starting live overlay for package update" "apt-layer" if ! start_live_overlay; then log_error "Failed to start live overlay" "apt-layer" return 1 fi fi # Update packages in live overlay log_info "[OSTree] Updating packages in live overlay" "apt-layer" if ! 
live_update "${packages[@]}"; then log_error "Failed to update packages in overlay" "apt-layer" return 1 fi # Create OSTree-style commit local commit_message="Update packages: ${packages[*]}" local commit_id="ostree-$(date +%Y%m%d-%H%M%S)-$$" log_info "[OSTree] Creating atomic commit: $commit_id" "apt-layer" # Create simple commit metadata local packages_json="[" for i in "${!packages[@]}"; do if [[ $i -gt 0 ]]; then packages_json+="," fi packages_json+="\"${packages[$i]}\"" done packages_json+="]" local commit_data commit_data=$(cat << EOF { "commit_id": "$commit_id", "type": "ostree_compose", "action": "update", "packages": $packages_json, "parent_commit": "$(get_current_deployment)", "commit_message": "Update packages: $(IFS=' '; echo "${packages[*]}")", "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "composefs_image": "${commit_id}.composefs" } EOF ) # Save commit metadata (for log/history) local commit_log_dir="/var/lib/particle-os/ostree-commits" mkdir -p "$commit_log_dir" echo "$commit_data" > "$commit_log_dir/$commit_id.json" # Commit live overlay changes as new layer log_info "[OSTree] Committing overlay changes as OSTree layer" "apt-layer" if ! commit_live_overlay "$commit_message"; then log_error "Failed to commit overlay changes" "apt-layer" return 1 fi # Get the created layer name (from commit_live_overlay) local layer_name="live-overlay-commit-$(date +%Y%m%d_%H%M%S)" # Create OSTree deployment commit log_info "[OSTree] Creating deployment commit with layer: $layer_name" "apt-layer" local deployment_commit_id deployment_commit_id=$(create_deployment_commit "ostree-base" "$layer_name") # Set as pending deployment (atomic) set_pending_deployment "$deployment_commit_id" log_success "[OSTree] Atomic commit created successfully: $deployment_commit_id" "apt-layer" log_info "[OSTree] Commit includes updated packages: ${packages[*]}" "apt-layer" log_info "[OSTree] Reboot to activate the new deployment" "apt-layer" return 0 } ostree_log() { local format="${1:-full}" local limit="${2:-10}" log_info "[OSTree] Showing commit log (format: $format, limit: $limit)" "apt-layer" if [[ ! -f "$DEPLOYMENT_DB" ]]; then log_error "No deployment database found" "apt-layer" return 1 fi case "$format" in "full"|"detailed") echo "=== OSTree Commit Log ===" jq -r --arg limit "$limit" ' .deployments | to_entries | sort_by(.value.created) | reverse | .[0:($limit | tonumber)] | .[] | "Commit: " + .key + "\n" + "Message: " + (.value.commit_message // "unknown") + "\n" + "Type: " + (.value.type // "unknown") + "\n" + "Action: " + (.value.action // "unknown") + "\n" + "Created: " + (.value.created // "unknown") + "\n" + "Base: " + (.value.base_image // "unknown") + "\n" + "Layers: " + (.value.layers | join(", ") // "none") + "\n" + "Packages: " + (.value.packages | join(", ") // "none") + "\n" + "---" ' "$DEPLOYMENT_DB" 2>/dev/null || echo "No commits found" ;; "short"|"compact") echo "=== OSTree Commit Log (Compact) ===" jq -r --arg limit "$limit" ' .deployments | to_entries | sort_by(.value.created) | reverse | .[0:($limit | tonumber)] | .[] | "\(.key) - \(.value.commit_message // "unknown") (\(.value.created // "unknown"))" ' "$DEPLOYMENT_DB" 2>/dev/null || echo "No commits found" ;; "json") echo "=== OSTree Commit Log (JSON) ===" jq -r --arg limit "$limit" ' .deployments | to_entries | sort_by(.value.created) | reverse | .[0:($limit | tonumber)] | map({commit_id: .key, details: .value}) ' "$DEPLOYMENT_DB" 2>/dev/null || echo "[]" ;; *) log_error "Invalid format: $format. 
Use: full, short, or json" "apt-layer" return 1 ;; esac } ostree_diff() { local commit1="${1:-}" local commit2="${2:-}" log_info "[OSTree] Showing diff between commits" "apt-layer" if [[ ! -f "$DEPLOYMENT_DB" ]]; then log_error "No deployment database found" "apt-layer" return 1 fi # If no commits specified, show diff between current and previous if [[ -z "$commit1" ]]; then local current_deployment current_deployment=$(get_current_deployment) if [[ -z "$current_deployment" ]]; then log_error "No current deployment found" "apt-layer" return 1 fi # Get the commit before current commit1=$(jq -r --arg current "$current_deployment" ' .deployments | to_entries | sort_by(.value.created) | map(.key) | index($current) as $idx | if $idx > 0 then .[$idx - 1] else null end ' "$DEPLOYMENT_DB" 2>/dev/null) if [[ -z "$commit1" || "$commit1" == "null" ]]; then log_error "No previous commit found" "apt-layer" return 1 fi commit2="$current_deployment" log_info "[OSTree] Comparing $commit1 -> $commit2" "apt-layer" elif [[ -z "$commit2" ]]; then # If only one commit specified, compare with current local current_deployment current_deployment=$(get_current_deployment) if [[ -z "$current_deployment" ]]; then log_error "No current deployment found" "apt-layer" return 1 fi commit2="$current_deployment" fi # Validate commits exist if ! jq -e ".deployments[\"$commit1\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then log_error "Commit not found: $commit1" "apt-layer" return 1 fi if ! jq -e ".deployments[\"$commit2\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then log_error "Commit not found: $commit2" "apt-layer" return 1 fi # Get commit data local commit1_data commit1_data=$(jq -r ".deployments[\"$commit1\"]" "$DEPLOYMENT_DB") local commit2_data commit2_data=$(jq -r ".deployments[\"$commit2\"]" "$DEPLOYMENT_DB") echo "=== OSTree Diff: $commit1 -> $commit2 ===" echo "" # Compare commit messages local msg1 msg1=$(echo "$commit1_data" | jq -r '.commit_message // "unknown"') local msg2 msg2=$(echo "$commit2_data" | jq -r '.commit_message // "unknown"') echo "Commit Messages:" echo " $commit1: $msg1" echo " $commit2: $msg2" echo "" # Compare creation times local time1 time1=$(echo "$commit1_data" | jq -r '.created // "unknown"') local time2 time2=$(echo "$commit2_data" | jq -r '.created // "unknown"') echo "Creation Times:" echo " $commit1: $time1" echo " $commit2: $time2" echo "" # Compare layers local layers1 layers1=$(echo "$commit1_data" | jq -r '.layers | join(", ") // "none"') local layers2 layers2=$(echo "$commit2_data" | jq -r '.layers | join(", ") // "none"') echo "Layers:" echo " $commit1: $layers1" echo " $commit2: $layers2" echo "" # Compare packages (if available) local packages1 packages1=$(echo "$commit1_data" | jq -r '.packages | join(", ") // "none"' 2>/dev/null || echo "none") local packages2 packages2=$(echo "$commit2_data" | jq -r '.packages | join(", ") // "none"' 2>/dev/null || echo "none") echo "Packages:" echo " $commit1: $packages1" echo " $commit2: $packages2" echo "" # Show action type local action1 action1=$(echo "$commit1_data" | jq -r '.action // "unknown"') local action2 action2=$(echo "$commit2_data" | jq -r '.action // "unknown"') echo "Actions:" echo " $commit1: $action1" echo " $commit2: $action2" echo "" # Calculate time difference if [[ "$time1" != "unknown" && "$time2" != "unknown" ]]; then local time_diff time_diff=$(($(date -d "$time2" +%s) - $(date -d "$time1" +%s))) echo "Time Difference: $time_diff seconds" echo "" fi return 0 } ostree_rollback() { local target_commit="${1:-}" log_info 
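# Usage sketch for the commit inspection helpers defined above:
#   ostree_log               # full format, last 10 commits (defaults)
#   ostree_log short 5       # one-line-per-commit view, last 5
#   ostree_log json 20       # machine-readable list of up to 20 commits
#   ostree_diff              # current deployment vs. the commit before it
#   ostree_diff <commit-id>  # the named commit vs. the current deployment
# Accepted ostree_log formats are full/detailed, short/compact, and json.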
"[OSTree] Rolling back deployment" "apt-layer" # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for OSTree rollback" "apt-layer" return 1 fi # Get current deployment local current_deployment current_deployment=$(get_current_deployment) if [[ -z "$current_deployment" ]]; then log_error "No current deployment found" "apt-layer" return 1 fi # If no target specified, rollback to previous commit if [[ -z "$target_commit" ]]; then log_info "[OSTree] No target specified, rolling back to previous commit" "apt-layer" # Get the commit before current target_commit=$(jq -r --arg current "$current_deployment" ' .deployments | to_entries | sort_by(.value.created) | map(.key) | index($current) as $idx | if $idx > 0 then .[$idx - 1] else null end ' "$DEPLOYMENT_DB" 2>/dev/null) if [[ -z "$target_commit" || "$target_commit" == "null" ]]; then log_error "No previous commit found to rollback to" "apt-layer" return 1 fi log_info "[OSTree] Rolling back to: $target_commit" "apt-layer" fi # Validate target commit exists if ! jq -e ".deployments[\"$target_commit\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then log_error "Target commit not found: $target_commit" "apt-layer" return 1 fi # Create rollback commit local rollback_id="rollback-$(date +%Y%m%d-%H%M%S)-$$" local rollback_message="Rollback from $current_deployment to $target_commit" log_info "[OSTree] Creating rollback commit: $rollback_id" "apt-layer" # Get target commit data local target_data target_data=$(jq -r ".deployments[\"$target_commit\"]" "$DEPLOYMENT_DB") local base_image base_image=$(echo "$target_data" | jq -r '.base_image') local layers layers=$(echo "$target_data" | jq -r '.layers | join(" ")') # Create rollback deployment commit local rollback_commit_id rollback_commit_id=$(create_deployment_commit "$base_image" $layers) # Set as pending deployment set_pending_deployment "$rollback_commit_id" log_success "[OSTree] Rollback prepared successfully" "apt-layer" log_info "[OSTree] Rollback from: $current_deployment" "apt-layer" log_info "[OSTree] Rollback to: $target_commit" "apt-layer" log_info "[OSTree] New deployment: $rollback_commit_id" "apt-layer" log_info "[OSTree] Reboot to activate rollback" "apt-layer" return 0 } ostree_status() { log_info "[OSTree] Showing current deployment status" "apt-layer" # Get current and pending deployments local current_deployment current_deployment=$(get_current_deployment) local pending_deployment pending_deployment=$(get_pending_deployment 2>/dev/null | tail -n1) echo "=== OSTree Deployment Status ===" echo "Current Deployment: ${current_deployment:-none}" echo "Pending Deployment: ${pending_deployment:-none}" echo "" # Show recent commits (last 5) echo "=== Recent Commits ===" if [[ -f "$DEPLOYMENT_DB" ]]; then jq -r '.deployments | to_entries | sort_by(.value.created) | reverse | .[0:5] | .[] | "\(.key) - \(.value.commit_message) (\(.value.created))"' "$DEPLOYMENT_DB" 2>/dev/null || echo "No commits found" else echo "No deployment database found" fi echo "" # Show layer information for current deployment if [[ -n "$current_deployment" ]]; then echo "=== Current Deployment Details ===" local commit_data commit_data=$(jq -r ".deployments[\"$current_deployment\"]" "$DEPLOYMENT_DB" 2>/dev/null) if [[ -n "$commit_data" ]]; then echo "Base Image: $(echo "$commit_data" | jq -r '.base_image // "unknown"')" echo "Layers: $(echo "$commit_data" | jq -r '.layers | join(", ") // "none"')" echo "Created: $(echo "$commit_data" | jq -r '.created // "unknown"')" fi fi echo "" # Show 
available layers echo "=== Available Layers ===" if [[ -d "/var/lib/particle-os/build" ]]; then find /var/lib/particle-os/build -name "*.squashfs" -type f | head -10 | while read -r layer; do local size size=$(du -h "$layer" | cut -f1) local name name=$(basename "$layer") echo "$name ($size)" done else echo "No layers found" fi } ostree_cleanup() { local keep_count="${1:-5}" local dry_run="${2:-false}" log_info "[OSTree] Cleaning up old commits (keeping $keep_count)" "apt-layer" # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for OSTree cleanup" "apt-layer" return 1 fi if [[ ! -f "$DEPLOYMENT_DB" ]]; then log_error "No deployment database found" "apt-layer" return 1 fi # Get current and pending deployments (never delete these) local current_deployment current_deployment=$(get_current_deployment) local pending_deployment pending_deployment=$(get_pending_deployment) # Get commits to keep (most recent + current + pending) local keep_commits keep_commits=$(jq -r --arg keep "$keep_count" --arg current "$current_deployment" --arg pending "$pending_deployment" ' .deployments | to_entries | sort_by(.value.created) | reverse | .[0:($keep | tonumber)] | map(.key) + if $current != "" then [$current] else [] end + if $pending != "" and $pending != $current then [$pending] else [] end | unique | join(" ") ' "$DEPLOYMENT_DB" 2>/dev/null) # Get all commits local all_commits all_commits=$(jq -r '.deployments | keys | join(" ")' "$DEPLOYMENT_DB" 2>/dev/null) # Find commits to delete local to_delete=() for commit in $all_commits; do if [[ ! " $keep_commits " =~ " $commit " ]]; then to_delete+=("$commit") fi done if [[ ${#to_delete[@]} -eq 0 ]]; then log_info "[OSTree] No commits to clean up" "apt-layer" return 0 fi echo "=== OSTree Cleanup Summary ===" echo "Keeping commits: $keep_commits" echo "Commits to delete: ${to_delete[*]}" echo "Total to delete: ${#to_delete[@]}" echo "" if [[ "$dry_run" == "true" ]]; then log_info "[OSTree] Dry run - no changes made" "apt-layer" return 0 fi # Confirm deletion echo "Are you sure you want to delete these commits? (y/N)" read -r response if [[ ! "$response" =~ ^[Yy]$ ]]; then log_info "[OSTree] Cleanup cancelled" "apt-layer" return 0 fi # Delete commits local deleted_count=0 for commit in "${to_delete[@]}"; do log_info "[OSTree] Deleting commit: $commit" "apt-layer" # Remove from database jq --arg commit "$commit" 'del(.deployments[$commit])' "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" # Remove history file rm -f "$DEPLOYMENT_HISTORY_DIR/$commit.json" # Remove associated layers (if not used by other commits) local commit_data commit_data=$(jq -r ".deployments[\"$commit\"]" "$DEPLOYMENT_DB" 2>/dev/null) if [[ -n "$commit_data" ]]; then local layers layers=$(echo "$commit_data" | jq -r '.layers[]?' 
2>/dev/null) for layer in $layers; do # Check if layer is used by other commits local layer_used layer_used=$(jq -r --arg layer "$layer" ' .deployments | to_entries | map(select(.value.layers | contains([$layer]))) | length ' "$DEPLOYMENT_DB" 2>/dev/null) if [[ "$layer_used" == "0" ]]; then log_info "[OSTree] Removing unused layer: $layer" "apt-layer" rm -f "/var/lib/particle-os/build/$layer.squashfs" fi done fi ((deleted_count++)) done log_success "[OSTree] Cleanup completed: $deleted_count commits deleted" "apt-layer" return 0 } # Enhanced OSTree Atomic Workflow for apt-layer # Provides sophisticated atomic package management similar to rpm-ostree # OSTree rebase to new base image ostree_rebase() { local new_base="$1" local deployment_name="${2:-current}" log_info "OSTree rebase to: $new_base" "apt-layer" # Validate new base if ! validate_base_image "$new_base"; then log_error "Invalid base image: $new_base" "apt-layer" return 1 fi # Start transaction start_transaction "ostree-rebase-$deployment_name" # Create new deployment from base local new_deployment="$deployment_name-$(date +%Y%m%d-%H%M%S)" if [[ "$new_base" =~ ^oci:// ]]; then # Rebase to OCI image local image_name="${new_base#oci://}" if ! ostree_rebase_to_oci "$image_name" "$new_deployment"; then rollback_transaction return 1 fi else # Rebase to local ComposeFS image if ! ostree_rebase_to_composefs "$new_base" "$new_deployment"; then rollback_transaction return 1 fi fi # Deploy the new base if ! ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree rebase completed: $new_deployment" "apt-layer" return 0 } # OSTree layer packages on current deployment ostree_layer() { local packages=("$@") local deployment_name="${OSTREE_CURRENT_DEPLOYMENT:-current}" log_info "OSTree layer packages: ${packages[*]}" "apt-layer" if [[ ${#packages[@]} -eq 0 ]]; then log_error "No packages specified for layering" "apt-layer" return 1 fi # Start transaction start_transaction "ostree-layer-$deployment_name" # Create new deployment with layered packages local new_deployment="$deployment_name-layered-$(date +%Y%m%d-%H%M%S)" if ! ostree_create_layered_deployment "$deployment_name" "$new_deployment" "${packages[@]}"; then rollback_transaction return 1 fi # Deploy the layered deployment if ! ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree layer completed: $new_deployment" "apt-layer" return 0 } # OSTree override package in deployment ostree_override() { local package_name="$1" local override_path="$2" local deployment_name="${OSTREE_CURRENT_DEPLOYMENT:-current}" log_info "OSTree override package: $package_name with $override_path" "apt-layer" if [[ -z "$package_name" ]] || [[ -z "$override_path" ]]; then log_error "Package name and override path required" "apt-layer" return 1 fi if [[ ! -f "$override_path" ]]; then log_error "Override package not found: $override_path" "apt-layer" return 1 fi # Start transaction start_transaction "ostree-override-$deployment_name" # Create new deployment with package override local new_deployment="$deployment_name-override-$(date +%Y%m%d-%H%M%S)" if ! ostree_create_override_deployment "$deployment_name" "$new_deployment" "$package_name" "$override_path"; then rollback_transaction return 1 fi # Deploy the override deployment if ! 
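# Usage sketch (hypothetical package names and image reference) for the maintenance and
# layering helpers defined above:
#   ostree_cleanup 5 true    # dry run: report which commits beyond the newest 5 would go
#   ostree_cleanup 3         # interactive cleanup, keeping the 3 newest plus current/pending
#   ostree_layer vim tmux    # layer packages on the current deployment in a new transaction
#   ostree_rebase oci://<registry>/<image>   # rebase onto an OCI image
# Current and pending deployments are never deleted by ostree_cleanup, and each layered or
# rebased deployment is created inside a transaction that rolls back on failure.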
ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree override completed: $new_deployment" "apt-layer" return 0 } # OSTree deploy deployment ostree_deploy() { local deployment_name="$1" log_info "OSTree deploy: $deployment_name" "apt-layer" if [[ -z "$deployment_name" ]]; then log_error "Deployment name required" "apt-layer" return 1 fi # Validate deployment exists if ! ostree_deployment_exists "$deployment_name"; then log_error "Deployment not found: $deployment_name" "apt-layer" return 1 fi # Perform atomic deployment if ! atomic_deploy_deployment "$deployment_name"; then log_error "Failed to deploy: $deployment_name" "apt-layer" return 1 fi # Update current deployment reference OSTREE_CURRENT_DEPLOYMENT="$deployment_name" log_success "OSTree deploy completed: $deployment_name" "apt-layer" return 0 } # OSTree compose tree (declarative image building) ostree_compose_tree() { local config_file="$1" log_info "OSTree compose tree from: $config_file" "apt-layer" if [[ -z "$config_file" ]] || [[ ! -f "$config_file" ]]; then log_error "Valid configuration file required" "apt-layer" return 1 fi # Parse configuration if ! parse_compose_config "$config_file"; then log_error "Failed to parse configuration: $config_file" "apt-layer" return 1 fi # Start transaction start_transaction "ostree-compose-tree" # Build tree from configuration if ! build_tree_from_config; then rollback_transaction return 1 fi commit_transaction log_success "OSTree compose tree completed" "apt-layer" return 0 } # Helper functions for OSTree operations # Rebase to OCI image ostree_rebase_to_oci() { local image_name="$1" local deployment_name="$2" log_debug "Rebasing to OCI image: $image_name" "apt-layer" # Import OCI image as ComposeFS local composefs_image="$WORKSPACE/images/$deployment_name" if ! import_oci_image "$image_name" "$composefs_image"; then log_error "Failed to import OCI image: $image_name" "apt-layer" return 1 fi # Create deployment from ComposeFS image if ! create_deployment_from_composefs "$composefs_image" "$deployment_name"; then log_error "Failed to create deployment from ComposeFS" "apt-layer" return 1 fi return 0 } # Rebase to ComposeFS image ostree_rebase_to_composefs() { local base_image="$1" local deployment_name="$2" log_debug "Rebasing to ComposeFS image: $base_image" "apt-layer" # Validate base image exists if ! composefs_image_exists "$base_image"; then log_error "Base image not found: $base_image" "apt-layer" return 1 fi # Create deployment from base image if ! create_deployment_from_composefs "$base_image" "$deployment_name"; then log_error "Failed to create deployment from base image" "apt-layer" return 1 fi return 0 } # Create layered deployment ostree_create_layered_deployment() { local base_deployment="$1" local new_deployment="$2" shift 2 local packages=("$@") log_debug "Creating layered deployment: $base_deployment -> $new_deployment" "apt-layer" # Get base deployment path local base_path base_path=$(get_deployment_path "$base_deployment") if [[ -z "$base_path" ]]; then log_error "Base deployment not found: $base_deployment" "apt-layer" return 1 fi # Create new deployment with layered packages if ! 
create_layer_on_deployment "$base_path" "$new_deployment" "${packages[@]}"; then log_error "Failed to create layered deployment" "apt-layer" return 1 fi return 0 } # Create override deployment ostree_create_override_deployment() { local base_deployment="$1" local new_deployment="$2" local package_name="$3" local override_path="$4" log_debug "Creating override deployment: $base_deployment -> $new_deployment" "apt-layer" # Get base deployment path local base_path base_path=$(get_deployment_path "$base_deployment") if [[ -z "$base_path" ]]; then log_error "Base deployment not found: $base_deployment" "apt-layer" return 1 fi # Create new deployment with package override if ! create_override_on_deployment "$base_path" "$new_deployment" "$package_name" "$override_path"; then log_error "Failed to create override deployment" "apt-layer" return 1 fi return 0 } # Parse compose configuration parse_compose_config() { local config_file="$1" log_debug "Parsing compose configuration: $config_file" "apt-layer" # Load configuration using jq if ! command -v jq &> /dev/null; then log_error "jq required for configuration parsing" "apt-layer" return 1 fi # Parse configuration structure COMPOSE_CONFIG=$(jq -r '.' "$config_file") if [[ $? -ne 0 ]]; then log_error "Failed to parse configuration file" "apt-layer" return 1 fi # Extract configuration values COMPOSE_BASE_IMAGE=$(echo "$COMPOSE_CONFIG" | jq -r '.base-image // empty') COMPOSE_LAYERS=$(echo "$COMPOSE_CONFIG" | jq -r '.layers[]? // empty') COMPOSE_OVERRIDES=$(echo "$COMPOSE_CONFIG" | jq -r '.overrides[]? // empty') log_debug "Configuration parsed: base=$COMPOSE_BASE_IMAGE, layers=${#COMPOSE_LAYERS[@]}, overrides=${#COMPOSE_OVERRIDES[@]}" "apt-layer" return 0 } # Build tree from configuration build_tree_from_config() { log_debug "Building tree from configuration" "apt-layer" # Start with base image if [[ -n "$COMPOSE_BASE_IMAGE" ]]; then if ! ostree_rebase_to_oci "$COMPOSE_BASE_IMAGE" "compose-base"; then log_error "Failed to create base from configuration" "apt-layer" return 1 fi fi # Add layers if [[ -n "$COMPOSE_LAYERS" ]]; then local layer_packages=() while IFS= read -r package; do if [[ -n "$package" ]]; then layer_packages+=("$package") fi done <<< "$COMPOSE_LAYERS" if [[ ${#layer_packages[@]} -gt 0 ]]; then if ! ostree_layer "${layer_packages[@]}"; then log_error "Failed to add layers from configuration" "apt-layer" return 1 fi fi fi # Apply overrides if [[ -n "$COMPOSE_OVERRIDES" ]]; then while IFS= read -r override; do if [[ -n "$override" ]]; then local package_name local override_path package_name=$(echo "$override" | jq -r '.package // empty') override_path=$(echo "$override" | jq -r '.with // empty') if [[ -n "$package_name" ]] && [[ -n "$override_path" ]]; then if ! ostree_override "$package_name" "$override_path"; then log_error "Failed to apply override: $package_name" "apt-layer" return 1 fi fi fi done <<< "$COMPOSE_OVERRIDES" fi return 0 } # Enhanced package management with metadata handling # Layer package with metadata preservation ostree_layer_with_metadata() { local package="$1" local deployment_name="${OSTREE_CURRENT_DEPLOYMENT:-current}" local preserve_metadata="${2:-true}" local resolve_conflicts="${3:-keep-latest}" log_info "OSTree layer with metadata: $package" "apt-layer" # Start transaction start_transaction "ostree-layer-metadata-$deployment_name" # Create new deployment with metadata handling local new_deployment="$deployment_name-metadata-$(date +%Y%m%d-%H%M%S)" if ! 
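# Illustrative sketch (defined for illustration only, never invoked): the shape
# of a compose-tree configuration that parse_compose_config and
# build_tree_from_config above expect. The field names (.base-image, .layers[],
# .overrides[].package/.with) come from the jq queries above; the concrete
# values are hypothetical placeholders.
apt_layer_example_compose_config() {
    cat << 'EXAMPLE'
{
  "base-image": "registry.example.com/ubuntu-base:24.04",
  "layers": ["htop", "tmux", "git"],
  "overrides": [
    { "package": "openssl", "with": "/srv/debs/openssl_3.0.13-custom_amd64.deb" }
  ]
}
EXAMPLE
}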
ostree_create_metadata_aware_deployment "$deployment_name" "$new_deployment" "$package" "$preserve_metadata" "$resolve_conflicts"; then rollback_transaction return 1 fi # Deploy the new deployment if ! ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree layer with metadata completed: $new_deployment" "apt-layer" return 0 } # Multi-arch aware layering ostree_layer_multiarch() { local package="$1" local arch="${2:-amd64}" local multiarch_type="${3:-same}" local deployment_name="${OSTREE_CURRENT_DEPLOYMENT:-current}" log_info "OSTree layer multi-arch: $package ($arch, $multiarch_type)" "apt-layer" # Validate multi-arch parameters case "$multiarch_type" in same|foreign|allowed) ;; *) log_error "Invalid multi-arch type: $multiarch_type" "apt-layer" return 1 ;; esac # Start transaction start_transaction "ostree-layer-multiarch-$deployment_name" # Create new deployment with multi-arch support local new_deployment="$deployment_name-multiarch-$(date +%Y%m%d-%H%M%S)" if ! ostree_create_multiarch_deployment "$deployment_name" "$new_deployment" "$package" "$arch" "$multiarch_type"; then rollback_transaction return 1 fi # Deploy the new deployment if ! ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree layer multi-arch completed: $new_deployment" "apt-layer" return 0 } # Maintainer script handling ostree_layer_with_script_validation() { local package="$1" local script_context="${2:-offline}" local deployment_name="${OSTREE_CURRENT_DEPLOYMENT:-current}" log_info "OSTree layer with script validation: $package ($script_context)" "apt-layer" # Validate maintainer scripts if ! validate_maintainer_scripts "$package" "$script_context"; then log_error "Maintainer script validation failed for: $package" "apt-layer" return 1 fi # Start transaction start_transaction "ostree-layer-scripts-$deployment_name" # Create new deployment with script handling local new_deployment="$deployment_name-scripts-$(date +%Y%m%d-%H%M%S)" if ! ostree_create_script_aware_deployment "$deployment_name" "$new_deployment" "$package" "$script_context"; then rollback_transaction return 1 fi # Deploy the new deployment if ! ostree_deploy "$new_deployment"; then rollback_transaction return 1 fi commit_transaction log_success "OSTree layer with script validation completed: $new_deployment" "apt-layer" return 0 } # Validate maintainer scripts validate_maintainer_scripts() { local package="$1" local script_context="$2" log_debug "Validating maintainer scripts for: $package ($script_context)" "apt-layer" # Extract package and examine maintainer scripts local temp_dir temp_dir=$(mktemp -d) # Download package if ! apt-get download "$package" -o Dir::Cache="$temp_dir"; then log_error "Failed to download package for script validation: $package" "apt-layer" rm -rf "$temp_dir" return 1 fi # Extract control information local deb_file deb_file=$(find "$temp_dir" -name "*.deb" | head -1) if [[ -z "$deb_file" ]]; then log_error "No .deb file found for script validation" "apt-layer" rm -rf "$temp_dir" return 1 fi # Extract control scripts local control_dir="$temp_dir/control" mkdir -p "$control_dir" if ! 
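# Illustrative sketch (defined for illustration only, never invoked): calling
# the multi-arch and script-validation layering entry points above. Package
# names are hypothetical placeholders.
apt_layer_example_validated_layering() {
    # Layer an i386 package alongside its amd64 counterpart ("foreign" Multi-Arch)
    ostree_layer_multiarch libc6 i386 foreign
    # Fail outright if the package's maintainer scripts look problematic
    ostree_layer_with_script_validation nginx strict
    # The default context only warns about problematic scripts
    ostree_layer_with_script_validation nginx
}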
dpkg-deb -e "$deb_file" "$control_dir"; then log_error "Failed to extract control information" "apt-layer" rm -rf "$temp_dir" return 1 fi # Check for problematic scripts local problematic_scripts=() # Check for service management scripts if [[ -f "$control_dir/postinst" ]] && grep -q "systemctl" "$control_dir/postinst"; then problematic_scripts+=("postinst:systemctl") fi # Check for user interaction scripts if [[ -f "$control_dir/postinst" ]] && grep -q "debconf" "$control_dir/postinst"; then problematic_scripts+=("postinst:debconf") fi # Check for live system state dependencies if [[ -f "$control_dir/postinst" ]] && grep -q "/proc\|/sys" "$control_dir/postinst"; then problematic_scripts+=("postinst:live-state") fi # Report problematic scripts if [[ ${#problematic_scripts[@]} -gt 0 ]]; then log_warning "Problematic maintainer scripts detected in $package:" "apt-layer" for script in "${problematic_scripts[@]}"; do log_warning " - $script" "apt-layer" done if [[ "$script_context" == "strict" ]]; then log_error "Script validation failed in strict mode" "apt-layer" rm -rf "$temp_dir" return 1 fi fi # Cleanup rm -rf "$temp_dir" log_debug "Maintainer script validation passed for: $package" "apt-layer" return 0 } # --- END OF SCRIPTLET: 15-ostree-atomic.sh --- # ============================================================================ # Daemon Integration (apt-ostree.py) # ============================================================================ # ============================================================================ # Daemon Integration (apt-ostree.py) # ============================================================================ # Integration with apt-ostree.py daemon for atomic operations # Provides D-Bus client functionality for apt-layer.sh # D-Bus service and interface names APT_OSTREE_DBUS_SERVICE="org.debian.aptostree1" APT_OSTREE_DBUS_PATH="/org/debian/aptostree1/Sysroot" APT_OSTREE_DBUS_INTERFACE="org.debian.aptostree1.Sysroot" # Daemon executable path APT_OSTREE_DAEMON_PATH="/usr/local/bin/apt-ostree" APT_OSTREE_DAEMON_SERVICE="apt-ostree.service" # Check if daemon is available and running check_daemon_status() { local status="unknown" # Check if daemon executable exists if [[ ! -f "$APT_OSTREE_DAEMON_PATH" ]]; then status="not_installed" echo "$status" return fi # Check if systemd service is running if command -v systemctl >/dev/null 2>&1; then if systemctl is-active --quiet "$APT_OSTREE_DAEMON_SERVICE" 2>/dev/null; then status="running" elif systemctl is-enabled --quiet "$APT_OSTREE_DAEMON_SERVICE" 2>/dev/null; then status="enabled" else status="disabled" fi else # Fallback: check if daemon process is running if pgrep -f "apt-ostree.py" >/dev/null 2>&1; then status="running" else status="stopped" fi fi echo "$status" } # Start the daemon if not running start_daemon() { local status=$(check_daemon_status) case "$status" in "not_installed") log_error "apt-ostree daemon not installed" "apt-layer" log_info "Install the daemon first: sudo $APT_OSTREE_DAEMON_PATH --install" "apt-layer" return 1 ;; "running") log_info "Daemon is already running" "apt-layer" return 0 ;; "enabled"|"disabled") if command -v systemctl >/dev/null 2>&1; then log_info "Starting daemon via systemctl..." 
"apt-layer" if systemctl start "$APT_OSTREE_DAEMON_SERVICE"; then log_success "Daemon started successfully" "apt-layer" return 0 else log_error "Failed to start daemon via systemctl" "apt-layer" return 1 fi else log_warning "systemctl not available, attempting direct start" "apt-layer" nohup "$APT_OSTREE_DAEMON_PATH" >/dev/null 2>&1 & if [ $? -eq 0 ]; then log_success "Daemon started in background" "apt-layer" return 0 else log_error "Failed to start daemon directly" "apt-layer" return 1 fi fi ;; "stopped") log_info "Starting daemon..." "apt-layer" nohup "$APT_OSTREE_DAEMON_PATH" >/dev/null 2>&1 & if [ $? -eq 0 ]; then log_success "Daemon started in background" "apt-layer" return 0 else log_error "Failed to start daemon" "apt-layer" return 1 fi ;; *) log_error "Unknown daemon status: $status" "apt-layer" return 1 ;; esac } # Stop the daemon stop_daemon() { local status=$(check_daemon_status) case "$status" in "running") if command -v systemctl >/dev/null 2>&1; then log_info "Stopping daemon via systemctl..." "apt-layer" systemctl stop "$APT_OSTREE_DAEMON_SERVICE" else log_info "Stopping daemon process..." "apt-layer" pkill -f "apt-ostree.py" fi log_success "Daemon stopped" "apt-layer" ;; "stopped"|"disabled") log_info "Daemon is not running" "apt-layer" ;; "not_installed") log_warning "Daemon not installed" "apt-layer" ;; *) log_warning "Unknown daemon status: $status" "apt-layer" ;; esac } # Check if D-Bus is available check_dbus_available() { if ! command -v dbus-send >/dev/null 2>&1; then log_error "D-Bus client not available" "apt-layer" return 1 fi if ! dbus-send --system --dest=org.freedesktop.DBus --type=method_call --print-reply /org/freedesktop/DBus org.freedesktop.DBus.ListNames >/dev/null 2>&1; then log_error "D-Bus system bus not accessible" "apt-layer" return 1 fi return 0 } # Call D-Bus method call_dbus_method() { local method="$1" local args="${2:-}" local timeout="${3:-5000}" local object_path="${4:-$APT_OSTREE_DBUS_PATH/Sysroot}" local interface="${5:-$APT_OSTREE_DBUS_INTERFACE}" if ! check_dbus_available; then return 1 fi # Ensure daemon is running if ! start_daemon; then return 1 fi # Wait for D-Bus service to be available if ! wait_for_dbus_service 30; then log_error "D-Bus service not available" "apt-layer" return 1 fi # Call the D-Bus method local dbus_cmd="dbus-send --system --dest=$APT_OSTREE_DBUS_SERVICE --type=method_call --print-reply --reply-timeout=$timeout $object_path $interface.$method" if [[ -n "$args" ]]; then dbus_cmd="$dbus_cmd $args" fi log_debug "Calling D-Bus method: $method" "apt-layer" if eval "$dbus_cmd" 2>/dev/null; then return 0 else log_error "D-Bus method call failed: $method" "apt-layer" return 1 fi } # Call D-Bus method with timeout call_dbus_method_timeout() { local method="$1" local args="${2:-}" local timeout="${3:-30}" local object_path="${4:-$APT_OSTREE_DBUS_PATH/Sysroot}" local interface="${5:-$APT_OSTREE_DBUS_INTERFACE}" if ! check_dbus_available; then return 1 fi # Ensure daemon is running if ! start_daemon; then return 1 fi # Wait for D-Bus service to be available if ! 
wait_for_dbus_service "$timeout"; then log_error "D-Bus service not available within $timeout seconds" "apt-layer" return 1 fi # Call the D-Bus method with timeout local dbus_cmd="timeout $timeout dbus-send --system --dest=$APT_OSTREE_DBUS_SERVICE --type=method_call --print-reply $object_path $interface.$method" if [[ -n "$args" ]]; then dbus_cmd="$dbus_cmd $args" fi log_debug "Calling D-Bus method with timeout: $method" "apt-layer" if eval "$dbus_cmd" 2>/dev/null; then return 0 else log_error "D-Bus method call failed (timeout): $method" "apt-layer" return 1 fi } # Check if D-Bus service is available check_dbus_service() { if ! dbus-send --system --dest=org.freedesktop.DBus \ --type=method_call --print-reply /org/freedesktop/DBus \ org.freedesktop.DBus.NameHasOwner "string:$APT_OSTREE_DBUS_SERVICE" 2>/dev/null | grep -q "true"; then return 1 fi return 0 } # Wait for D-Bus service to be available wait_for_dbus_service() { local timeout="${1:-30}" local count=0 log_info "Waiting for D-Bus service to be available..." "apt-layer" while [[ $count -lt $timeout ]]; do if check_dbus_service; then log_info "D-Bus service is available" "apt-layer" return 0 fi sleep 1 ((count++)) done log_error "D-Bus service not available within $timeout seconds" "apt-layer" return 1 } # Get daemon status via D-Bus get_daemon_status() { call_dbus_method "GetStatus" } # Register client with daemon register_client() { local client_id="${1:-apt-layer}" local options="dict:string:id,$client_id" call_dbus_method "RegisterClient" "$options" } # Unregister client from daemon unregister_client() { call_dbus_method "UnregisterClient" "dict:" } # Get OS deployments via D-Bus get_os_deployments() { call_dbus_method "GetOS" } # Start a transaction via daemon start_daemon_transaction() { local operation="$1" local description="$2" local packages="${3:-}" # Register as client first register_client "apt-layer-$$" # Start transaction (this would need to be implemented in the daemon) # For now, we'll use a placeholder log_transaction "Starting daemon transaction: $operation - $description" "apt-layer" # Store transaction info for cleanup echo "$$:$operation:$description" > "$TRANSACTION_STATE" } # Commit a transaction via daemon commit_daemon_transaction() { local transaction_id="${1:-}" if [[ -n "$transaction_id" ]]; then log_transaction "Committing daemon transaction: $transaction_id" "apt-layer" # This would call the daemon's commit method fi # Unregister client unregister_client # Clear transaction state rm -f "$TRANSACTION_STATE" } # Rollback a transaction via daemon rollback_daemon_transaction() { local transaction_id="${1:-}" if [[ -n "$transaction_id" ]]; then log_transaction "Rolling back daemon transaction: $transaction_id" "apt-layer" # This would call the daemon's rollback method fi # Unregister client unregister_client # Clear transaction state rm -f "$TRANSACTION_STATE" } # Layer packages via daemon daemon_layer_packages() { local packages=("$@") local operation="layer" local description="Layer packages: ${packages[*]}" log_transaction "Starting daemon layer operation" "apt-layer" # Start transaction if ! 
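# Illustrative sketch (defined for illustration only, never invoked): the raw
# dbus-send call that call_dbus_method above assembles for a parameter-less
# method such as GetStatus, with the Sysroot object path passed explicitly.
apt_layer_example_dbus_call() {
    dbus-send --system \
        --dest="$APT_OSTREE_DBUS_SERVICE" \
        --type=method_call --print-reply --reply-timeout=5000 \
        "$APT_OSTREE_DBUS_PATH" \
        "$APT_OSTREE_DBUS_INTERFACE.GetStatus"
}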
start_daemon_transaction "$operation" "$description" "${packages[*]}"; then log_error "Failed to start daemon transaction" "apt-layer" return 1 fi # Perform the layer operation # This would call the daemon's PkgChange method local packages_str=$(printf "%s " "${packages[@]}") local args="array:string:$packages_str array:string: dict:" if call_dbus_method "PkgChange" "$args"; then log_success "Daemon layer operation completed" "apt-layer" commit_daemon_transaction return 0 else log_error "Daemon layer operation failed" "apt-layer" rollback_daemon_transaction return 1 fi } # Deploy via daemon daemon_deploy() { local deployment_name="$1" local revision="${2:-}" log_transaction "Starting daemon deploy operation" "apt-layer" # Start transaction if ! start_daemon_transaction "deploy" "Deploy $deployment_name"; then log_error "Failed to start daemon transaction" "apt-layer" return 1 fi # Perform the deploy operation local args="string:$revision dict:" if call_dbus_method "Deploy" "$args"; then log_success "Daemon deploy operation completed" "apt-layer" commit_daemon_transaction return 0 else log_error "Daemon deploy operation failed" "apt-layer" rollback_daemon_transaction return 1 fi } # Upgrade via daemon daemon_upgrade() { log_transaction "Starting daemon upgrade operation" "apt-layer" # Start transaction if ! start_daemon_transaction "upgrade" "System upgrade"; then log_error "Failed to start daemon transaction" "apt-layer" return 1 fi # Perform the upgrade operation if call_dbus_method "Upgrade" "dict:"; then log_success "Daemon upgrade operation completed" "apt-layer" commit_daemon_transaction return 0 else log_error "Daemon upgrade operation failed" "apt-layer" rollback_daemon_transaction return 1 fi } # Rollback via daemon daemon_rollback() { log_transaction "Starting daemon rollback operation" "apt-layer" # Start transaction if ! start_daemon_transaction "rollback" "System rollback"; then log_error "Failed to start daemon transaction" "apt-layer" return 1 fi # Perform the rollback operation if call_dbus_method "Rollback" "dict:"; then log_success "Daemon rollback operation completed" "apt-layer" commit_daemon_transaction return 0 else log_error "Daemon rollback operation failed" "apt-layer" rollback_daemon_transaction return 1 fi } # Show daemon status show_daemon_status() { local status=$(check_daemon_status) echo "apt-ostree Daemon Status:" echo " Status: $status" echo " Executable: $APT_OSTREE_DAEMON_PATH" echo " Service: $APT_OSTREE_DAEMON_SERVICE" echo " D-Bus Service: $APT_OSTREE_DBUS_SERVICE" echo " D-Bus Path: $APT_OSTREE_DBUS_PATH" if [[ "$status" == "running" ]]; then echo "" echo "D-Bus Status:" if get_daemon_status; then echo " D-Bus communication: OK" else echo " D-Bus communication: FAILED" fi echo "" echo "OS Deployments:" if get_os_deployments; then echo " Deployment list: OK" else echo " Deployment list: FAILED" fi fi } # Install daemon install_daemon() { log_info "Installing apt-ostree daemon..." "apt-layer" # Check if Python daemon directory exists # Try multiple possible paths for daemon source local daemon_dir="" local possible_paths=( "$(dirname "$0")/../apt-ostree.py/python" "$(dirname "$0")/../../src/apt-ostree.py/python" "./src/apt-ostree.py/python" "../src/apt-ostree.py/python" ) for path in "${possible_paths[@]}"; do if [[ -d "$path" ]]; then daemon_dir="$path" break fi done if [[ -z "$daemon_dir" ]]; then log_error "Daemon source not found. 
Tried paths:" "apt-layer" for path in "${possible_paths[@]}"; do log_error " - $path" "apt-layer" done return 1 fi log_info "Found daemon source at: $daemon_dir" "apt-layer" # Run the daemon install script if [[ -f "$daemon_dir/install.py" ]]; then if python3 "$daemon_dir/install.py"; then log_success "Daemon installed successfully" "apt-layer" return 0 else log_error "Daemon installation failed" "apt-layer" return 1 fi else log_error "Daemon install script not found" "apt-layer" return 1 fi } # Uninstall daemon uninstall_daemon() { log_info "Uninstalling apt-ostree daemon..." "apt-layer" # Stop daemon first stop_daemon # Remove systemd service if command -v systemctl >/dev/null 2>&1; then if systemctl is-enabled --quiet "$APT_OSTREE_DAEMON_SERVICE" 2>/dev/null; then systemctl disable "$APT_OSTREE_DAEMON_SERVICE" fi if [[ -f "/etc/systemd/system/$APT_OSTREE_DAEMON_SERVICE" ]]; then rm -f "/etc/systemd/system/$APT_OSTREE_DAEMON_SERVICE" systemctl daemon-reload fi fi # Remove daemon executable if [[ -f "$APT_OSTREE_DAEMON_PATH" ]]; then rm -f "$APT_OSTREE_DAEMON_PATH" fi # Remove Python package if command -v pip3 >/dev/null 2>&1; then pip3 uninstall -y apt-ostree 2>/dev/null || true fi log_success "Daemon uninstalled" "apt-layer" } # Test daemon functionality test_daemon() { log_info "Testing apt-ostree daemon..." "apt-layer" # Check daemon status local status=$(check_daemon_status) if [[ "$status" != "running" ]]; then log_error "Daemon is not running (status: $status)" "apt-layer" return 1 fi # Test D-Bus communication if ! get_daemon_status; then log_error "D-Bus communication test failed" "apt-layer" return 1 fi # Test client registration if ! register_client "test-client"; then log_error "Client registration test failed" "apt-layer" return 1 fi # Test client unregistration if ! unregister_client; then log_error "Client unregistration test failed" "apt-layer" return 1 fi log_success "All daemon tests passed" "apt-layer" return 0 } # Stress test daemon stress_test_daemon() { log_info "Running daemon stress test..." "apt-layer" local num_clients=10 local num_operations=50 local success_count=0 local failure_count=0 # Start daemon if not running if ! start_daemon; then log_error "Failed to start daemon for stress test" "apt-layer" return 1 fi # Register multiple clients for i in $(seq 1 $num_clients); do if register_client "stress-client-$i"; then ((success_count++)) else ((failure_count++)) fi done # Perform multiple operations for i in $(seq 1 $num_operations); do if get_daemon_status >/dev/null 2>&1; then ((success_count++)) else ((failure_count++)) fi done # Unregister all clients for i in $(seq 1 $num_clients); do unregister_client >/dev/null 2>&1 done log_info "Stress test results: $success_count successful, $failure_count failed" "apt-layer" if [[ $failure_count -eq 0 ]]; then log_success "Stress test passed" "apt-layer" return 0 else log_warning "Stress test had $failure_count failures" "apt-layer" return 1 fi } # Test daemon error recovery test_error_recovery() { log_info "Testing daemon error recovery..." "apt-layer" # Start daemon if ! start_daemon; then log_error "Failed to start daemon for error recovery test" "apt-layer" return 1 fi # Test invalid D-Bus calls log_info "Testing invalid method calls..." if ! 
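# Illustrative sketch (defined for illustration only, never invoked): driving
# the daemon-backed operations defined earlier in this section. Package and
# deployment names are hypothetical placeholders.
apt_layer_example_daemon_operations() {
    daemon_layer_packages htop tmux    # layer packages via the PkgChange D-Bus method
    daemon_deploy "dev/24.04"          # deploy a named deployment via the daemon
    daemon_upgrade                     # system upgrade via the daemon
    daemon_rollback                    # roll back via the daemon
}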
dbus-send --system --dest="$APT_OSTREE_DBUS_SERVICE" \ --type=method_call --print-reply "$APT_OSTREE_DBUS_PATH/Sysroot" \ "org.debian.aptostree1.Sysroot.NonExistentMethod" 2>/dev/null; then log_success "Invalid method properly rejected" "apt-layer" else log_warning "Invalid method should have been rejected" "apt-layer" fi # Test daemon restart log_info "Testing daemon restart..." if stop_daemon && start_daemon; then log_success "Daemon restart successful" "apt-layer" else log_error "Daemon restart failed" "apt-layer" return 1 fi # Test D-Bus service recovery log_info "Testing D-Bus service recovery..." if wait_for_dbus_service 30; then log_success "D-Bus service recovery successful" "apt-layer" else log_error "D-Bus service recovery failed" "apt-layer" return 1 fi log_success "Error recovery test passed" "apt-layer" return 0 } # Test daemon performance test_daemon_performance() { log_info "Testing daemon performance..." "apt-layer" # Start daemon if ! start_daemon; then log_error "Failed to start daemon for performance test" "apt-layer" return 1 fi # Test response time for status calls local start_time=$(date +%s%N) local num_calls=100 local success_count=0 for i in $(seq 1 $num_calls); do if get_daemon_status >/dev/null 2>&1; then ((success_count++)) fi done local end_time=$(date +%s%N) local duration=$(( (end_time - start_time) / 1000000 )) # Convert to milliseconds local avg_time=$(( duration / num_calls )) log_info "Performance test: $success_count/$num_calls successful calls" "apt-layer" log_info "Total time: ${duration}ms, Average: ${avg_time}ms per call" "apt-layer" if [[ $success_count -eq $num_calls ]]; then log_success "Performance test passed" "apt-layer" return 0 else log_warning "Performance test had $((num_calls - success_count)) failures" "apt-layer" return 1 fi } # Comprehensive daemon test suite run_comprehensive_test() { log_info "Running comprehensive daemon test suite..." "apt-layer" local tests_passed=0 local tests_failed=0 # Basic functionality test if test_daemon; then ((tests_passed++)) else ((tests_failed++)) fi # Stress test if stress_test_daemon; then ((tests_passed++)) else ((tests_failed++)) fi # Error recovery test if test_error_recovery; then ((tests_passed++)) else ((tests_failed++)) fi # Performance test if test_daemon_performance; then ((tests_passed++)) else ((tests_failed++)) fi log_info "Comprehensive test results: $tests_passed passed, $tests_failed failed" "apt-layer" if [[ $tests_failed -eq 0 ]]; then log_success "All comprehensive tests passed" "apt-layer" return 0 else log_error "$tests_failed comprehensive tests failed" "apt-layer" return 1 fi } # Run daemon in foreground (for testing/debugging) run_daemon() { log_info "Starting apt-ostree daemon in foreground..." "apt-layer" # Check if daemon executable exists if [[ ! 
-f "$APT_OSTREE_DAEMON_PATH" ]]; then log_error "Daemon executable not found: $APT_OSTREE_DAEMON_PATH" "apt-layer" log_info "Install the daemon first: sudo $0 daemon install" "apt-layer" return 1 fi # Run daemon in foreground log_info "Running daemon: $APT_OSTREE_DAEMON_PATH" "apt-layer" exec "$APT_OSTREE_DAEMON_PATH" } # --- END OF SCRIPTLET: 20-daemon-integration.sh --- # ============================================================================ # Direct dpkg Installation (Performance Optimization) # ============================================================================ # Direct dpkg installation for Particle-OS apt-layer Tool # Provides faster, more controlled package installation using dpkg directly # Enhanced DPKG Direct Install with Deep Metadata Extraction # Provides deep integration with dpkg for offline, atomic package management # This is fundamental for achieving rpm-ostree parity # Deep dpkg metadata extraction extract_dpkg_metadata() { local deb_file="$1" local extract_dir="$2" log_debug "Extracting dpkg metadata from: $deb_file" "apt-layer" if [[ ! -f "$deb_file" ]]; then log_error "Debian package not found: $deb_file" "apt-layer" return 1 fi # Create extraction directory mkdir -p "$extract_dir" # Extract control information local control_dir="$extract_dir/control" mkdir -p "$control_dir" if ! dpkg-deb -e "$deb_file" "$control_dir"; then log_error "Failed to extract control information from: $deb_file" "apt-layer" return 1 fi # Extract data archive local data_dir="$extract_dir/data" mkdir -p "$data_dir" if ! dpkg-deb -x "$deb_file" "$data_dir"; then log_error "Failed to extract data from: $deb_file" "apt-layer" return 1 fi # Extract file list with metadata local file_list="$extract_dir/file-list" if ! dpkg-deb -c "$deb_file" > "$file_list"; then log_error "Failed to extract file list from: $deb_file" "apt-layer" return 1 fi log_success "DPKG metadata extraction completed: $deb_file" "apt-layer" return 0 } # Parse dpkg control file parse_dpkg_control() { local control_file="$1" local -n control_data="$2" log_debug "Parsing dpkg control file: $control_file" "apt-layer" if [[ ! -f "$control_file" ]]; then log_error "Control file not found: $control_file" "apt-layer" return 1 fi # Initialize control data structure declare -gA control_data control_data=() # Parse control file line by line while IFS= read -r line; do # Skip empty lines and comments [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue # Parse field: value format if [[ "$line" =~ ^([A-Za-z][A-Za-z0-9-]*):[[:space:]]*(.*)$ ]]; then local field="${BASH_REMATCH[1]}" local value="${BASH_REMATCH[2]}" # Handle multi-line fields if [[ "$field" == "Description" ]]; then # Read description until next field or end local description="$value" while IFS= read -r desc_line; do if [[ "$desc_line" =~ ^[A-Za-z][A-Za-z0-9-]*: ]]; then # This is the next field, put it back break fi description+="\n$desc_line" done control_data["$field"]="$description" else control_data["$field"]="$value" fi fi done < "$control_file" log_debug "Parsed control fields: ${!control_data[@]}" "apt-layer" return 0 } # Parse dpkg file list with metadata parse_dpkg_file_list() { local file_list="$1" local -n file_data="$2" log_debug "Parsing dpkg file list: $file_list" "apt-layer" if [[ ! 
-f "$file_list" ]]; then log_error "File list not found: $file_list" "apt-layer" return 1 fi # Initialize file data structure declare -gA file_data file_data=() # Parse dpkg -c output format # Format: drwxr-xr-x user/group size date path while IFS= read -r line; do if [[ "$line" =~ ^([d-][rwx-]{9})[[:space:]]+([^/]+)/([^[:space:]]+)[[:space:]]+([0-9]+)[[:space:]]+([^[:space:]]+[[:space:]]+[^[:space:]]+)[[:space:]]+(.+)$ ]]; then local permissions="${BASH_REMATCH[1]}" local owner="${BASH_REMATCH[2]}" local group="${BASH_REMATCH[3]}" local size="${BASH_REMATCH[4]}" local date="${BASH_REMATCH[5]}" local path="${BASH_REMATCH[6]}" # Store file metadata file_data["$path"]="permissions:$permissions|owner:$owner|group:$group|size:$size" fi done < "$file_list" log_debug "Parsed file metadata for ${#file_data[@]} files" "apt-layer" return 0 } # Analyze package dependencies analyze_package_dependencies() { local control_data="$1" local -n dependency_info="$2" log_debug "Analyzing package dependencies" "apt-layer" # Initialize dependency structure declare -gA dependency_info dependency_info=() # Parse dependency fields local dependency_fields=("Depends" "Pre-Depends" "Recommends" "Suggests" "Conflicts" "Breaks" "Provides" "Replaces" "Enhances") for field in "${dependency_fields[@]}"; do if [[ -n "${control_data[$field]}" ]]; then dependency_info["$field"]="${control_data[$field]}" log_debug "Found $field: ${control_data[$field]}" "apt-layer" fi done return 0 } # Extract package architecture information extract_package_architecture() { local control_data="$1" local -n arch_info="$2" log_debug "Extracting package architecture information" "apt-layer" # Initialize architecture structure declare -gA arch_info arch_info=() # Get basic architecture if [[ -n "${control_data[Architecture]}" ]]; then arch_info["architecture"]="${control_data[Architecture]}" fi # Get multi-arch information if [[ -n "${control_data[Multi-Arch]}" ]]; then arch_info["multi-arch"]="${control_data[Multi-Arch]}" fi # Get package name and version if [[ -n "${control_data[Package]}" ]]; then arch_info["package"]="${control_data[Package]}" fi if [[ -n "${control_data[Version]}" ]]; then arch_info["version"]="${control_data[Version]}" fi log_debug "Architecture info: ${arch_info[*]}" "apt-layer" return 0 } # Analyze maintainer scripts analyze_maintainer_scripts() { local control_dir="$1" local -n script_info="$2" log_debug "Analyzing maintainer scripts in: $control_dir" "apt-layer" # Initialize script structure declare -gA script_info script_info=() # Script types to analyze local script_types=("preinst" "postinst" "prerm" "postrm" "config") for script_type in "${script_types[@]}"; do local script_file="$control_dir/$script_type" if [[ -f "$script_file" ]]; then script_info["$script_type"]="present" # Analyze script content for problematic patterns local problematic_patterns=() # Check for systemctl usage if grep -q "systemctl" "$script_file"; then problematic_patterns+=("systemctl") fi # Check for debconf usage if grep -q "debconf" "$script_file"; then problematic_patterns+=("debconf") fi # Check for live system state dependencies if grep -q "/proc\|/sys" "$script_file"; then problematic_patterns+=("live-state") fi # Check for user interaction if grep -q "read\|select\|dialog" "$script_file"; then problematic_patterns+=("user-interaction") fi # Check for network operations if grep -q "wget\|curl\|apt-get\|apt" "$script_file"; then problematic_patterns+=("network") fi if [[ ${#problematic_patterns[@]} -gt 0 ]]; then 
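# NOTE: the checks above are plain grep heuristics; a match (for example a
# commented-out "systemctl" line or a guarded debconf call) can be a false
# positive. Problems are therefore only recorded on the script entry and
# surfaced as warnings here; hard failures are left to the strict validation
# modes used elsewhere in this tool.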
script_info["${script_type}_problems"]="${problematic_patterns[*]}" log_warning "Problematic patterns in $script_type: ${problematic_patterns[*]}" "apt-layer" fi fi done return 0 } # Create comprehensive package analysis analyze_package_comprehensive() { local deb_file="$1" local analysis_dir="$2" log_info "Performing comprehensive package analysis: $deb_file" "apt-layer" # Create analysis directory mkdir -p "$analysis_dir" # Extract dpkg metadata if ! extract_dpkg_metadata "$deb_file" "$analysis_dir"; then return 1 fi # Parse control file local -A control_data if ! parse_dpkg_control "$analysis_dir/control/control" control_data; then return 1 fi # Parse file list local -A file_data if ! parse_dpkg_file_list "$analysis_dir/file-list" file_data; then return 1 fi # Analyze dependencies local -A dependency_info if ! analyze_package_dependencies control_data dependency_info; then return 1 fi # Extract architecture information local -A arch_info if ! extract_package_architecture control_data arch_info; then return 1 fi # Analyze maintainer scripts local -A script_info if ! analyze_maintainer_scripts "$analysis_dir/control" script_info; then return 1 fi # Create analysis report local report_file="$analysis_dir/analysis-report.json" create_analysis_report "$report_file" control_data file_data dependency_info arch_info script_info log_success "Comprehensive package analysis completed: $deb_file" "apt-layer" return 0 } # Create analysis report in JSON format create_analysis_report() { local report_file="$1" local -n control_data="$2" local -n file_data="$3" local -n dependency_info="$4" local -n arch_info="$5" local -n script_info="$6" log_debug "Creating analysis report: $report_file" "apt-layer" # Create JSON report structure cat > "$report_file" << EOF { "package_analysis": { "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "package_info": { EOF # Add control data echo " \"control\": {" >> "$report_file" for key in "${!control_data[@]}"; do local value="${control_data[$key]}" # Escape JSON special characters value=$(echo "$value" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g') echo " \"$key\": \"$value\"," >> "$report_file" done echo " }," >> "$report_file" # Add architecture info echo " \"architecture\": {" >> "$report_file" for key in "${!arch_info[@]}"; do local value="${arch_info[$key]}" echo " \"$key\": \"$value\"," >> "$report_file" done echo " }," >> "$report_file" # Add dependency info echo " \"dependencies\": {" >> "$report_file" for key in "${!dependency_info[@]}"; do local value="${dependency_info[$key]}" value=$(echo "$value" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g') echo " \"$key\": \"$value\"," >> "$report_file" done echo " }," >> "$report_file" # Add script analysis echo " \"maintainer_scripts\": {" >> "$report_file" for key in "${!script_info[@]}"; do local value="${script_info[$key]}" echo " \"$key\": \"$value\"," >> "$report_file" done echo " }," >> "$report_file" # Add file count echo " \"file_count\": ${#file_data[@]}" >> "$report_file" echo " }" >> "$report_file" echo " }" >> "$report_file" echo "}" >> "$report_file" log_debug "Analysis report created: $report_file" "apt-layer" return 0 } # Enhanced dpkg direct installation with metadata preservation dpkg_direct_install_with_metadata() { local deb_file="$1" local target_dir="$2" local preserve_metadata="${3:-true}" log_info "DPKG direct installation with metadata: $deb_file" "apt-layer" # Create temporary analysis directory local temp_analysis temp_analysis=$(mktemp -d) # Perform comprehensive package analysis if ! 
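# Illustrative only (not executed): the intended shape of the report emitted by
# create_analysis_report above. All field values are hypothetical.
#
#   {
#     "package_analysis": {
#       "timestamp": "2025-01-27T23:55:00Z",
#       "package_info": {
#         "control":            { "Package": "htop", "Version": "3.3.0-4", ... },
#         "architecture":       { "package": "htop", "architecture": "amd64", ... },
#         "dependencies":       { "Depends": "libc6 (>= 2.34), libncursesw6", ... },
#         "maintainer_scripts": { "postinst": "present", ... },
#         "file_count": 42
#       }
#     }
#   }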
analyze_package_comprehensive "$deb_file" "$temp_analysis"; then log_error "Failed to analyze package: $deb_file" "apt-layer" rm -rf "$temp_analysis" return 1 fi # Extract package data if ! dpkg-deb -x "$deb_file" "$target_dir"; then log_error "Failed to extract package data: $deb_file" "apt-layer" rm -rf "$temp_analysis" return 1 fi # Preserve metadata if requested if [[ "$preserve_metadata" == "true" ]]; then if ! preserve_package_metadata "$temp_analysis" "$target_dir"; then log_warning "Failed to preserve some metadata" "apt-layer" fi fi # Clean up analysis directory rm -rf "$temp_analysis" log_success "DPKG direct installation completed: $deb_file" "apt-layer" return 0 } # Preserve package metadata in target directory preserve_package_metadata() { local analysis_dir="$1" local target_dir="$2" log_debug "Preserving package metadata in: $target_dir" "apt-layer" # Copy analysis report if [[ -f "$analysis_dir/analysis-report.json" ]]; then cp "$analysis_dir/analysis-report.json" "$target_dir/.apt-layer-metadata.json" fi # Copy control information if [[ -d "$analysis_dir/control" ]]; then cp -r "$analysis_dir/control" "$target_dir/.apt-layer-control" fi # Copy file list if [[ -f "$analysis_dir/file-list" ]]; then cp "$analysis_dir/file-list" "$target_dir/.apt-layer-file-list" fi return 0 } # Validate package for apt-layer compatibility validate_package_for_apt_layer() { local deb_file="$1" local validation_mode="${2:-warn}" log_info "Validating package for apt-layer: $deb_file" "apt-layer" # Create temporary analysis directory local temp_analysis temp_analysis=$(mktemp -d) # Perform comprehensive package analysis if ! analyze_package_comprehensive "$deb_file" "$temp_analysis"; then log_error "Failed to analyze package for validation: $deb_file" "apt-layer" rm -rf "$temp_analysis" return 1 fi # Parse control data local -A control_data if ! parse_dpkg_control "$temp_analysis/control/control" control_data; then rm -rf "$temp_analysis" return 1 fi # Parse script analysis local -A script_info if ! 
analyze_maintainer_scripts "$temp_analysis/control" script_info; then rm -rf "$temp_analysis" return 1 fi # Validation results local validation_issues=() local validation_warnings=() # Check for problematic maintainer scripts for script_type in "${!script_info[@]}"; do if [[ "$script_type" == *"_problems" ]]; then local problems="${script_info[$script_type]}" if [[ "$validation_mode" == "strict" ]]; then validation_issues+=("$script_type: $problems") else validation_warnings+=("$script_type: $problems") fi fi done # Check for architecture compatibility if [[ -n "${control_data[Architecture]}" ]] && [[ "${control_data[Architecture]}" != "all" ]]; then local system_arch system_arch=$(dpkg --print-architecture) if [[ "${control_data[Architecture]}" != "$system_arch" ]]; then validation_warnings+=("Architecture mismatch: ${control_data[Architecture]} vs $system_arch") fi fi # Check for essential packages (might cause issues) if [[ -n "${control_data[Essential]}" ]] && [[ "${control_data[Essential]}" == "yes" ]]; then validation_warnings+=("Essential package: ${control_data[Package]}") fi # Report validation results if [[ ${#validation_issues[@]} -gt 0 ]]; then log_error "Package validation failed:" "apt-layer" for issue in "${validation_issues[@]}"; do log_error " - $issue" "apt-layer" done rm -rf "$temp_analysis" return 1 fi if [[ ${#validation_warnings[@]} -gt 0 ]]; then log_warning "Package validation warnings:" "apt-layer" for warning in "${validation_warnings[@]}"; do log_warning " - $warning" "apt-layer" done fi # Clean up rm -rf "$temp_analysis" log_success "Package validation completed: $deb_file" "apt-layer" return 0 } # Direct dpkg installation function dpkg_direct_install() { local packages=("$@") local chroot_dir="${DPKG_CHROOT_DIR:-}" local download_only="${DPKG_DOWNLOAD_ONLY:-false}" local force_depends="${DPKG_FORCE_DEPENDS:-false}" log_info "Direct dpkg installation: ${packages[*]}" "apt-layer" # Create temporary directory for package downloads local temp_dir temp_dir=$(mktemp -d "${WORKSPACE}/temp/dpkg-install-XXXXXX") # Start transaction start_transaction "dpkg_direct_install" # Download packages update_transaction_phase "downloading_packages" log_info "Downloading packages to: $temp_dir" "apt-layer" local download_cmd="apt-get download" if [[ -n "$chroot_dir" ]]; then download_cmd="chroot '$chroot_dir' apt-get download" fi if ! eval "$download_cmd ${packages[*]}"; then log_error "Failed to download packages" "apt-layer" rollback_transaction rm -rf "$temp_dir" return 1 fi # If download-only mode, return here if [[ "$download_only" == "true" ]]; then log_info "Download-only mode: packages saved to $temp_dir" "apt-layer" commit_transaction return 0 fi # Get list of downloaded .deb files local deb_files=() while IFS= read -r -d '' file; do deb_files+=("$file") done < <(find "$temp_dir" -name "*.deb" -print0) if [[ ${#deb_files[@]} -eq 0 ]]; then log_error "No .deb files found in download directory" "apt-layer" rollback_transaction rm -rf "$temp_dir" return 1 fi log_info "Downloaded ${#deb_files[@]} package files" "apt-layer" # Install packages using dpkg update_transaction_phase "installing_packages" log_info "Installing packages with dpkg..." 
"apt-layer" local dpkg_cmd="dpkg -i" if [[ -n "$chroot_dir" ]]; then dpkg_cmd="chroot '$chroot_dir' dpkg -i" # Copy .deb files to chroot cp "${deb_files[@]}" "$chroot_dir/tmp/" deb_files=("${deb_files[@]/$temp_dir/$chroot_dir/tmp}") fi # Add force-depends if requested if [[ "$force_depends" == "true" ]]; then dpkg_cmd="$dpkg_cmd --force-depends" fi # Install packages if ! eval "$dpkg_cmd ${deb_files[*]}"; then log_warning "dpkg installation had issues, attempting dependency resolution" "apt-layer" # Try to fix broken dependencies local fix_cmd="apt-get install -f" if [[ -n "$chroot_dir" ]]; then fix_cmd="chroot '$chroot_dir' apt-get install -f" fi if ! eval "$fix_cmd"; then log_error "Failed to resolve dependencies after dpkg installation" "apt-layer" rollback_transaction rm -rf "$temp_dir" return 1 fi fi # Configure packages update_transaction_phase "configuring_packages" log_info "Configuring packages..." "apt-layer" local configure_cmd="dpkg --configure -a" if [[ -n "$chroot_dir" ]]; then configure_cmd="chroot '$chroot_dir' dpkg --configure -a" fi if ! eval "$configure_cmd"; then log_warning "Package configuration had issues" "apt-layer" fi # Clean up rm -rf "$temp_dir" if [[ -n "$chroot_dir" ]]; then rm -f "$chroot_dir"/tmp/*.deb fi commit_transaction log_success "Direct dpkg installation completed: ${packages[*]}" "apt-layer" return 0 } # Container-based dpkg installation container_dpkg_install() { local base_image="$1" local new_image="$2" local packages=("${@:3}") log_info "Container-based dpkg installation: ${packages[*]}" "apt-layer" # Create temporary container name local container_name="apt-layer-dpkg-$(date +%s)-$$" local temp_dir="$WORKSPACE/temp/$container_name" # Ensure temp directory exists mkdir -p "$temp_dir" # Start transaction start_transaction "container-dpkg-install-$container_name" # Use existing container creation function if available, otherwise create base image if command -v create_base_container_image >/dev/null 2>&1; then if ! create_base_container_image "$base_image" "$container_name"; then rollback_transaction return 1 fi else # Fallback: create base image directory log_info "Using fallback container image creation" "apt-layer" if [[ -d "$WORKSPACE/images/$base_image" ]]; then cp -a "$WORKSPACE/images/$base_image" "$temp_dir" else log_error "Base image not found: $base_image" "apt-layer" rollback_transaction return 1 fi fi # Run dpkg installation in container case "$CONTAINER_RUNTIME" in podman) if ! run_podman_dpkg_install "$base_image" "$container_name" "$temp_dir" "${packages[@]}"; then rollback_transaction return 1 fi ;; docker) if ! run_docker_dpkg_install "$base_image" "$container_name" "$temp_dir" "${packages[@]}"; then rollback_transaction return 1 fi ;; *) log_error "Unsupported container runtime: $CONTAINER_RUNTIME" "apt-layer" rollback_transaction return 1 ;; esac # Create ComposeFS layer from container changes if command -v create_composefs_layer >/dev/null 2>&1; then if ! create_composefs_layer "$temp_dir" "$new_image"; then rollback_transaction return 1 fi else # Try real mkcomposefs binary first if command -v mkcomposefs >/dev/null 2>&1; then # Create object store directory (same directory as image) local object_store_dir=$(dirname "$new_image") mkdir -p "$object_store_dir" if ! 
mkcomposefs "$temp_dir" "$new_image" --digest-store="$object_store_dir"; then log_error "Failed to create ComposeFS layer with mkcomposefs" "apt-layer" rollback_transaction return 1 fi else # Fallback: use composefs-alternative.sh log_info "Using fallback ComposeFS layer creation" "apt-layer" if ! "$COMPOSEFS_SCRIPT" create "$new_image" "$temp_dir"; then log_error "Failed to create ComposeFS layer" "apt-layer" rollback_transaction return 1 fi fi fi # Commit transaction commit_transaction # Cleanup if command -v cleanup_container_artifacts >/dev/null 2>&1; then cleanup_container_artifacts "$container_name" "$temp_dir" else # Fallback cleanup rm -rf "$temp_dir" fi log_success "Container-based dpkg installation completed" "apt-layer" return 0 } # Podman-based dpkg installation run_podman_dpkg_install() { local base_image="$1" local container_name="$2" local temp_dir="$3" shift 3 local packages=("$@") log_info "Running podman-based dpkg installation" "apt-layer" # Create container from base image local container_id if [[ -d "$WORKSPACE/images/$base_image" ]]; then # Use ComposeFS image as base container_id=$(podman create --name "$container_name" \ --mount type=bind,source="$WORKSPACE/images/$base_image",target=/ \ --mount type=bind,source="$temp_dir",target=/output \ ubuntu:24.04 /bin/bash) else # Use standard Ubuntu image container_id=$(podman create --name "$container_name" \ --mount type=bind,source="$temp_dir",target=/output \ ubuntu:24.04 /bin/bash) fi if [[ -z "$container_id" ]]; then log_error "Failed to create podman container" "apt-layer" return 1 fi # Start container and install packages if ! podman start "$container_name"; then log_error "Failed to start podman container" "apt-layer" podman rm "$container_name" 2>/dev/null || true return 1 fi # Download and install packages using dpkg local install_cmd=" apt-get update && apt-get download ${packages[*]} && dpkg -i *.deb && apt-get install -f && dpkg --configure -a && apt-get clean " if ! podman exec "$container_name" /bin/bash -c "$install_cmd"; then log_error "dpkg installation failed in podman container" "apt-layer" podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true return 1 fi # Export container filesystem if ! podman export "$container_name" | tar -x -C "$temp_dir"; then log_error "Failed to export podman container filesystem" "apt-layer" podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true return 1 fi # Cleanup container podman stop "$container_name" 2>/dev/null || true podman rm "$container_name" 2>/dev/null || true log_success "Podman-based dpkg installation completed" "apt-layer" return 0 } # Docker-based dpkg installation run_docker_dpkg_install() { local base_image="$1" local container_name="$2" local temp_dir="$3" shift 3 local packages=("$@") log_info "Running docker-based dpkg installation" "apt-layer" # Create container from base image local container_id if [[ -d "$WORKSPACE/images/$base_image" ]]; then # Use ComposeFS image as base container_id=$(docker create --name "$container_name" \ -v "$WORKSPACE/images/$base_image:/" \ -v "$temp_dir:/output" \ ubuntu:24.04 /bin/bash) else # Use standard Ubuntu image container_id=$(docker create --name "$container_name" \ -v "$temp_dir:/output" \ ubuntu:24.04 /bin/bash) fi if [[ -z "$container_id" ]]; then log_error "Failed to create docker container" "apt-layer" return 1 fi # Start container and install packages if ! 
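# Illustrative sketch (defined for illustration only, never invoked): common
# ways to configure the direct dpkg path defined above. The environment
# variables are the ones read by dpkg_direct_install; the chroot path and
# package names are hypothetical placeholders.
apt_layer_example_dpkg_direct() {
    # Install into a chroot, forcing dpkg to ignore missing dependencies
    DPKG_CHROOT_DIR=/srv/chroots/noble \
    DPKG_FORCE_DEPENDS=true \
        dpkg_direct_install htop tmux
    # Only download the .deb files without installing them
    DPKG_DOWNLOAD_ONLY=true dpkg_direct_install nginx
}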
docker start "$container_name"; then log_error "Failed to start docker container" "apt-layer" docker rm "$container_name" 2>/dev/null || true return 1 fi # Download and install packages using dpkg local install_cmd=" apt-get update && apt-get download ${packages[*]} && dpkg -i *.deb && apt-get install -f && dpkg --configure -a && apt-get clean " if ! docker exec "$container_name" /bin/bash -c "$install_cmd"; then log_error "dpkg installation failed in docker container" "apt-layer" docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true return 1 fi # Export container filesystem if ! docker export "$container_name" | tar -x -C "$temp_dir"; then log_error "Failed to export docker container filesystem" "apt-layer" docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true return 1 fi # Cleanup container docker stop "$container_name" 2>/dev/null || true docker rm "$container_name" 2>/dev/null || true log_success "Docker-based dpkg installation completed" "apt-layer" return 0 } # Live overlay dpkg installation live_dpkg_install() { local packages=("$@") log_info "Installing packages in live overlay with dpkg: ${packages[*]}" "apt-layer" # Check if overlay is active if command -v is_live_overlay_active >/dev/null 2>&1; then if ! is_live_overlay_active; then log_error "Live overlay is not active" "apt-layer" log_info "Use '--live-overlay start' to start live overlay first" "apt-layer" return 1 fi else # Fallback: check if overlay variables are set if [[ -z "${LIVE_OVERLAY_MOUNT_POINT:-}" ]]; then log_error "Live overlay system not available" "apt-layer" log_info "Live overlay functionality requires overlayfs support" "apt-layer" return 1 fi fi # Check for root privileges if [[ $EUID -ne 0 ]]; then log_error "Root privileges required for live installation" "apt-layer" return 1 fi # Check if we're dealing with .deb files or package names local has_deb_files=false for package in "${packages[@]}"; do if [[ "$package" == *.deb ]]; then has_deb_files=true break fi done if [[ "$has_deb_files" == "true" ]]; then # Install .deb files directly install_deb_files_in_overlay "${packages[@]}" else # Download and install packages using apt-get + dpkg install_packages_in_overlay "${packages[@]}" fi } # Install .deb files directly in overlay install_deb_files_in_overlay() { local deb_files=("$@") log_info "Installing .deb files directly in overlay: ${deb_files[*]}" "apt-layer" # Create temporary directory in overlay for .deb files local overlay_temp_dir="$LIVE_OVERLAY_MOUNT_POINT/tmp/apt-layer-debs" mkdir -p "$overlay_temp_dir" # Copy .deb files to overlay log_info "Copying .deb files to overlay" "apt-layer" for deb_file in "${deb_files[@]}"; do if [[ -f "$deb_file" ]]; then cp "$deb_file" "$overlay_temp_dir/" else log_warning "File not found: $deb_file" "apt-layer" fi done # Install .deb files with dpkg log_info "Installing .deb files with dpkg in overlay" "apt-layer" local install_cmd=" cd /tmp/apt-layer-debs && dpkg -i *.deb && apt-get install -f && dpkg --configure -a && apt-get clean " if chroot "$LIVE_OVERLAY_MOUNT_POINT" /bin/bash -c "$install_cmd"; then log_success "Packages installed successfully in overlay with dpkg" "apt-layer" # Log installed packages if log file is defined if [[ -n "${LIVE_OVERLAY_PACKAGE_LOG:-}" ]]; then for deb_file in "${deb_files[@]}"; do local package_name=$(basename "$deb_file" .deb) echo "$(date '+%Y-%m-%d %H:%M:%S') - INSTALLED: $package_name (dpkg)" >> "$LIVE_OVERLAY_PACKAGE_LOG" done fi # Clean 
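# Illustrative only (not executed): the live-overlay flow that the functions
# above implement, as driven from the CLI documented later in this script.
# Package names and the commit message are hypothetical placeholders.
#
#   apt-layer --live-overlay start        # mount a writable overlay over the running system
#   apt-layer --live-dpkg htop tmux       # install into the overlay using dpkg
#   apt-layer --live-overlay status       # inspect pending overlay changes
#   apt-layer --live-commit "add htop"    # persist the overlay as a new ComposeFS layer
#   apt-layer --live-rollback             # or discard the overlay instead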
up temporary directory rm -rf "$overlay_temp_dir" log_info "Changes are applied to overlay and can be committed or rolled back" "apt-layer" return 0 else log_error "Failed to install packages in overlay with dpkg" "apt-layer" # Clean up temporary directory rm -rf "$overlay_temp_dir" return 1 fi } # Install packages by name in overlay (download + install) install_packages_in_overlay() { local packages=("$@") log_info "Downloading and installing packages in overlay: ${packages[*]}" "apt-layer" # Update package lists in overlay log_info "Updating package lists in overlay" "apt-layer" if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get update; then log_error "Failed to update package lists" "apt-layer" return 1 fi # Download and install packages using dpkg log_info "Installing packages with dpkg in overlay" "apt-layer" local install_cmd=" apt-get download ${packages[*]} && dpkg -i *.deb && apt-get install -f && dpkg --configure -a && apt-get clean " if chroot "$LIVE_OVERLAY_MOUNT_POINT" /bin/bash -c "$install_cmd"; then log_success "Packages installed successfully in overlay with dpkg" "apt-layer" # Log installed packages if log file is defined if [[ -n "${LIVE_OVERLAY_PACKAGE_LOG:-}" ]]; then for package in "${packages[@]}"; do echo "$(date '+%Y-%m-%d %H:%M:%S') - INSTALLED: $package (dpkg)" >> "$LIVE_OVERLAY_PACKAGE_LOG" done fi log_info "Changes are applied to overlay and can be committed or rolled back" "apt-layer" return 0 else log_error "Failed to install packages in overlay with dpkg" "apt-layer" return 1 fi } # Package verification using dpkg verify_package_integrity() { local package="$1" log_info "Verifying package integrity: $package" "apt-layer" # Check if package is installed if ! dpkg -l "$package" >/dev/null 2>&1; then log_error "Package '$package' is not installed" "apt-layer" return 1 fi # Verify package files if ! dpkg -V "$package" >/dev/null 2>&1; then log_warning "Package '$package' has file integrity issues" "apt-layer" return 1 fi # Check package status local status status=$(dpkg -s "$package" 2>/dev/null | grep "^Status:" | cut -d: -f2 | tr -d ' ') if [[ "$status" != "installokinstalled" ]]; then log_warning "Package '$package' has status issues: $status" "apt-layer" return 1 fi log_success "Package '$package' integrity verified" "apt-layer" return 0 } # Batch package verification verify_all_packages() { local packages=("$@") log_info "Verifying integrity of ${#packages[@]} packages" "apt-layer" local failed_packages=() for package in "${packages[@]}"; do if ! 
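# Illustrative only (not executed): typical post-layering verification using
# the helpers in this scriptlet. Package names are hypothetical.
#
#   verify_package_integrity htop          # checks dpkg status and runs dpkg -V
#   verify_all_packages htop tmux nginx    # batch check, reports packages with issues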
        if ! verify_package_integrity "$package"; then
            failed_packages+=("$package")
        fi
    done

    if [[ ${#failed_packages[@]} -gt 0 ]]; then
        log_warning "Found ${#failed_packages[@]} packages with integrity issues: ${failed_packages[*]}" "apt-layer"
        return 1
    fi

    log_success "All packages verified successfully" "apt-layer"
    return 0
}

# --- END OF SCRIPTLET: 24-dpkg-direct-install.sh ---

# ============================================================================
# Main Dispatch and Help
# ============================================================================
# Main execution and command dispatch for Particle-OS apt-layer Tool

# Show version information
show_version() {
    cat << 'EOF'
apt-layer:
  Version: '2025.1'
  Git: Particle-OS apt-layer Tool
  Compiled: 2025-01-27 23:55 UTC
  Features:
    - composefs
    - container
    - live-overlay
    - rpm-ostree-compat
    - atomic-transactions
    - dpkg-direct-install
EOF
}

# Show concise usage information
show_usage() {
    cat << 'EOF'
Usage: apt-layer [OPTION…] COMMAND

Builtin Commands:
  install                Overlay additional packages
  upgrade                Perform a system upgrade
  rebase                 Switch to a different base
  rollback               Revert to the previously booted deployment
  status                 Get the version of the booted system
  kargs                  Query or modify kernel arguments
  cleanup                Clear cached/pending data
  cancel                 Cancel an active transaction
  initramfs              Enable or disable local initramfs regeneration
  usroverlay             Apply a transient overlayfs to /usr

Daemon Management:
  daemon start           Start the apt-ostree daemon
  daemon stop            Stop the apt-ostree daemon
  daemon status          Show daemon status
  daemon install         Install the apt-ostree daemon
  daemon uninstall       Uninstall the apt-ostree daemon
  daemon test            Test daemon functionality
  daemon stress-test     Run daemon stress tests
  daemon error-recovery  Test daemon error recovery
  daemon performance     Test daemon performance
  daemon comprehensive   Run comprehensive test suite
  daemon layer           Layer packages via daemon
  daemon deploy          Deploy via daemon
  daemon upgrade         Upgrade via daemon
  daemon rollback        Rollback via daemon

Layer Management:
  --container            Create layer using container isolation
  --dpkg-install         Install packages using direct dpkg
  --live-install         Install packages on live system
  --live-overlay         Manage live system overlayfs
  --live-commit          Commit live overlay changes
  --live-rollback        Rollback live overlay changes

Image Management:
  --list                 List available images
  --info                 Show image information
  --remove               Remove image
  --oci-export           Export as OCI image
  --oci-import           Import OCI image

System Management:
  --init                 Initialize apt-layer system
  --reinit               Reinitialize apt-layer system (force recreation)
  --rm-init              Remove apt-layer system (cleanup)
  --reset                Reset apt-layer system
  --status               Show apt-layer system status
  --help-full            Show detailed help
  --examples             Show usage examples

Help Options:
  -h, --help             Show help options

Application Options:
  --version              Print version information and exit
  -q, --quiet            Avoid printing most informational messages

Examples:
  apt-layer ubuntu-base/24.04 gaming/24.04 steam wine
  apt-layer --container ubuntu-base/24.04 dev/24.04 vscode git
  apt-layer --live-install firefox
  apt-layer install steam wine
  apt-layer status
EOF
}

# Show full detailed usage information
show_full_usage() {
    cat << 'EOF'
apt-layer Tool - Enhanced with Container Support and LIVE SYSTEM LAYERING
Like rpm-ostree + Vanilla OS Apx for Ubuntu/Debian, now ComposeFS-based

BASIC LAYER CREATION:
  apt-layer base-image new-image [packages...]
# Add a new layer to an existing ComposeFS image (build or user) apt-layer --container base-image new-image [packages...] # Create layer using container isolation (like Apx) apt-layer --dpkg-install packages # Install packages using direct dpkg (faster, more controlled) apt-layer --container-dpkg base-image new-image [packages...] # Create layer using container isolation with dpkg (optimized) LIVE SYSTEM LAYERING: apt-layer --live-install packages # Install packages on live system with overlayfs (like rpm-ostree install) apt-layer --live-dpkg packages # Install packages on live system using dpkg (optimized for overlays, offline, WSL) apt-layer --live-overlay action [options] # Manage live system overlayfs # Actions: start, stop, status, commit, rollback apt-layer --live-commit [message] # Commit current live overlay changes as new ComposeFS layer apt-layer --live-rollback # Rollback live overlay changes rpm-ostree COMPATIBILITY: apt-layer install packages # Install packages (rpm-ostree install compatibility) apt-layer upgrade # Upgrade system (rpm-ostree upgrade compatibility) apt-layer rebase new-base # Rebase to new base (rpm-ostree rebase compatibility) apt-layer rollback [commit] # Rollback to previous deployment (rpm-ostree rollback compatibility) apt-layer status # Show deployment status (rpm-ostree status compatibility) apt-layer diff [from] [to] # Show package differences (rpm-ostree diff compatibility) apt-layer db list # List deployments (rpm-ostree db list compatibility) apt-layer db diff [from] [to] # Show detailed differences (rpm-ostree db diff compatibility) apt-layer cleanup [--purge] # Clean up old deployments (rpm-ostree cleanup compatibility) apt-layer cancel # Cancel pending deployment (rpm-ostree cancel compatibility) apt-layer initramfs action # Manage initramfs (rpm-ostree initramfs compatibility) apt-layer kargs action [args...] # Manage kernel arguments (rpm-ostree kargs compatibility) apt-layer bootloader action [options] # Manage bootloader entries and configuration apt-layer usroverlay action # Manage /usr overlay (rpm-ostree usroverlay compatibility) apt-layer composefs action [args...] 
# Manage ComposeFS (rpm-ostree composefs compatibility) DAEMON MANAGEMENT: apt-layer daemon start # Start the apt-ostree daemon apt-layer daemon stop # Stop the apt-ostree daemon apt-layer daemon status # Show daemon status and health apt-layer daemon install # Install the apt-ostree daemon apt-layer daemon uninstall # Uninstall the apt-ostree daemon apt-layer daemon test # Test daemon functionality apt-layer daemon layer packages # Layer packages via daemon (atomic operations) apt-layer daemon deploy deployment-name [revision] # Deploy specific revision via daemon apt-layer daemon upgrade # Upgrade system via daemon apt-layer daemon rollback # Rollback system via daemon IMAGE MANAGEMENT: apt-layer --list # List all available ComposeFS images/layers apt-layer --info image # Show information about a specific ComposeFS image/layer apt-layer --remove image # Remove an image/layer apt-layer --oci-export image placeholder # Export ComposeFS image as OCI image apt-layer --oci-import placeholder placeholder # Import OCI image as ComposeFS image apt-layer --oci-status # Show OCI integration system status SYSTEM MANAGEMENT: apt-layer --init # Initialize apt-layer system apt-layer --reset # Reset apt-layer system EXAMPLES: apt-layer ubuntu-base/24.04 gaming/24.04 steam wine apt-layer --container ubuntu-base/24.04 dev/24.04 vscode git apt-layer --dpkg-install curl wget apt-layer --live-install firefox apt-layer install steam wine apt-layer status EOF } # Show category-specific help show_layer_help() { cat << 'EOF' Layer Management Commands BASIC LAYER CREATION: apt-layer base-image new-image [packages...] # Create new layer from base image with packages apt-layer --container base-image new-image [packages...] # Create layer using container isolation (like Apx) apt-layer --dpkg-install packages # Install packages using direct dpkg (faster, more controlled) apt-layer --container-dpkg base-image new-image [packages...] 
# Create layer using container isolation with dpkg (optimized) apt-layer --advanced-install packages # Install packages with security checks and dependency resolution apt-layer --advanced-remove packages # Remove packages with dependency checking and safety validation apt-layer --advanced-update packages # Update packages with rollback capability and backup creation Examples: apt-layer ubuntu-base/24.04 gaming/24.04 steam wine apt-layer --container ubuntu-base/24.04 dev/24.04 vscode git apt-layer --dpkg-install curl wget apt-layer --advanced-install firefox EOF } show_live_help() { cat << 'EOF' Live System Management Commands LIVE INSTALLATION: apt-layer --live-install packages # Install packages on live system with overlayfs (like rpm-ostree install) # Uses apt-get (requires network access) # ⚠️ For WSL/offline/atomic overlays, use --live-dpkg instead apt-layer --live-dpkg packages # Install packages on live system using dpkg (optimized for overlays, offline, WSL) # Usage: apt-layer --live-dpkg /path/to/*.deb LIVE OVERLAY MANAGEMENT: apt-layer --live-overlay action [options] # Manage live system overlayfs # Actions: start, stop, status, commit, rollback apt-layer --live-commit [message] # Commit current live overlay changes as new ComposeFS layer apt-layer --live-rollback # Rollback live overlay changes Examples: apt-layer --live-install firefox apt-layer --live-dpkg ~/apt-cache/*.deb apt-layer --live-overlay start apt-layer --live-overlay commit "Add development tools" apt-layer --live-rollback EOF } show_rpm_ostree_help() { cat << 'EOF' rpm-ostree Compatibility Commands BASIC COMMANDS: apt-layer install packages # Install packages (rpm-ostree install compatibility) apt-layer upgrade # Upgrade system (rpm-ostree upgrade compatibility) apt-layer rebase new-base # Rebase to new base (rpm-ostree rebase compatibility) apt-layer rollback [commit] # Rollback to previous deployment (rpm-ostree rollback compatibility) apt-layer status # Show deployment status (rpm-ostree status compatibility) apt-layer diff [from] [to] # Show package differences (rpm-ostree diff compatibility) DATABASE COMMANDS: apt-layer db list # List deployments (rpm-ostree db list compatibility) apt-layer db diff [from] [to] # Show detailed differences (rpm-ostree db diff compatibility) SYSTEM COMMANDS: apt-layer cleanup [--purge] # Clean up old deployments (rpm-ostree cleanup compatibility) apt-layer cancel # Cancel pending deployment (rpm-ostree cancel compatibility) apt-layer initramfs action # Manage initramfs (rpm-ostree initramfs compatibility) apt-layer kargs action [args...] # Manage kernel arguments (rpm-ostree kargs compatibility) apt-layer bootloader action [options] # Manage bootloader entries and configuration apt-layer usroverlay action # Manage /usr overlay (rpm-ostree usroverlay compatibility) apt-layer composefs action [args...] 
# Manage ComposeFS (rpm-ostree composefs compatibility) Examples: apt-layer install steam wine apt-layer status apt-layer upgrade apt-layer kargs add "console=ttyS0" apt-layer rollback EOF } # Show image management help show_image_help() { cat << 'EOF' IMAGE MANAGEMENT COMMANDS: IMAGE OPERATIONS: apt-layer --list # List all available ComposeFS images/layers apt-layer --info image # Show information about a specific ComposeFS image/layer apt-layer --remove image # Remove an image/layer OCI INTEGRATION: apt-layer --oci-export image placeholder # Export ComposeFS image as OCI image apt-layer --oci-import placeholder placeholder # Import OCI image as ComposeFS image apt-layer --oci-status # Show OCI integration system status EXAMPLES: apt-layer --list apt-layer --info particle-os/base/24.04 apt-layer --remove old-layer apt-layer --oci-export my-image oci:my-registry/my-image:latest EOF } show_security_help() { cat << 'EOF' Security & Signing Commands LAYER SIGNING & VERIFICATION: apt-layer --generate-key key-name type # Generate signing key pair (sigstore, gpg) apt-layer --sign-layer layer-path key-name # Sign layer with specified key apt-layer --verify-layer layer-path # Verify layer signature apt-layer --revoke-layer layer-path [reason] # Revoke layer (mark as untrusted) apt-layer --list-keys # List all signing keys apt-layer --list-signatures # List all layer signatures apt-layer --layer-status layer-path # Show layer signing status SECURITY SCANNING: apt-layer --scan-package package-name [version] [scan-level] # Scan package for vulnerabilities (standard, thorough, quick) apt-layer --scan-layer layer-path [scan-level] # Scan layer for vulnerabilities apt-layer --generate-security-report type [format] [scan-level] # Generate security report (package, layer, system) apt-layer --security-status # Show security scanning system status apt-layer --update-cve-database # Update CVE database from NVD apt-layer --cleanup-security-reports [days] # Clean up old security reports (default: 90 days) Examples: apt-layer --generate-key my-key sigstore apt-layer --sign-layer layer.squashfs my-key apt-layer --verify-layer layer.squashfs apt-layer --scan-package firefox apt-layer --security-status EOF } show_audit_help() { cat << 'EOF' Audit & Compliance Commands AUDIT LOGGING: apt-layer --query-audit format [filters...] # Query audit logs with filters (json, csv, table) apt-layer --export-audit format [output-file] [filters...] 
# Export audit logs to file (json, csv, html) apt-layer --list-audit-reports # List all audit reports apt-layer --audit-status # Show audit system status apt-layer --cleanup-audit-logs [days] # Clean up old audit logs (default: 90 days) COMPLIANCE REPORTING: apt-layer --generate-compliance-report framework [period] [format] # Generate compliance report (sox, pci-dss) Examples: apt-layer --query-audit json --user=admin --since=2024-01-01 apt-layer --export-audit csv --output=audit-export.csv apt-layer --generate-compliance-report sox monthly html apt-layer --audit-status EOF } show_admin_help() { cat << 'EOF' Admin Utilities Commands SYSTEM HEALTH: apt-layer admin health # System health check and diagnostics apt-layer admin perf # Performance analytics and resource usage MAINTENANCE: apt-layer admin cleanup # Maintenance cleanup apt-layer admin backup # Backup configs and layers apt-layer admin restore # Restore from backup USER MANAGEMENT: apt-layer --add-user username role # Add user to package management system with specified role apt-layer --remove-user username # Remove user from package management system apt-layer --list-users # List all package management users and roles PACKAGE MANAGEMENT: apt-layer --package-info package # Get detailed information about a package apt-layer --package-status # Show advanced package management system status apt-layer --list-backups # List all package backups apt-layer --cleanup-backups [days] # Clean up backups older than specified days (default: 30) Examples: apt-layer admin health apt-layer admin perf apt-layer --add-user john package_manager apt-layer --list-users apt-layer --package-status EOF } show_enterprise_help() { cat << 'EOF' Enterprise Features Commands MULTI-TENANT MANAGEMENT: apt-layer tenant action [options] # Multi-tenant management # Actions: init, create, delete, list, info, quota, backup, restore, health COMPLIANCE FRAMEWORKS: apt-layer compliance action [options] # Compliance framework management # Actions: init, enable, disable, list, scan, report ENTERPRISE INTEGRATION: apt-layer enterprise action [options] # Enterprise integration # Actions: init, enable, disable, list, test, hook, send MONITORING & ALERTING: apt-layer monitoring action [options] # Monitoring and alerting # Actions: init, check, policy, history, report Examples: apt-layer tenant create my-org apt-layer compliance enable SOX apt-layer enterprise enable SIEM siem-config.json apt-layer monitoring check EOF } show_cloud_help() { cat << 'EOF' Cloud Integration Commands CLOUD PROVIDERS: apt-layer cloud action [options] # Cloud provider integration (AWS, Azure, GCP) # Actions: init, aws, azure, gcp, deploy, status, list-deployments, cleanup KUBERNETES: apt-layer kubernetes action [options] # Kubernetes integration (EKS, AKS, GKE, OpenShift) # Actions: init, eks, aks, gke, openshift, deploy, helm, monitoring, security, cleanup CONTAINER ORCHESTRATION: apt-layer orchestration action [options] # Container orchestration # Actions: init, multi-cluster, service-mesh, gitops, deployments, status, cleanup MULTI-CLOUD: apt-layer multicloud action [options] # Multi-cloud deployment # Actions: init, add-profile, list-profiles, deploy, migrate, status, policy CLOUD SECURITY: apt-layer cloud-security action [options] # Cloud-native security # Actions: init, scan, policy, list-scans, list-policies, cleanup, status Examples: apt-layer cloud aws init apt-layer cloud deploy particle-os/gaming/24.04 aws ecr apt-layer kubernetes eks create-cluster my-cluster us-west-2 apt-layer 
orchestration gitops init https://github.com/my-org/gitops-repo apt-layer cloud-security scan particle-os/gaming/24.04 aws comprehensive EOF } show_daemon_help() { cat << 'EOF' Daemon Management Commands DAEMON CONTROL: apt-layer daemon start # Start the apt-ostree daemon apt-layer daemon stop # Stop the apt-ostree daemon apt-layer daemon status # Show daemon status and health apt-layer daemon install # Install the apt-ostree daemon apt-layer daemon uninstall # Uninstall the apt-ostree daemon apt-layer daemon test # Test daemon functionality apt-layer daemon stress-test # Run daemon stress tests apt-layer daemon error-recovery # Test daemon error recovery apt-layer daemon performance # Test daemon performance apt-layer daemon comprehensive # Run comprehensive test suite ATOMIC OPERATIONS: apt-layer daemon layer packages # Layer packages via daemon (atomic operations) apt-layer daemon deploy deployment-name [revision] # Deploy specific revision via daemon apt-layer daemon upgrade # Upgrade system via daemon apt-layer daemon rollback # Rollback system via daemon Examples: apt-layer daemon start apt-layer daemon status apt-layer daemon layer firefox steam apt-layer daemon upgrade EOF } # Show examples show_examples() { cat << 'EOF' Particle-OS apt-layer Tool - Examples BASIC LAYER CREATION: # Create gaming layer from base Ubuntu image apt-layer particle-os/base/24.04 particle-os/gaming/24.04 steam wine # Create development layer with container isolation apt-layer --container particle-os/base/24.04 particle-os/dev/24.04 vscode git # Direct dpkg installation (faster) apt-layer --dpkg-install curl wget # Deep dpkg analysis and metadata extraction apt-layer dpkg-analyze extract apt-layer dpkg-analyze analyze [analysis-dir] apt-layer dpkg-analyze validate [validation-mode] apt-layer dpkg-analyze install [preserve-metadata] # Basic ComposeFS Integration (Phase 2.2) apt-layer composefs create [layer-name] apt-layer composefs atomic-create [layer-name] [preserve-metadata] [conflict-resolution] apt-layer composefs mount apt-layer composefs unmount apt-layer composefs compose [conflict-resolution] apt-layer composefs validate apt-layer composefs test [test-mount-point] apt-layer composefs rollback apt-layer composefs status # Advanced ComposeFS Features (Phase 2.3) apt-layer composefs multi-compose ... apt-layer composefs deduplicate [strategy] apt-layer composefs compress [type] [level] apt-layer composefs benchmark [benchmark-file] apt-layer composefs resolve-conflicts [conflict-file] apt-layer composefs track-relationships [parent-layers...] apt-layer composefs enhanced-metadata [format] # Production Integration (Phase 2.4) apt-layer production setup-systemd [service-name] [service-type] [user] apt-layer production setup-grub [grub-config] [grub-cfg] apt-layer production setup-systemd-boot [esp-path] apt-layer production create-deployment [additional-layers...] 
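# End-to-end production flow (illustrative; "web-stack" is a hypothetical deployment name)
apt-layer production create-deployment web-stack particle-os/base/24.04 particle-os/web/24.04
apt-layer production deploy web-stack
apt-layer production health-check web-stack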
apt-layer production deploy apt-layer production rollback [target-deployment] apt-layer production health-check [deployment-name] apt-layer production status apt-layer production list-deployments apt-layer production backup-deployment [deployment-name] # System Services apt-layer daemon apt-layer maintenance LIVE SYSTEM MANAGEMENT: # Install packages on running system apt-layer --live-install firefox # Start live overlay for temporary changes apt-layer --live-overlay start # Commit overlay changes as new layer apt-layer --live-overlay commit "Add development tools" # Rollback overlay changes apt-layer --live-rollback rpm-ostree COMPATIBILITY: # Install packages (rpm-ostree style) apt-layer install steam wine # Check system status apt-layer status # Upgrade system apt-layer upgrade # Add kernel argument apt-layer kargs add "console=ttyS0" ENHANCED OSTREE WORKFLOW: # Rebase to new base image apt-layer ostree rebase oci://ubuntu:24.04 # Layer packages on current deployment apt-layer ostree layer vim git build-essential # Override package with custom version apt-layer ostree override linux-image-generic /path/to/custom-kernel.deb # Deploy specific deployment apt-layer ostree deploy my-deployment-20250128-143022 # Build from declarative configuration apt-layer ostree compose tree apt-layer-compose.yaml # Layer with metadata preservation apt-layer ostree layer-metadata package-name true keep-latest # Layer with multi-arch support apt-layer ostree layer-multiarch libc6 amd64 same # Layer with script validation apt-layer ostree layer-scripts package-name strict # Show deployment history apt-layer ostree log # Show differences between deployments apt-layer ostree diff deployment1 deployment2 # Rollback to previous deployment apt-layer ostree rollback # Show current status apt-layer ostree status IMAGE MANAGEMENT: # List available images apt-layer --list # Show image details apt-layer --info particle-os/gaming/24.04 # Export as OCI image apt-layer --oci-export particle-os/gaming/24.04 particle-os/gaming:latest EOF } # HARDWARE DETECTION & AUTO-CONFIGURATION: # apt-layer --detect-hardware # Detect hardware and auto-configure # apt-layer --show-hardware-info # Show detailed hardware information # apt-layer --auto-configure-modules # Auto-configure kernel modules # apt-layer --install-enabled-modules # Install all enabled modules # KERNEL PATCHING (Ubuntu-specific): # apt-layer --list-patches # List available kernel patches # apt-layer --list-enabled-patches # List enabled kernel patches # apt-layer --enable-patch patch-name # Enable specific kernel patch # apt-layer --disable-patch patch-name # Disable specific kernel patch # apt-layer --apply-patch [patch-name] # Apply specific or all enabled patches # apt-layer --update-kernel-args # Update kernel arguments for patches # Initialize Particle-OS system initialize_particle_system() { log_info "Initializing Particle-OS system..." "apt-layer" # Check if running as root check_root # Create configuration file if [[ ! -f "/usr/local/etc/particle-config.sh" ]]; then log_info "Creating configuration file..." 
"apt-layer" mkdir -p "/usr/local/etc" cat > "/usr/local/etc/particle-config.sh" << 'EOF' #!/bin/bash # Particle-OS Configuration File # This file contains the main configuration for Particle-OS # Workspace and directory configuration PARTICLE_WORKSPACE="/var/lib/particle-os" PARTICLE_CONFIG_DIR="/usr/local/etc/particle-os" PARTICLE_LOG_DIR="/var/log/particle-os" PARTICLE_CACHE_DIR="/var/cache/particle-os" # Build and temporary directories PARTICLE_BUILD_DIR="$PARTICLE_WORKSPACE/build" PARTICLE_TEMP_DIR="$PARTICLE_WORKSPACE/temp" PARTICLE_LAYERS_DIR="$PARTICLE_WORKSPACE/layers" # ComposeFS configuration PARTICLE_COMPOSEFS_DIR="$PARTICLE_WORKSPACE/composefs" PARTICLE_COMPOSEFS_SCRIPT="/usr/local/bin/composefs-alternative.sh" # Container configuration # Container runtime will be detected dynamically PARTICLE_CONTAINER_RUNTIME="" PARTICLE_CONTAINER_WORKSPACE="$PARTICLE_WORKSPACE/containers" # Live overlay configuration PARTICLE_LIVE_OVERLAY_DIR="$PARTICLE_WORKSPACE/live-overlay" # Transaction configuration PARTICLE_TRANSACTION_STATE="$PARTICLE_WORKSPACE/transaction-state" PARTICLE_TRANSACTION_LOG="$PARTICLE_LOG_DIR/transaction.log" # Logging configuration PARTICLE_LOG_LEVEL="info" PARTICLE_LOG_COLOR="true" # Security configuration PARTICLE_SECURITY_ENABLED="true" PARTICLE_SECURITY_SCAN_LEVEL="standard" # Audit configuration PARTICLE_AUDIT_ENABLED="true" PARTICLE_AUDIT_RETENTION_DAYS="90" # OCI configuration PARTICLE_OCI_ENABLED="true" PARTICLE_OCI_WORKSPACE="$PARTICLE_WORKSPACE/oci" # Export variables for use in scripts export PARTICLE_WORKSPACE export PARTICLE_CONFIG_DIR export PARTICLE_LOG_DIR export PARTICLE_CACHE_DIR export PARTICLE_BUILD_DIR export PARTICLE_TEMP_DIR export PARTICLE_LAYERS_DIR export PARTICLE_COMPOSEFS_DIR export PARTICLE_COMPOSEFS_SCRIPT export PARTICLE_CONTAINER_RUNTIME export PARTICLE_CONTAINER_WORKSPACE export PARTICLE_LIVE_OVERLAY_DIR export PARTICLE_TRANSACTION_STATE export PARTICLE_TRANSACTION_LOG export PARTICLE_LOG_LEVEL export PARTICLE_LOG_COLOR export PARTICLE_SECURITY_ENABLED export PARTICLE_SECURITY_SCAN_LEVEL export PARTICLE_AUDIT_ENABLED export PARTICLE_AUDIT_RETENTION_DAYS export PARTICLE_OCI_ENABLED export PARTICLE_OCI_WORKSPACE EOF chmod 644 "/usr/local/etc/particle-config.sh" log_success "Configuration file created: /usr/local/etc/particle-config.sh" "apt-layer" fi # Create workspace directory if [[ ! -d "$WORKSPACE" ]]; then log_info "Creating workspace directory..." "apt-layer" mkdir -p "$WORKSPACE" log_success "Workspace directory created: $WORKSPACE" "apt-layer" fi # Create log directory if [[ ! -d "/var/log/particle-os" ]]; then log_info "Creating log directory..." "apt-layer" mkdir -p "/var/log/particle-os" log_success "Log directory created: /var/log/particle-os" "apt-layer" fi # Create cache directory if [[ ! -d "/var/cache/particle-os" ]]; then log_info "Creating cache directory..." "apt-layer" mkdir -p "/var/cache/particle-os" log_success "Cache directory created: /var/cache/particle-os" "apt-layer" fi # Initialize workspace subdirectories init_workspace log_success "Particle-OS system initialization completed successfully!" "apt-layer" echo "" echo "System is now ready for use. 
You can run:" echo " apt-layer --help" echo " apt-layer status" echo " apt-layer --list" } # Main execution main() { # Initialize deployment database init_deployment_db # Check for incomplete transactions first check_incomplete_transactions # Check if system needs initialization (skip for help and initialization commands) if [[ "${1:-}" != "--init" && "${1:-}" != "--reinit" && "${1:-}" != "--rm-init" && "${1:-}" != "--reset" && "${1:-}" != "--status" && "${1:-}" != "--help" && "${1:-}" != "-h" && "${1:-}" != "--help-full" && "${1:-}" != "--examples" && "${1:-}" != "--version" ]]; then check_initialization_needed fi # Parse command line arguments first (before dependency checks) case "${1:-}" in --init) # Initialize apt-layer system initialize_apt_layer_system exit 0 ;; --reinit) # Reinitialize apt-layer system (force recreation) if command -v reinitialize_apt_layer_system >/dev/null 2>&1; then reinitialize_apt_layer_system else log_error "Reinit function not available" "apt-layer" exit 1 fi exit 0 ;; --rm-init) # Remove apt-layer system (cleanup) if command -v remove_apt_layer_system >/dev/null 2>&1; then remove_apt_layer_system else log_error "Remove init function not available" "apt-layer" exit 1 fi exit 0 ;; --status) # Show apt-layer system status if command -v show_apt_layer_system_status >/dev/null 2>&1; then show_apt_layer_system_status else log_error "Status function not available" "apt-layer" exit 1 fi exit 0 ;; --reset) # Reset apt-layer system reset_apt_layer_system exit 0 ;; --help|-h) show_usage exit 0 ;; --help-full) show_full_usage exit 0 ;; --examples) show_examples exit 0 ;; --version) show_version exit 0 ;; layer) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_layer_help exit 0 fi ;; live) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_live_help exit 0 fi ;; rpm-ostree) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_rpm_ostree_help exit 0 fi ;; image) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_image_help exit 0 fi ;; security) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_security_help exit 0 fi ;; audit) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_audit_help exit 0 fi ;; admin) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_admin_help exit 0 fi ;; enterprise) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_enterprise_help exit 0 fi ;; cloud) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_cloud_help exit 0 fi ;; kubernetes) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_cloud_help exit 0 fi ;; daemon) if [[ "${2:-}" == "--help" || "${2:-}" == "-h" ]]; then show_daemon_help exit 0 fi ;; dpkg-analyze) # Deep dpkg analysis and metadata extraction local subcommand="${2:-}" case "$subcommand" in extract) local deb_file="${3:-}" local extract_dir="${4:-}" if [[ -z "$deb_file" ]] || [[ -z "$extract_dir" ]]; then log_error "Debian package and extract directory required" "apt-layer" log_info "Usage: apt-layer dpkg-analyze extract " "apt-layer" show_usage exit 1 fi shift 2 extract_dpkg_metadata "$deb_file" "$extract_dir" ;; analyze) local deb_file="${3:-}" local analysis_dir="${4:-}" if [[ -z "$deb_file" ]]; then log_error "Debian package required" "apt-layer" log_info "Usage: apt-layer dpkg-analyze analyze [analysis-dir]" "apt-layer" show_usage exit 1 fi if [[ -z "$analysis_dir" ]]; then analysis_dir=$(mktemp -d) fi shift 2 analyze_package_comprehensive "$deb_file" "$analysis_dir" ;; validate) local deb_file="${3:-}" local 
validation_mode="${4:-warn}" if [[ -z "$deb_file" ]]; then log_error "Debian package required" "apt-layer" log_info "Usage: apt-layer dpkg-analyze validate [validation-mode]" "apt-layer" show_usage exit 1 fi shift 2 validate_package_for_apt_layer "$deb_file" "$validation_mode" ;; install) local deb_file="${3:-}" local target_dir="${4:-}" local preserve_metadata="${5:-true}" if [[ -z "$deb_file" ]] || [[ -z "$target_dir" ]]; then log_error "Debian package and target directory required" "apt-layer" log_info "Usage: apt-layer dpkg-analyze install [preserve-metadata]" "apt-layer" show_usage exit 1 fi shift 2 dpkg_direct_install_with_metadata "$deb_file" "$target_dir" "$preserve_metadata" ;; *) log_error "Invalid dpkg-analyze subcommand: $subcommand" "apt-layer" log_info "Valid subcommands: extract, analyze, validate, install" "apt-layer" show_usage exit 1 ;; esac exit 0 ;; composefs) # Basic ComposeFS Integration (Phase 2.2) local subcommand="${2:-}" case "$subcommand" in create) local source_dir="${3:-}" local layer_path="${4:-}" local layer_name="${5:-}" if [[ -z "$source_dir" ]] || [[ -z "$layer_path" ]]; then log_error "Source directory and layer path required" "apt-layer" log_info "Usage: apt-layer composefs create [layer-name]" "apt-layer" show_usage exit 1 fi shift 2 create_composefs_layer "$source_dir" "$layer_path" "$layer_name" ;; atomic-create) local source_dir="${3:-}" local layer_path="${4:-}" local layer_name="${5:-}" local preserve_metadata="${6:-true}" local conflict_resolution="${7:-keep-latest}" if [[ -z "$source_dir" ]] || [[ -z "$layer_path" ]]; then log_error "Source directory and layer path required" "apt-layer" log_info "Usage: apt-layer composefs atomic-create [layer-name] [preserve-metadata] [conflict-resolution]" "apt-layer" show_usage exit 1 fi shift 2 atomic_create_composefs_layer "$source_dir" "$layer_path" "$layer_name" "$preserve_metadata" "$conflict_resolution" ;; mount) local layer_path="${3:-}" local mount_point="${4:-}" if [[ -z "$layer_path" ]] || [[ -z "$mount_point" ]]; then log_error "Layer path and mount point required" "apt-layer" log_info "Usage: apt-layer composefs mount " "apt-layer" show_usage exit 1 fi shift 2 mount_composefs_layer "$layer_path" "$mount_point" ;; unmount) local mount_point="${3:-}" if [[ -z "$mount_point" ]]; then log_error "Mount point required" "apt-layer" log_info "Usage: apt-layer composefs unmount " "apt-layer" show_usage exit 1 fi shift 2 unmount_composefs_layer "$mount_point" ;; compose) local base_layer="${3:-}" local overlay_layer="${4:-}" local output_layer="${5:-}" local conflict_resolution="${6:-keep-latest}" if [[ -z "$base_layer" ]] || [[ -z "$overlay_layer" ]] || [[ -z "$output_layer" ]]; then log_error "Base layer, overlay layer, and output layer required" "apt-layer" log_info "Usage: apt-layer composefs compose [conflict-resolution]" "apt-layer" show_usage exit 1 fi shift 2 compose_composefs_layers "$base_layer" "$overlay_layer" "$output_layer" "$conflict_resolution" ;; validate) local layer_path="${3:-}" if [[ -z "$layer_path" ]]; then log_error "Layer path required" "apt-layer" log_info "Usage: apt-layer composefs validate " "apt-layer" show_usage exit 1 fi shift 2 validate_layer_integrity "$layer_path" ;; test) local layer_path="${3:-}" local test_mount_point="${4:-}" if [[ -z "$layer_path" ]]; then log_error "Layer path required" "apt-layer" log_info "Usage: apt-layer composefs test [test-mount-point]" "apt-layer" show_usage exit 1 fi if [[ -z "$test_mount_point" ]]; then test_mount_point=$(mktemp -d) fi shift 
2 test_composefs_layer "$layer_path" "$test_mount_point" ;; rollback) local current_layer="${3:-}" local backup_layer="${4:-}" if [[ -z "$current_layer" ]] || [[ -z "$backup_layer" ]]; then log_error "Current layer and backup layer required" "apt-layer" log_info "Usage: apt-layer composefs rollback " "apt-layer" show_usage exit 1 fi shift 2 rollback_composefs_layer "$current_layer" "$backup_layer" ;; status) shift 2 composefs_status ;; multi-compose) # Multi-layer composition (Phase 2.3) local output_layer="${!#}" local layers=("${@:3:$#-3}") if [[ ${#layers[@]} -lt 2 ]]; then log_error "At least 2 layers required for multi-composition" "apt-layer" log_info "Usage: apt-layer composefs multi-compose ... " "apt-layer" show_usage exit 1 fi shift 2 compose_multiple_layers "${layers[@]}" "$output_layer" ;; deduplicate) local input_layer="${3:-}" local output_layer="${4:-}" local strategy="${5:-content-hash}" if [[ -z "$input_layer" ]] || [[ -z "$output_layer" ]]; then log_error "Input and output layers required" "apt-layer" log_info "Usage: apt-layer composefs deduplicate [strategy]" "apt-layer" show_usage exit 1 fi shift 2 deduplicate_layer "$input_layer" "$output_layer" "$strategy" ;; compress) local input_layer="${3:-}" local output_layer="${4:-}" local compression_type="${5:-gzip}" local compression_level="${6:-6}" if [[ -z "$input_layer" ]] || [[ -z "$output_layer" ]]; then log_error "Input and output layers required" "apt-layer" log_info "Usage: apt-layer composefs compress [type] [level]" "apt-layer" show_usage exit 1 fi shift 2 compress_layer "$input_layer" "$output_layer" "$compression_type" "$compression_level" ;; benchmark) local layer_path="${3:-}" local benchmark_file="${4:-}" if [[ -z "$layer_path" ]]; then log_error "Layer path required" "apt-layer" log_info "Usage: apt-layer composefs benchmark [benchmark-file]" "apt-layer" show_usage exit 1 fi shift 2 benchmark_layer "$layer_path" "$benchmark_file" ;; resolve-conflicts) local base_layer="${3:-}" local new_layer="${4:-}" local output_layer="${5:-}" local conflict_file="${6:-}" if [[ -z "$base_layer" ]] || [[ -z "$new_layer" ]] || [[ -z "$output_layer" ]]; then log_error "Base layer, new layer, and output layer required" "apt-layer" log_info "Usage: apt-layer composefs resolve-conflicts [conflict-file]" "apt-layer" show_usage exit 1 fi shift 2 resolve_conflicts_interactive "$base_layer" "$new_layer" "$output_layer" "$conflict_file" ;; track-relationships) local layer_path="${3:-}" local relationship_file="${4:-}" local parent_layers=("${@:5}") if [[ -z "$layer_path" ]] || [[ -z "$relationship_file" ]]; then log_error "Layer path and relationship file required" "apt-layer" log_info "Usage: apt-layer composefs track-relationships [parent-layers...]" "apt-layer" show_usage exit 1 fi shift 2 track_layer_relationships "$layer_path" "$relationship_file" "${parent_layers[@]}" ;; enhanced-metadata) local source_dir="${3:-}" local metadata_file="${4:-}" local metadata_format="${5:-json}" if [[ -z "$source_dir" ]] || [[ -z "$metadata_file" ]]; then log_error "Source directory and metadata file required" "apt-layer" log_info "Usage: apt-layer composefs enhanced-metadata [format]" "apt-layer" show_usage exit 1 fi shift 2 handle_enhanced_metadata "$source_dir" "$metadata_file" "$metadata_format" ;; *) log_error "Invalid composefs subcommand: $subcommand" "apt-layer" log_info "Valid subcommands: create, atomic-create, mount, unmount, compose, validate, test, rollback, status, multi-compose, deduplicate, compress, benchmark, resolve-conflicts, 
track-relationships, enhanced-metadata" "apt-layer" show_usage exit 1 ;; esac exit 0 ;; production) # Production Integration (Phase 2.4) local subcommand="${2:-}" case "$subcommand" in setup-systemd) local service_name="${3:-apt-layer}" local service_type="${4:-notify}" local user="${5:-root}" shift 2 setup_systemd_integration "$service_name" "$service_type" "$user" ;; setup-grub) local grub_config="${3:-/etc/default/grub}" local grub_cfg="${4:-/boot/grub/grub.cfg}" shift 2 setup_grub_integration "$grub_config" "$grub_cfg" ;; setup-systemd-boot) local esp_path="${3:-/boot/efi}" shift 2 setup_systemd_boot_integration "$esp_path" ;; create-deployment) local deployment_name="${3:-}" local base_layer="${4:-}" local additional_layers=("${@:5}") if [[ -z "$deployment_name" ]] || [[ -z "$base_layer" ]]; then log_error "Deployment name and base layer required" "apt-layer" log_info "Usage: apt-layer production create-deployment [additional-layers...]" "apt-layer" show_usage exit 1 fi shift 2 create_deployment "$deployment_name" "$base_layer" "${additional_layers[@]}" ;; deploy) local deployment_name="${3:-}" if [[ -z "$deployment_name" ]]; then log_error "Deployment name required" "apt-layer" log_info "Usage: apt-layer production deploy " "apt-layer" show_usage exit 1 fi shift 2 deploy_deployment "$deployment_name" ;; rollback) local target_deployment="${3:-}" shift 2 rollback_deployment "$target_deployment" ;; health-check) local deployment_name="${3:-}" shift 2 check_deployment_health "$deployment_name" ;; status) shift 2 production_status ;; list-deployments) shift 2 list_deployments ;; backup-deployment) local deployment_name="${3:-}" if [[ -z "$deployment_name" ]]; then deployment_name=$(get_current_deployment) if [[ -z "$deployment_name" ]]; then log_error "No deployment specified and no current deployment found" "apt-layer" exit 1 fi fi shift 2 create_deployment_backup "$deployment_name" ;; *) log_error "Invalid production subcommand: $subcommand" "apt-layer" log_info "Valid subcommands: setup-systemd, setup-grub, setup-systemd-boot, create-deployment, deploy, rollback, health-check, status, list-deployments, backup-deployment" "apt-layer" show_usage exit 1 ;; esac exit 0 ;; --list) list_branches exit 0 ;; --info) if [ -z "${2:-}" ]; then log_error "Image name required for --info" "apt-layer" show_usage exit 1 fi show_branch_info "$2" exit 0 ;; --remove) if [ -z "${2:-}" ]; then log_error "Image name required for --remove" "apt-layer" show_usage exit 1 fi remove_image "$2" exit 0 ;; --oci-status) # Show OCI integration system status oci_status exit 0 ;; --live-overlay) # Live overlay management require_root "live overlay management" if [ -z "${2:-}" ]; then log_error "Action required for --live-overlay" "apt-layer" show_usage exit 1 fi local action="$2" shift 2 local options=("$@") manage_live_overlay "$action" "${options[@]}" ;; --live-install) # Live system installation require_root "live system installation" if [ $# -lt 2 ]; then log_error "No packages specified for --live-install" "apt-layer" show_usage exit 1 fi shift local packages=("$@") live_install "${packages[@]}" ;; --live-dpkg) # Live system dpkg installation (offline/overlay optimized) require_root "live system dpkg installation" if [ $# -lt 2 ]; then log_error "No .deb files specified for --live-dpkg" "apt-layer" show_usage exit 1 fi shift local deb_files=("$@") live_dpkg_install "${deb_files[@]}" ;; --live-commit) # Commit live overlay changes require_root "live overlay commit" local message="${2:-Live overlay changes}" 
commit_live_overlay "$message" ;; --live-rollback) # Rollback live overlay changes require_root "live overlay rollback" rollback_live_overlay ;; orchestration) # Container orchestration if [ -z "${2:-}" ]; then log_error "Action required for orchestration" "apt-layer" show_usage exit 1 fi local action="$2" shift 2 local args=("$@") handle_orchestration_command "$action" "${args[@]}" exit 0 ;; multicloud) # Multi-cloud deployment if [ -z "${2:-}" ]; then log_error "Action required for multicloud" "apt-layer" show_usage exit 1 fi local action="$2" shift 2 local args=("$@") handle_multicloud_command "$action" "${args[@]}" exit 0 ;; cloud-security) # Cloud-native security if [ -z "${2:-}" ]; then log_error "Action required for cloud-security" "apt-layer" show_usage exit 1 fi local action="$2" shift 2 local args=("$@") handle_cloud_security_command "$action" "${args[@]}" exit 0 ;; ostree) # OSTree atomic package management interface local subcommand="${2:-}" case "$subcommand" in rebase) local new_base="${3:-}" local deployment_name="${4:-current}" if [[ -z "$new_base" ]]; then log_error "Base image required for rebase" "apt-layer" log_info "Usage: apt-layer ostree rebase [deployment-name]" "apt-layer" show_usage exit 1 fi shift 2 ostree_rebase "$new_base" "$deployment_name" ;; layer) shift 2 if [[ $# -eq 0 ]]; then log_error "Packages required for layering" "apt-layer" log_info "Usage: apt-layer ostree layer [package2] ..." "apt-layer" show_usage exit 1 fi ostree_layer "$@" ;; override) local package_name="${3:-}" local override_path="${4:-}" if [[ -z "$package_name" ]] || [[ -z "$override_path" ]]; then log_error "Package name and override path required" "apt-layer" log_info "Usage: apt-layer ostree override " "apt-layer" show_usage exit 1 fi shift 2 ostree_override "$package_name" "$override_path" ;; deploy) local deployment_name="${3:-}" if [[ -z "$deployment_name" ]]; then log_error "Deployment name required" "apt-layer" log_info "Usage: apt-layer ostree deploy " "apt-layer" show_usage exit 1 fi shift 2 ostree_deploy "$deployment_name" ;; compose) local compose_action="${3:-}" shift 3 case "$compose_action" in tree) local config_file="${1:-}" if [[ -z "$config_file" ]]; then log_error "Configuration file required" "apt-layer" log_info "Usage: apt-layer ostree compose tree " "apt-layer" show_usage exit 1 fi ostree_compose_tree "$config_file" ;; install) ostree_compose_install "$@" ;; remove) ostree_compose_remove "$@" ;; update) ostree_compose_update "$@" ;; *) log_error "Invalid compose action: $compose_action" "apt-layer" log_info "Valid actions: tree, install, remove, update" "apt-layer" show_usage exit 1 ;; esac ;; layer-metadata) local package="${3:-}" local preserve_metadata="${4:-true}" local resolve_conflicts="${5:-keep-latest}" if [[ -z "$package" ]]; then log_error "Package required for metadata-aware layering" "apt-layer" log_info "Usage: apt-layer ostree layer-metadata [preserve-metadata] [resolve-conflicts]" "apt-layer" show_usage exit 1 fi shift 2 ostree_layer_with_metadata "$package" "$preserve_metadata" "$resolve_conflicts" ;; layer-multiarch) local package="${3:-}" local arch="${4:-amd64}" local multiarch_type="${5:-same}" if [[ -z "$package" ]]; then log_error "Package required for multi-arch layering" "apt-layer" log_info "Usage: apt-layer ostree layer-multiarch [arch] [multiarch-type]" "apt-layer" show_usage exit 1 fi shift 2 ostree_layer_multiarch "$package" "$arch" "$multiarch_type" ;; layer-scripts) local package="${3:-}" local script_context="${4:-offline}" if [[ -z 
"$package" ]]; then log_error "Package required for script-aware layering" "apt-layer" log_info "Usage: apt-layer ostree layer-scripts [script-context]" "apt-layer" show_usage exit 1 fi shift 2 ostree_layer_with_script_validation "$package" "$script_context" ;; log) shift 2 ostree_log "$@" ;; diff) shift 2 ostree_diff "$@" ;; rollback) shift 2 ostree_rollback "$@" ;; status) shift 2 ostree_status "$@" ;; *) log_error "Invalid ostree subcommand: $subcommand" "apt-layer" log_info "Valid subcommands: rebase, layer, override, deploy, compose, layer-metadata, layer-multiarch, layer-scripts, log, diff, rollback, status" "apt-layer" show_usage exit 1 ;; esac exit 0 ;; daemon) # Daemon management and integration local subcommand="${2:-}" case "$subcommand" in start) shift 2 start_daemon ;; stop) shift 2 stop_daemon ;; status) shift 2 show_daemon_status ;; install) shift 2 install_daemon ;; uninstall) shift 2 uninstall_daemon ;; test) shift 2 test_daemon ;; stress-test) shift 2 stress_test_daemon ;; error-recovery) shift 2 test_error_recovery ;; performance) shift 2 test_daemon_performance ;; comprehensive) shift 2 run_comprehensive_test ;; layer) shift 2 if [[ $# -eq 0 ]]; then log_error "Packages required for daemon layering" "apt-layer" log_info "Usage: apt-layer daemon layer [package2] ..." "apt-layer" show_usage exit 1 fi daemon_layer_packages "$@" ;; deploy) local deployment_name="${3:-}" local revision="${4:-}" if [[ -z "$deployment_name" ]]; then log_error "Deployment name required" "apt-layer" log_info "Usage: apt-layer daemon deploy [revision]" "apt-layer" show_usage exit 1 fi shift 2 daemon_deploy "$deployment_name" "$revision" ;; upgrade) shift 2 daemon_upgrade ;; rollback) shift 2 daemon_rollback ;; *) log_error "Invalid daemon subcommand: $subcommand" "apt-layer" log_info "Valid subcommands: start, stop, status, install, uninstall, test, layer, deploy, upgrade, rollback" "apt-layer" show_usage exit 1 ;; esac exit 0 ;; maintenance) # Run maintenance tasks shift 1 run_maintenance ;; *) # Check for empty argument if [ -z "${1:-}" ]; then log_error "No command specified" "apt-layer" show_usage exit 1 fi # Regular layer creation (legacy mode) if [ $# -lt 2 ]; then log_error "Insufficient arguments for layer creation" "apt-layer" show_usage exit 1 fi local base_image="$1" local new_image="$2" shift 2 local packages=("$@") create_layer "$base_image" "$new_image" "${packages[@]}" ;; esac } # Run main function if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then main "$@" fi # --- END OF SCRIPTLET: 99-main.sh --- # ============================================================================ # Embedded Configuration Files # ============================================================================ # Enterprise-grade JSON configuration system # All configuration files are embedded for self-contained operation # Configuration can be overridden via command-line arguments # Configuration files to be embedded: # - apt-layer-settings.json # - audit-settings.json # - backup-policy.json # - dependencies.json # - kernel-modules.json # - kernel-patches.json # - maintenance.json # - oci-settings.json # - package-management.json # - paths.json # - security-policy.json # - signing-policy.json # - users.json # Embedded configuration: apt-layer-settings # File size: 670 APT_LAYER_SETTINGS_CONFIG=$(cat << 'EOF' { "default_container_runtime": "podman", "default_workspace": "/var/lib/particle-os", "feature_toggles": { "live_overlay": true, "oci_integration": true, "security_scanning": true, "audit_reporting": true, 
"layer_signing": true }, "container_runtimes": { "supported": [ "podman", "docker" ], "preference_order": [ "podman", "docker" ], "podman": { "description": "Rootless container runtime (default, recommended)", "primary_use": "general" }, "docker": { "description": "Traditional container runtime (fallback)", "primary_use": "fallback" } }, "log_level": "info", "color_output": true } EOF ) # Embedded configuration: audit-settings # File size: 166 AUDIT_SETTINGS_CONFIG=$(cat << 'EOF' { "log_retention_days": 90, "remote_log_endpoint": "https://audit.example.com/submit", "compliance_frameworks": [ "SOX", "PCI-DSS" ], "log_verbosity": "info" } EOF ) # Embedded configuration: backup-policy # File size: 158 BACKUP_POLICY_CONFIG=$(cat << 'EOF' { "backup_frequency": "weekly", "retention_days": 60, "compression": true, "encryption": false, "backup_location": "/var/lib/particle-os/backups" } EOF ) # Embedded configuration: dependencies # File size: 1.2K DEPENDENCIES_CONFIG=$(cat << 'EOF' { "core": [ "chroot", "apt-get", "dpkg", "jq", "mount", "umount", "findmnt", "numfmt" ], "container": [ "podman", "docker" ], "oci": [ "skopeo" ], "composefs": [ "mkcomposefs", "composefs-info", "mount.composefs", "mksquashfs", "unsquashfs" ], "composefs_packages": [ "composefs", "libcomposefs1" ], "bootloader": [ "efibootmgr", "grub-install", "update-grub", "bootctl" ], "security": [ "curl", "wget", "gpg" ], "package_install_commands": { "debian": { "composefs": "apt install -y composefs libcomposefs1", "container": "apt install -y podman docker.io", "oci": "apt install -y skopeo", "bootloader": "apt install -y efibootmgr grub-common systemd-boot", "core": "apt install -y squashfs-tools jq coreutils util-linux" }, "fedora": { "composefs": "dnf install -y composefs composefs-libs", "container": "dnf install -y podman docker", "oci": "dnf install -y skopeo", "bootloader": "dnf install -y efibootmgr grub2-tools systemd-boot", "core": "dnf install -y squashfs-tools jq coreutils util-linux" } } } EOF ) # Embedded configuration: kernel-modules # File size: 6.1K KERNEL_MODULES_CONFIG=$(cat << 'EOF' { "kernel_modules": { "common": { "description": "Common kernel modules for general hardware support", "modules": { "v4l2loopback": { "description": "Virtual video devices for screen recording and streaming", "package": "v4l2loopback-dkms", "kernel_args": [], "enabled": true }, "gpd-fan-kmod": { "description": "GPD Win Max fan control and thermal management", "package": "gpd-fan-kmod", "kernel_args": [], "enabled": false }, "nct6687d": { "description": "Nuvoton NCT6687-R support for AMD B550 chipset motherboards", "package": "nct6687d-dkms", "kernel_args": [], "enabled": false }, "ryzen-smu": { "description": "AMD Ryzen SMU (System Management Unit) access", "package": "ryzen-smu-dkms", "kernel_args": [], "enabled": false }, "system76": { "description": "System76 laptop drivers and hardware support", "package": "system76-dkms", "kernel_args": [], "enabled": false }, "zenergy": { "description": "AMD energy monitoring with jiffies for non-root access", "package": "zenergy-dkms", "kernel_args": [], "enabled": false } } }, "nvidia": { "description": "NVIDIA GPU driver support", "modules": { "nvidia": { "description": "NVIDIA closed proprietary drivers for legacy hardware", "package": "nvidia-driver-535", "kernel_args": [ "nvidia-drm.modeset=1" ], "enabled": false, "hardware_support": { "geforce_rtx": [ "40", "30", "20" ], "geforce": [ "16", "10", "900", "700" ], "quadro": [ "T4", "T4G", "P2000", "P4000", "P5000", "P6000", "K2200", 
"M2000", "M4000", "M5000", "M6000" ], "tesla": [ "T4", "T4G", "V100", "P100", "P40", "P4", "M60", "M40", "M6", "M4" ] } }, "nvidia-open": { "description": "NVIDIA open source drivers for latest hardware", "package": "nvidia-driver-open-535", "kernel_args": [ "nvidia-drm.modeset=1" ], "enabled": false, "hardware_support": { "geforce_rtx": [ "50", "40", "30", "20" ], "geforce": [ "16" ] } } } }, "gaming": { "description": "Gaming-specific kernel modules and optimizations", "modules": { "steam-deck": { "description": "Steam Deck specific optimizations and patches", "package": "steam-deck-dkms", "kernel_args": [ "steam_deck.fan_control=1" ], "enabled": false }, "gaming-peripherals": { "description": "Gaming mouse, keyboard, and controller support", "package": "gaming-peripherals-dkms", "kernel_args": [], "enabled": false } } }, "virtualization": { "description": "Virtualization and container support", "modules": { "virtualbox": { "description": "VirtualBox virtualization support", "package": "virtualbox-dkms", "kernel_args": [], "enabled": false }, "vmware": { "description": "VMware virtualization support", "package": "open-vm-tools-dkms", "kernel_args": [], "enabled": false }, "docker": { "description": "Docker container support", "package": "docker-dkms", "kernel_args": [], "enabled": false } } }, "storage": { "description": "Advanced storage and filesystem support", "modules": { "zfs": { "description": "OpenZFS advanced file system and volume manager", "package": "zfs-dkms", "kernel_args": [], "enabled": false }, "btrfs": { "description": "Btrfs filesystem support", "package": "btrfs-dkms", "kernel_args": [], "enabled": false } } }, "network": { "description": "Network adapter and protocol support", "modules": { "intel-nic": { "description": "Intel network interface card support", "package": "intel-nic-dkms", "kernel_args": [], "enabled": false }, "broadcom-nic": { "description": "Broadcom network interface card support", "package": "broadcom-nic-dkms", "kernel_args": [], "enabled": false } } } }, "kernel_variants": { "ubuntu-generic": { "description": "Ubuntu generic kernel", "headers_package": "linux-headers-generic", "image_package": "linux-image-generic", "enabled": true }, "ubuntu-generic-hwe": { "description": "Ubuntu generic HWE kernel", "headers_package": "linux-headers-generic-hwe-24.04", "image_package": "linux-image-generic-hwe-24.04", "enabled": true }, "ubuntu-lowlatency": { "description": "Ubuntu low latency kernel", "headers_package": "linux-headers-lowlatency", "image_package": "linux-image-lowlatency", "enabled": false }, "ubuntu-lowlatency-hwe": { "description": "Ubuntu low latency HWE kernel", "headers_package": "linux-headers-lowlatency-hwe-24.04", "image_package": "linux-image-lowlatency-hwe-24.04", "enabled": false } }, "hardware_detection": { "auto_detect": true, "detection_scripts": { "gpu": "/usr/local/bin/detect-gpu.sh", "cpu": "/usr/local/bin/detect-cpu.sh", "motherboard": "/usr/local/bin/detect-motherboard.sh", "storage": "/usr/local/bin/detect-storage.sh", "network": "/usr/local/bin/detect-network.sh" } }, "build_configuration": { "containerized_builds": true, "build_timeout": 3600, "parallel_builds": 2, "cache_built_modules": true, "cache_directory": "/var/cache/particle-os/dkms", "build_logs_directory": "/var/log/particle-os/dkms" } } EOF ) # Embedded configuration: kernel-patches # File size: 15K KERNEL_PATCHES_CONFIG=$(cat << 'EOF' { "kernel_patches": { "gaming": { "description": "Gaming performance and compatibility patches", "enabled": true, "patches": { 
"steam-deck": { "description": "Steam Deck specific optimizations for Ubuntu", "url": "https://github.com/ValveSoftware/linux-kernel/raw/steamdeck-6.1.y/patches/0001-steam-deck-optimizations.patch", "enabled": false, "hardware_requirements": [ "steam_deck" ], "kernel_args": [ "steam_deck.fan_control=1", "steam_deck.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Valve's official Steam Deck kernel patches" }, "handheld": { "description": "Handheld device optimizations for Ubuntu", "url": "https://github.com/linux-surface/linux-surface/raw/master/patches/5.15/0001-handheld-optimizations.patch", "enabled": false, "hardware_requirements": [ "handheld_device" ], "kernel_args": [ "handheld.fan_control=1", "handheld.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1" ], "note": "Linux Surface project patches for handheld devices" }, "gaming-performance": { "description": "General gaming performance optimizations for Ubuntu", "url": "https://github.com/graysky2/kernel_gcc_patch/raw/master/enable_additional_cpu_optimizations_for_gcc_v12.1%2B_kernel_v5.15%2B.patch", "enabled": true, "kernel_args": [ "gaming.performance_mode=1", "gaming.low_latency=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Graysky2's CPU optimization patches for gaming" }, "wine-compatibility": { "description": "Wine and Proton compatibility improvements for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-wine-compatibility.patch", "enabled": true, "kernel_args": [ "wine.compatibility_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's Wine compatibility patches" } } }, "hardware": { "description": "Hardware-specific support patches for Ubuntu", "enabled": true, "patches": { "amd-optimizations": { "description": "AMD CPU and GPU optimizations for Ubuntu", "url": "https://github.com/graysky2/kernel_gcc_patch/raw/master/enable_additional_cpu_optimizations_for_gcc_v12.1%2B_kernel_v5.15%2B.patch", "enabled": false, "hardware_requirements": [ "amd_cpu", "amd_gpu" ], "kernel_args": [ "amd.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Graysky2's AMD optimization patches" }, "intel-optimizations": { "description": "Intel CPU and GPU optimizations for Ubuntu", "url": "https://github.com/graysky2/kernel_gcc_patch/raw/master/enable_additional_cpu_optimizations_for_gcc_v12.1%2B_kernel_v5.15%2B.patch", "enabled": false, "hardware_requirements": [ "intel_cpu", "intel_gpu" ], "kernel_args": [ "intel.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Graysky2's Intel optimization patches" }, "nvidia-optimizations": { "description": "NVIDIA GPU optimizations for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-nvidia-optimizations.patch", "enabled": false, "hardware_requirements": [ "nvidia_gpu" ], "kernel_args": [ "nvidia.performance_mode=1", "nvidia.drm.modeset=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's NVIDIA optimization patches" }, "system76": { "description": "System76 hardware support for Ubuntu", "url": "https://github.com/system76/linux/raw/master/patches/0001-system76-ubuntu.patch", "enabled": false, "hardware_requirements": [ "system76_hardware" ], "kernel_args": [ "system76.fan_control=1" ], 
"ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "System76's official Ubuntu kernel patches" } } }, "performance": { "description": "General performance optimizations for Ubuntu", "enabled": true, "patches": { "cpu-scheduler": { "description": "CPU scheduler optimizations for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-cpu-scheduler-optimizations.patch", "enabled": true, "kernel_args": [ "sched.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's CPU scheduler patches" }, "memory-management": { "description": "Memory management optimizations for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-memory-management-optimizations.patch", "enabled": true, "kernel_args": [ "vm.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's memory management patches" }, "io-scheduler": { "description": "I/O scheduler optimizations for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-io-scheduler-optimizations.patch", "enabled": true, "kernel_args": [ "elevator=bfq" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's I/O scheduler patches" }, "network-optimizations": { "description": "Network performance optimizations for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-network-optimizations.patch", "enabled": true, "kernel_args": [ "net.core.rmem_max=16777216", "net.core.wmem_max=16777216" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's network optimization patches" } } }, "security": { "description": "Security and hardening patches for Ubuntu", "enabled": true, "patches": { "security-hardening": { "description": "General security hardening for Ubuntu", "url": "https://github.com/Ubuntu/linux/raw/master/security/0001-security-hardening.patch", "enabled": true, "kernel_args": [ "security.hardening=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official security hardening patches" }, "spectre-meltdown": { "description": "Spectre and Meltdown mitigations for Ubuntu", "url": "https://github.com/Ubuntu/linux/raw/master/security/0001-spectre-meltdown-mitigations.patch", "enabled": true, "kernel_args": [ "spectre_v2=on", "meltdown=on" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official Spectre/Meltdown mitigation patches" } } }, "compatibility": { "description": "Software compatibility patches for Ubuntu", "enabled": true, "patches": { "wine": { "description": "Wine compatibility improvements for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-wine-compatibility.patch", "enabled": true, "kernel_args": [ "wine.compatibility=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's Wine compatibility patches" }, "proton": { "description": "Proton compatibility improvements for Ubuntu", "url": "https://github.com/Frogging-Family/linux-tkg/raw/master/patches/0001-proton-compatibility.patch", "enabled": true, "kernel_args": [ "proton.compatibility=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Frogging-Family's Proton compatibility patches" }, "virtualization": { "description": 
"Virtualization compatibility for Ubuntu", "url": "https://github.com/Ubuntu/linux/raw/master/virtualization/0001-virtualization-compatibility.patch", "enabled": true, "kernel_args": [ "virtualization.compatibility=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official virtualization compatibility patches" } } }, "ubuntu_specific": { "description": "Ubuntu-specific patches and optimizations", "enabled": true, "patches": { "ubuntu-gaming": { "description": "Ubuntu gaming optimizations", "url": "https://github.com/Ubuntu/linux/raw/master/gaming/0001-ubuntu-gaming-optimizations.patch", "enabled": true, "kernel_args": [ "ubuntu.gaming_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official gaming optimization patches" }, "ubuntu-performance": { "description": "Ubuntu performance optimizations", "url": "https://github.com/Ubuntu/linux/raw/master/performance/0001-ubuntu-performance-optimizations.patch", "enabled": true, "kernel_args": [ "ubuntu.performance_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official performance optimization patches" }, "ubuntu-desktop": { "description": "Ubuntu desktop optimizations", "url": "https://github.com/Ubuntu/linux/raw/master/desktop/0001-ubuntu-desktop-optimizations.patch", "enabled": true, "kernel_args": [ "ubuntu.desktop_mode=1" ], "ubuntu_compatible": true, "kernel_versions": [ "5.15", "6.1", "6.2" ], "note": "Ubuntu's official desktop optimization patches" } } } }, "patch_application": { "auto_apply": true, "backup_patches": true, "patch_directory": "/var/lib/particle-os/kernel-patches", "backup_directory": "/var/lib/particle-os/kernel-patches/backup", "log_directory": "/var/log/particle-os/kernel-patches", "ubuntu_specific": { "use_dkms": true, "use_dkpg": true, "patch_format": "diff", "apply_method": "patch -p1" } }, "kernel_variants": { "ubuntu-generic": { "description": "Ubuntu generic kernel with patches", "base_package": "linux-generic", "headers_package": "linux-headers-generic", "patches_enabled": [ "gaming", "performance", "security", "compatibility", "ubuntu_specific" ], "enabled": true, "ubuntu_version": "24.04" }, "ubuntu-generic-hwe": { "description": "Ubuntu generic HWE kernel with patches", "base_package": "linux-generic-hwe-24.04", "headers_package": "linux-headers-generic-hwe-24.04", "patches_enabled": [ "gaming", "performance", "security", "compatibility", "ubuntu_specific" ], "enabled": true, "ubuntu_version": "24.04" }, "ubuntu-lowlatency": { "description": "Ubuntu low latency kernel with patches", "base_package": "linux-lowlatency", "headers_package": "linux-headers-lowlatency", "patches_enabled": [ "gaming", "performance", "security", "compatibility", "ubuntu_specific" ], "enabled": false, "ubuntu_version": "24.04" }, "ubuntu-lowlatency-hwe": { "description": "Ubuntu low latency HWE kernel with patches", "base_package": "linux-lowlatency-hwe-24.04", "headers_package": "linux-headers-lowlatency-hwe-24.04", "patches_enabled": [ "gaming", "performance", "security", "compatibility", "ubuntu_specific" ], "enabled": false, "ubuntu_version": "24.04" } }, "hardware_detection": { "auto_detect_patches": true, "detection_scripts": { "steam_deck": "/usr/local/bin/detect-steam-deck.sh", "handheld_device": "/usr/local/bin/detect-handheld.sh", "amd_cpu": "/usr/local/bin/detect-amd-cpu.sh", "amd_gpu": "/usr/local/bin/detect-amd-gpu.sh", "intel_cpu": "/usr/local/bin/detect-intel-cpu.sh", 
"intel_gpu": "/usr/local/bin/detect-intel-gpu.sh", "nvidia_gpu": "/usr/local/bin/detect-nvidia-gpu.sh", "system76_hardware": "/usr/local/bin/detect-system76.sh" } }, "ubuntu_integration": { "use_ubuntu_repositories": true, "ubuntu_kernel_sources": "https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/linux/+git/", "ubuntu_patch_workflow": "https://wiki.ubuntu.com/Kernel/Dev/KernelMaintenance", "ubuntu_kernel_team": "https://launchpad.net/~ubuntu-kernel", "ubuntu_kernel_ppa": "ppa:ubuntu-kernel-team/ppa" }, "patch_notes": { "important": "These patch URLs are examples and may not all exist. In a real implementation, you would need to verify each patch URL and ensure it's compatible with your specific Ubuntu kernel version.", "recommendations": [ "Use Ubuntu's official kernel patches when available", "Verify patch compatibility before applying", "Test patches in a safe environment first", "Keep backups of kernel configuration", "Use DKMS for kernel module patches when possible" ], "real_sources": [ "https://github.com/graysky2/kernel_gcc_patch - Real CPU optimization patches", "https://github.com/Frogging-Family/linux-tkg - Real gaming kernel patches", "https://github.com/ValveSoftware/linux-kernel - Real Steam Deck patches", "https://github.com/linux-surface/linux-surface - Real Surface/handheld patches", "https://github.com/system76/linux - Real System76 patches" ] } } EOF ) # Embedded configuration: maintenance # File size: 189 MAINTENANCE_CONFIG=$(cat << 'EOF' { "retention_days": 30, "keep_recent": 2, "deployments_dir": "/var/lib/particle-os/deployments", "logs_dir": "/var/log/apt-layer", "backups_dir": "/var/lib/particle-os/backups" } EOF ) # Embedded configuration: oci-settings # File size: 193 OCI_SETTINGS_CONFIG=$(cat << 'EOF' { "registry_url": "docker.io/particleos", "allowed_base_images": [ "ubuntu:24.04", "debian:12" ], "authentication": { "username": "particlebot", "password": "examplepassword" } } EOF ) # Embedded configuration: package-management # File size: 180 PACKAGE_MANAGEMENT_CONFIG=$(cat << 'EOF' { "allowed_repositories": [ "main", "universe", "multiverse" ], "dependency_resolution": "strict", "package_pinning": { "firefox": "125.0.1", "steam": "1.0.0.75" } } EOF ) # Embedded configuration: paths # File size: 4.6K PATHS_CONFIG=$(cat << 'EOF' { "apt_layer_paths": { "description": "apt-layer system path configuration", "version": "1.0", "main_directories": { "workspace": { "path": "/var/lib/apt-layer", "description": "Main apt-layer workspace directory", "permissions": "755", "owner": "root:root" }, "logs": { "path": "/var/log/apt-layer", "description": "apt-layer log files directory", "permissions": "755", "owner": "root:root" }, "cache": { "path": "/var/cache/apt-layer", "description": "apt-layer cache directory", "permissions": "755", "owner": "root:root" } }, "workspace_subdirectories": { "build": { "path": "/var/lib/apt-layer/build", "description": "Build artifacts and temporary files", "permissions": "755", "owner": "root:root" }, "live_overlay": { "path": "/var/lib/apt-layer/live-overlay", "description": "Live overlay system for temporary changes", "permissions": "755", "owner": "root:root" }, "composefs": { "path": "/var/lib/apt-layer/composefs", "description": "ComposeFS layers and metadata", "permissions": "755", "owner": "root:root" }, "ostree_commits": { "path": "/var/lib/apt-layer/ostree-commits", "description": "OSTree commit history and metadata", "permissions": "755", "owner": "root:root" }, "deployments": { "path": "/var/lib/apt-layer/deployments", 
"description": "Deployment directories and state", "permissions": "755", "owner": "root:root" }, "history": { "path": "/var/lib/apt-layer/history", "description": "Deployment history and rollback data", "permissions": "755", "owner": "root:root" }, "bootloader": { "path": "/var/lib/apt-layer/bootloader", "description": "Bootloader state and configuration", "permissions": "755", "owner": "root:root" }, "transaction_state": { "path": "/var/lib/apt-layer/transaction-state", "description": "Transaction state and temporary data", "permissions": "755", "owner": "root:root" } }, "files": { "deployment_db": { "path": "/var/lib/apt-layer/deployments.json", "description": "Deployment database file", "permissions": "644", "owner": "root:root" }, "current_deployment": { "path": "/var/lib/apt-layer/current-deployment", "description": "Current deployment identifier file", "permissions": "644", "owner": "root:root" }, "pending_deployment": { "path": "/var/lib/apt-layer/pending-deployment", "description": "Pending deployment identifier file", "permissions": "644", "owner": "root:root" }, "transaction_log": { "path": "/var/lib/apt-layer/transaction.log", "description": "Transaction log file", "permissions": "644", "owner": "root:root" } }, "fallback_paths": { "description": "Fallback paths for different environments", "wsl": { "workspace": "/mnt/wsl/apt-layer", "logs": "/mnt/wsl/apt-layer/logs", "cache": "/mnt/wsl/apt-layer/cache" }, "container": { "workspace": "/tmp/apt-layer", "logs": "/tmp/apt-layer/logs", "cache": "/tmp/apt-layer/cache" }, "test": { "workspace": "/tmp/apt-layer-test", "logs": "/tmp/apt-layer-test/logs", "cache": "/tmp/apt-layer-test/cache" } }, "environment_variables": { "description": "Environment variable mappings", "APT_LAYER_WORKSPACE": "workspace", "APT_LAYER_LOG_DIR": "logs", "APT_LAYER_CACHE_DIR": "cache", "BUILD_DIR": "workspace_subdirectories.build", "LIVE_OVERLAY_DIR": "workspace_subdirectories.live_overlay", "COMPOSEFS_DIR": "workspace_subdirectories.composefs", "OSTREE_COMMITS_DIR": "workspace_subdirectories.ostree_commits", "DEPLOYMENTS_DIR": "workspace_subdirectories.deployments", "HISTORY_DIR": "workspace_subdirectories.history", "BOOTLOADER_STATE_DIR": "workspace_subdirectories.bootloader", "TRANSACTION_STATE": "workspace_subdirectories.transaction_state", "DEPLOYMENT_DB": "files.deployment_db", "CURRENT_DEPLOYMENT_FILE": "files.current_deployment", "PENDING_DEPLOYMENT_FILE": "files.pending_deployment", "TRANSACTION_LOG": "files.transaction_log" } } } EOF ) # Embedded configuration: security-policy # File size: 197 SECURITY_POLICY_CONFIG=$(cat << 'EOF' { "require_gpg_signature": true, "allowed_packages": [ "firefox", "steam", "vscode" ], "blocked_packages": [ "telnet", "ftp" ], "vulnerability_threshold": "high", "enforce_signature": true } EOF ) # Embedded configuration: signing-policy # File size: 166 SIGNING_POLICY_CONFIG=$(cat << 'EOF' { "allowed_methods": [ "gpg", "sigstore" ], "trusted_keys": [ "key1.gpg", "key2.sigstore" ], "require_signature": true, "revocation_list": [ "revoked-key1.gpg" ] } EOF ) # Embedded configuration: users # File size: 264 USERS_CONFIG=$(cat << 'EOF' { "users": [ { "username": "admin", "role": "admin", "enabled": true }, { "username": "john", "role": "package_manager", "enabled": true }, { "username": "jane", "role": "viewer", "enabled": false } ], "roles": [ "admin", "package_manager", "viewer" ] } EOF ) # ============================================================================ # External Configuration Loading (Future Enhancement) # 
# ============================================================================

# Function to load configuration from external files
# Usage: load_config_from_file "config-name"
load_config_from_file() {
    local config_name="$1"
    local config_file="/etc/apt-layer/config/${config_name}.json"

    if [[ -f "$config_file" ]]; then
        jq -r '.' "$config_file"
    else
        log_error "Configuration file not found: $config_file" "apt-layer"
        exit 1
    fi
}

# ============================================================================
# Main Execution
# ============================================================================

# Run main function if script is executed directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi
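
# ============================================================================
# Example: Consuming Embedded/External Configuration (illustrative sketch)
# ============================================================================
# The helper below is a hypothetical sketch, not part of the generated tool.
# It shows how a caller might combine load_config_from_file (the future
# external override) with the embedded SECURITY_POLICY_CONFIG default to
# decide whether a package may be layered. The function name
# is_package_allowed and the "security-policy" config name are assumptions
# based on the embedded configuration above. Defined after the main
# execution block, it has no effect on the tool's normal behavior.
is_package_allowed() {
    local package="$1"
    local policy

    # Prefer an external override; fall back to the embedded default.
    # (load_config_from_file's exit 1 only terminates the $(...) subshell.)
    policy=$(load_config_from_file "security-policy" 2>/dev/null) || policy="$SECURITY_POLICY_CONFIG"

    # Blocked packages always lose, even if they also appear in the allow list.
    if echo "$policy" | jq -e --arg p "$package" '.blocked_packages | index($p)' >/dev/null; then
        return 1
    fi

    # Succeed only if the package is explicitly allowed.
    echo "$policy" | jq -e --arg p "$package" '.allowed_packages | index($p)' >/dev/null
}

# Usage sketch:
#   is_package_allowed "firefox" && log_info "firefox may be layered" "apt-layer"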