diff --git a/.gitignore b/.gitignore
index 23eb9cf..991663d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,7 +20,9 @@ tmp/
 *.tmp
 
 # AI-generated fix scripts and temporary work
+.scratchpad/
 scratchpad/
+
 # Compiled scripts (these are generated from source)
 # Uncomment if you want to exclude compiled scripts
diff --git a/OFFICIAL_COMPOSEFS_MILESTONE.md b/OFFICIAL_COMPOSEFS_MILESTONE.md
deleted file mode 100644
index 4e24757..0000000
--- a/OFFICIAL_COMPOSEFS_MILESTONE.md
+++ /dev/null
@@ -1,143 +0,0 @@
-# 🎉 Major Milestone: Official ComposeFS Integration Complete
-
-**Date**: January 27, 2025
-**Status**: ✅ **COMPLETED**
-
-## 🎯 What Was Accomplished
-
-### **Official ComposeFS Tools Integration**
-- ✅ **Official ComposeFS Tools Working**: Successfully tested and functional
-- ✅ **Automatic Backend Selection**: Particle-OS detects and uses official tools when available
-- ✅ **Fallback Support**: Alternative implementation available if needed
-- ✅ **Production Ready**: Native C implementation with kernel optimizations
-
-### **Alternative Implementation Archived**
-- ✅ **composefs-alternative.sh ARCHIVED**: Moved to `archive/composefs-alternative.sh`
-- ✅ **Archive Notice Created**: `archive/COMPOSEFS_ARCHIVE_NOTICE.md` explains the transition
-- ✅ **Documentation Updated**: All documentation reflects official tool usage
-- ✅ **Clean Codebase**: Removed redundant implementation from main directory
-
-## 🚀 Benefits Achieved
-
-### **Production Readiness**
-- **Official Tools**: Uses `mkcomposefs` and `mount.composefs` from upstream
-- **Standards Compliance**: Full compliance with official ComposeFS specification
-- **Security**: fs-verity support for filesystem integrity verification
-- **Performance**: Page cache sharing and EROFS integration
-
-### **Ecosystem Integration**
-- **OSTree Integration**: Better integration with OSTree for atomic updates
-- **Podman Support**: Enhanced integration with Podman's ComposeFS support
-- **Flatpak Compatibility**: Prepared for future Flatpak ComposeFS support
-- **Container Runtime**: Better integration with modern container workflows
-
-### **Maintenance Benefits**
-- **Upstream Maintained**: Official tools maintained by Red Hat and containers community
-- **Reduced Maintenance**: No need to maintain custom ComposeFS implementation
-- **Bug Fixes**: Automatic benefit from upstream bug fixes and improvements
-- **Feature Updates**: Access to new features as they're added upstream
-
-## 📊 Technical Details
-
-### **Package Status**
-- **Repository**: https://salsa.debian.org/debian/composefs/
-- **Maintainer**: Roland Hieber (rhi@pengutronix.de)
-- **Upstream**: https://github.com/containers/composefs
-- **License**: BSD 2-Clause "Simplified" License
-- **Status**: ⏳ **READY FOR UPLOAD - AWAITING SPONSORSHIP** (Debian Bug #1064457)
-
-### **Integration Features**
-- **Automatic Detection**: Particle-OS automatically detects official tools
-- **Graceful Fallback**: Falls back to alternative implementation if needed
-- **Source Installation**: `--official-install` command for source builds
-- **Package Installation**: Will support `sudo apt install composefs-tools` when available
-
-### **Usage Examples**
-```bash
-# Install official tools (when available)
-sudo apt install composefs-tools
-
-# Or install from source
-composefs-alternative.sh --official-install
-
-# Check status
-composefs-alternative.sh official-status
-
-# Use official tools automatically
-composefs-alternative.sh create my-image /path/to/base
-composefs-alternative.sh 
mount my-image /mnt/point -``` - -## 🔄 Migration Path - -### **For Users** -1. **Automatic**: Particle-OS automatically detects and uses official tools -2. **Manual Installation**: Install official tools when available in repositories -3. **Source Build**: Use `--official-install` for immediate access -4. **Fallback**: Alternative implementation remains available if needed - -### **For Developers** -1. **Updated Documentation**: All docs reflect official tool usage -2. **Archived Implementation**: Alternative implementation preserved in archive -3. **Testing**: Official tools tested and working -4. **Future Development**: Focus on official tool integration and enhancements - -## 📈 Impact on Particle-OS - -### **Architecture Validation** -- **Approach Confirmed**: Official ComposeFS integration validates Particle-OS architecture -- **Standards Compliance**: Full compliance with official ComposeFS specification -- **Ecosystem Alignment**: Better alignment with container ecosystem standards -- **Future Proofing**: Positioned for future ComposeFS developments - -### **User Experience** -- **Simplified**: Users get official, production-ready tools -- **Reliable**: Official tools are well-tested and maintained -- **Compatible**: Better compatibility with other ComposeFS tools -- **Secure**: Enhanced security with fs-verity support - -### **Development Focus** -- **Reduced Maintenance**: Less time maintaining custom implementation -- **Enhanced Features**: Access to official tool features and improvements -- **Community Alignment**: Better alignment with container community -- **Standards Compliance**: Full compliance with official specifications - -## 🎯 Next Steps - -### **Immediate (Completed)** -- ✅ Archive alternative implementation -- ✅ Update documentation -- ✅ Test official tools integration -- ✅ Create archive notice - -### **Short Term** -- [ ] Test full integration workflow -- [ ] Update dependency checking for package availability -- [ ] Performance benchmarking -- [ ] User documentation updates - -### **Medium Term** -- [ ] Package integration when available in repositories -- [ ] Enhanced OSTree integration -- [ ] Podman integration testing -- [ ] Performance optimization - -### **Long Term** -- [ ] Flatpak integration -- [ ] Cloud deployment optimization -- [ ] Advanced features integration -- [ ] Community adoption - -## 🏆 Conclusion - -This milestone represents a **major achievement** for Particle-OS: - -1. **Production Readiness**: Official ComposeFS tools provide production-ready functionality -2. **Standards Compliance**: Full compliance with official ComposeFS specification -3. **Ecosystem Integration**: Better integration with container ecosystem -4. **Maintenance Reduction**: Reduced maintenance burden with upstream tools -5. **Future Proofing**: Positioned for future ComposeFS developments - -The successful integration of official ComposeFS tools **validates Particle-OS's approach** and positions it as a **serious contender** in the immutable Ubuntu ecosystem. The archiving of the alternative implementation demonstrates **maturity and focus** on production-ready solutions. - -**Particle-OS is now ready for production use with official ComposeFS tools!** 🚀 \ No newline at end of file diff --git a/SCRIPT_INVENTORY.md b/SCRIPT_INVENTORY.md index cc3e552..77dfe6e 100644 --- a/SCRIPT_INVENTORY.md +++ b/SCRIPT_INVENTORY.md @@ -5,8 +5,8 @@ This document catalogs all scripts in the tools directory and their purposes. 
 ## Core Scripts (KEEP)
 
 ### Main Tools
-**apt-layer.sh** - Main apt-layer tool. Mimics rpm-ostree but for deb packages. (compiled from scriptlets)
-**composefs-alternative.sh** - ComposeFS management tool (compiled from scriptlets)
+**apt-layer.sh** - Main apt-layer tool. Mimics rpm-ostree but for deb packages. (compiled from scriptlets; now supports atomic OSTree commits and robust overlay/dpkg install with official ComposeFS tools)
+**composefs-alternative.sh** - ComposeFS management tool (archived; official ComposeFS tools are now the default)
 - **bootc-alternative.sh** - BootC management tool (compiled from scriptlets)
 - **bootupd-alternative.sh** - BootUpd management tool (compiled from scriptlets)
 - **../orchestrator/orchestrator.sh** - Main orchestrator for all tools (moved to orchestrator directory)
@@ -37,42 +37,9 @@ This document catalogs all scripts in the tools directory and their purposes.
 - **compile-windows.bat** - Windows batch compilation script
 - **compile-windows.ps1** - Windows PowerShell compilation script
 
-## Redundant Fix Scripts (MOVE TO ARCHIVE)
+## Redundant Fix Scripts (ARCHIVED)
 
-These scripts were created during development to fix specific issues but are now redundant:
-
-### Permission Fixes
-- **fix-system-permissions.sh** - Fixed system permissions (redundant)
-- **fix-apt-layer-permissions.sh** - Fixed apt-layer permissions (redundant)
-- **fix-apt-layer-permissions-final.sh** - Final apt-layer permission fix (redundant)
-- **fix-permissions-complete.sh** - Complete permission fix (redundant)
-
-### Function Fixes
-- **fix-missing-functions.sh** - Fixed missing functions (redundant)
-- **fix-remaining-tools.sh** - Fixed remaining tools (redundant)
-- **fix-all-particle-tools.sh** - Fixed all tools (redundant)
-
-### Configuration Fixes
-- **fix-config.sh** - Fixed configuration (redundant)
-- **fix-config-better.sh** - Better configuration fix (redundant)
-- **create-clean-config.sh** - Created clean config (redundant)
-- **restore-config.sh** - Restored configuration (redundant)
-- **setup-directories.sh** - Setup directories (redundant)
-
-### Help Fixes
-- **fix-help-syntax.sh** - Fixed help syntax (redundant)
-- **final-help-fix.sh** - Final help fix (redundant)
-- **comprehensive-fix.sh** - Comprehensive fix (redundant)
-
-### Quick Fixes
-- **quick-fix-particle-os.sh** - Quick fix (redundant)
-
-### Testing Scripts
-- **test-source-logging.sh** - Test source logging (redundant)
-- **test-source-logging-fixed.sh** - Test fixed source logging (redundant)
-- **test-logging-functions.sh** - Test logging functions (redundant)
-- **test-line-endings.sh** - Test line endings (redundant)
-- **dos2unix.sh** - Convert line endings (redundant)
+All fix and test scripts have been moved to archive/ for historical reference. The workspace is now clean and contains only the essential scripts for development and deployment.
 
 ## Source Code (KEEP)
@@ -95,26 +62,7 @@ These scripts were created during development to fix specific issues but are now
 ## Archive (ALREADY ARCHIVED)
 
 The archive directory contains:
-- Old test scripts
+- All old test scripts and fix scripts
 - Previous versions of tools
 - Deprecated integration scripts
-- Backup files
-
-## Cleanup Actions Required
-
-1. **Move redundant fix scripts to archive/**
-2. **Update documentation to reflect current state**
-3. **Remove references to archived scripts from documentation**
-4. 
**Keep only the essential scripts for development and deployment**
-
-## Essential Scripts for Development
-
-For development work, you only need:
-- Source scriptlets in `src/` directories
-- Compilation scripts in each `src/` directory
-- Main compiled tools (apt-layer.sh, etc.)
-- Installation scripts
-- Testing scripts
-- Documentation
-
-All fix scripts can be safely archived as their fixes have been incorporated into the source scriptlets.
\ No newline at end of file
+- Backup files
\ No newline at end of file
diff --git a/TESTING_GUIDE.md b/TESTING_GUIDE.md
index 68becd9..0deaa21 100644
--- a/TESTING_GUIDE.md
+++ b/TESTING_GUIDE.md
@@ -148,8 +148,15 @@ sudo ./install-particle-os.sh
 # Test apt-layer package management
 apt-layer install-packages curl wget
 
-# Test composefs image creation
-composefs-alternative create test-image /tmp/test-source
+# Test atomic OSTree workflow
+apt-layer ostree compose install curl wget
+apt-layer ostree log
+apt-layer ostree status
+apt-layer ostree rollback
+
+# Test ComposeFS image creation (official tools)
+mkcomposefs testdir test.cfs
+composefs-fuse test.cfs /mnt/test-cfs
 
 # Test bootc image building
 bootc-alternative build test-image
@@ -180,6 +187,23 @@ time composefs-alternative create large-image /large-source
 time particle-orchestrator deploy-image test-image
 ```
 
+## Overlay/dpkg Install Workflow
+
+```bash
+# Download .deb files on host
+sudo apt-get install --download-only htop
+
+# Copy .deb files to overlay
+sudo cp /var/cache/apt/archives/*.deb /var/lib/particle-os/live-overlay/mount/tmp/packages/
+
+# Install in overlay with dpkg
+sudo chroot /var/lib/particle-os/live-overlay/mount dpkg -i /tmp/packages/*.deb
+
+# Clean up before commit
+sudo rm -rf /var/lib/particle-os/live-overlay/mount/tmp/packages/
+sudo rm -rf /var/lib/particle-os/live-overlay/mount/var/cache/apt/archives/*
+```
+
 ## Troubleshooting
 
 ### Debug Mode
diff --git a/apt-layer.sh b/apt-layer.sh
index 889a55b..a75f2e2 100644
--- a/apt-layer.sh
+++ b/apt-layer.sh
@@ -6,7 +6,7 @@
 # DO NOT modify this file directly as it will be overwritten                                                  #
 #                                                                                                              #
 #                                                                                                              #
 #                                        Particle-OS apt-layer Tool                                           #
-# Generated on: 2025-07-14 00:46:49                                                                           #
+# Generated on: 2025-07-14 01:44:01                                                                           #
 #                                                                                                              #
 ################################################################################################################
@@ -2172,6 +2172,535 @@ container_status() {
 
 # --- END OF SCRIPTLET: 04-container.sh ---
 
+# ============================================================================
+# Live Overlay System (rpm-ostree style)
+# ============================================================================
+
+# Ubuntu uBlue apt-layer Live Overlay System
+# Implements live system layering similar to rpm-ostree
+# Uses overlayfs for live package installation and management
+
+# =============================================================================
+# LIVE OVERLAY SYSTEM FUNCTIONS
+# =============================================================================
+
+# Live overlay state file (with fallbacks for when particle-config.sh is not loaded)
+LIVE_OVERLAY_STATE_FILE="${UBLUE_ROOT:-/var/lib/particle-os}/live-overlay.state"
+LIVE_OVERLAY_MOUNT_POINT="${UBLUE_ROOT:-/var/lib/particle-os}/live-overlay/mount"
+LIVE_OVERLAY_PACKAGE_LOG="${UBLUE_LOG_DIR:-/var/log/particle-os}/live-overlay-packages.log"
+
+# Initialize live overlay system
+init_live_overlay_system() {
+    log_info "Initializing live overlay system" "apt-layer"
+
+    # Create live overlay directories
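+    # Directory layout sketch for the overlay (paths shown are the defaults
+    # used when the UBLUE_* variables are unset; real values normally come
+    # from particle-config.sh):
+    #   upper/ - captures all writes made while the overlay is active
+    #   work/  - overlayfs internal scratch space (must live on the same filesystem as upper/)
+    #   mount/ - the merged view of / plus upper/, used below as the chroot target
+    mkdir -p "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" 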
"${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" + mkdir -p "$LIVE_OVERLAY_MOUNT_POINT" + + # Set proper permissions + chmod 755 "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" + chmod 700 "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" + + # Initialize package log if it doesn't exist + if [[ ! -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then + touch "$LIVE_OVERLAY_PACKAGE_LOG" + chmod 644 "$LIVE_OVERLAY_PACKAGE_LOG" + fi + + # Conditional DNS fix for chroot overlay (WSL, etc) + if [[ -d "$LIVE_OVERLAY_MOUNT_POINT" ]]; then + if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" getent hosts archive.ubuntu.com >/dev/null 2>&1; then + log_warning "DNS resolution failed in overlay. Injecting public DNS servers..." "apt-layer" + # Backup original resolv.conf if present + if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" ]]; then + cp "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" + fi + echo "nameserver 8.8.8.8" > "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" + echo "nameserver 1.1.1.1" >> "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" + chmod 644 "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" + touch "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" + log_success "DNS configuration applied to overlay" "apt-layer" + else + log_info "DNS resolution in overlay is working. No changes made." "apt-layer" + fi + fi + + log_success "Live overlay system initialized" "apt-layer" +} + +# Check if live overlay is active +is_live_overlay_active() { + if [[ -f "$LIVE_OVERLAY_STATE_FILE" ]]; then + local state + state=$(cat "$LIVE_OVERLAY_STATE_FILE" 2>/dev/null || echo "") + [[ "$state" == "active" ]] + else + false + fi +} + +# Check if system supports live overlay +check_live_overlay_support() { + local errors=0 + local test_dir="/tmp/overlay-test-$$" + local test_lower="$test_dir/lower" + local test_upper="$test_dir/upper" + local test_work="$test_dir/work" + local test_mount="$test_dir/mount" + + # Check for overlay module + if ! modprobe -n overlay >/dev/null 2>&1; then + log_error "Overlay module not available" "apt-layer" + errors=$((errors + 1)) + fi + + # Create test directories + mkdir -p "$test_lower" "$test_upper" "$test_work" "$test_mount" 2>/dev/null + + # Check for overlayfs mount support + if ! mount -t overlay overlay -o "lowerdir=$test_lower,upperdir=$test_upper,workdir=$test_work" "$test_mount" 2>/dev/null; then + log_error "Overlayfs mount not supported" "apt-layer" + errors=$((errors + 1)) + else + umount "$test_mount" 2>/dev/null + fi + + # Cleanup test directories + rm -rf "$test_dir" 2>/dev/null + + # Check for read-only root filesystem + if ! is_root_readonly; then + log_warning "Root filesystem is not read-only - live overlay may not be necessary" "apt-layer" + fi + + if [[ $errors -gt 0 ]]; then + return 1 + fi + + return 0 +} + +# Check if root filesystem is read-only +is_root_readonly() { + local root_mount + root_mount=$(findmnt -n -o OPTIONS / | grep -o "ro" || echo "") + [[ -n "$root_mount" ]] +} + +# Start live overlay +start_live_overlay() { + log_info "Starting live overlay system" "apt-layer" + + # Check if already active + if is_live_overlay_active; then + log_warning "Live overlay is already active" "apt-layer" + return 0 + fi + + # Check system support + if ! 
check_live_overlay_support; then
+        log_error "System does not support live overlay" "apt-layer"
+        return 1
+    fi
+
+    # Initialize system
+    init_live_overlay_system
+
+    # Create overlay mount
+    log_info "Creating overlay mount" "apt-layer"
+    if mount -t overlay overlay -o "lowerdir=/,upperdir=${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper},workdir=${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" "$LIVE_OVERLAY_MOUNT_POINT"; then
+        log_success "Overlay mount created successfully" "apt-layer"
+
+        # Mark overlay as active
+        echo "active" > "$LIVE_OVERLAY_STATE_FILE"
+
+        log_success "Live overlay started successfully" "apt-layer"
+        log_info "Changes will be applied to overlay and can be committed or rolled back" "apt-layer"
+
+        return 0
+    else
+        log_error "Failed to create overlay mount" "apt-layer"
+        return 1
+    fi
+}
+
+# Stop live overlay
+stop_live_overlay() {
+    log_info "Stopping live overlay system" "apt-layer"
+
+    # Check if overlay is active
+    if ! is_live_overlay_active; then
+        log_warning "Live overlay is not active" "apt-layer"
+        return 0
+    fi
+
+    # Check for active processes
+    if check_active_processes; then
+        log_warning "Active processes detected - overlay will persist until processes complete" "apt-layer"
+        return 0
+    fi
+
+    # Undo DNS fix if we applied it
+    if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" ]]; then
+        if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" ]]; then
+            mv "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
+        else
+            rm -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
+        fi
+        rm -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer"
+        log_info "DNS fix by apt-layer undone on overlay stop" "apt-layer"
+    fi
+
+    # Unmount overlay
+    log_info "Unmounting overlay" "apt-layer"
+    if umount "$LIVE_OVERLAY_MOUNT_POINT"; then
+        log_success "Overlay unmounted successfully" "apt-layer"
+
+        # Remove state file
+        rm -f "$LIVE_OVERLAY_STATE_FILE"
+
+        log_success "Live overlay stopped successfully" "apt-layer"
+        return 0
+    else
+        log_error "Failed to unmount overlay" "apt-layer"
+        return 1
+    fi
+}
+
+# Check for active processes that might prevent unmounting
+check_active_processes() {
+    # Check for package manager processes
+    if pgrep -f "apt|dpkg|apt-get" >/dev/null 2>&1; then
+        return 0
+    fi
+
+    # Check for processes using the overlay mount
+    if lsof "$LIVE_OVERLAY_MOUNT_POINT" >/dev/null 2>&1; then
+        return 0
+    fi
+
+    return 1
+}
+
+# Get live overlay status
+get_live_overlay_status() {
+    echo "=== Live Overlay Status ==="
+
+    if is_live_overlay_active; then
+        log_success "Live overlay is ACTIVE" "apt-layer"
+
+        # Show mount details
+        if mountpoint -q "$LIVE_OVERLAY_MOUNT_POINT"; then
+            log_info "Overlay mount point: $LIVE_OVERLAY_MOUNT_POINT" "apt-layer"
+
+            # Show overlay usage
+            if [[ -d "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" ]]; then
+                local usage=$(du -sh "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" 2>/dev/null | cut -f1 || echo "unknown")
+                log_info "Overlay usage: $usage" "apt-layer"
+            fi
+
+            # Show installed packages
+            if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then
+                local package_count=$(wc -l < "$LIVE_OVERLAY_PACKAGE_LOG" 2>/dev/null || echo "0")
+                log_info "Packages installed in overlay: $package_count" "apt-layer"
+            fi
+        else
+            log_warning "Overlay mount point not mounted" "apt-layer"
+        fi
+
+        # Check for active processes
+        if check_active_processes; then
+            log_warning "Active processes detected - overlay cannot be stopped" "apt-layer"
+        fi
+    else
+        log_info "Live overlay is not active" "apt-layer"
+
+        # Check if system supports live overlay
+        if check_live_overlay_support >/dev/null 2>&1; then
+            log_info "System supports live overlay" "apt-layer"
+            log_info "Use '--live-overlay start' to start live overlay" "apt-layer"
+        else
+            log_warning "System does not support live overlay" "apt-layer"
+        fi
+    fi
+
+    echo ""
+}
+
+# Install packages in live overlay
+live_install() {
+    local packages=("$@")
+
+    log_info "Installing packages in live overlay: ${packages[*]}" "apt-layer"
+
+    # Check if overlay is active
+    if ! is_live_overlay_active; then
+        log_error "Live overlay is not active" "apt-layer"
+        log_info "Use '--live-overlay start' to start live overlay first" "apt-layer"
+        return 1
+    fi
+
+    # Check for root privileges
+    if [[ $EUID -ne 0 ]]; then
+        log_error "Root privileges required for live installation" "apt-layer"
+        return 1
+    fi
+
+    # Update package lists in overlay
+    log_info "Updating package lists in overlay" "apt-layer"
+    if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get update; then
+        log_error "Failed to update package lists" "apt-layer"
+        log_warning "Network or DNS error? For offline or WSL overlays, use: apt-layer --live-dpkg <.deb files>" "apt-layer"
+        return 1
+    fi
+
+    # Install packages in overlay
+    log_info "Installing packages in overlay" "apt-layer"
+    if chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get install -y "${packages[@]}"; then
+        log_success "Packages installed successfully in overlay" "apt-layer"
+
+        # Log installed packages
+        for package in "${packages[@]}"; do
+            echo "$(date '+%Y-%m-%d %H:%M:%S') - INSTALLED: $package" >> "$LIVE_OVERLAY_PACKAGE_LOG"
+        done
+
+        log_info "Changes are applied to overlay and can be committed or rolled back" "apt-layer"
+        return 0
+    else
+        log_error "Failed to install packages in overlay" "apt-layer"
+        log_warning "If this is a network or DNS issue, try: apt-layer --live-dpkg <.deb files>" "apt-layer"
+        return 1
+    fi
+}
+
+# Manage live overlay
+manage_live_overlay() {
+    local action="$1"
+    shift
+    local options=("$@")
+
+    case "$action" in
+        "start")
+            start_live_overlay
+            ;;
+        "stop")
+            stop_live_overlay
+            ;;
+        "status")
+            get_live_overlay_status
+            ;;
+        "commit")
+            local message="${options[0]:-Live overlay changes}"
+            commit_live_overlay "$message"
+            ;;
+        "rollback")
+            rollback_live_overlay
+            ;;
+        "list")
+            list_live_overlay_packages
+            ;;
+        "clean")
+            clean_live_overlay
+            ;;
+        *)
+            log_error "Unknown live overlay action: $action" "apt-layer"
+            log_info "Valid actions: start, stop, status, commit, rollback, list, clean" "apt-layer"
+            return 1
+            ;;
+    esac
+}
+
+# Commit live overlay changes
+commit_live_overlay() {
+    local message="$1"
+
+    log_info "Committing live overlay changes: $message" "apt-layer"
+
+    # Check if overlay is active
+    if ! is_live_overlay_active; then
+        log_error "Live overlay is not active" "apt-layer"
+        return 1
+    fi
+
+    # Check if there are changes to commit
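+    # has_overlay_changes (defined below) treats a non-empty upper/ directory
+    # as the signal that something was written through the overlay; an empty
+    # upper/ would produce an empty layer, so we bail out early in that case.
+    if ! 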
has_overlay_changes; then + log_warning "No changes to commit" "apt-layer" + return 0 + fi + + # Create new ComposeFS layer from overlay changes + local timestamp=$(date '+%Y%m%d_%H%M%S') + local layer_name="live-overlay-commit-${timestamp}" + + log_info "Creating new layer: $layer_name" "apt-layer" + + # Create layer from overlay changes + if create_layer_from_overlay "$layer_name" "$message"; then + log_success "Live overlay changes committed as layer: $layer_name" "apt-layer" + + # Clean up overlay + clean_live_overlay + + return 0 + else + log_error "Failed to commit live overlay changes" "apt-layer" + return 1 + fi +} + +# Check if overlay has changes +has_overlay_changes() { + if [[ -d "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" ]]; then + # Check if upper directory has any content + if [[ -n "$(find "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" -mindepth 1 -maxdepth 1 2>/dev/null)" ]]; then + return 0 + fi + fi + + return 1 +} + +# Create layer from overlay changes +create_layer_from_overlay() { + local layer_name="$1" + local message="$2" + + # Create temporary directory for layer + local temp_layer_dir="${UBLUE_TEMP_DIR:-/var/lib/particle-os/temp}/live-layer-${layer_name}" + mkdir -p "$temp_layer_dir" + + # Copy overlay changes to temporary directory + log_info "Copying overlay changes to temporary layer" "apt-layer" + if ! cp -a "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}"/* "$temp_layer_dir/" 2>/dev/null; then + log_error "Failed to copy overlay changes" "apt-layer" + rm -rf "$temp_layer_dir" + return 1 + fi + + # Create ComposeFS layer + log_info "Creating ComposeFS layer from overlay changes" "apt-layer" + if ! create_composefs_layer "$temp_layer_dir" "$layer_name" "$message"; then + log_error "Failed to create ComposeFS layer" "apt-layer" + rm -rf "$temp_layer_dir" + return 1 + fi + + # Clean up temporary directory + rm -rf "$temp_layer_dir" + + return 0 +} + +# Create ComposeFS layer from directory +create_composefs_layer() { + local source_dir="$1" + local layer_name="$2" + local message="$3" + + # Use composefs-alternative to create layer + if command -v composefs-alternative >/dev/null 2>&1; then + if composefs-alternative create-layer "$source_dir" "$layer_name" "$message"; then + return 0 + fi + fi + + # Fallback: create simple squashfs layer + local layer_file="${UBLUE_BUILD_DIR:-/var/lib/particle-os/build}/${layer_name}.squashfs" + mkdir -p "$(dirname "$layer_file")" + + if mksquashfs "$source_dir" "$layer_file" -comp "${UBLUE_SQUASHFS_COMPRESSION:-xz}" -b "${UBLUE_SQUASHFS_BLOCK_SIZE:-1M}"; then + log_success "Created squashfs layer: $layer_file" "apt-layer" + return 0 + else + log_error "Failed to create squashfs layer" "apt-layer" + return 1 + fi +} + +# Rollback live overlay changes +rollback_live_overlay() { + log_info "Rolling back live overlay changes" "apt-layer" + + # Check if overlay is active + if ! 
is_live_overlay_active; then + log_error "Live overlay is not active" "apt-layer" + return 1 + fi + + # Stop overlay (this will discard changes) + if stop_live_overlay; then + log_success "Live overlay changes rolled back successfully" "apt-layer" + return 0 + else + log_error "Failed to rollback live overlay changes" "apt-layer" + return 1 + fi +} + +# List packages installed in live overlay +list_live_overlay_packages() { + log_info "Listing packages installed in live overlay" "apt-layer" + + if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then + if [[ -s "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then + echo "=== Packages Installed in Live Overlay ===" + cat "$LIVE_OVERLAY_PACKAGE_LOG" + echo "" + else + log_info "No packages installed in live overlay" "apt-layer" + fi + else + log_info "No package log found" "apt-layer" + fi +} + +# Clean live overlay +clean_live_overlay() { + log_info "Cleaning live overlay" "apt-layer" + + # Stop overlay if active + if is_live_overlay_active; then + stop_live_overlay + fi + + # Clean up overlay directories + rm -rf "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}"/* "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}"/* 2>/dev/null + + # Clean up package log + rm -f "$LIVE_OVERLAY_PACKAGE_LOG" + + # Remove state file + rm -f "$LIVE_OVERLAY_STATE_FILE" + + log_success "Live overlay cleaned successfully" "apt-layer" +} + +# ============================================================================= +# INTEGRATION FUNCTIONS +# ============================================================================= + +# Initialize live overlay system on script startup +init_live_overlay_on_startup() { + # Only initialize if not already done + if [[ ! -d "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" ]]; then + init_live_overlay_system + fi +} + +# Cleanup live overlay on script exit +cleanup_live_overlay_on_exit() { + # Only cleanup if overlay is active and no processes are using it + if is_live_overlay_active && ! check_active_processes; then + log_info "Cleaning up live overlay on exit" "apt-layer" + stop_live_overlay + fi +} + +# Register cleanup function +trap cleanup_live_overlay_on_exit EXIT + +# --- END OF SCRIPTLET: 05-live-overlay.sh --- + # ============================================================================ # OCI Export/Import Integration # ============================================================================ @@ -2743,1384 +3272,6 @@ oci_status() { # --- END OF SCRIPTLET: 06-oci-integration.sh --- -# ============================================================================ -# Atomic Deployment System -# ============================================================================ -# Atomic deployment system for Ubuntu uBlue apt-layer Tool -# Implements commit-based state management and true system upgrades (not package upgrades) - -# Atomic deployment state management -DEPLOYMENT_DB="/var/lib/particle-os/deployments.json" -CURRENT_DEPLOYMENT_FILE="/var/lib/particle-os/current-deployment" -PENDING_DEPLOYMENT_FILE="/var/lib/particle-os/pending-deployment" -DEPLOYMENT_HISTORY_DIR="/var/lib/particle-os/history" - -# Initialize deployment database -init_deployment_db() { - log_info "Initializing atomic deployment database..." 
"apt-layer" - - # Ensure directories exist with proper permissions - mkdir -p "$DEPLOYMENT_HISTORY_DIR" 2>/dev/null || { - log_error "Failed to create deployment history directory: $DEPLOYMENT_HISTORY_DIR" "apt-layer" - return 1 - } - - # Create deployment database if it doesn't exist - if [[ ! -f "$DEPLOYMENT_DB" ]]; then - cat > "$DEPLOYMENT_DB" << 'EOF' -{ - "deployments": {}, - "current_deployment": null, - "pending_deployment": null, - "deployment_counter": 0, - "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - if [[ $? -eq 0 ]]; then - log_success "Deployment database initialized" "apt-layer" - else - log_error "Failed to create deployment database: $DEPLOYMENT_DB" "apt-layer" - return 1 - fi - fi - - # Ensure deployment files exist with proper error handling - touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { - log_warning "Failed to create current deployment file, attempting with sudo..." "apt-layer" - sudo touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { - log_error "Failed to create current deployment file: $CURRENT_DEPLOYMENT_FILE" "apt-layer" - return 1 - } - } - - touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { - log_warning "Failed to create pending deployment file, attempting with sudo..." "apt-layer" - sudo touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { - log_error "Failed to create pending deployment file: $PENDING_DEPLOYMENT_FILE" "apt-layer" - return 1 - } - } - - log_success "Deployment database initialization completed" "apt-layer" -} - -# Create a new deployment commit -create_deployment_commit() { - local base_image="$1" - local layers=("${@:2}") - local commit_message="${COMMIT_MESSAGE:-System update}" - - local commit_id="commit-$(date +%Y%m%d-%H%M%S)-$$" - local commit_data - - log_info "Creating deployment commit: $commit_id" "apt-layer" - - # Create commit metadata with proper variable expansion - local layers_json="[" - for i in "${!layers[@]}"; do - if [[ $i -gt 0 ]]; then - layers_json+="," - fi - layers_json+="\"${layers[$i]}\"" - done - layers_json+="]" - - commit_data=$(cat << EOF -{ - "commit_id": "$commit_id", - "base_image": "$base_image", - "layers": $layers_json, - "commit_message": "$commit_message", - "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "parent_commit": "$(get_current_deployment)", - "composefs_image": "${commit_id}.composefs" -} -EOF -) - - # Add to deployment database - jq --arg commit_id "$commit_id" \ - --arg base_image "$base_image" \ - --arg layers_json "$layers_json" \ - --arg commit_message "$commit_message" \ - --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - --arg parent_commit "$(get_current_deployment)" \ - --arg composefs_image "${commit_id}.composefs" \ - '.deployments[$commit_id] = { - "commit_id": $commit_id, - "base_image": $base_image, - "layers": ($layers_json | fromjson), - "commit_message": $commit_message, - "created": $created, - "parent_commit": $parent_commit, - "composefs_image": $composefs_image - } | .deployment_counter += 1' \ - "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - - # Create deployment history file - echo "$commit_data" > "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" - - log_success "Deployment commit created: $commit_id" "apt-layer" - echo "$commit_id" -} - -# Get current deployment -get_current_deployment() { - if [[ -f "$CURRENT_DEPLOYMENT_FILE" ]]; then - cat "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || echo "" - else - echo "" - fi -} - -# Get pending deployment -get_pending_deployment() { - if [[ -f "$PENDING_DEPLOYMENT_FILE" ]]; then - cat 
"$PENDING_DEPLOYMENT_FILE" 2>/dev/null || echo "" - else - echo "" - fi -} - -# Set current deployment -set_current_deployment() { - local commit_id="$1" - echo "$commit_id" > "$CURRENT_DEPLOYMENT_FILE" - - # Update deployment database - jq --arg commit_id "$commit_id" '.current_deployment = $commit_id' \ - "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - - log_info "Current deployment set to: $commit_id" "apt-layer" -} - -# Set pending deployment -set_pending_deployment() { - local commit_id="$1" - echo "$commit_id" > "$PENDING_DEPLOYMENT_FILE" - - # Update deployment database - jq --arg commit_id "$commit_id" '.pending_deployment = $commit_id' \ - "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - - log_info "Pending deployment set to: $commit_id" "apt-layer" -} - -# Clear pending deployment -clear_pending_deployment() { - echo "" > "$PENDING_DEPLOYMENT_FILE" - - # Update deployment database - jq '.pending_deployment = null' \ - "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - - log_info "Pending deployment cleared" "apt-layer" -} - -# Atomic deployment function -atomic_deploy() { - local commit_id="$1" - local deployment_dir="/var/lib/particle-os/deployments/${commit_id}" - local boot_dir="/boot/loader/entries" - - log_info "Performing atomic deployment: $commit_id" "apt-layer" - - # Validate commit exists - if ! jq -e ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then - log_error "Commit not found: $commit_id" "apt-layer" - return 1 - fi - - # Get commit data - local commit_data - commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") - local composefs_image - composefs_image=$(echo "$commit_data" | jq -r '.composefs_image') - - # Create deployment directory - mkdir -p "$deployment_dir" - - # Mount the ComposeFS image - if ! composefs_mount "$composefs_image" "$deployment_dir"; then - log_error "Failed to mount ComposeFS image for deployment" "apt-layer" - return 1 - fi - - # Apply kernel arguments to deployment - apply_kernel_args_to_deployment "$commit_id" - - # Create bootloader entry - create_bootloader_entry "$commit_id" "$deployment_dir" - - # Set as pending deployment (will activate on next boot) - set_pending_deployment "$commit_id" - - log_success "Atomic deployment prepared: $commit_id" "apt-layer" - log_info "Reboot to activate deployment" "apt-layer" - return 0 -} - -# True system upgrade (not package upgrade) -system_upgrade() { - local new_base_image="${1:-}" - local current_layers=() - - log_info "Performing true system upgrade..." 
"apt-layer" - - # Get current deployment - local current_commit - current_commit=$(get_current_deployment) - - if [[ -n "$current_commit" ]]; then - # Get current layers from deployment - current_layers=($(jq -r ".deployments[\"$current_commit\"].layers[]" "$DEPLOYMENT_DB" 2>/dev/null || true)) - log_info "Current layers: ${current_layers[*]}" "apt-layer" - fi - - # If no new base specified, try to find one - if [[ -z "$new_base_image" ]]; then - new_base_image=$(find_newer_base_image) - if [[ -z "$new_base_image" ]]; then - log_info "No newer base image found" "apt-layer" - return 0 - fi - fi - - log_info "Upgrading to base image: $new_base_image" "apt-layer" - - # Rebase existing layers on new base - local rebased_layers=() - for layer in "${current_layers[@]}"; do - local new_layer="${layer}-rebased-$(date +%Y%m%d)" - log_info "Rebasing layer: $layer -> $new_layer" "apt-layer" - - if "$0" --rebase "$layer" "$new_base_image" "$new_layer"; then - rebased_layers+=("$new_layer") - else - log_error "Failed to rebase layer: $layer" "apt-layer" - return 1 - fi - done - - # Create new deployment commit - local commit_id - commit_id=$(create_deployment_commit "$new_base_image" "${rebased_layers[@]}") - - # Perform atomic deployment - if atomic_deploy "$commit_id"; then - log_success "System upgrade completed successfully" "apt-layer" - return 0 - else - log_error "System upgrade failed" "apt-layer" - return 1 - fi -} - -# Find newer base image -find_newer_base_image() { - local current_base - current_base=$(jq -r ".deployments[\"$(get_current_deployment)\"].base_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") - - if [[ -z "$current_base" ]]; then - log_warning "No current base image found" "apt-layer" - return 1 - fi - - # List available base images and find newer ones - local available_bases - available_bases=($(composefs_list_images | grep "^ubuntu-ublue/base/" | sort -V)) - - for base in "${available_bases[@]}"; do - if [[ "$base" > "$current_base" ]]; then - echo "$base" - return 0 - fi - done - - return 1 -} - -# Create bootloader entry -create_bootloader_entry() { - local commit_id="$1" - local deployment_dir="$2" - - log_info "Creating bootloader entry for: $commit_id" "apt-layer" - - # Initialize bootloader system - init_bootloader_on_startup - - # Create bootloader entry using the comprehensive bootloader system - if create_bootloader_entry "$commit_id" "$deployment_dir" "Ubuntu uBlue ($commit_id)"; then - log_success "Bootloader entry created for: $commit_id" "apt-layer" - return 0 - else - log_error "Failed to create bootloader entry for: $commit_id" "apt-layer" - return 1 - fi -} - -# Show atomic deployment status -atomic_status() { - local current_deployment - current_deployment=$(get_current_deployment) - local pending_deployment - pending_deployment=$(get_pending_deployment) - - echo "=== Atomic Deployment Status ===" - echo "Current Deployment: ${current_deployment:-none}" - echo "Pending Deployment: ${pending_deployment:-none}" - - if [[ -n "$current_deployment" ]]; then - local commit_data - commit_data=$(jq -r ".deployments[\"$current_deployment\"]" "$DEPLOYMENT_DB" 2>/dev/null || echo "{}") - - if [[ "$commit_data" != "{}" ]]; then - echo "Deployment Type: $(echo "$commit_data" | jq -r '.commit_message')" - echo "Base Image: $(echo "$commit_data" | jq -r '.base_image')" - echo "Created: $(echo "$commit_data" | jq -r '.created')" - echo "Layers: $(echo "$commit_data" | jq -r '.layers | join(", ")')" - fi - fi - - if [[ -n "$pending_deployment" ]]; then - echo "�� Pending 
deployment will activate on next boot" - fi -} - -# List all deployments -list_deployments() { - echo "=== Deployment History ===" - - local deployments - deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r)) - - for commit_id in "${deployments[@]}"; do - local commit_data - commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") - - local status="" - if [[ "$commit_id" == "$(get_current_deployment)" ]]; then - status=" [CURRENT]" - elif [[ "$commit_id" == "$(get_pending_deployment)" ]]; then - status=" [PENDING]" - fi - - echo "$commit_id$status" - echo " Message: $(echo "$commit_data" | jq -r '.commit_message')" - echo " Created: $(echo "$commit_data" | jq -r '.created')" - echo " Base: $(echo "$commit_data" | jq -r '.base_image')" - echo "" - done -} - -# Rollback to specific commit -commit_rollback() { - local target_commit="$1" - - log_info "Rolling back to commit: $target_commit" "apt-layer" - - # Validate target commit exists - if ! jq -e ".deployments[\"$target_commit\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then - log_error "Target commit not found: $target_commit" "apt-layer" - return 1 - fi - - # Perform atomic deployment to target commit - if atomic_deploy "$target_commit"; then - log_success "Rollback prepared to: $target_commit" "apt-layer" - log_info "Reboot to activate rollback" "apt-layer" - return 0 - else - log_error "Rollback failed" "apt-layer" - return 1 - fi -} - -# --- END OF SCRIPTLET: 09-atomic-deployment.sh --- - -# ============================================================================ -# rpm-ostree Compatibility Layer -# ============================================================================ -# rpm-ostree compatibility layer for Ubuntu uBlue apt-layer Tool -# Provides 1:1 command compatibility with rpm-ostree - -# rpm-ostree install compatibility -rpm_ostree_install() { - local packages=("$@") - - log_info "rpm-ostree install compatibility: ${packages[*]}" "apt-layer" - - # Use live overlay for package installation - if ! live_install "${packages[@]}"; then - log_error "rpm-ostree install failed" "apt-layer" - return 1 - fi - - log_success "rpm-ostree install completed successfully" "apt-layer" - return 0 -} - -# rpm-ostree upgrade compatibility -rpm_ostree_upgrade() { - log_info "rpm-ostree upgrade compatibility" "apt-layer" - - # Use true system upgrade (not package upgrade) - if ! system_upgrade; then - log_error "rpm-ostree upgrade failed" "apt-layer" - return 1 - fi - - log_success "rpm-ostree upgrade completed successfully" "apt-layer" - return 0 -} - -# rpm-ostree rebase compatibility -rpm_ostree_rebase() { - local new_base="$1" - - log_info "rpm-ostree rebase compatibility: $new_base" "apt-layer" - - # Use intelligent rebase with conflict resolution - if ! intelligent_rebase "$new_base"; then - log_error "rpm-ostree rebase failed" "apt-layer" - return 1 - fi - - log_success "rpm-ostree rebase completed successfully" "apt-layer" - return 0 -} - -# rpm-ostree rollback compatibility -rpm_ostree_rollback() { - local target_commit="${1:-}" - - log_info "rpm-ostree rollback compatibility: ${target_commit:-latest}" "apt-layer" - - if [[ -z "$target_commit" ]]; then - # Rollback to previous deployment - target_commit=$(get_previous_deployment) - if [[ -z "$target_commit" ]]; then - log_error "No previous deployment found for rollback" "apt-layer" - return 1 - fi - fi - - # Use commit-based rollback - if ! 
commit_rollback "$target_commit"; then - log_error "rpm-ostree rollback failed" "apt-layer" - return 1 - fi - - log_success "rpm-ostree rollback completed successfully" "apt-layer" - return 0 -} - -# rpm-ostree status compatibility -rpm_ostree_status() { - log_info "rpm-ostree status compatibility" "apt-layer" - - # Show atomic deployment status - atomic_status - - # Show live overlay status - echo "" - echo "=== Live Overlay Status ===" - get_live_overlay_status - - # Show package diff if pending deployment - local pending_deployment - pending_deployment=$(get_pending_deployment) - if [[ -n "$pending_deployment" ]]; then - echo "" - echo "=== Pending Changes ===" - show_package_diff "$(get_current_deployment)" "$pending_deployment" - fi -} - -# rpm-ostree diff compatibility -rpm_ostree_diff() { - local from_commit="${1:-}" - local to_commit="${2:-}" - - log_info "rpm-ostree diff compatibility: $from_commit -> $to_commit" "apt-layer" - - # If no commits specified, compare current to pending - if [[ -z "$from_commit" ]]; then - from_commit=$(get_current_deployment) - fi - if [[ -z "$to_commit" ]]; then - to_commit=$(get_pending_deployment) - if [[ -z "$to_commit" ]]; then - log_error "No target commit specified and no pending deployment" "apt-layer" - return 1 - fi - fi - - # Show package-level diff - show_package_diff "$from_commit" "$to_commit" -} - -# rpm-ostree db list compatibility -rpm_ostree_db_list() { - log_info "rpm-ostree db list compatibility" "apt-layer" - - # List all deployments - list_deployments -} - -# rpm-ostree db diff compatibility -rpm_ostree_db_diff() { - local from_commit="${1:-}" - local to_commit="${2:-}" - - log_info "rpm-ostree db diff compatibility: $from_commit -> $to_commit" "apt-layer" - - # If no commits specified, compare current to pending - if [[ -z "$from_commit" ]]; then - from_commit=$(get_current_deployment) - fi - if [[ -z "$to_commit" ]]; then - to_commit=$(get_pending_deployment) - if [[ -z "$to_commit" ]]; then - log_error "No target commit specified and no pending deployment" "apt-layer" - return 1 - fi - fi - - # Show detailed package diff - show_detailed_package_diff "$from_commit" "$to_commit" -} - -# rpm-ostree cleanup compatibility -rpm_ostree_cleanup() { - local purge="${1:-}" - - log_info "rpm-ostree cleanup compatibility: purge=$purge" "apt-layer" - - # Clean up old deployments - cleanup_old_deployments - - # Clean up old ComposeFS images - cleanup_old_composefs_images - - if [[ "$purge" == "--purge" ]]; then - # Also clean up old bootloader entries - cleanup_old_bootloader_entries - fi - - log_success "rpm-ostree cleanup completed successfully" "apt-layer" -} - -# rpm-ostree cancel compatibility -rpm_ostree_cancel() { - log_info "rpm-ostree cancel compatibility" "apt-layer" - - # Clear pending deployment - clear_pending_deployment - - # Clean up live overlay - stop_live_overlay - - log_success "rpm-ostree cancel completed successfully" "apt-layer" -} - -# rpm-ostree initramfs compatibility -rpm_ostree_initramfs() { - local action="${1:-}" - - log_info "rpm-ostree initramfs compatibility: $action" "apt-layer" - - case "$action" in - --enable) - enable_initramfs_rebuild - ;; - --disable) - disable_initramfs_rebuild - ;; - --rebuild) - rebuild_initramfs - ;; - *) - log_error "Invalid initramfs action: $action" "apt-layer" - return 1 - ;; - esac -} - -# rpm-ostree kargs compatibility -rpm_ostree_kargs() { - local action="${1:-}" - shift - - log_info "rpm-ostree kargs compatibility: $action" "apt-layer" - - case "$action" in - --get) - 
get_kernel_args - ;; - --set) - set_kernel_args "$@" - ;; - --append) - append_kernel_args "$@" - ;; - --delete) - delete_kernel_args "$@" - ;; - --reset) - reset_kernel_args - ;; - *) - log_error "Invalid kargs action: $action" "apt-layer" - return 1 - ;; - esac -} - -# rpm-ostree usroverlay compatibility -rpm_ostree_usroverlay() { - local action="${1:-}" - - log_info "rpm-ostree usroverlay compatibility: $action" "apt-layer" - - case "$action" in - --mount) - mount_usr_overlay - ;; - --unmount) - unmount_usr_overlay - ;; - --status) - usr_overlay_status - ;; - *) - log_error "Invalid usroverlay action: $action" "apt-layer" - return 1 - ;; - esac -} - -# rpm-ostree composefs compatibility -rpm_ostree_composefs() { - local action="${1:-}" - shift - - log_info "rpm-ostree composefs compatibility: $action" "apt-layer" - - case "$action" in - --mount) - composefs_mount "$@" - ;; - --unmount) - composefs_unmount "$@" - ;; - --list) - composefs_list_images - ;; - --info) - composefs_image_info "$@" - ;; - *) - log_error "Invalid composefs action: $action" "apt-layer" - return 1 - ;; - esac -} - -# Helper functions for rpm-ostree compatibility - -# Get previous deployment -get_previous_deployment() { - local current_deployment - current_deployment=$(get_current_deployment) - - if [[ -n "$current_deployment" ]]; then - local parent_commit - parent_commit=$(jq -r ".deployments[\"$current_deployment\"].parent_commit" "$DEPLOYMENT_DB" 2>/dev/null || echo "") - echo "$parent_commit" - fi -} - -# Show package diff between commits -show_package_diff() { - local from_commit="$1" - local to_commit="$2" - - log_info "Showing package diff: $from_commit -> $to_commit" "apt-layer" - - # Get package lists from both commits - local from_packages=() - local to_packages=() - - if [[ -n "$from_commit" ]]; then - from_packages=($(get_packages_from_commit "$from_commit")) - fi - - if [[ -n "$to_commit" ]]; then - to_packages=($(get_packages_from_commit "$to_commit")) - fi - - # Calculate differences - local added_packages=() - local removed_packages=() - local updated_packages=() - - # Find added packages - for pkg in "${to_packages[@]}"; do - if [[ ! " ${from_packages[*]} " =~ " ${pkg} " ]]; then - added_packages+=("$pkg") - fi - done - - # Find removed packages - for pkg in "${from_packages[@]}"; do - if [[ ! 
" ${to_packages[*]} " =~ " ${pkg} " ]]; then - removed_packages+=("$pkg") - fi - done - - # Show results - if [[ ${#added_packages[@]} -gt 0 ]]; then - echo "Added packages:" - printf " %s\n" "${added_packages[@]}" - fi - - if [[ ${#removed_packages[@]} -gt 0 ]]; then - echo "Removed packages:" - printf " %s\n" "${removed_packages[@]}" - fi - - if [[ ${#added_packages[@]} -eq 0 ]] && [[ ${#removed_packages[@]} -eq 0 ]]; then - echo "No package changes detected" - fi -} - -# Get packages from commit -get_packages_from_commit() { - local commit_id="$1" - local composefs_image - - # Get ComposeFS image name - composefs_image=$(jq -r ".deployments[\"$commit_id\"].composefs_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") - - if [[ -z "$composefs_image" ]]; then - return 1 - fi - - # Mount and extract package list - local temp_mount="/tmp/apt-layer-commit-$$" - mkdir -p "$temp_mount" - - if composefs_mount "$composefs_image" "$temp_mount"; then - # Extract package list - chroot "$temp_mount" dpkg -l | grep '^ii' | awk '{print $2}' 2>/dev/null || true - - # Cleanup - composefs_unmount "$temp_mount" - rmdir "$temp_mount" - fi -} - -# Cleanup functions -cleanup_old_deployments() { - log_info "Cleaning up old deployments..." "apt-layer" - - # Keep last 5 deployments - local deployments - deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r | tail -n +6)) - - for commit_id in "${deployments[@]}"; do - log_info "Removing old deployment: $commit_id" "apt-layer" - - # Remove from database - jq --arg commit_id "$commit_id" 'del(.deployments[$commit_id])' \ - "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - - # Remove history file - rm -f "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" - - # Remove deployment directory - rm -rf "/var/lib/particle-os/deployments/$commit_id" - done -} - -cleanup_old_composefs_images() { - log_info "Cleaning up old ComposeFS images..." "apt-layer" - - # Get list of images still referenced by deployments - local referenced_images - referenced_images=($(jq -r '.deployments[].composefs_image' "$DEPLOYMENT_DB" 2>/dev/null || true)) - - # Get all ComposeFS images - local all_images - all_images=($(composefs_list_images)) - - # Remove unreferenced images - for image in "${all_images[@]}"; do - if [[ ! " ${referenced_images[*]} " =~ " ${image} " ]]; then - log_info "Removing unreferenced image: $image" "apt-layer" - composefs_remove_image "$image" - fi - done -} - -cleanup_old_bootloader_entries() { - log_info "Cleaning up old bootloader entries..." 
"apt-layer" - - # Get current and pending deployments - local current_deployment - current_deployment=$(get_current_deployment) - local pending_deployment - pending_deployment=$(get_pending_deployment) - - # Remove old bootloader entries - local boot_dir="/boot/loader/entries" - for entry in "$boot_dir"/apt-layer-*.conf; do - if [[ -f "$entry" ]]; then - local commit_id - commit_id=$(basename "$entry" .conf | sed 's/apt-layer-//') - - # Keep current and pending deployments - if [[ "$commit_id" != "$current_deployment" ]] && [[ "$commit_id" != "$pending_deployment" ]]; then - log_info "Removing old bootloader entry: $entry" "apt-layer" - rm -f "$entry" - fi - fi - done -} - -# --- END OF SCRIPTLET: 10-rpm-ostree-compat.sh --- - -# ============================================================================ -# Live Overlay System (rpm-ostree style) -# ============================================================================ - -# Ubuntu uBlue apt-layer Live Overlay System -# Implements live system layering similar to rpm-ostree -# Uses overlayfs for live package installation and management - -# ============================================================================= -# LIVE OVERLAY SYSTEM FUNCTIONS -# ============================================================================= - -# Live overlay state file (with fallbacks for when particle-config.sh is not loaded) -LIVE_OVERLAY_STATE_FILE="${UBLUE_ROOT:-/var/lib/particle-os}/live-overlay.state" -LIVE_OVERLAY_MOUNT_POINT="${UBLUE_ROOT:-/var/lib/particle-os}/live-overlay/mount" -LIVE_OVERLAY_PACKAGE_LOG="${UBLUE_LOG_DIR:-/var/log/particle-os}/live-overlay-packages.log" - -# Initialize live overlay system -init_live_overlay_system() { - log_info "Initializing live overlay system" "apt-layer" - - # Create live overlay directories - mkdir -p "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" - mkdir -p "$LIVE_OVERLAY_MOUNT_POINT" - - # Set proper permissions - chmod 755 "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" - chmod 700 "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" - - # Initialize package log if it doesn't exist - if [[ ! -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then - touch "$LIVE_OVERLAY_PACKAGE_LOG" - chmod 644 "$LIVE_OVERLAY_PACKAGE_LOG" - fi - - # Conditional DNS fix for chroot overlay (WSL, etc) - if [[ -d "$LIVE_OVERLAY_MOUNT_POINT" ]]; then - if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" getent hosts archive.ubuntu.com >/dev/null 2>&1; then - log_warning "DNS resolution failed in overlay. Injecting public DNS servers..." "apt-layer" - # Backup original resolv.conf if present - if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" ]]; then - cp "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" - fi - echo "nameserver 8.8.8.8" > "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" - echo "nameserver 1.1.1.1" >> "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" - chmod 644 "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf" - touch "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" - log_success "DNS configuration applied to overlay" "apt-layer" - else - log_info "DNS resolution in overlay is working. No changes made." 
"apt-layer" - fi - fi - - log_success "Live overlay system initialized" "apt-layer" -} - -# Check if live overlay is active -is_live_overlay_active() { - if [[ -f "$LIVE_OVERLAY_STATE_FILE" ]]; then - local state - state=$(cat "$LIVE_OVERLAY_STATE_FILE" 2>/dev/null || echo "") - [[ "$state" == "active" ]] - else - false - fi -} - -# Check if system supports live overlay -check_live_overlay_support() { - local errors=0 - local test_dir="/tmp/overlay-test-$$" - local test_lower="$test_dir/lower" - local test_upper="$test_dir/upper" - local test_work="$test_dir/work" - local test_mount="$test_dir/mount" - - # Check for overlay module - if ! modprobe -n overlay >/dev/null 2>&1; then - log_error "Overlay module not available" "apt-layer" - errors=$((errors + 1)) - fi - - # Create test directories - mkdir -p "$test_lower" "$test_upper" "$test_work" "$test_mount" 2>/dev/null - - # Check for overlayfs mount support - if ! mount -t overlay overlay -o "lowerdir=$test_lower,upperdir=$test_upper,workdir=$test_work" "$test_mount" 2>/dev/null; then - log_error "Overlayfs mount not supported" "apt-layer" - errors=$((errors + 1)) - else - umount "$test_mount" 2>/dev/null - fi - - # Cleanup test directories - rm -rf "$test_dir" 2>/dev/null - - # Check for read-only root filesystem - if ! is_root_readonly; then - log_warning "Root filesystem is not read-only - live overlay may not be necessary" "apt-layer" - fi - - if [[ $errors -gt 0 ]]; then - return 1 - fi - - return 0 -} - -# Check if root filesystem is read-only -is_root_readonly() { - local root_mount - root_mount=$(findmnt -n -o OPTIONS / | grep -o "ro" || echo "") - [[ -n "$root_mount" ]] -} - -# Start live overlay -start_live_overlay() { - log_info "Starting live overlay system" "apt-layer" - - # Check if already active - if is_live_overlay_active; then - log_warning "Live overlay is already active" "apt-layer" - return 0 - fi - - # Check system support - if ! check_live_overlay_support; then - log_error "System does not support live overlay" "apt-layer" - return 1 - fi - - # Initialize system - init_live_overlay_system - - # Create overlay mount - log_info "Creating overlay mount" "apt-layer" - if mount -t overlay overlay -o "lowerdir=/,upperdir=${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper},workdir=${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}" "$LIVE_OVERLAY_MOUNT_POINT"; then - log_success "Overlay mount created successfully" "apt-layer" - - # Mark overlay as active - echo "active" > "$LIVE_OVERLAY_STATE_FILE" - - log_success "Live overlay started successfully" "apt-layer" - log_info "Changes will be applied to overlay and can be committed or rolled back" "apt-layer" - - return 0 - else - log_error "Failed to create overlay mount" "apt-layer" - return 1 - fi -} - -# Stop live overlay -stop_live_overlay() { - log_info "Stopping live overlay system" "apt-layer" - - # Check if overlay is active - if ! 
is_live_overlay_active; then
-        log_warning "Live overlay is not active" "apt-layer"
-        return 0
-    fi
-
-    # Check for active processes
-    if check_active_processes; then
-        log_warning "Active processes detected - overlay will persist until processes complete" "apt-layer"
-        return 0
-    fi
-
-    # Undo DNS fix if we applied it
-    if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer" ]]; then
-        if [[ -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" ]]; then
-            mv "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf.aptlayerbak" "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
-        else
-            rm -f "$LIVE_OVERLAY_MOUNT_POINT/etc/resolv.conf"
-        fi
-        rm -f "$LIVE_OVERLAY_MOUNT_POINT/.dns_fixed_by_apt_layer"
-        log_info "DNS fix by apt-layer undone on overlay stop" "apt-layer"
-    fi
-
-    # Unmount overlay
-    log_info "Unmounting overlay" "apt-layer"
-    if umount "$LIVE_OVERLAY_MOUNT_POINT"; then
-        log_success "Overlay unmounted successfully" "apt-layer"
-
-        # Remove state file
-        rm -f "$LIVE_OVERLAY_STATE_FILE"
-
-        log_success "Live overlay stopped successfully" "apt-layer"
-        return 0
-    else
-        log_error "Failed to unmount overlay" "apt-layer"
-        return 1
-    fi
-}
-
-# Check for active processes that might prevent unmounting
-check_active_processes() {
-    # Check for package manager processes
-    if pgrep -f "apt|dpkg|apt-get" >/dev/null 2>&1; then
-        return 0
-    fi
-
-    # Check for processes using the overlay mount
-    if lsof "$LIVE_OVERLAY_MOUNT_POINT" >/dev/null 2>&1; then
-        return 0
-    fi
-
-    return 1
-}
-
-# Get live overlay status
-get_live_overlay_status() {
-    echo "=== Live Overlay Status ==="
-
-    if is_live_overlay_active; then
-        log_success "✅ Live overlay is ACTIVE" "apt-layer"
-
-        # Show mount details
-        if mountpoint -q "$LIVE_OVERLAY_MOUNT_POINT"; then
-            log_info "Overlay mount point: $LIVE_OVERLAY_MOUNT_POINT" "apt-layer"
-
-            # Show overlay usage
-            if [[ -d "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" ]]; then
-                local usage=$(du -sh "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" 2>/dev/null | cut -f1 || echo "unknown")
-                log_info "Overlay usage: $usage" "apt-layer"
-            fi
-
-            # Show installed packages
-            if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then
-                local package_count=$(wc -l < "$LIVE_OVERLAY_PACKAGE_LOG" 2>/dev/null || echo "0")
-                log_info "Packages installed in overlay: $package_count" "apt-layer"
-            fi
-        else
-            log_warning "⚠️ Overlay mount point not mounted" "apt-layer"
-        fi
-
-        # Check for active processes
-        if check_active_processes; then
-            log_warning "⚠️ Active processes detected - overlay cannot be stopped" "apt-layer"
-        fi
-    else
-        log_info "❌ Live overlay is not active" "apt-layer"
-
-        # Check if system supports live overlay
-        if check_live_overlay_support >/dev/null 2>&1; then
-            log_info "✅ System supports live overlay" "apt-layer"
-            log_info "Use '--live-overlay start' to start live overlay" "apt-layer"
-        else
-            log_warning "⚠️ System does not support live overlay" "apt-layer"
-        fi
-    fi
-
-    echo ""
-}
-
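+# A hedged end-to-end sketch of the lifecycle implemented above, assuming
+# apt-layer is run as root; "--live-overlay <action>" matches the actions
+# handled by manage_live_overlay below, while "--live-install" is an assumed
+# spelling for the live_install entry point:
+#
+#   apt-layer --live-overlay start              # mount overlayfs over /
+#   apt-layer --live-install htop               # install into the upper layer
+#   apt-layer --live-overlay status             # show usage and package log
+#   apt-layer --live-overlay commit "add htop"  # persist changes as a layer
+#   apt-layer --live-overlay rollback           # or discard them instead
+
-# Install packages in live overlay
-live_install() {
-    local packages=("$@")
-
-    log_info "Installing packages in live overlay: ${packages[*]}" "apt-layer"
-
-    # Check if overlay is active
-    if ! 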
is_live_overlay_active; then - log_error "Live overlay is not active" "apt-layer" - log_info "Use '--live-overlay start' to start live overlay first" "apt-layer" - return 1 - fi - - # Check for root privileges - if [[ $EUID -ne 0 ]]; then - log_error "Root privileges required for live installation" "apt-layer" - return 1 - fi - - # Update package lists in overlay - log_info "Updating package lists in overlay" "apt-layer" - if ! chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get update; then - log_error "Failed to update package lists" "apt-layer" - log_warning "Network or DNS error? For offline or WSL overlays, use: apt-layer --live-dpkg <.deb files>" "apt-layer" - return 1 - fi - - # Install packages in overlay - log_info "Installing packages in overlay" "apt-layer" - if chroot "$LIVE_OVERLAY_MOUNT_POINT" apt-get install -y "${packages[@]}"; then - log_success "Packages installed successfully in overlay" "apt-layer" - - # Log installed packages - for package in "${packages[@]}"; do - echo "$(date '+%Y-%m-%d %H:%M:%S') - INSTALLED: $package" >> "$LIVE_OVERLAY_PACKAGE_LOG" - done - - log_info "Changes are applied to overlay and can be committed or rolled back" "apt-layer" - return 0 - else - log_error "Failed to install packages in overlay" "apt-layer" - log_warning "If this is a network or DNS issue, try: apt-layer --live-dpkg <.deb files>" "apt-layer" - return 1 - fi -} - -# Manage live overlay -manage_live_overlay() { - local action="$1" - shift - local options=("$@") - - case "$action" in - "start") - start_live_overlay - ;; - "stop") - stop_live_overlay - ;; - "status") - get_live_overlay_status - ;; - "commit") - local message="${options[0]:-Live overlay changes}" - commit_live_overlay "$message" - ;; - "rollback") - rollback_live_overlay - ;; - "list") - list_live_overlay_packages - ;; - "clean") - clean_live_overlay - ;; - *) - log_error "Unknown live overlay action: $action" "apt-layer" - log_info "Valid actions: start, stop, status, commit, rollback, list, clean" "apt-layer" - return 1 - ;; - esac -} - -# Commit live overlay changes -commit_live_overlay() { - local message="$1" - - log_info "Committing live overlay changes: $message" "apt-layer" - - # Check if overlay is active - if ! is_live_overlay_active; then - log_error "Live overlay is not active" "apt-layer" - return 1 - fi - - # Check if there are changes to commit - if ! 
has_overlay_changes; then - log_warning "No changes to commit" "apt-layer" - return 0 - fi - - # Create new ComposeFS layer from overlay changes - local timestamp=$(date '+%Y%m%d_%H%M%S') - local layer_name="live-overlay-commit-${timestamp}" - - log_info "Creating new layer: $layer_name" "apt-layer" - - # Create layer from overlay changes - if create_layer_from_overlay "$layer_name" "$message"; then - log_success "Live overlay changes committed as layer: $layer_name" "apt-layer" - - # Clean up overlay - clean_live_overlay - - return 0 - else - log_error "Failed to commit live overlay changes" "apt-layer" - return 1 - fi -} - -# Check if overlay has changes -has_overlay_changes() { - if [[ -d "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" ]]; then - # Check if upper directory has any content - if [[ -n "$(find "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}" -mindepth 1 -maxdepth 1 2>/dev/null)" ]]; then - return 0 - fi - fi - - return 1 -} - -# Create layer from overlay changes -create_layer_from_overlay() { - local layer_name="$1" - local message="$2" - - # Create temporary directory for layer - local temp_layer_dir="${UBLUE_TEMP_DIR:-/var/lib/particle-os/temp}/live-layer-${layer_name}" - mkdir -p "$temp_layer_dir" - - # Copy overlay changes to temporary directory - log_info "Copying overlay changes to temporary layer" "apt-layer" - if ! cp -a "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}"/* "$temp_layer_dir/" 2>/dev/null; then - log_error "Failed to copy overlay changes" "apt-layer" - rm -rf "$temp_layer_dir" - return 1 - fi - - # Create ComposeFS layer - log_info "Creating ComposeFS layer from overlay changes" "apt-layer" - if ! create_composefs_layer "$temp_layer_dir" "$layer_name" "$message"; then - log_error "Failed to create ComposeFS layer" "apt-layer" - rm -rf "$temp_layer_dir" - return 1 - fi - - # Clean up temporary directory - rm -rf "$temp_layer_dir" - - return 0 -} - -# Create ComposeFS layer from directory -create_composefs_layer() { - local source_dir="$1" - local layer_name="$2" - local message="$3" - - # Use composefs-alternative to create layer - if command -v composefs-alternative >/dev/null 2>&1; then - if composefs-alternative create-layer "$source_dir" "$layer_name" "$message"; then - return 0 - fi - fi - - # Fallback: create simple squashfs layer - local layer_file="${UBLUE_BUILD_DIR:-/var/lib/particle-os/build}/${layer_name}.squashfs" - mkdir -p "$(dirname "$layer_file")" - - if mksquashfs "$source_dir" "$layer_file" -comp "${UBLUE_SQUASHFS_COMPRESSION:-xz}" -b "${UBLUE_SQUASHFS_BLOCK_SIZE:-1M}"; then - log_success "Created squashfs layer: $layer_file" "apt-layer" - return 0 - else - log_error "Failed to create squashfs layer" "apt-layer" - return 1 - fi -} - -# Rollback live overlay changes -rollback_live_overlay() { - log_info "Rolling back live overlay changes" "apt-layer" - - # Check if overlay is active - if ! 
is_live_overlay_active; then - log_error "Live overlay is not active" "apt-layer" - return 1 - fi - - # Stop overlay (this will discard changes) - if stop_live_overlay; then - log_success "Live overlay changes rolled back successfully" "apt-layer" - return 0 - else - log_error "Failed to rollback live overlay changes" "apt-layer" - return 1 - fi -} - -# List packages installed in live overlay -list_live_overlay_packages() { - log_info "Listing packages installed in live overlay" "apt-layer" - - if [[ -f "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then - if [[ -s "$LIVE_OVERLAY_PACKAGE_LOG" ]]; then - echo "=== Packages Installed in Live Overlay ===" - cat "$LIVE_OVERLAY_PACKAGE_LOG" - echo "" - else - log_info "No packages installed in live overlay" "apt-layer" - fi - else - log_info "No package log found" "apt-layer" - fi -} - -# Clean live overlay -clean_live_overlay() { - log_info "Cleaning live overlay" "apt-layer" - - # Stop overlay if active - if is_live_overlay_active; then - stop_live_overlay - fi - - # Clean up overlay directories - rm -rf "${UBLUE_LIVE_UPPER_DIR:-/var/lib/particle-os/live-overlay/upper}"/* "${UBLUE_LIVE_WORK_DIR:-/var/lib/particle-os/live-overlay/work}"/* 2>/dev/null - - # Clean up package log - rm -f "$LIVE_OVERLAY_PACKAGE_LOG" - - # Remove state file - rm -f "$LIVE_OVERLAY_STATE_FILE" - - log_success "Live overlay cleaned successfully" "apt-layer" -} - -# ============================================================================= -# INTEGRATION FUNCTIONS -# ============================================================================= - -# Initialize live overlay system on script startup -init_live_overlay_on_startup() { - # Only initialize if not already done - if [[ ! -d "${UBLUE_LIVE_OVERLAY_DIR:-/var/lib/particle-os/live-overlay}" ]]; then - init_live_overlay_system - fi -} - -# Cleanup live overlay on script exit -cleanup_live_overlay_on_exit() { - # Only cleanup if overlay is active and no processes are using it - if is_live_overlay_active && ! 
check_active_processes; then
-        log_info "Cleaning up live overlay on exit" "apt-layer"
-        stop_live_overlay
-    fi
-}
-
-# Register cleanup function
-trap cleanup_live_overlay_on_exit EXIT
-
-# --- END OF SCRIPTLET: 05-live-overlay.sh ---
-
 # ============================================================================
 # Bootloader Integration (UEFI/GRUB/systemd-boot)
 # ============================================================================
@@ -4993,5403 +4144,853 @@ trap cleanup_bootloader_on_exit EXIT
 # --- END OF SCRIPTLET: 07-bootloader.sh ---
 
 # ============================================================================
-# Advanced Package Management (Enterprise Features)
+# Atomic Deployment System
 # ============================================================================
+# Atomic deployment system for the Ubuntu uBlue apt-layer tool.
+# Implements commit-based state management and atomic whole-system upgrades:
+# deployments are switched as a unit rather than upgraded package by package.
 
-# Ubuntu uBlue apt-layer Advanced Package Management
-# Provides enterprise-grade package management with multi-user support, security features,
-# and advanced dependency resolution for production deployments
+# Atomic deployment state management
+DEPLOYMENT_DB="/var/lib/particle-os/deployments.json"
+CURRENT_DEPLOYMENT_FILE="/var/lib/particle-os/current-deployment"
+PENDING_DEPLOYMENT_FILE="/var/lib/particle-os/pending-deployment"
+DEPLOYMENT_HISTORY_DIR="/var/lib/particle-os/history"
 
-# =============================================================================
-# ADVANCED PACKAGE MANAGEMENT FUNCTIONS
-# =============================================================================
-
-# Advanced package management configuration (with fallbacks for when particle-config.sh is not loaded)
-ADVANCED_PKG_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/ubuntu-ublue}/package-management"
-ADVANCED_PKG_STATE_DIR="${UBLUE_ROOT:-/var/lib/particle-os}/package-management"
-ADVANCED_PKG_CACHE_DIR="$ADVANCED_PKG_STATE_DIR/cache"
-ADVANCED_PKG_DEPENDENCIES_DIR="$ADVANCED_PKG_STATE_DIR/dependencies"
-ADVANCED_PKG_SECURITY_DIR="$ADVANCED_PKG_STATE_DIR/security"
-ADVANCED_PKG_USERS_DIR="$ADVANCED_PKG_STATE_DIR/users"
-ADVANCED_PKG_POLICIES_DIR="$ADVANCED_PKG_STATE_DIR/policies"
-ADVANCED_PKG_DKMS_DIR="$ADVANCED_PKG_STATE_DIR/dkms"
-ADVANCED_PKG_NVIDIA_DIR="$ADVANCED_PKG_STATE_DIR/nvidia"
-
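+# A minimal sketch of how these state files are expected to relate once
+# initialized; the jq query and the "deploy-0001" ID are illustrative
+# assumptions, not output from a real deployment:
+#
+#   jq -r '.current_deployment // "none"' "$DEPLOYMENT_DB"
+#   cat "$CURRENT_DEPLOYMENT_FILE"    # e.g. deploy-0001
+#   ls "$DEPLOYMENT_HISTORY_DIR"      # one record per retained deployment
+
-# Initialize advanced package management system
-init_advanced_package_management() {
-    log_info "Initializing advanced package management system" "apt-layer"
+# Initialize deployment database
+init_deployment_db() {
+    log_info "Initializing atomic deployment database..." 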
"apt-layer" - # Create advanced package management directories - mkdir -p "$ADVANCED_PKG_CONFIG_DIR" "$ADVANCED_PKG_STATE_DIR" "$ADVANCED_PKG_CACHE_DIR" - mkdir -p "$ADVANCED_PKG_DEPENDENCIES_DIR" "$ADVANCED_PKG_SECURITY_DIR" "$ADVANCED_PKG_USERS_DIR" - mkdir -p "$ADVANCED_PKG_POLICIES_DIR" "$ADVANCED_PKG_DKMS_DIR" "$ADVANCED_PKG_NVIDIA_DIR" - - # Set proper permissions - chmod 755 "$ADVANCED_PKG_CONFIG_DIR" "$ADVANCED_PKG_STATE_DIR" - chmod 700 "$ADVANCED_PKG_CACHE_DIR" "$ADVANCED_PKG_DEPENDENCIES_DIR" "$ADVANCED_PKG_SECURITY_DIR" - chmod 750 "$ADVANCED_PKG_USERS_DIR" "$ADVANCED_PKG_POLICIES_DIR" "$ADVANCED_PKG_DKMS_DIR" "$ADVANCED_PKG_NVIDIA_DIR" - - # Initialize user management database - init_user_management_db - - # Initialize security policies - init_security_policies - - # Initialize dependency resolution cache - init_dependency_cache - - # Initialize DKMS management system - init_dkms_management - - # Initialize NVIDIA support system - init_nvidia_support - - log_success "Advanced package management system initialized" "apt-layer" -} - -# Initialize user management database -init_user_management_db() { - local user_db="$ADVANCED_PKG_USERS_DIR/users.json" - - if [[ ! -f "$user_db" ]]; then - cat > "$user_db" << EOF -{ - "users": {}, - "groups": {}, - "permissions": {}, - "roles": { - "admin": { - "description": "Full system administration", - "permissions": ["all"] - }, - "package_manager": { - "description": "Package installation and management", - "permissions": ["install", "remove", "update", "list"] - }, - "viewer": { - "description": "Read-only access to package information", - "permissions": ["list", "info", "status"] - } - } -} -EOF - chmod 600 "$user_db" - fi -} - -# Initialize security policies -init_security_policies() { - local security_policy="$ADVANCED_PKG_SECURITY_DIR/security-policy.json" - - if [[ ! -f "$security_policy" ]]; then - cat > "$security_policy" << EOF -{ - "package_verification": { - "enabled": true, - "gpg_check": true, - "hash_verification": true, - "source_verification": true - }, - "installation_policies": { - "allow_unsigned_packages": false, - "allow_external_sources": false, - "require_approval": false, - "max_package_size_mb": 1000 - }, - "security_scanning": { - "enabled": true, - "scan_installed_packages": true, - "scan_dependencies": true, - "vulnerability_check": true - }, - "audit_logging": { - "enabled": true, - "log_level": "INFO", - "retention_days": 90 - } -} -EOF - chmod 600 "$security_policy" - fi -} - -# Initialize dependency cache -init_dependency_cache() { - local dep_cache="$ADVANCED_PKG_DEPENDENCIES_DIR/dependency-cache.json" - - if [[ ! -f "$dep_cache" ]]; then - cat > "$dep_cache" << EOF -{ - "package_dependencies": {}, - "reverse_dependencies": {}, - "conflict_resolution": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 644 "$dep_cache" - fi -} - -# Initialize DKMS management system -init_dkms_management() { - local dkms_config="$ADVANCED_PKG_DKMS_DIR/dkms-config.json" - local dkms_modules="$ADVANCED_PKG_DKMS_DIR/installed-modules.json" - local dkms_kernels="$ADVANCED_PKG_DKMS_DIR/kernel-versions.json" - - log_info "Initializing DKMS management system" "apt-layer" - - # Create DKMS configuration - if [[ ! 
-f "$dkms_config" ]]; then - cat > "$dkms_config" << EOF -{ - "dkms_enabled": true, - "auto_rebuild": true, - "build_environment": "container", - "kernel_headers_auto": true, - "rollback_on_failure": true, - "log_level": "info", - "build_timeout": 3600, - "max_parallel_builds": 2, - "containerized_builds": true, - "nvidia_support": true, - "gaming_optimizations": true -} -EOF - chmod 600 "$dkms_config" - fi - - # Create installed modules tracking - if [[ ! -f "$dkms_modules" ]]; then - cat > "$dkms_modules" << EOF -{ - "installed_modules": {}, - "build_history": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$dkms_modules" - fi - - # Create kernel version tracking - if [[ ! -f "$dkms_kernels" ]]; then - cat > "$dkms_kernels" << EOF -{ - "current": "$(uname -r)", - "installed": ["$(uname -r)"], - "dkms_modules": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$dkms_kernels" - fi - - # Create DKMS hooks directory - mkdir -p "$ADVANCED_PKG_DKMS_DIR/hooks" - chmod 700 "$ADVANCED_PKG_DKMS_DIR/hooks" - - # Create build environments directory - mkdir -p "$ADVANCED_PKG_DKMS_DIR/build-environments" - chmod 700 "$ADVANCED_PKG_DKMS_DIR/build-environments" - - log_success "DKMS management system initialized" "apt-layer" -} - -# Initialize NVIDIA support system -init_nvidia_support() { - local nvidia_config="$ADVANCED_PKG_NVIDIA_DIR/nvidia-config.json" - local nvidia_drivers="$ADVANCED_PKG_NVIDIA_DIR/installed-drivers.json" - local nvidia_prime="$ADVANCED_PKG_NVIDIA_DIR/prime-config.json" - - log_info "Initializing NVIDIA support system" "apt-layer" - - # Create NVIDIA configuration - if [[ ! -f "$nvidia_config" ]]; then - cat > "$nvidia_config" << EOF -{ - "nvidia_support_enabled": true, - "auto_install_drivers": true, - "preferred_driver_version": "535", - "prime_support": true, - "gaming_optimizations": true, - "cuda_support": false, - "auto_switch_gpu": true, - "performance_mode": "balanced" -} -EOF - chmod 600 "$nvidia_config" - fi - - # Create installed drivers tracking - if [[ ! -f "$nvidia_drivers" ]]; then - cat > "$nvidia_drivers" << EOF -{ - "installed_drivers": {}, - "driver_history": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$nvidia_drivers" - fi - - # Create NVIDIA Prime configuration - if [[ ! -f "$nvidia_prime" ]]; then - cat > "$nvidia_prime" << EOF -{ - "prime_enabled": true, - "current_gpu": "auto", - "gpu_switching": "manual", - "auto_detect": true, - "gpu_configurations": { - "integrated": { - "description": "Integrated GPU (Intel/AMD)", - "power_saving": true - }, - "nvidia": { - "description": "NVIDIA Discrete GPU", - "performance": true - }, - "auto": { - "description": "Automatic GPU selection", - "dynamic": true - } - } -} -EOF - chmod 600 "$nvidia_prime" - fi - - log_success "NVIDIA support system initialized" "apt-layer" -} - -# Check user permissions -check_user_permissions() { - local user="$1" - local required_permission="$2" - - log_debug "Checking permission '$required_permission' for user '$user'" "apt-layer" - - # Root user has all permissions - if [[ "$user" == "root" ]] || [[ $EUID -eq 0 ]]; then - return 0 - fi - - local user_db="$ADVANCED_PKG_USERS_DIR/users.json" - - if [[ ! 
-f "$user_db" ]]; then - log_error "User management database not found" "apt-layer" + # Ensure directories exist with proper permissions + mkdir -p "$DEPLOYMENT_HISTORY_DIR" 2>/dev/null || { + log_error "Failed to create deployment history directory: $DEPLOYMENT_HISTORY_DIR" "apt-layer" return 1 - fi - - # Get user role - local user_role - user_role=$(jq -r ".users[\"$user\"].role // \"viewer\"" "$user_db" 2>/dev/null || echo "viewer") - - # Get role permissions - local role_permissions - role_permissions=$(jq -r ".roles[\"$user_role\"].permissions[]?" "$user_db" 2>/dev/null || echo "") - - # Check if user has required permission - if echo "$role_permissions" | grep -q "^$required_permission$" || echo "$role_permissions" | grep -q "^all$"; then - return 0 - fi - - log_error "User '$user' does not have permission '$required_permission'" "apt-layer" - return 1 -} - -# Add user to package management system -add_package_user() { - local username="$1" - local role="${2:-viewer}" - - if [[ -z "$username" ]]; then - log_error "Username required" "apt-layer" - return 1 - fi - - log_info "Adding user '$username' with role '$role'" "apt-layer" - - # Check if user exists in system - if ! id "$username" &>/dev/null; then - log_error "User '$username' does not exist in system" "apt-layer" - return 1 - fi - - local user_db="$ADVANCED_PKG_USERS_DIR/users.json" - - # Check if role exists - if ! jq -e ".roles[\"$role\"]" "$user_db" >/dev/null 2>&1; then - log_error "Role '$role' does not exist" "apt-layer" - return 1 - fi - - # Add user to database - jq --arg user "$username" --arg role "$role" '.users[$user] = {"role": $role, "added": "'$(date -u +%Y-%m-%dT%H:%M:%SZ)'"}' "$user_db" > "$user_db.tmp" && \ - mv "$user_db.tmp" "$user_db" - - log_success "User '$username' added with role '$role'" "apt-layer" - return 0 -} - -# Remove user from package management system -remove_package_user() { - local username="$1" - - if [[ -z "$username" ]]; then - log_error "Username required" "apt-layer" - return 1 - fi - - log_info "Removing user '$username'" "apt-layer" - - local user_db="$ADVANCED_PKG_USERS_DIR/users.json" - - # Remove user from database - jq --arg user "$username" 'del(.users[$user])' "$user_db" > "$user_db.tmp" && \ - mv "$user_db.tmp" "$user_db" - - log_success "User '$username' removed" "apt-layer" - return 0 -} - -# List package management users -list_package_users() { - log_info "Listing package management users" "apt-layer" - - local user_db="$ADVANCED_PKG_USERS_DIR/users.json" - - if [[ ! 
-f "$user_db" ]]; then - log_error "User management database not found" "apt-layer" - return 1 - fi - - echo "=== Package Management Users ===" - - local users - users=$(jq -r '.users | to_entries[] | "\(.key): \(.value.role)"' "$user_db" 2>/dev/null || echo "") - - if [[ -n "$users" ]]; then - echo "$users" | while read -r user_info; do - echo " $user_info" - done - else - log_info "No users found" "apt-layer" - fi - - echo "" - echo "=== Available Roles ===" - - local roles - roles=$(jq -r '.roles | to_entries[] | "\(.key): \(.value.description)"' "$user_db" 2>/dev/null || echo "") - - if [[ -n "$roles" ]]; then - echo "$roles" | while read -r role_info; do - echo " $role_info" - done - else - log_info "No roles found" "apt-layer" - fi - - echo "" -} - -# Advanced dependency resolution -resolve_package_dependencies() { - local packages=("$@") - - if [[ ${#packages[@]} -eq 0 ]]; then - log_error "No packages specified for dependency resolution" "apt-layer" - return 1 - fi - - log_info "Resolving dependencies for packages: ${packages[*]}" "apt-layer" - - # Create temporary file for dependency resolution - local temp_deps="$ADVANCED_PKG_CACHE_DIR/temp-deps-$$.txt" - local resolved_deps="$ADVANCED_PKG_CACHE_DIR/resolved-deps-$$.txt" - - # Get package dependencies using apt-cache - for package in "${packages[@]}"; do - log_debug "Resolving dependencies for package: $package" "apt-layer" - - # Get direct dependencies - apt-cache depends "$package" 2>/dev/null | grep -E "^(Depends|Recommends|Suggests)" | cut -d: -f2 | tr -d ' ' | grep -v "^$" >> "$temp_deps" || true - - # Get reverse dependencies (what depends on this package) - apt-cache rdepends "$package" 2>/dev/null | grep -v "^Reverse Depends" | grep -v "^$" >> "$temp_deps" || true - done - - # Remove duplicates and sort - sort -u "$temp_deps" > "$resolved_deps" - - # Check for conflicts - local conflicts=() - while read -r dep; do - if [[ -n "$dep" ]]; then - # Check if package conflicts with any existing packages - if check_package_conflicts "$dep"; then - conflicts+=("$dep") - fi - fi - done < "$resolved_deps" - - # Report conflicts - if [[ ${#conflicts[@]} -gt 0 ]]; then - log_warning "Package conflicts detected: ${conflicts[*]}" "apt-layer" - log_info "Manual resolution may be required" "apt-layer" - fi - - # Clean up temporary files - rm -f "$temp_deps" "$resolved_deps" - - log_success "Dependency resolution completed" "apt-layer" - return 0 -} - -# Check for package conflicts -check_package_conflicts() { - local package="$1" - - if [[ -z "$package" ]]; then - return 1 - fi - - # Check if package conflicts with any installed packages - local conflicts - conflicts=$(apt-cache policy "$package" 2>/dev/null | grep -A1 "Installed" | grep -E "(Conflicts|Breaks)" || echo "") - - if [[ -n "$conflicts" ]]; then - log_warning "Package '$package' has conflicts: $conflicts" "apt-layer" - return 0 - fi - - return 1 -} - -# Advanced package installation with security checks -advanced_install_packages() { - local packages=("$@") - local user="${SUDO_USER:-$USER}" - - if [[ ${#packages[@]} -eq 0 ]]; then - log_error "No packages specified for installation" "apt-layer" - return 1 - fi - - log_info "Advanced package installation for packages: ${packages[*]}" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "install"; then - return 1 - fi - - # Check security policies - if ! check_security_policies "${packages[@]}"; then - return 1 - fi - - # Resolve dependencies - if ! 
resolve_package_dependencies "${packages[@]}"; then - log_error "Dependency resolution failed" "apt-layer" - return 1 - fi - - # Start transaction - start_transaction "advanced_install_${packages[0]}" - - # Update package lists - update_transaction_phase "updating_package_lists" - log_info "Updating package lists" "apt-layer" - if ! apt-get update; then - log_error "Failed to update package lists" "apt-layer" - rollback_transaction - return 1 - fi - - # Install packages - update_transaction_phase "installing_packages" - log_info "Installing packages: ${packages[*]}" "apt-layer" - if ! apt-get install -y "${packages[@]}"; then - log_error "Failed to install packages: ${packages[*]}" "apt-layer" - rollback_transaction - return 1 - fi - - # Clean up - apt-get clean - apt-get autoremove -y - - # Log installation - log_package_installation "$user" "${packages[@]}" - - commit_transaction - log_success "Advanced package installation completed: ${packages[*]}" "apt-layer" - return 0 -} - -# Check security policies -check_security_policies() { - local packages=("$@") - - log_info "Checking security policies for packages: ${packages[*]}" "apt-layer" - - local security_policy="$ADVANCED_PKG_SECURITY_DIR/security-policy.json" - - if [[ ! -f "$security_policy" ]]; then - log_warning "Security policy not found, using default policies" "apt-layer" - return 0 - fi - - # Check package verification settings - local gpg_check - gpg_check=$(jq -r '.package_verification.gpg_check' "$security_policy" 2>/dev/null || echo "true") - - if [[ "$gpg_check" == "true" ]]; then - log_info "GPG verification enabled" "apt-layer" - # Perform comprehensive GPG verification - for package in "${packages[@]}"; do - if ! check_package_gpg_signature "$package"; then - log_error "Package '$package' failed GPG signature verification" "apt-layer" - return 1 - fi - done - fi - - # Check installation policies - local allow_unsigned - allow_unsigned=$(jq -r '.installation_policies.allow_unsigned_packages' "$security_policy" 2>/dev/null || echo "false") - - if [[ "$allow_unsigned" == "false" ]]; then - log_info "Unsigned packages not allowed" "apt-layer" - # Check for unsigned packages using enhanced signing verification - for package in "${packages[@]}"; do - if ! check_package_signing "$package"; then - log_error "Package '$package' is not properly signed" "apt-layer" - return 1 - fi - done - fi - - # Check package size limits - local max_size - max_size=$(jq -r '.installation_policies.max_package_size_mb' "$security_policy" 2>/dev/null || echo "1000") - - for package in "${packages[@]}"; do - if ! check_package_size "$package" "$max_size"; then - log_error "Package '$package' exceeds size limit of ${max_size}MB" "apt-layer" - return 1 - fi - done - - log_success "Security policy checks passed" "apt-layer" - return 0 -} - -# Check package signature -check_package_signature() { - local package="$1" - - if [[ -z "$package" ]]; then - return 1 - fi - - # Check if package is signed (simplified check) - local package_info - package_info=$(apt-cache policy "$package" 2>/dev/null || echo "") - - if echo "$package_info" | grep -q "Signed-By"; then - return 0 - fi - - return 1 -} - -# Check package GPG signature (comprehensive verification) -check_package_gpg_signature() { - local package="$1" - - if [[ -z "$package" ]]; then - return 1 - fi - - log_debug "Verifying GPG signature for package: $package" "apt-layer" - - # Check if GPG is available - if ! 
command -v gpg &>/dev/null; then - log_warning "GPG not available, skipping signature verification for: $package" "apt-layer" - return 0 - fi - - # Get package source and key information - local package_info - package_info=$(apt-cache policy "$package" 2>/dev/null || echo "") - - # Check if package has a signed-by field - local signed_by - signed_by=$(echo "$package_info" | grep "Signed-By:" | cut -d: -f2 | tr -d ' ' || echo "") - - if [[ -z "$signed_by" ]]; then - log_warning "Package '$package' has no Signed-By field" "apt-layer" - return 1 - fi - - # Verify the GPG key exists and is trusted - if ! gpg --list-keys "$signed_by" &>/dev/null; then - log_warning "GPG key '$signed_by' for package '$package' not found in keyring" "apt-layer" - return 1 - fi - - # Additional verification: check if the key is trusted - local trust_level - trust_level=$(gpg --list-keys --with-colons "$signed_by" 2>/dev/null | grep "^pub:" | cut -d: -f2 || echo "") - - if [[ "$trust_level" != "u" ]] && [[ "$trust_level" != "f" ]]; then - log_warning "GPG key '$signed_by' for package '$package' is not fully trusted (trust level: $trust_level)" "apt-layer" - return 1 - fi - - log_debug "GPG signature verification passed for package: $package" "apt-layer" - return 0 -} - -# Check package signing (enhanced verification) -check_package_signing() { - local package="$1" - - if [[ -z "$package" ]]; then - return 1 - fi - - log_debug "Checking package signing for: $package" "apt-layer" - - # Check if debsig-verify is available for Debian package signing verification - if command -v debsig-verify &>/dev/null; then - # Get package file path (this would require downloading or finding the .deb file) - local package_file - package_file=$(find /var/cache/apt/archives -name "${package}*.deb" 2>/dev/null | head -1 || echo "") - - if [[ -n "$package_file" ]] && [[ -f "$package_file" ]]; then - if debsig-verify "$package_file" &>/dev/null; then - log_debug "Package signing verification passed for: $package" "apt-layer" - return 0 - else - log_warning "Package signing verification failed for: $package" "apt-layer" - return 1 - fi - fi - fi - - # Fallback to basic signature check - if check_package_signature "$package"; then - log_debug "Basic package signature check passed for: $package" "apt-layer" - return 0 - fi - - log_warning "Package signing verification failed for: $package" "apt-layer" - return 1 -} - -# Check package size -check_package_size() { - local package="$1" - local max_size_mb="$2" - - if [[ -z "$package" ]] || [[ -z "$max_size_mb" ]]; then - return 1 - fi - - # Get package size (simplified check) - local package_size - package_size=$(apt-cache show "$package" 2>/dev/null | grep "^Size:" | cut -d: -f2 | tr -d ' ' || echo "0") - - if [[ "$package_size" -gt $((max_size_mb * 1024 * 1024)) ]]; then - return 1 - fi - - return 0 -} - -# Log package installation -log_package_installation() { - local user="$1" - shift - local packages=("$@") - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - - for package in "${packages[@]}"; do - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - INSTALL - User: $user - Package: $package" >> "$audit_log" - done -} - -# Advanced package removal with dependency checking -advanced_remove_packages() { - local packages=("$@") - local user="${SUDO_USER:-$USER}" - - if [[ ${#packages[@]} -eq 0 ]]; then - log_error "No packages specified for removal" "apt-layer" - return 1 - fi - - log_info "Advanced package removal for packages: ${packages[*]}" "apt-layer" - - # Check user permissions - if ! 
check_user_permissions "$user" "remove"; then - return 1 - fi - - # Check for critical dependencies - for package in "${packages[@]}"; do - if check_critical_dependency "$package"; then - log_warning "Package '$package' may be a critical dependency" "apt-layer" - log_info "Manual verification recommended" "apt-layer" - fi - done - - # Start transaction - start_transaction "advanced_remove_${packages[0]}" - - # Remove packages - update_transaction_phase "removing_packages" - log_info "Removing packages: ${packages[*]}" "apt-layer" - if ! apt-get remove -y "${packages[@]}"; then - log_error "Failed to remove packages: ${packages[*]}" "apt-layer" - rollback_transaction - return 1 - fi - - # Clean up - apt-get autoremove -y - apt-get clean - - # Log removal - log_package_removal "$user" "${packages[@]}" - - commit_transaction - log_success "Advanced package removal completed: ${packages[*]}" "apt-layer" - return 0 -} - -# Check if package is a critical dependency -check_critical_dependency() { - local package="$1" - - if [[ -z "$package" ]]; then - return 1 - fi - - # List of critical system packages (simplified) - local critical_packages=("systemd" "bash" "coreutils" "apt" "dpkg" "base-files" "ubuntu-minimal") - - for critical in "${critical_packages[@]}"; do - if [[ "$package" == "$critical" ]]; then - return 0 - fi - done - - return 1 -} - -# Log package removal -log_package_removal() { - local user="$1" - shift - local packages=("$@") - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - - for package in "${packages[@]}"; do - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - REMOVE - User: $user - Package: $package" >> "$audit_log" - done -} - -# Advanced package update with rollback capability -advanced_update_packages() { - local packages=("$@") - local user="${SUDO_USER:-$USER}" - - log_info "Advanced package update for packages: ${packages[*]}" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "update"; then - return 1 - fi - - # Create backup of current state - local backup_id - backup_id=$(create_package_backup "${packages[@]}") - - if [[ -z "$backup_id" ]]; then - log_error "Failed to create package backup" "apt-layer" - return 1 - fi - - log_info "Created backup: $backup_id" "apt-layer" - - # Start transaction - start_transaction "advanced_update_${packages[0]}" - - # Update package lists - update_transaction_phase "updating_package_lists" - log_info "Updating package lists" "apt-layer" - if ! apt-get update; then - log_error "Failed to update package lists" "apt-layer" - rollback_transaction - return 1 - fi - - # Update packages - update_transaction_phase "updating_packages" - log_info "Updating packages: ${packages[*]}" "apt-layer" - if ! 
apt-get upgrade -y "${packages[@]}"; then - log_error "Failed to update packages: ${packages[*]}" "apt-layer" - log_info "Rolling back to backup: $backup_id" "apt-layer" - restore_package_backup "$backup_id" - rollback_transaction - return 1 - fi - - # Log update - log_package_update "$user" "${packages[@]}" - - commit_transaction - log_success "Advanced package update completed: ${packages[*]}" "apt-layer" - return 0 -} - -# Create package backup -create_package_backup() { - local packages=("$@") - local backup_id="backup-$(date +%Y%m%d-%H%M%S)-$$" - local backup_dir="$ADVANCED_PKG_STATE_DIR/backups/$backup_id" - - mkdir -p "$backup_dir" - - log_info "Creating comprehensive package backup: $backup_id" "apt-layer" - - # Save package states - for package in "${packages[@]}"; do - dpkg -l "$package" > "$backup_dir/${package}.state" 2>/dev/null || true - done - - # Save complete package list - dpkg -l | grep -E "^ii" > "$backup_dir/installed-packages.list" 2>/dev/null || true - - # Save package configuration - dpkg --get-selections > "$backup_dir/package-selections.list" 2>/dev/null || true - - # Save repository information - cp /etc/apt/sources.list "$backup_dir/sources.list" 2>/dev/null || true - cp -r /etc/apt/sources.list.d "$backup_dir/" 2>/dev/null || true - - # Save GPG key information - apt-key list > "$backup_dir/apt-keys.list" 2>/dev/null || true - - # Create backup metadata - cat > "$backup_dir/backup-metadata.json" << EOF -{ - "backup_id": "$backup_id", - "created_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "created_by": "$(whoami)", - "packages": $(printf '%s\n' "${packages[@]}" | jq -R . | jq -s .), - "system_info": { - "hostname": "$(hostname)", - "distribution": "$(lsb_release -d | cut -f2 2>/dev/null || echo 'unknown')", - "kernel": "$(uname -r)" - } -} -EOF - - # Compress backup for storage efficiency - tar -czf "$backup_dir.tar.gz" -C "$ADVANCED_PKG_STATE_DIR/backups" "$backup_id" 2>/dev/null || true - - log_success "Package backup created: $backup_id" "apt-layer" - echo "$backup_id" -} - -# Restore package backup -restore_package_backup() { - local backup_id="$1" - local backup_dir="$ADVANCED_PKG_STATE_DIR/backups/$backup_id" - local backup_archive="$backup_dir.tar.gz" - - # Check if backup exists (try both directory and compressed archive) - if [[ ! -d "$backup_dir" ]] && [[ ! -f "$backup_archive" ]]; then - log_error "Backup not found: $backup_id" "apt-layer" - return 1 - fi - - log_info "Restoring from backup: $backup_id" "apt-layer" - - # Extract compressed backup if needed - if [[ -f "$backup_archive" ]] && [[ ! -d "$backup_dir" ]]; then - log_info "Extracting compressed backup..." "apt-layer" - tar -xzf "$backup_archive" -C "$ADVANCED_PKG_STATE_DIR/backups/" 2>/dev/null || { - log_error "Failed to extract backup archive: $backup_archive" "apt-layer" - return 1 - } - fi - - # Verify backup integrity - if [[ ! -f "$backup_dir/backup-metadata.json" ]]; then - log_error "Backup metadata not found, backup may be corrupted: $backup_id" "apt-layer" - return 1 - fi - - # Read backup metadata - local backup_metadata - backup_metadata=$(cat "$backup_dir/backup-metadata.json" 2>/dev/null || echo "{}") - local backup_packages - backup_packages=$(echo "$backup_metadata" | jq -r '.packages[]?' 2>/dev/null || echo "") - - log_info "Backup contains $(echo "$backup_packages" | wc -l) packages" "apt-layer" - - # Restore package selections if available - if [[ -f "$backup_dir/package-selections.list" ]]; then - log_info "Restoring package selections..." 
"apt-layer" - dpkg --set-selections < "$backup_dir/package-selections.list" 2>/dev/null || { - log_warning "Failed to restore package selections" "apt-layer" - } - fi - - # Restore repository information if available - if [[ -f "$backup_dir/sources.list" ]]; then - log_info "Restoring repository configuration..." "apt-layer" - cp "$backup_dir/sources.list" /etc/apt/sources.list 2>/dev/null || { - log_warning "Failed to restore sources.list" "apt-layer" - } - fi - - if [[ -d "$backup_dir/sources.list.d" ]]; then - cp -r "$backup_dir/sources.list.d"/* /etc/apt/sources.list.d/ 2>/dev/null || { - log_warning "Failed to restore sources.list.d" "apt-layer" - } - fi - - # Update package lists after repository restoration - apt-get update 2>/dev/null || { - log_warning "Failed to update package lists after repository restoration" "apt-layer" } - log_success "Backup restoration completed: $backup_id" "apt-layer" - log_audit "BACKUP_RESTORE" "Restored from backup: $backup_id" - return 0 + # Create deployment database if it doesn't exist + if [[ ! -f "$DEPLOYMENT_DB" ]]; then + cat > "$DEPLOYMENT_DB" << 'EOF' +{ + "deployments": {}, + "current_deployment": null, + "pending_deployment": null, + "deployment_counter": 0, + "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" } - -# Log package update -log_package_update() { - local user="$1" - shift - local packages=("$@") - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - - for package in "${packages[@]}"; do - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - UPDATE - User: $user - Package: $package" >> "$audit_log" - done -} - -# List package backups -list_package_backups() { - log_info "Listing package backups" "apt-layer" - - local backups_dir="$ADVANCED_PKG_STATE_DIR/backups" - - if [[ ! -d "$backups_dir" ]]; then - log_info "No backups directory found" "apt-layer" - return 0 - fi - - echo "=== Package Backups ===" - - local backups - backups=$(find "$backups_dir" -name "backup-*" -type d 2>/dev/null | sort -r || echo "") - - if [[ -n "$backups" ]]; then - for backup_dir in $backups; do - local backup_id - backup_id=$(basename "$backup_dir") - local metadata_file="$backup_dir/backup-metadata.json" - - if [[ -f "$metadata_file" ]]; then - local created_at - created_at=$(jq -r '.created_at // "unknown"' "$metadata_file" 2>/dev/null || echo "unknown") - local created_by - created_by=$(jq -r '.created_by // "unknown"' "$metadata_file" 2>/dev/null || echo "unknown") - local package_count - package_count=$(jq -r '.packages | length // 0' "$metadata_file" 2>/dev/null || echo "0") - - echo " $backup_id: $package_count packages, created by $created_by at $created_at" - else - echo " $backup_id: (metadata not available)" - fi - done - else - log_info "No backups found" "apt-layer" - fi - - echo "" -} - -# Clean up old backups -cleanup_old_backups() { - local max_age_days="${1:-30}" - - log_info "Cleaning up backups older than $max_age_days days" "apt-layer" - - local backups_dir="$ADVANCED_PKG_STATE_DIR/backups" - - if [[ ! 
-d "$backups_dir" ]]; then - log_info "No backups directory found" "apt-layer" - return 0 - fi - - local removed_count=0 - - # Find and remove old backup directories - while IFS= read -r -d '' backup_dir; do - local backup_id - backup_id=$(basename "$backup_dir") - local metadata_file="$backup_dir/backup-metadata.json" - - if [[ -f "$metadata_file" ]]; then - local created_at - created_at=$(jq -r '.created_at // ""' "$metadata_file" 2>/dev/null || echo "") - - if [[ -n "$created_at" ]]; then - local created_timestamp - created_timestamp=$(date -d "$created_at" +%s 2>/dev/null || echo "0") - local current_timestamp - current_timestamp=$(date +%s) - local age_days - age_days=$(( (current_timestamp - created_timestamp) / 86400 )) - - if [[ $age_days -gt $max_age_days ]]; then - log_info "Removing old backup: $backup_id (age: ${age_days} days)" "apt-layer" - rm -rf "$backup_dir" 2>/dev/null || true - rm -f "$backup_dir.tar.gz" 2>/dev/null || true - ((removed_count++)) - fi - fi - fi - done < <(find "$backups_dir" -name "backup-*" -type d -print0 2>/dev/null) - - log_success "Cleaned up $removed_count old backups" "apt-layer" - return 0 -} - -# Get advanced package information -get_advanced_package_info() { - local package="$1" - - if [[ -z "$package" ]]; then - log_error "Package name required" "apt-layer" - return 1 - fi - - log_info "Getting advanced information for package: $package" "apt-layer" - - echo "=== Advanced Package Information: $package ===" - - # Basic package information - echo "Basic Information:" - apt-cache show "$package" 2>/dev/null | grep -E "^(Package|Version|Architecture|Maintainer|Description)" | head -10 - - # Dependencies - echo "" - echo "Dependencies:" - apt-cache depends "$package" 2>/dev/null | grep -E "^(Depends|Recommends|Suggests)" | head -5 - - # Reverse dependencies - echo "" - echo "Reverse Dependencies:" - apt-cache rdepends "$package" 2>/dev/null | grep -v "^Reverse Depends" | head -5 - - # Security information - echo "" - echo "Security Information:" - if check_package_signature "$package"; then - echo " � Package is signed" - else - echo " � Package is not signed" - fi - - # Size information - local package_size - package_size=$(apt-cache show "$package" 2>/dev/null | grep "^Size:" | cut -d: -f2 | tr -d ' ' || echo "unknown") - echo " Size: $package_size bytes" - - echo "" -} - -# List advanced package management status -list_advanced_package_status() { - log_info "Listing advanced package management status" "apt-layer" - - echo "=== Advanced Package Management Status ===" - - # User management status - echo "User Management:" - local user_count - user_count=$(jq '.users | length' "$ADVANCED_PKG_USERS_DIR/users.json" 2>/dev/null || echo "0") - echo " Active users: $user_count" - - # Security policy status - echo "" - echo "Security Policies:" - local security_policy="$ADVANCED_PKG_SECURITY_DIR/security-policy.json" - if [[ -f "$security_policy" ]]; then - local gpg_check - gpg_check=$(jq -r '.package_verification.gpg_check' "$security_policy" 2>/dev/null || echo "unknown") - echo " GPG verification: $gpg_check" - - local audit_logging - audit_logging=$(jq -r '.audit_logging.enabled' "$security_policy" 2>/dev/null || echo "unknown") - echo " Audit logging: $audit_logging" - else - echo " Security policies: not configured" - fi - - # Dependency cache status - echo "" - echo "Dependency Cache:" - local dep_cache="$ADVANCED_PKG_DEPENDENCIES_DIR/dependency-cache.json" - if [[ -f "$dep_cache" ]]; then - local last_updated - last_updated=$(jq -r 
'.last_updated' "$dep_cache" 2>/dev/null || echo "unknown") - echo " Last updated: $last_updated" - else - echo " Dependency cache: not initialized" - fi - - # Audit log status - echo "" - echo "Audit Log:" - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - if [[ -f "$audit_log" ]]; then - local log_entries - log_entries=$(wc -l < "$audit_log" 2>/dev/null || echo "0") - echo " Total entries: $log_entries" - - local recent_entries - recent_entries=$(tail -10 "$audit_log" 2>/dev/null | wc -l || echo "0") - echo " Recent entries: $recent_entries" - else - echo " Audit log: not available" - fi - - echo "" -} - -# ============================================================================= -# DKMS MANAGEMENT FUNCTIONS -# ============================================================================= - -# Install DKMS module with atomic transaction -install_dkms_module() { - local module_name="$1" - local module_version="$2" - local user="${SUDO_USER:-$USER}" - - if [[ -z "$module_name" ]] || [[ -z "$module_version" ]]; then - log_error "Module name and version required" "apt-layer" - return 1 - fi - - log_info "Installing DKMS module: $module_name/$module_version" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "install"; then - return 1 - fi - - # Check DKMS configuration - local dkms_config="$ADVANCED_PKG_DKMS_DIR/dkms-config.json" - local dkms_enabled - dkms_enabled=$(jq -r '.dkms_enabled' "$dkms_config" 2>/dev/null || echo "true") - - if [[ "$dkms_enabled" != "true" ]]; then - log_error "DKMS is disabled in configuration" "apt-layer" - return 1 - fi - - # Start transaction - start_transaction "dkms_install_${module_name}_${module_version}" - - # Install kernel headers if needed - update_transaction_phase "installing_kernel_headers" - if ! install_kernel_headers; then - log_error "Failed to install kernel headers" "apt-layer" - rollback_transaction - return 1 - fi - - # Install DKMS module - update_transaction_phase "installing_dkms_module" - if ! dkms install "$module_name/$module_version"; then - log_error "Failed to install DKMS module: $module_name/$module_version" "apt-layer" - rollback_transaction - return 1 - fi - - # Update module tracking - update_dkms_module_tracking "$module_name" "$module_version" "installed" - - # Log installation - log_dkms_installation "$user" "$module_name" "$module_version" - - commit_transaction - log_success "DKMS module installed: $module_name/$module_version" "apt-layer" - return 0 -} - -# Remove DKMS module with atomic transaction -remove_dkms_module() { - local module_name="$1" - local module_version="$2" - local user="${SUDO_USER:-$USER}" - - if [[ -z "$module_name" ]] || [[ -z "$module_version" ]]; then - log_error "Module name and version required" "apt-layer" - return 1 - fi - - log_info "Removing DKMS module: $module_name/$module_version" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "remove"; then - return 1 - fi - - # Start transaction - start_transaction "dkms_remove_${module_name}_${module_version}" - - # Remove DKMS module - update_transaction_phase "removing_dkms_module" - if ! 
dkms remove "$module_name/$module_version"; then - log_error "Failed to remove DKMS module: $module_name/$module_version" "apt-layer" - rollback_transaction - return 1 - fi - - # Update module tracking - update_dkms_module_tracking "$module_name" "$module_version" "removed" - - # Log removal - log_dkms_removal "$user" "$module_name" "$module_version" - - commit_transaction - log_success "DKMS module removed: $module_name/$module_version" "apt-layer" - return 0 -} - -# Rebuild DKMS module -rebuild_dkms_module() { - local module_name="$1" - local module_version="$2" - local kernel_version="${3:-$(uname -r)}" - local user="${SUDO_USER:-$USER}" - - if [[ -z "$module_name" ]] || [[ -z "$module_version" ]]; then - log_error "Module name and version required" "apt-layer" - return 1 - fi - - log_info "Rebuilding DKMS module: $module_name/$module_version for kernel: $kernel_version" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "update"; then - return 1 - fi - - # Start transaction - start_transaction "dkms_rebuild_${module_name}_${module_version}" - - # Rebuild DKMS module - update_transaction_phase "rebuilding_dkms_module" - if ! dkms build "$module_name/$module_version" -k "$kernel_version"; then - log_error "Failed to rebuild DKMS module: $module_name/$module_version" "apt-layer" - rollback_transaction - return 1 - fi - - # Install rebuilt module - update_transaction_phase "installing_rebuilt_module" - if ! dkms install "$module_name/$module_version" -k "$kernel_version"; then - log_error "Failed to install rebuilt DKMS module: $module_name/$module_version" "apt-layer" - rollback_transaction - return 1 - fi - - # Update module tracking - update_dkms_module_tracking "$module_name" "$module_version" "rebuilt" - - # Log rebuild - log_dkms_rebuild "$user" "$module_name" "$module_version" "$kernel_version" - - commit_transaction - log_success "DKMS module rebuilt: $module_name/$module_version" "apt-layer" - return 0 -} - -# Rebuild all DKMS modules -rebuild_all_dkms_modules() { - local kernel_version="${1:-$(uname -r)}" - local user="${SUDO_USER:-$USER}" - - log_info "Rebuilding all DKMS modules for kernel: $kernel_version" "apt-layer" - - # Check user permissions - if ! 
check_user_permissions "$user" "update"; then - return 1 - fi - - # Get list of installed DKMS modules - local installed_modules - installed_modules=$(dkms status | grep -E "^[a-zA-Z0-9_-]+/[0-9.]+" | cut -d'/' -f1,2 | sort -u || echo "") - - if [[ -z "$installed_modules" ]]; then - log_info "No DKMS modules found to rebuild" "apt-layer" - return 0 - fi - - local failed_modules=() - local success_count=0 - - # Rebuild each module - while IFS= read -r module_info; do - if [[ -n "$module_info" ]]; then - local module_name - local module_version - module_name=$(echo "$module_info" | cut -d'/' -f1) - module_version=$(echo "$module_info" | cut -d'/' -f2) - - log_info "Rebuilding module: $module_name/$module_version" "apt-layer" - - if rebuild_dkms_module "$module_name" "$module_version" "$kernel_version"; then - ((success_count++)) - else - failed_modules+=("$module_name/$module_version") - fi - fi - done <<< "$installed_modules" - - # Report results - if [[ ${#failed_modules[@]} -eq 0 ]]; then - log_success "All DKMS modules rebuilt successfully ($success_count modules)" "apt-layer" - return 0 - else - log_warning "Some DKMS modules failed to rebuild: ${failed_modules[*]}" "apt-layer" - log_info "Successfully rebuilt: $success_count modules" "apt-layer" - return 1 - fi -} - -# Install kernel headers -install_kernel_headers() { - local kernel_version=$(uname -r) - local headers_package="linux-headers-${kernel_version}" - - log_info "Installing kernel headers: $headers_package" "apt-layer" - - # Check if headers are already installed - if dpkg -l | grep -q "^ii.*$headers_package"; then - log_info "Kernel headers already installed: $headers_package" "apt-layer" - return 0 - fi - - # Install kernel headers - if apt-get install -y "$headers_package"; then - log_success "Kernel headers installed: $headers_package" "apt-layer" - return 0 - else - log_error "Failed to install kernel headers: $headers_package" "apt-layer" - return 1 - fi -} - -# Update DKMS module tracking -update_dkms_module_tracking() { - local module_name="$1" - local module_version="$2" - local action="$3" - - local dkms_modules="$ADVANCED_PKG_DKMS_DIR/installed-modules.json" - local current_time=$(date -u +%Y-%m-%dT%H:%M:%SZ) - - # Update installed modules tracking - case "$action" in - "installed") - jq --arg name "$module_name" --arg version "$module_version" --arg time "$current_time" \ - '.installed_modules[$name] = {"version": $version, "installed_at": $time, "status": "active"}' \ - "$dkms_modules" > "$dkms_modules.tmp" && mv "$dkms_modules.tmp" "$dkms_modules" - ;; - "removed") - jq --arg name "$module_name" 'del(.installed_modules[$name])' \ - "$dkms_modules" > "$dkms_modules.tmp" && mv "$dkms_modules.tmp" "$dkms_modules" - ;; - "rebuilt") - jq --arg name "$module_name" --arg time "$current_time" \ - '.installed_modules[$name].last_rebuilt = $time' \ - "$dkms_modules" > "$dkms_modules.tmp" && mv "$dkms_modules.tmp" "$dkms_modules" - ;; - esac - - # Update build history - jq --arg name "$module_name" --arg version "$module_version" --arg action "$action" --arg time "$current_time" \ - '.build_history[$time] = {"module": $name, "version": $version, "action": $action}' \ - "$dkms_modules" > "$dkms_modules.tmp" && mv "$dkms_modules.tmp" "$dkms_modules" - - # Update last_updated timestamp - jq --arg time "$current_time" '.last_updated = $time' \ - "$dkms_modules" > "$dkms_modules.tmp" && mv "$dkms_modules.tmp" "$dkms_modules" -} - -# Log DKMS installation -log_dkms_installation() { - local user="$1" - local module_name="$2" - 
local module_version="$3" - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - DKMS_INSTALL - User: $user - Module: $module_name/$module_version" >> "$audit_log" -} - -# Log DKMS removal -log_dkms_removal() { - local user="$1" - local module_name="$2" - local module_version="$3" - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - DKMS_REMOVE - User: $user - Module: $module_name/$module_version" >> "$audit_log" -} - -# Log DKMS rebuild -log_dkms_rebuild() { - local user="$1" - local module_name="$2" - local module_version="$3" - local kernel_version="$4" - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - DKMS_REBUILD - User: $user - Module: $module_name/$module_version - Kernel: $kernel_version" >> "$audit_log" -} - -# Get DKMS status -get_dkms_status() { - log_info "Getting DKMS status" "apt-layer" - - echo "=== DKMS Status ===" - - # Get system DKMS status - local dkms_status - dkms_status=$(dkms status 2>/dev/null || echo "") - - if [[ -n "$dkms_status" ]]; then - echo "$dkms_status" - else - log_info "No DKMS modules found" "apt-layer" - fi - - echo "" - echo "=== DKMS Configuration ===" - - # Show DKMS configuration - local dkms_config="$ADVANCED_PKG_DKMS_DIR/dkms-config.json" - if [[ -f "$dkms_config" ]]; then - jq -r 'to_entries[] | " \(.key): \(.value)"' "$dkms_config" 2>/dev/null || echo " Configuration not readable" - else - echo " Configuration not found" - fi - - echo "" - echo "=== Installed Modules ===" - - # Show tracked modules - local dkms_modules="$ADVANCED_PKG_DKMS_DIR/installed-modules.json" - if [[ -f "$dkms_modules" ]]; then - local module_count - module_count=$(jq '.installed_modules | length' "$dkms_modules" 2>/dev/null || echo "0") - echo " Tracked modules: $module_count" - - jq -r '.installed_modules | to_entries[] | " \(.key): \(.value.version) (\(.value.status))"' "$dkms_modules" 2>/dev/null || echo " No modules found" - else - echo " Module tracking not available" - fi - - echo "" -} - -# ============================================================================= -# NVIDIA SUPPORT FUNCTIONS -# ============================================================================= - -# Install NVIDIA drivers -install_nvidia_drivers() { - local driver_version="${1:-auto}" - local user="${SUDO_USER:-$USER}" - - log_info "Installing NVIDIA drivers using graphics-drivers PPA" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "install"; then - return 1 - fi - - # Check NVIDIA configuration - local nvidia_config="$ADVANCED_PKG_NVIDIA_DIR/nvidia-config.json" - local nvidia_enabled - nvidia_enabled=$(jq -r '.nvidia_support_enabled' "$nvidia_config" 2>/dev/null || echo "true") - - if [[ "$nvidia_enabled" != "true" ]]; then - log_error "NVIDIA support is disabled in configuration" "apt-layer" - return 1 - fi - - # Start transaction - start_transaction "nvidia_install_${driver_version}" - - # Add graphics-drivers PPA if needed - update_transaction_phase "adding_graphics_drivers_ppa" - if ! 
add_nvidia_repository; then - log_error "Failed to add graphics-drivers PPA" "apt-layer" - rollback_transaction - return 1 - fi - - # Determine driver version if auto - if [[ "$driver_version" == "auto" ]]; then - update_transaction_phase "detecting_optimal_driver" - driver_version=$(detect_optimal_nvidia_driver) - if [[ -z "$driver_version" ]]; then - log_error "Failed to detect optimal NVIDIA driver version" "apt-layer" - rollback_transaction - return 1 - fi - log_info "Detected optimal driver version: $driver_version" "apt-layer" - fi - - # Install NVIDIA driver packages from PPA - update_transaction_phase "installing_nvidia_drivers" - local nvidia_packages=("nvidia-driver-$driver_version" "nvidia-settings" "nvidia-prime" "nvidia-modprobe") - - log_info "Installing NVIDIA packages: ${nvidia_packages[*]}" "apt-layer" - if ! apt-get install -y "${nvidia_packages[@]}"; then - log_error "Failed to install NVIDIA driver packages" "apt-layer" - rollback_transaction - return 1 - fi - - # Install DKMS module for NVIDIA driver (handled automatically by PPA packages) - update_transaction_phase "verifying_dkms_installation" - if ! verify_nvidia_dkms_installation "$driver_version"; then - log_warning "NVIDIA DKMS module verification failed" "apt-layer" - fi - - # Configure NVIDIA Prime - update_transaction_phase "configuring_nvidia_prime" - if ! configure_nvidia_prime; then - log_warning "Failed to configure NVIDIA Prime" "apt-layer" - fi - - # Update driver tracking - update_nvidia_driver_tracking "$driver_version" "installed" - - # Log installation - log_nvidia_installation "$user" "$driver_version" - - commit_transaction - log_success "NVIDIA drivers installed successfully: version $driver_version" "apt-layer" - log_info "Driver installed from graphics-drivers PPA (recommended approach)" "apt-layer" - log_info "Reboot required to activate NVIDIA drivers" "apt-layer" - return 0 -} - -# Detect optimal NVIDIA driver version -detect_optimal_nvidia_driver() { - log_info "Detecting optimal NVIDIA driver version" "apt-layer" - - # Check if lspci is available - if ! 
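
Before invoking `install_nvidia_drivers`, the same detection can be exercised by hand with stock Ubuntu tooling; a quick pre-flight sketch (assumes `pciutils` and `ubuntu-drivers-common` are installed):

```bash
# Any NVIDIA GPU on the PCI bus?
lspci | grep -i nvidia || echo "No NVIDIA GPU detected"

# What does Ubuntu itself recommend for it?
command -v ubuntu-drivers &>/dev/null && ubuntu-drivers devices
```
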
command -v lspci &>/dev/null; then
-        log_error "lspci not available for GPU detection" "apt-layer"
-        return 1
-    fi
-
-    # Get NVIDIA GPU information
-    local gpu_info
-    gpu_info=$(lspci | grep -i nvidia || echo "")
-
-    if [[ -z "$gpu_info" ]]; then
-        log_error "No NVIDIA GPU detected" "apt-layer"
-        return 1
-    fi
-
-    log_info "Detected NVIDIA GPU: $gpu_info" "apt-layer"
-
-    # Use ubuntu-drivers to recommend the best driver
-    if command -v ubuntu-drivers &>/dev/null; then
-        local recommended_driver
-        # "recommended" is flagged on the same line as the package name, and the
-        # installer expects the bare series number (e.g. "535", not "nvidia-driver-535")
-        recommended_driver=$(ubuntu-drivers devices 2>/dev/null | awk '/recommended/ {print $3}' | sed 's/^nvidia-driver-//' || echo "")
-
-        if [[ -n "$recommended_driver" ]]; then
-            log_info "Ubuntu drivers recommends: $recommended_driver" "apt-layer"
-            echo "$recommended_driver"
-            return 0
-        fi
-    fi
-
-    # Fallback to common driver versions based on GPU age
-    # This is a simplified approach - in practice, ubuntu-drivers is more accurate
-    local gpu_model
-    gpu_model=$(echo "$gpu_info" | grep -o "\[.*\]" | tr -d '[]' || echo "")
-
-    case "$gpu_model" in
-        *"GTX 10"*|*"GTX 16"*|*"RTX 20"*|*"RTX 30"*|*"RTX 40"*)
-            echo "535" # Modern GPUs
-            ;;
-        *"GTX 9"*|*"GTX 7"*|*"GTX 6"*)
-            echo "470" # Older but still supported GPUs
-            ;;
-        *"GTX 5"*|*"GTX 4"*)
-            echo "390" # Legacy GPUs
-            ;;
-        *)
-            echo "535" # Default to latest stable
-            ;;
-    esac
-}
-
-# Verify NVIDIA DKMS installation
-verify_nvidia_dkms_installation() {
-    local driver_version="$1"
-
-    log_info "Verifying NVIDIA DKMS installation for version: $driver_version" "apt-layer"
-
-    # Check if DKMS module is installed
-    if dkms status | grep -q "nvidia/$driver_version"; then
-        log_success "NVIDIA DKMS module verified: nvidia/$driver_version" "apt-layer"
-        return 0
-    else
-        log_warning "NVIDIA DKMS module not found in DKMS status" "apt-layer"
-
-        # Check if module is loaded
-        if lsmod | grep -q nvidia; then
-            log_info "NVIDIA module is loaded (may be built-in)" "apt-layer"
-            return 0
+EOF
+    if [[ $? -eq 0 ]]; then
+        log_success "Deployment database initialized" "apt-layer"
     else
-            log_error "NVIDIA module not loaded" "apt-layer"
+        log_error "Failed to create deployment database: $DEPLOYMENT_DB" "apt-layer"
         return 1
     fi
     fi
-}
-
-# Add NVIDIA repository
-add_nvidia_repository() {
-    log_info "Adding NVIDIA graphics-drivers PPA repository" "apt-layer"
-    # Check if PPA already exists
-    if grep -q "graphics-drivers" /etc/apt/sources.list.d/*.list 2>/dev/null; then
-        log_info "NVIDIA graphics-drivers PPA already exists" "apt-layer"
-        return 0
-    fi
-
-    # Add graphics-drivers PPA (recommended approach)
-    if ! add-apt-repository ppa:graphics-drivers/ppa -y; then
-        log_error "Failed to add graphics-drivers PPA" "apt-layer"
-        return 1
-    fi
-
-    # Update package lists
-    if ! apt-get update; then
-        log_error "Failed to update package lists after adding graphics-drivers PPA" "apt-layer"
-        return 1
-    fi
-
-    log_success "NVIDIA graphics-drivers PPA added successfully" "apt-layer"
-    log_info "This is the recommended approach for NVIDIA driver installation on Ubuntu" "apt-layer"
-    return 0
-}
-
-# Configure NVIDIA Prime
-configure_nvidia_prime() {
-    log_info "Configuring NVIDIA Prime" "apt-layer"
-
-    # Create NVIDIA Prime configuration
-    mkdir -p /etc/prime
-    cat > /etc/prime/display << EOF
-# NVIDIA Prime configuration
-# Auto-detect GPU configuration
-auto
-EOF
-
-    # Install NVIDIA Prime utilities if not already installed
-    if !
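
For reference, `ubuntu-drivers devices` marks the preferred package on a single line (for example `driver : nvidia-driver-535 - distro non-free recommended`), so the bare series number expected by the installer can be extracted as below; a sketch that assumes the `nvidia-driver-NNN` naming convention:

```bash
ubuntu-drivers devices 2>/dev/null \
    | awk '/recommended/ {print $3}' \
    | sed 's/^nvidia-driver-//'
```
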
dpkg -l | grep -q nvidia-prime-applet; then - apt-get install -y nvidia-prime-applet - fi - - # Configure system for NVIDIA Prime - if command -v prime-select &>/dev/null; then - prime-select nvidia - log_success "NVIDIA Prime configured for NVIDIA GPU" "apt-layer" - else - log_warning "prime-select not available" "apt-layer" - fi - - return 0 -} - -# Switch GPU using NVIDIA Prime -switch_gpu() { - local gpu="${1:-nvidia}" - local user="${SUDO_USER:-$USER}" - - log_info "Switching GPU to: $gpu" "apt-layer" - - # Check user permissions - if ! check_user_permissions "$user" "update"; then - return 1 - fi - - # Check if prime-select is available - if ! command -v prime-select &>/dev/null; then - log_error "prime-select not available" "apt-layer" - return 1 - fi - - # Switch GPU - case "$gpu" in - "nvidia") - if prime-select nvidia; then - log_success "Switched to NVIDIA GPU" "apt-layer" - update_nvidia_prime_config "nvidia" - return 0 - else - log_error "Failed to switch to NVIDIA GPU" "apt-layer" - return 1 - fi - ;; - "integrated") - if prime-select intel; then - log_success "Switched to integrated GPU" "apt-layer" - update_nvidia_prime_config "integrated" - return 0 - else - log_error "Failed to switch to integrated GPU" "apt-layer" - return 1 - fi - ;; - "auto") - if prime-select auto; then - log_success "Switched to automatic GPU selection" "apt-layer" - update_nvidia_prime_config "auto" - return 0 - else - log_error "Failed to switch to automatic GPU selection" "apt-layer" - return 1 - fi - ;; - *) - log_error "Invalid GPU selection: $gpu" "apt-layer" - log_info "Valid options: nvidia, integrated, auto" "apt-layer" + # Ensure deployment files exist with proper error handling + touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { + log_warning "Failed to create current deployment file, attempting with sudo..." 
"apt-layer" + sudo touch "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || { + log_error "Failed to create current deployment file: $CURRENT_DEPLOYMENT_FILE" "apt-layer" return 1 - ;; - esac -} - -# Update NVIDIA Prime configuration -update_nvidia_prime_config() { - local gpu="$1" - local nvidia_prime="$ADVANCED_PKG_NVIDIA_DIR/prime-config.json" - - jq --arg gpu "$gpu" '.current_gpu = $gpu' "$nvidia_prime" > "$nvidia_prime.tmp" && \ - mv "$nvidia_prime.tmp" "$nvidia_prime" -} - -# Update NVIDIA driver tracking -update_nvidia_driver_tracking() { - local driver_version="$1" - local action="$2" - - local nvidia_drivers="$ADVANCED_PKG_NVIDIA_DIR/installed-drivers.json" - local current_time=$(date -u +%Y-%m-%dT%H:%M:%SZ) - - case "$action" in - "installed") - jq --arg version "$driver_version" --arg time "$current_time" \ - '.installed_drivers[$version] = {"installed_at": $time, "status": "active"}' \ - "$nvidia_drivers" > "$nvidia_drivers.tmp" && mv "$nvidia_drivers.tmp" "$nvidia_drivers" - ;; - "removed") - jq --arg version "$driver_version" 'del(.installed_drivers[$version])' \ - "$nvidia_drivers" > "$nvidia_drivers.tmp" && mv "$nvidia_drivers.tmp" "$nvidia_drivers" - ;; - esac - - # Update driver history - jq --arg version "$driver_version" --arg action "$action" --arg time "$current_time" \ - '.driver_history[$time] = {"version": $version, "action": $action}' \ - "$nvidia_drivers" > "$nvidia_drivers.tmp" && mv "$nvidia_drivers.tmp" "$nvidia_drivers" - - # Update last_updated timestamp - jq --arg time "$current_time" '.last_updated = $time' \ - "$nvidia_drivers" > "$nvidia_drivers.tmp" && mv "$nvidia_drivers.tmp" "$nvidia_drivers" -} - -# Log NVIDIA installation -log_nvidia_installation() { - local user="$1" - local driver_version="$2" - - local audit_log="$ADVANCED_PKG_SECURITY_DIR/audit.log" - echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) - NVIDIA_INSTALL - User: $user - Driver: $driver_version" >> "$audit_log" -} - -# Get NVIDIA status -get_nvidia_status() { - log_info "Getting NVIDIA status" "apt-layer" - - echo "=== NVIDIA Status ===" - - # Check if NVIDIA drivers are loaded - if lsmod | grep -q nvidia; then - echo " NVIDIA drivers: loaded" - - # Get NVIDIA driver version - if command -v nvidia-smi &>/dev/null; then - local driver_version - driver_version=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader,nounits 2>/dev/null | head -1 || echo "unknown") - echo " Driver version: $driver_version" - fi - - # Get GPU information - if command -v nvidia-smi &>/dev/null; then - echo " GPU Information:" - nvidia-smi --query-gpu=name,memory.total,temperature.gpu --format=csv,noheader 2>/dev/null | while read -r gpu_info; do - echo " $gpu_info" - done - fi - else - echo " NVIDIA drivers: not loaded" - fi - - echo "" - echo "=== NVIDIA Prime Status ===" - - # Check NVIDIA Prime status - if command -v prime-select &>/dev/null; then - local current_gpu - current_gpu=$(prime-select get 2>/dev/null || echo "unknown") - echo " Current GPU: $current_gpu" - else - echo " NVIDIA Prime: not available" - fi - - echo "" - echo "=== NVIDIA Configuration ===" - - # Show NVIDIA configuration - local nvidia_config="$ADVANCED_PKG_NVIDIA_DIR/nvidia-config.json" - if [[ -f "$nvidia_config" ]]; then - jq -r 'to_entries[] | " \(.key): \(.value)"' "$nvidia_config" 2>/dev/null || echo " Configuration not readable" - else - echo " Configuration not found" - fi - - echo "" -} - -# ============================================================================= -# INTEGRATION FUNCTIONS -# 
============================================================================= - -# Initialize advanced package management on script startup -init_advanced_package_management_on_startup() { - # Only initialize if not already done - if [[ ! -d "$ADVANCED_PKG_STATE_DIR" ]]; then - init_advanced_package_management - fi -} - -# Cleanup advanced package management on script exit -cleanup_advanced_package_management_on_exit() { - # Clean up temporary files - rm -f "$ADVANCED_PKG_CACHE_DIR"/temp-* 2>/dev/null || true - rm -f "$ADVANCED_PKG_CACHE_DIR"/resolved-* 2>/dev/null || true -} - -# Register cleanup function -trap cleanup_advanced_package_management_on_exit EXIT - -# --- END OF SCRIPTLET: 08-advanced-package-management.sh --- - -# ============================================================================ -# Layer Signing & Verification (Enterprise Security) -# ============================================================================ - -# Ubuntu uBlue apt-layer Layer Signing & Verification -# Provides enterprise-grade layer signing and verification for immutable deployments -# Supports Sigstore (cosign) for modern OCI-compatible signing and GPG for traditional workflows - -# ============================================================================= -# LAYER SIGNING & VERIFICATION FUNCTIONS -# ============================================================================= - -# Layer signing configuration (with fallbacks for when particle-config.sh is not loaded) -LAYER_SIGNING_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/ubuntu-ublue}/layer-signing" -LAYER_SIGNING_STATE_DIR="${UBLUE_ROOT:-/var/lib/particle-os}/layer-signing" -LAYER_SIGNING_KEYS_DIR="$LAYER_SIGNING_STATE_DIR/keys" -LAYER_SIGNING_SIGNATURES_DIR="$LAYER_SIGNING_STATE_DIR/signatures" -LAYER_SIGNING_VERIFICATION_DIR="$LAYER_SIGNING_STATE_DIR/verification" -LAYER_SIGNING_REVOCATION_DIR="$LAYER_SIGNING_STATE_DIR/revocation" - -# Signing configuration -LAYER_SIGNING_ENABLED="${LAYER_SIGNING_ENABLED:-true}" -LAYER_SIGNING_METHOD="${LAYER_SIGNING_METHOD:-sigstore}" # sigstore, gpg, both -LAYER_SIGNING_VERIFY_ON_IMPORT="${LAYER_SIGNING_VERIFY_ON_IMPORT:-true}" -LAYER_SIGNING_VERIFY_ON_MOUNT="${LAYER_SIGNING_VERIFY_ON_MOUNT:-true}" -LAYER_SIGNING_VERIFY_ON_ACTIVATE="${LAYER_SIGNING_VERIFY_ON_ACTIVATE:-true}" -LAYER_SIGNING_FAIL_ON_VERIFY="${LAYER_SIGNING_FAIL_ON_VERIFY:-true}" - -# Initialize layer signing system -init_layer_signing() { - log_info "Initializing layer signing and verification system" "apt-layer" - - # Create layer signing directories - mkdir -p "$LAYER_SIGNING_CONFIG_DIR" "$LAYER_SIGNING_STATE_DIR" "$LAYER_SIGNING_KEYS_DIR" - mkdir -p "$LAYER_SIGNING_SIGNATURES_DIR" "$LAYER_SIGNING_VERIFICATION_DIR" "$LAYER_SIGNING_REVOCATION_DIR" - - # Set proper permissions - chmod 755 "$LAYER_SIGNING_CONFIG_DIR" "$LAYER_SIGNING_STATE_DIR" - chmod 700 "$LAYER_SIGNING_KEYS_DIR" "$LAYER_SIGNING_SIGNATURES_DIR" - chmod 750 "$LAYER_SIGNING_VERIFICATION_DIR" "$LAYER_SIGNING_REVOCATION_DIR" - - # Initialize signing configuration - init_signing_config - - # Initialize key management - init_key_management - - # Initialize revocation system - init_revocation_system - - # Check signing tools availability - check_signing_tools - - log_success "Layer signing and verification system initialized" "apt-layer" -} - -# Initialize signing configuration -init_signing_config() { - local config_file="$LAYER_SIGNING_CONFIG_DIR/signing-config.json" - - if [[ ! 
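
Every knob above falls back to an environment variable, so a deployment can pin its signing policy before the scriptlet is sourced; for example, to force GPG-only signing that hard-fails on verification errors:

```bash
export LAYER_SIGNING_METHOD=gpg
export LAYER_SIGNING_VERIFY_ON_IMPORT=true
export LAYER_SIGNING_FAIL_ON_VERIFY=true
```
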
-f "$config_file" ]]; then - cat > "$config_file" << EOF -{ - "signing": { - "enabled": true, - "method": "sigstore", - "verify_on_import": true, - "verify_on_mount": true, - "verify_on_activate": true, - "fail_on_verify": true - }, - "sigstore": { - "enabled": true, - "keyless": false, - "fulcio_url": "https://fulcio.sigstore.dev", - "rekor_url": "https://rekor.sigstore.dev", - "tuf_url": "https://tuf.sigstore.dev" - }, - "gpg": { - "enabled": true, - "keyring": "/etc/apt/trusted.gpg", - "signing_key": "", - "verification_keys": [] - }, - "key_management": { - "local_keys": true, - "hsm_support": false, - "remote_key_service": false, - "key_rotation_days": 365 - }, - "revocation": { - "enabled": true, - "check_revocation": true, - "revocation_list_url": "", - "local_revocation_list": true + } } -} -EOF - chmod 600 "$config_file" - fi -} - -# Initialize key management -init_key_management() { - local key_db="$LAYER_SIGNING_KEYS_DIR/keys.json" - if [[ ! -f "$key_db" ]]; then - cat > "$key_db" << EOF -{ - "keys": {}, - "key_pairs": {}, - "public_keys": {}, - "key_metadata": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$key_db" - fi -} - -# Initialize revocation system -init_revocation_system() { - local revocation_list="$LAYER_SIGNING_REVOCATION_DIR/revocation-list.json" - - if [[ ! -f "$revocation_list" ]]; then - cat > "$revocation_list" << EOF -{ - "revoked_keys": {}, - "revoked_signatures": {}, - "revoked_layers": {}, - "revocation_reasons": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$revocation_list" - fi -} - -# Check signing tools availability -check_signing_tools() { - log_info "Checking signing tools availability" "apt-layer" - - local tools_available=true - - # Check for cosign (Sigstore) - if ! command -v cosign &>/dev/null; then - log_warning "cosign (Sigstore) not found - Sigstore signing will be disabled" "apt-layer" - LAYER_SIGNING_METHOD="gpg" - else - log_info "cosign (Sigstore) found: $(cosign version 2>/dev/null | head -1 || echo 'version unknown')" "apt-layer" - fi - - # Check for GPG - if ! command -v gpg &>/dev/null; then - log_warning "GPG not found - GPG signing will be disabled" "apt-layer" - if [[ "$LAYER_SIGNING_METHOD" == "gpg" ]]; then - LAYER_SIGNING_METHOD="sigstore" - fi - else - log_info "GPG found: $(gpg --version | head -1)" "apt-layer" - fi - - # Check if any signing method is available - if [[ "$LAYER_SIGNING_METHOD" == "both" ]] && ! command -v cosign &>/dev/null && ! command -v gpg &>/dev/null; then - log_error "No signing tools available - layer signing will be disabled" "apt-layer" - LAYER_SIGNING_ENABLED=false - return 1 - fi - - return 0 -} - -# Generate signing key pair -generate_signing_key_pair() { - local key_name="$1" - local key_type="${2:-sigstore}" - - if [[ -z "$key_name" ]]; then - log_error "Key name required for key pair generation" "apt-layer" - return 1 - fi - - log_info "Generating signing key pair: $key_name (type: $key_type)" "apt-layer" - - case "$key_type" in - "sigstore") - generate_sigstore_key_pair "$key_name" - ;; - "gpg") - generate_gpg_key_pair "$key_name" - ;; - *) - log_error "Unsupported key type: $key_type" "apt-layer" + touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { + log_warning "Failed to create pending deployment file, attempting with sudo..." 
"apt-layer" + sudo touch "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || { + log_error "Failed to create pending deployment file: $PENDING_DEPLOYMENT_FILE" "apt-layer" return 1 - ;; - esac + } + } + + log_success "Deployment database initialization completed" "apt-layer" } -# Generate Sigstore key pair -generate_sigstore_key_pair() { - local key_name="$1" - local key_dir="$LAYER_SIGNING_KEYS_DIR/sigstore/$key_name" +# Create a new deployment commit +create_deployment_commit() { + local base_image="$1" + local layers=("${@:2}") + local commit_message="${COMMIT_MESSAGE:-System update}" - mkdir -p "$key_dir" + local commit_id="commit-$(date +%Y%m%d-%H%M%S)-$$" + local commit_data - log_info "Generating Sigstore key pair for: $key_name" "apt-layer" + log_info "Creating deployment commit: $commit_id" "apt-layer" - # Generate cosign key pair - if cosign generate-key-pair --output-key-prefix "$key_dir/key" 2>/dev/null; then - # Store key metadata - local key_db="$LAYER_SIGNING_KEYS_DIR/keys.json" - local key_id - key_id=$(cosign public-key --key "$key_dir/key.key" 2>/dev/null | sha256sum | cut -d' ' -f1 || echo "unknown") - - jq --arg name "$key_name" \ - --arg type "sigstore" \ - --arg public_key "$key_dir/key.pub" \ - --arg private_key "$key_dir/key.key" \ - --arg key_id "$key_id" \ - --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '.key_pairs[$name] = { - "type": $type, - "public_key": $public_key, - "private_key": $private_key, - "key_id": $key_id, - "created": $created, - "status": "active" - }' "$key_db" > "$key_db.tmp" && mv "$key_db.tmp" "$key_db" - - chmod 600 "$key_dir/key.key" - chmod 644 "$key_dir/key.pub" - - log_success "Sigstore key pair generated: $key_name" "apt-layer" - return 0 - else - log_error "Failed to generate Sigstore key pair: $key_name" "apt-layer" - return 1 - fi -} - -# Generate GPG key pair -generate_gpg_key_pair() { - local key_name="$1" - local key_dir="$LAYER_SIGNING_KEYS_DIR/gpg/$key_name" - - mkdir -p "$key_dir" - - log_info "Generating GPG key pair for: $key_name" "apt-layer" - - # Create GPG key configuration - cat > "$key_dir/key-config" << EOF -Key-Type: RSA -Key-Length: 4096 -Name-Real: apt-layer signing key -Name-Email: apt-layer@$(hostname) -Name-Comment: $key_name -Expire-Date: 2y -%commit -EOF - - # Generate GPG key - if gpg --batch --gen-key "$key_dir/key-config" 2>/dev/null; then - # Export public key - gpg --armor --export apt-layer@$(hostname) > "$key_dir/public.key" 2>/dev/null - - # Get key fingerprint - local key_fingerprint - key_fingerprint=$(gpg --fingerprint apt-layer@$(hostname) 2>/dev/null | grep "Key fingerprint" | sed 's/.*= //' | tr -d ' ') - - # Store key metadata - local key_db="$LAYER_SIGNING_KEYS_DIR/keys.json" - - jq --arg name "$key_name" \ - --arg type "gpg" \ - --arg public_key "$key_dir/public.key" \ - --arg key_id "$key_fingerprint" \ - --arg email "apt-layer@$(hostname)" \ - --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '.key_pairs[$name] = { - "type": $type, - "public_key": $public_key, - "key_id": $key_id, - "email": $email, - "created": $created, - "status": "active" - }' "$key_db" > "$key_db.tmp" && mv "$key_db.tmp" "$key_db" - - chmod 600 "$key_dir/key-config" - chmod 644 "$key_dir/public.key" - - log_success "GPG key pair generated: $key_name" "apt-layer" - return 0 - else - log_error "Failed to generate GPG key pair: $key_name" "apt-layer" - return 1 - fi -} - -# Sign layer with specified method -sign_layer() { - local layer_path="$1" - local key_name="$2" - local signing_method="${3:-$LAYER_SIGNING_METHOD}" - - if 
[[ -z "$layer_path" ]] || [[ -z "$key_name" ]]; then - log_error "Layer path and key name required for signing" "apt-layer" - return 1 - fi - - if [[ ! -f "$layer_path" ]]; then - log_error "Layer file not found: $layer_path" "apt-layer" - return 1 - fi - - log_info "Signing layer: $layer_path with key: $key_name (method: $signing_method)" "apt-layer" - - case "$signing_method" in - "sigstore") - sign_layer_sigstore "$layer_path" "$key_name" - ;; - "gpg") - sign_layer_gpg "$layer_path" "$key_name" - ;; - "both") - sign_layer_sigstore "$layer_path" "$key_name" && \ - sign_layer_gpg "$layer_path" "$key_name" - ;; - *) - log_error "Unsupported signing method: $signing_method" "apt-layer" - return 1 - ;; - esac -} - -# Sign layer with Sigstore -sign_layer_sigstore() { - local layer_path="$1" - local key_name="$2" - local key_dir="$LAYER_SIGNING_KEYS_DIR/sigstore/$key_name" - local signature_path="$layer_path.sig" - - if [[ ! -f "$key_dir/key.key" ]]; then - log_error "Sigstore private key not found: $key_dir/key.key" "apt-layer" - return 1 - fi - - log_info "Signing layer with Sigstore: $layer_path" "apt-layer" - - # Sign the layer - if cosign sign-blob --key "$key_dir/key.key" --output-signature "$signature_path" "$layer_path" 2>/dev/null; then - # Store signature metadata - local signature_db="$LAYER_SIGNING_SIGNATURES_DIR/signatures.json" - - if [[ ! -f "$signature_db" ]]; then - cat > "$signature_db" << EOF -{ - "signatures": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - fi - - local layer_hash - layer_hash=$(sha256sum "$layer_path" | cut -d' ' -f1) - - jq --arg layer "$layer_path" \ - --arg signature "$signature_path" \ - --arg method "sigstore" \ - --arg key_name "$key_name" \ - --arg layer_hash "$layer_hash" \ - --arg signed_at "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '.signatures[$layer] = { - "signature_file": $signature, - "method": $method, - "key_name": $key_name, - "layer_hash": $layer_hash, - "signed_at": $signed_at, - "status": "valid" - }' "$signature_db" > "$signature_db.tmp" && mv "$signature_db.tmp" "$signature_db" - - log_success "Layer signed with Sigstore: $layer_path" "apt-layer" - return 0 - else - log_error "Failed to sign layer with Sigstore: $layer_path" "apt-layer" - return 1 - fi -} - -# Sign layer with GPG -sign_layer_gpg() { - local layer_path="$1" - local key_name="$2" - local signature_path="$layer_path.sig" - - log_info "Signing layer with GPG: $layer_path" "apt-layer" - - # Sign the layer - if gpg --detach-sign --armor --output "$signature_path" "$layer_path" 2>/dev/null; then - # Store signature metadata - local signature_db="$LAYER_SIGNING_SIGNATURES_DIR/signatures.json" - - if [[ ! 
-f "$signature_db" ]]; then - cat > "$signature_db" << EOF -{ - "signatures": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - fi - - local layer_hash - layer_hash=$(sha256sum "$layer_path" | cut -d' ' -f1) - - jq --arg layer "$layer_path" \ - --arg signature "$signature_path" \ - --arg method "gpg" \ - --arg key_name "$key_name" \ - --arg layer_hash "$layer_hash" \ - --arg signed_at "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '.signatures[$layer] = { - "signature_file": $signature, - "method": $method, - "key_name": $key_name, - "layer_hash": $layer_hash, - "signed_at": $signed_at, - "status": "valid" - }' "$signature_db" > "$signature_db.tmp" && mv "$signature_db.tmp" "$signature_db" - - log_success "Layer signed with GPG: $layer_path" "apt-layer" - return 0 - else - log_error "Failed to sign layer with GPG: $layer_path" "apt-layer" - return 1 - fi -} - -# Verify layer signature -verify_layer_signature() { - local layer_path="$1" - local signature_path="$2" - local verification_method="${3:-auto}" - - if [[ -z "$layer_path" ]] || [[ -z "$signature_path" ]]; then - log_error "Layer path and signature path required for verification" "apt-layer" - return 1 - fi - - if [[ ! -f "$layer_path" ]]; then - log_error "Layer file not found: $layer_path" "apt-layer" - return 1 - fi - - if [[ ! -f "$signature_path" ]]; then - log_error "Signature file not found: $signature_path" "apt-layer" - return 1 - fi - - log_info "Verifying layer signature: $layer_path" "apt-layer" - - # Auto-detect verification method - if [[ "$verification_method" == "auto" ]]; then - if [[ "$signature_path" == *.sig ]] && head -1 "$signature_path" | grep -q "-----BEGIN PGP SIGNATURE-----"; then - verification_method="gpg" - else - verification_method="sigstore" - fi - fi - - case "$verification_method" in - "sigstore") - verify_layer_sigstore "$layer_path" "$signature_path" - ;; - "gpg") - verify_layer_gpg "$layer_path" "$signature_path" - ;; - *) - log_error "Unsupported verification method: $verification_method" "apt-layer" - return 1 - ;; - esac -} - -# Verify layer with Sigstore -verify_layer_sigstore() { - local layer_path="$1" - local signature_path="$2" - local key_dir="$LAYER_SIGNING_KEYS_DIR/sigstore" - - log_info "Verifying layer with Sigstore: $layer_path" "apt-layer" - - # Find the public key - local public_key="" - for key_name in "$key_dir"/*/key.pub; do - if [[ -f "$key_name" ]]; then - public_key="$key_name" - break + # Create commit metadata with proper variable expansion + local layers_json="[" + for i in "${!layers[@]}"; do + if [[ $i -gt 0 ]]; then + layers_json+="," fi + layers_json+="\"${layers[$i]}\"" done + layers_json+="]" - if [[ -z "$public_key" ]]; then - log_error "No Sigstore public key found for verification" "apt-layer" - return 1 - fi - - # Verify the signature - if cosign verify-blob --key "$public_key" --signature "$signature_path" "$layer_path" 2>/dev/null; then - log_success "Layer signature verified with Sigstore: $layer_path" "apt-layer" - return 0 - else - log_error "Layer signature verification failed with Sigstore: $layer_path" "apt-layer" - return 1 - fi -} - -# Verify layer with GPG -verify_layer_gpg() { - local layer_path="$1" - local signature_path="$2" - - log_info "Verifying layer with GPG: $layer_path" "apt-layer" - - # Verify the signature - if gpg --verify "$signature_path" "$layer_path" 2>/dev/null; then - log_success "Layer signature verified with GPG: $layer_path" "apt-layer" - return 0 - else - log_error "Layer signature verification failed with GPG: 
$layer_path" "apt-layer" - return 1 - fi -} - -# Check if layer is revoked -check_layer_revocation() { - local layer_path="$1" - - if [[ -z "$layer_path" ]]; then - return 1 - fi - - local revocation_list="$LAYER_SIGNING_REVOCATION_DIR/revocation-list.json" - - if [[ ! -f "$revocation_list" ]]; then - return 1 - fi - - local layer_hash - layer_hash=$(sha256sum "$layer_path" 2>/dev/null | cut -d' ' -f1 || echo "") - - if [[ -n "$layer_hash" ]]; then - if jq -e ".revoked_layers[\"$layer_hash\"]" "$revocation_list" >/dev/null 2>&1; then - log_warning "Layer is revoked: $layer_path" "apt-layer" - return 0 - fi - fi - - return 1 -} - -# Revoke layer -revoke_layer() { - local layer_path="$1" - local reason="${2:-Manual revocation}" - - if [[ -z "$layer_path" ]]; then - log_error "Layer path required for revocation" "apt-layer" - return 1 - fi - - if [[ ! -f "$layer_path" ]]; then - log_error "Layer file not found: $layer_path" "apt-layer" - return 1 - fi - - log_info "Revoking layer: $layer_path" "apt-layer" - - local revocation_list="$LAYER_SIGNING_REVOCATION_DIR/revocation-list.json" - local layer_hash - layer_hash=$(sha256sum "$layer_path" | cut -d' ' -f1) - - jq --arg layer_hash "$layer_hash" \ - --arg reason "$reason" \ - --arg revoked_at "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '.revoked_layers[$layer_hash] = { - "reason": $reason, - "revoked_at": $revoked_at, - "revoked_by": "'$(whoami)'" - }' "$revocation_list" > "$revocation_list.tmp" && mv "$revocation_list.tmp" "$revocation_list" - - log_success "Layer revoked: $layer_path" "apt-layer" - return 0 -} - -# List signing keys -list_signing_keys() { - log_info "Listing signing keys" "apt-layer" - - local key_db="$LAYER_SIGNING_KEYS_DIR/keys.json" - - if [[ ! -f "$key_db" ]]; then - log_error "Key database not found" "apt-layer" - return 1 - fi - - echo "=== Signing Keys ===" - - local keys - keys=$(jq -r '.key_pairs | to_entries[] | "\(.key): \(.value.type) - \(.value.key_id) (\(.value.status))"' "$key_db" 2>/dev/null || echo "") - - if [[ -n "$keys" ]]; then - echo "$keys" | while read -r key_info; do - echo " $key_info" - done - else - log_info "No signing keys found" "apt-layer" - fi - - echo "" -} - -# List layer signatures -list_layer_signatures() { - log_info "Listing layer signatures" "apt-layer" - - local signature_db="$LAYER_SIGNING_SIGNATURES_DIR/signatures.json" - - if [[ ! -f "$signature_db" ]]; then - log_error "Signature database not found" "apt-layer" - return 1 - fi - - echo "=== Layer Signatures ===" - - local signatures - signatures=$(jq -r '.signatures | to_entries[] | "\(.key): \(.value.method) - \(.value.key_name) (\(.value.status))"' "$signature_db" 2>/dev/null || echo "") - - if [[ -n "$signatures" ]]; then - echo "$signatures" | while read -r sig_info; do - echo " $sig_info" - done - else - log_info "No layer signatures found" "apt-layer" - fi - - echo "" -} - -# Get layer signing status -get_layer_signing_status() { - local layer_path="$1" - - if [[ -z "$layer_path" ]]; then - log_error "Layer path required for status check" "apt-layer" - return 1 - fi - - log_info "Getting signing status for layer: $layer_path" "apt-layer" - - echo "=== Layer Signing Status: $layer_path ===" - - # Check if layer exists - if [[ ! 
-f "$layer_path" ]]; then - echo " Layer file not found" - return 1 - fi - - echo " Layer file exists" - - # Check for signatures - local signature_db="$LAYER_SIGNING_SIGNATURES_DIR/signatures.json" - if [[ -f "$signature_db" ]]; then - local signature_info - signature_info=$(jq -r ".signatures[\"$layer_path\"] // empty" "$signature_db" 2>/dev/null) - - if [[ -n "$signature_info" ]]; then - local method - method=$(echo "$signature_info" | jq -r '.method // "unknown"') - local key_name - key_name=$(echo "$signature_info" | jq -r '.key_name // "unknown"') - local status - status=$(echo "$signature_info" | jq -r '.status // "unknown"') - local signed_at - signed_at=$(echo "$signature_info" | jq -r '.signed_at // "unknown"') - - echo " Signed with $method using key: $key_name" - echo " Signature status: $status" - echo " Signed at: $signed_at" - else - echo " No signature found" - fi - else - echo " Signature database not found" - fi - - # Check for revocation - if check_layer_revocation "$layer_path"; then - echo " Layer is revoked" - else - echo " Layer is not revoked" - fi - - echo "" -} - -# ============================================================================= -# INTEGRATION FUNCTIONS -# ============================================================================= - -# Initialize layer signing on script startup -init_layer_signing_on_startup() { - # Only initialize if not already done and signing is enabled - if [[ "$LAYER_SIGNING_ENABLED" == "true" ]] && [[ ! -d "$LAYER_SIGNING_STATE_DIR" ]]; then - init_layer_signing - fi -} - -# Verify layer before import -verify_layer_before_import() { - local layer_path="$1" - - if [[ "$LAYER_SIGNING_VERIFY_ON_IMPORT" != "true" ]]; then - return 0 - fi - - if [[ -z "$layer_path" ]]; then - return 1 - fi - - log_info "Verifying layer before import: $layer_path" "apt-layer" - - # Check for revocation first - if check_layer_revocation "$layer_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer is revoked, import blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer is revoked but import allowed: $layer_path" "apt-layer" - fi - fi - - # Check for signature - local signature_path="$layer_path.sig" - if [[ -f "$signature_path" ]]; then - if ! verify_layer_signature "$layer_path" "$signature_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer signature verification failed, import blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer signature verification failed but import allowed: $layer_path" "apt-layer" - fi - fi - else - log_warning "No signature found for layer: $layer_path" "apt-layer" - fi - - return 0 -} - -# Verify layer before mount -verify_layer_before_mount() { - local layer_path="$1" - - if [[ "$LAYER_SIGNING_VERIFY_ON_MOUNT" != "true" ]]; then - return 0 - fi - - if [[ -z "$layer_path" ]]; then - return 1 - fi - - log_info "Verifying layer before mount: $layer_path" "apt-layer" - - # Check for revocation - if check_layer_revocation "$layer_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer is revoked, mount blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer is revoked but mount allowed: $layer_path" "apt-layer" - fi - fi - - # Check for signature - local signature_path="$layer_path.sig" - if [[ -f "$signature_path" ]]; then - if ! 
verify_layer_signature "$layer_path" "$signature_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer signature verification failed, mount blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer signature verification failed but mount allowed: $layer_path" "apt-layer" - fi - fi - else - log_warning "No signature found for layer: $layer_path" "apt-layer" - fi - - return 0 -} - -# Verify layer before activation -verify_layer_before_activation() { - local layer_path="$1" - - if [[ "$LAYER_SIGNING_VERIFY_ON_ACTIVATE" != "true" ]]; then - return 0 - fi - - if [[ -z "$layer_path" ]]; then - return 1 - fi - - log_info "Verifying layer before activation: $layer_path" "apt-layer" - - # Check for revocation - if check_layer_revocation "$layer_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer is revoked, activation blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer is revoked but activation allowed: $layer_path" "apt-layer" - fi - fi - - # Check for signature - local signature_path="$layer_path.sig" - if [[ -f "$signature_path" ]]; then - if ! verify_layer_signature "$layer_path" "$signature_path"; then - if [[ "$LAYER_SIGNING_FAIL_ON_VERIFY" == "true" ]]; then - log_error "Layer signature verification failed, activation blocked: $layer_path" "apt-layer" - return 1 - else - log_warning "Layer signature verification failed but activation allowed: $layer_path" "apt-layer" - fi - fi - else - log_warning "No signature found for layer: $layer_path" "apt-layer" - fi - - return 0 -} - -# Cleanup layer signing on script exit -cleanup_layer_signing_on_exit() { - # Clean up temporary files - rm -f "$LAYER_SIGNING_VERIFICATION_DIR"/temp-* 2>/dev/null || true -} - -# Register cleanup function -trap cleanup_layer_signing_on_exit EXIT - -# --- END OF SCRIPTLET: 11-layer-signing.sh --- - -# ============================================================================ -# Centralized Audit & Reporting (Enterprise Compliance) -# ============================================================================ - -# Ubuntu uBlue apt-layer Centralized Audit & Reporting -# Provides enterprise-grade audit logging, reporting, and compliance features -# for comprehensive security monitoring and regulatory compliance - -# ============================================================================= -# AUDIT & REPORTING FUNCTIONS -# ============================================================================= - -# Audit and reporting configuration (with fallbacks for when particle-config.sh is not loaded) -AUDIT_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/ubuntu-ublue}/audit" -AUDIT_STATE_DIR="${UBLUE_ROOT:-/var/lib/particle-os}/audit" -AUDIT_LOGS_DIR="$AUDIT_STATE_DIR/logs" -AUDIT_REPORTS_DIR="$AUDIT_STATE_DIR/reports" -AUDIT_EXPORTS_DIR="$AUDIT_STATE_DIR/exports" -AUDIT_QUERIES_DIR="$AUDIT_STATE_DIR/queries" -AUDIT_COMPLIANCE_DIR="$AUDIT_STATE_DIR/compliance" - -# Audit configuration -AUDIT_ENABLED="${AUDIT_ENABLED:-true}" -AUDIT_LOG_LEVEL="${AUDIT_LOG_LEVEL:-INFO}" -AUDIT_RETENTION_DAYS="${AUDIT_RETENTION_DAYS:-90}" -AUDIT_ROTATION_SIZE_MB="${AUDIT_ROTATION_SIZE_MB:-100}" -AUDIT_REMOTE_SHIPPING="${AUDIT_REMOTE_SHIPPING:-false}" -AUDIT_SYSLOG_ENABLED="${AUDIT_SYSLOG_ENABLED:-false}" -AUDIT_HTTP_ENDPOINT="${AUDIT_HTTP_ENDPOINT:-}" -AUDIT_HTTP_API_KEY="${AUDIT_HTTP_API_KEY:-}" - -# Initialize audit and reporting system -init_audit_reporting() { - log_info "Initializing centralized audit and reporting system" "apt-layer" - - # 
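
As elsewhere, the audit defaults above are environment-driven; a typical override for a site that ships events to a central collector (endpoint and key are placeholders):

```bash
export AUDIT_SYSLOG_ENABLED=true
export AUDIT_HTTP_ENDPOINT="https://logs.example.com/ingest"   # placeholder
export AUDIT_HTTP_API_KEY="REDACTED"                           # placeholder
export AUDIT_RETENTION_DAYS=180
```
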
Create audit and reporting directories - mkdir -p "$AUDIT_CONFIG_DIR" "$AUDIT_STATE_DIR" "$AUDIT_LOGS_DIR" - mkdir -p "$AUDIT_REPORTS_DIR" "$AUDIT_EXPORTS_DIR" "$AUDIT_QUERIES_DIR" - mkdir -p "$AUDIT_COMPLIANCE_DIR" - - # Set proper permissions - chmod 755 "$AUDIT_CONFIG_DIR" "$AUDIT_STATE_DIR" - chmod 750 "$AUDIT_LOGS_DIR" "$AUDIT_REPORTS_DIR" "$AUDIT_EXPORTS_DIR" - chmod 700 "$AUDIT_QUERIES_DIR" "$AUDIT_COMPLIANCE_DIR" - - # Initialize audit configuration - init_audit_config - - # Initialize audit log rotation - init_audit_log_rotation - - # Initialize compliance templates - init_compliance_templates - - # Initialize query cache - init_query_cache - - log_success "Centralized audit and reporting system initialized" "apt-layer" -} - -# Initialize audit configuration -init_audit_config() { - local config_file="$AUDIT_CONFIG_DIR/audit-config.json" - - if [[ ! -f "$config_file" ]]; then - cat > "$config_file" << 'EOF' + commit_data=$(cat << EOF { - "audit": { - "enabled": true, - "log_level": "INFO", - "retention_days": 90, - "rotation_size_mb": 100, - "compression_enabled": true - }, - "remote_shipping": { - "enabled": false, - "syslog_enabled": false, - "syslog_facility": "local0", - "http_endpoint": "", - "http_api_key": "", - "http_timeout": 30, - "retry_attempts": 3 - }, - "compliance": { - "sox_enabled": false, - "pci_dss_enabled": false, - "hipaa_enabled": false, - "gdpr_enabled": false, - "custom_frameworks": [] - }, - "reporting": { - "auto_generate_reports": false, - "report_schedule": "weekly", - "export_formats": ["json", "csv", "html"], - "include_sensitive_data": false - }, - "alerts": { - "enabled": false, - "critical_events": ["SECURITY_VIOLATION", "POLICY_VIOLATION"], - "notification_methods": ["email", "webhook"], - "email_recipients": [], - "webhook_url": "" - } -} -EOF - chmod 600 "$config_file" - fi -} - -# Initialize audit log rotation -init_audit_log_rotation() { - local logrotate_config="$AUDIT_CONFIG_DIR/logrotate.conf" - - if [[ ! -f "$logrotate_config" ]]; then - cat > "$logrotate_config" << 'EOF' -$AUDIT_LOGS_DIR/*.log { - daily - rotate 90 - compress - delaycompress - missingok - notifempty - create 640 root root - postrotate - systemctl reload rsyslog > /dev/null 2>&1 || true - endscript -} -EOF - chmod 644 "$logrotate_config" - fi -} - -# Initialize compliance templates -init_compliance_templates() { - # SOX compliance template - local sox_template="$AUDIT_COMPLIANCE_DIR/sox-template.json" - if [[ ! -f "$sox_template" ]]; then - cat > "$sox_template" << 'EOF' -{ - "framework": "SOX", - "version": "2002", - "requirements": { - "access_control": { - "user_management": true, - "role_based_access": true, - "privilege_escalation": true - }, - "change_management": { - "package_installation": true, - "system_modifications": true, - "deployment_approval": true - }, - "audit_trail": { - "comprehensive_logging": true, - "log_integrity": true, - "log_retention": true - } - }, - "reporting_periods": ["daily", "weekly", "monthly", "quarterly"] -} -EOF - fi - - # PCI DSS compliance template - local pci_template="$AUDIT_COMPLIANCE_DIR/pci-dss-template.json" - if [[ ! 
-f "$pci_template" ]]; then - cat > "$pci_template" << 'EOF' -{ - "framework": "PCI-DSS", - "version": "4.0", - "requirements": { - "access_control": { - "unique_user_ids": true, - "role_based_access": true, - "privilege_minimization": true - }, - "security_monitoring": { - "audit_logging": true, - "intrusion_detection": true, - "vulnerability_scanning": true - }, - "change_management": { - "change_approval": true, - "testing_procedures": true, - "rollback_capabilities": true - } - }, - "reporting_periods": ["daily", "weekly", "monthly"] -} -EOF - fi -} - -# Initialize query cache -init_query_cache() { - local query_cache="$AUDIT_QUERIES_DIR/query-cache.json" - - if [[ ! -f "$query_cache" ]]; then - cat > "$query_cache" << 'EOF' -{ - "queries": {}, - "cached_results": {}, - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" -} -EOF - chmod 600 "$query_cache" - fi -} - -# Enhanced audit logging function -log_audit_event() { - local event_type="$1" - local event_data="$2" - local severity="${3:-INFO}" - local user="${4:-$(whoami)}" - local session_id="${5:-$(echo $$)}" - - if [[ "$AUDIT_ENABLED" != "true" ]]; then - return 0 - fi - - # Create structured audit event - local audit_event - audit_event=$(cat << 'EOF' -{ - "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "event_type": "$event_type", - "severity": "$severity", - "user": "$user", - "session_id": "$session_id", - "hostname": "$(hostname)", - "data": $event_data + "commit_id": "$commit_id", + "base_image": "$base_image", + "layers": $layers_json, + "commit_message": "$commit_message", + "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", + "parent_commit": "$(get_current_deployment)", + "composefs_image": "${commit_id}.composefs" } EOF ) - # Write to local audit log - local audit_log="$AUDIT_LOGS_DIR/audit.log" - echo "$audit_event" >> "$audit_log" + # Add to deployment database + jq --arg commit_id "$commit_id" \ + --arg base_image "$base_image" \ + --arg layers_json "$layers_json" \ + --arg commit_message "$commit_message" \ + --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ + --arg parent_commit "$(get_current_deployment)" \ + --arg composefs_image "${commit_id}.composefs" \ + '.deployments[$commit_id] = { + "commit_id": $commit_id, + "base_image": $base_image, + "layers": ($layers_json | fromjson), + "commit_message": $commit_message, + "created": $created, + "parent_commit": $parent_commit, + "composefs_image": $composefs_image + } | .deployment_counter += 1' \ + "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" - # Ship to remote destinations if enabled - ship_audit_event "$audit_event" + # Create deployment history file + echo "$commit_data" > "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" - # Log to syslog if enabled - if [[ "$AUDIT_SYSLOG_ENABLED" == "true" ]]; then - logger -t "apt-layer-audit" -p "local0.info" "$audit_event" - fi + log_success "Deployment commit created: $commit_id" "apt-layer" + echo "$commit_id" } -# Ship audit event to remote destinations -ship_audit_event() { - local audit_event="$1" - - # Ship to HTTP endpoint if configured - if [[ -n "$AUDIT_HTTP_ENDPOINT" ]] && [[ -n "$AUDIT_HTTP_API_KEY" ]]; then - ship_to_http_endpoint "$audit_event" & - fi - - # Ship to syslog if enabled - if [[ "$AUDIT_SYSLOG_ENABLED" == "true" ]]; then - ship_to_syslog "$audit_event" & - fi -} - -# Ship audit event to HTTP endpoint -ship_to_http_endpoint() { - local audit_event="$1" - local config_file="$AUDIT_CONFIG_DIR/audit-config.json" - - local endpoint - endpoint=$(jq -r 
'.remote_shipping.http_endpoint' "$config_file" 2>/dev/null || echo "$AUDIT_HTTP_ENDPOINT") - local api_key - api_key=$(jq -r '.remote_shipping.http_api_key' "$config_file" 2>/dev/null || echo "$AUDIT_HTTP_API_KEY") - local timeout - timeout=$(jq -r '.remote_shipping.http_timeout // 30' "$config_file" 2>/dev/null || echo "30") - local retry_attempts - retry_attempts=$(jq -r '.remote_shipping.retry_attempts // 3' "$config_file" 2>/dev/null || echo "3") - - if [[ -z "$endpoint" ]] || [[ -z "$api_key" ]]; then - return 1 - fi - - local attempt=0 - while [[ $attempt -lt $retry_attempts ]]; do - if curl -s -X POST \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $api_key" \ - -H "User-Agent: apt-layer-audit/1.0" \ - --data "$audit_event" \ - --connect-timeout "$timeout" \ - "$endpoint" >/dev/null 2>&1; then - return 0 - fi - - ((attempt++)) - if [[ $attempt -lt $retry_attempts ]]; then - sleep $((attempt * 2)) # Exponential backoff - fi - done - - log_warning "Failed to ship audit event to HTTP endpoint after $retry_attempts attempts" "apt-layer" - return 1 -} - -# Ship audit event to syslog -ship_to_syslog() { - local audit_event="$1" - local config_file="$AUDIT_CONFIG_DIR/audit-config.json" - - local facility - facility=$(jq -r '.remote_shipping.syslog_facility // "local0"' "$config_file" 2>/dev/null || echo "local0") - - logger -t "apt-layer-audit" -p "$facility.info" "$audit_event" -} - -# Query audit logs -query_audit_logs() { - local query_params=("$@") - local output_format="${query_params[0]:-json}" - local filters=("${query_params[@]:1}") - - log_info "Querying audit logs with format: $output_format" "apt-layer" - - local audit_log="$AUDIT_LOGS_DIR/audit.log" - if [[ ! -f "$audit_log" ]]; then - log_error "Audit log not found" "apt-layer" - return 1 - fi - - # Build jq filter from parameters - local jq_filter="." 
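-    # Each recognized --key=value option below appends one jq stage; for example
-    #   query_audit_logs json --user=root --severity=ERROR --limit=10
-    # composes: . | map(select(.user == "root")) | map(select(.severity == "ERROR")) | .[0:10]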
- for filter in "${filters[@]}"; do
-        case "$filter" in
-            --user=*)
-                local user="${filter#--user=}"
-                jq_filter="$jq_filter | map(select(.user == \"$user\"))"
-                ;;
-            --event-type=*)
-                local event_type="${filter#--event-type=}"
-                jq_filter="$jq_filter | map(select(.event_type == \"$event_type\"))"
-                ;;
-            --severity=*)
-                local severity="${filter#--severity=}"
-                jq_filter="$jq_filter | map(select(.severity == \"$severity\"))"
-                ;;
-            --since=*)
-                local since="${filter#--since=}"
-                jq_filter="$jq_filter | map(select(.timestamp >= \"$since\"))"
-                ;;
-            --until=*)
-                local until="${filter#--until=}"
-                jq_filter="$jq_filter | map(select(.timestamp <= \"$until\"))"
-                ;;
-            --limit=*)
-                local limit="${filter#--limit=}"
-                # jq has no "head"; slice the slurped array instead
-                jq_filter="$jq_filter | .[0:$limit]"
-                ;;
-        esac
-    done
-
-    # Execute query (-s slurps the log into one array so the filters above apply)
-    case "$output_format" in
-        "json")
-            jq -s "$jq_filter" "$audit_log" 2>/dev/null || echo "[]"
-            ;;
-        "csv")
-            echo "timestamp,event_type,severity,user,session_id,hostname,data"
-            jq -rs "$jq_filter | .[] | [.timestamp, .event_type, .severity, .user, .session_id, .hostname, .data] | @csv" "$audit_log" 2>/dev/null || true
-            ;;
-        "table")
-            echo "Timestamp | Event Type | Severity | User | Session ID | Hostname"
-            echo "----------|------------|----------|------|------------|----------"
-            jq -rs "$jq_filter | .[] | \"\(.timestamp) | \(.event_type) | \(.severity) | \(.user) | \(.session_id) | \(.hostname)\"" "$audit_log" 2>/dev/null || true
-            ;;
-        *)
-            log_error "Unsupported output format: $output_format" "apt-layer"
-            return 1
-            ;;
-    esac
-}
-
-# Export audit logs
-export_audit_logs() {
-    local export_format="$1"
-    local output_file="$2"
-    local filters=("${@:3}")
-
-    if [[ -z "$export_format" ]]; then
-        log_error "Export format required" "apt-layer"
-        return 1
-    fi
-
-    if [[ -z "$output_file" ]]; then
-        output_file="$AUDIT_EXPORTS_DIR/audit-export-$(date +%Y%m%d-%H%M%S).$export_format"
-    fi
-
-    log_info "Exporting audit logs to: $output_file" "apt-layer"
-
-    # Create exports directory if it doesn't exist
-    mkdir -p "$(dirname "$output_file")"
-
-    # Export with filters
-    if query_audit_logs "$export_format" "${filters[@]}" > "$output_file"; then
-        log_success "Audit logs exported to: $output_file" "apt-layer"
-        log_audit_event "EXPORT_AUDIT_LOGS" "{\"format\": \"$export_format\", \"file\": \"$output_file\", \"filters\": $(printf '%s\n' "${filters[@]}" | jq -R . | jq -s .)}"
-        return 0
+# Get current deployment
+get_current_deployment() {
+    if [[ -f "$CURRENT_DEPLOYMENT_FILE" ]]; then
+        cat "$CURRENT_DEPLOYMENT_FILE" 2>/dev/null || echo ""
     else
-        log_error "Failed to export audit logs" "apt-layer"
-        return 1
-    fi
-}
-
-# Generate compliance report
-generate_compliance_report() {
-    local framework="$1"
-    local report_period="${2:-monthly}"
-    local output_format="${3:-html}"
-
-    if [[ -z "$framework" ]]; then
-        log_error "Compliance framework required" "apt-layer"
-        return 1
-    fi
-
-    log_info "Generating $framework compliance report for period: $report_period" "apt-layer"
-
-    local template_file="$AUDIT_COMPLIANCE_DIR/${framework,,}-template.json"
-    if [[ !
-f "$template_file" ]]; then - log_error "Compliance template not found: $template_file" "apt-layer" - return 1 - fi - - local report_file="$AUDIT_REPORTS_DIR/${framework,,}-compliance-$(date +%Y%m%d-%H%M%S).$output_format" - - # Generate report based on framework - case "$framework" in - "SOX"|"sox") - generate_sox_report "$template_file" "$report_period" "$output_format" "$report_file" - ;; - "PCI-DSS"|"pci_dss") - generate_pci_dss_report "$template_file" "$report_period" "$output_format" "$report_file" - ;; - *) - log_error "Unsupported compliance framework: $framework" "apt-layer" - return 1 - ;; - esac - - log_success "Compliance report generated: $report_file" "apt-layer" - log_audit_event "GENERATE_COMPLIANCE_REPORT" "{\"framework\": \"$framework\", \"period\": \"$report_period\", \"format\": \"$output_format\", \"file\": \"$report_file\"}" - return 0 -} - -# Generate SOX compliance report -generate_sox_report() { - local template_file="$1" - local report_period="$2" - local output_format="$3" - local report_file="$4" - - # Query relevant audit events - local access_control_events - access_control_events=$(query_audit_logs json --event-type=USER_ADD --event-type=USER_REMOVE --event-type=PERMISSION_CHECK) - - local change_management_events - change_management_events=$(query_audit_logs json --event-type=INSTALL_SUCCESS --event-type=REMOVE_SUCCESS --event-type=UPDATE_SUCCESS) - - local audit_trail_events - audit_trail_events=$(query_audit_logs json --event-type=EXPORT_AUDIT_LOGS --event-type=GENERATE_COMPLIANCE_REPORT) - - # Generate report content - case "$output_format" in - "html") - generate_sox_html_report "$template_file" "$report_period" "$access_control_events" "$change_management_events" "$audit_trail_events" "$report_file" - ;; - "json") - generate_sox_json_report "$template_file" "$report_period" "$access_control_events" "$change_management_events" "$audit_trail_events" "$report_file" - ;; - *) - log_error "Unsupported output format for SOX report: $output_format" "apt-layer" - return 1 - ;; - esac -} - -# Generate SOX HTML report -generate_sox_html_report() { - local template_file="$1" - local report_period="$2" - local access_control_events="$3" - local change_management_events="$4" - local audit_trail_events="$5" - local report_file="$6" - - cat > "$report_file" << 'EOF' - - - - SOX Compliance Report - $report_period - - - -
-

SOX Compliance Report

-

Period: $report_period

-

Generated: $(date -u +%Y-%m-%dT%H:%M:%SZ)

-

System: $(hostname)

-
- -
-

Access Control (Section 404)

-
-

User Management

-

Status: Compliant

-

User management events tracked and logged.

-
-
-

Role-Based Access Control

-

Status: Compliant

-

RBAC implemented with proper permission validation.

-
-
- -
-

Change Management (Section 404)

-
-

Package Installation Tracking

-

Status: Compliant

-

All package installations are logged and tracked.

-
-
-

System Modifications

-

Status: Compliant

-

System modifications are tracked through audit logs.

-
-
- -
-

Audit Trail (Section 404)

-
-

Comprehensive Logging

-

Status: Compliant

-

All critical operations are logged with timestamps and user information.

-
-
-

Log Integrity

-

Status: Compliant

-

Audit logs are protected and tamper-evident.

-
-
-</body>
-</html>
-EOF
-}
-
-# Generate SOX JSON report
-generate_sox_json_report() {
-    local template_file="$1"
-    local report_period="$2"
-    local access_control_events="$3"
-    local change_management_events="$4"
-    local audit_trail_events="$5"
-    local report_file="$6"
-
-    # Unquoted delimiter so $report_period, $(date ...) and $(hostname) expand
-    cat > "$report_file" << EOF
-{
-    "framework": "SOX",
-    "version": "2002",
-    "report_period": "$report_period",
-    "generated_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
-    "system": "$(hostname)",
-    "compliance_status": "compliant",
-    "requirements": {
-        "access_control": {
-            "status": "compliant",
-            "user_management": {
-                "status": "compliant",
-                "description": "User management events tracked and logged"
-            },
-            "role_based_access": {
-                "status": "compliant",
-                "description": "RBAC implemented with proper permission validation"
-            }
-        },
-        "change_management": {
-            "status": "compliant",
-            "package_installation": {
-                "status": "compliant",
-                "description": "All package installations are logged and tracked"
-            },
-            "system_modifications": {
-                "status": "compliant",
-                "description": "System modifications are tracked through audit logs"
-            }
-        },
-        "audit_trail": {
-            "status": "compliant",
-            "comprehensive_logging": {
-                "status": "compliant",
-                "description": "All critical operations are logged with timestamps and user information"
-            },
-            "log_integrity": {
-                "status": "compliant",
-                "description": "Audit logs are protected and tamper-evident"
-            }
-        }
-    }
-}
-EOF
-}
-
-# Generate PCI DSS compliance report
-generate_pci_dss_report() {
-    local template_file="$1"
-    local report_period="$2"
-    local output_format="$3"
-    local report_file="$4"
-
-    # Similar implementation to SOX but with PCI DSS specific requirements
-    log_info "PCI DSS report generation not yet implemented" "apt-layer"
-    return 1
-}
-
-# List audit reports
-list_audit_reports() {
-    log_info "Listing audit reports" "apt-layer"
-
-    echo "=== Audit Reports ==="
-
-    local reports
-    reports=$(find "$AUDIT_REPORTS_DIR" -name "*.html" -o -name "*.json" -o -name "*.csv" 2>/dev/null | sort -r || echo "")
-
-    if [[ -n "$reports" ]]; then
-        for report in $reports; do
-            local report_name
-            report_name=$(basename "$report")
-            local report_size
-            report_size=$(du -h "$report" | cut -f1)
-            local report_date
-            report_date=$(stat -c %y "$report" 2>/dev/null || echo "unknown")
-
-            echo " $report_name ($report_size) - $report_date"
-        done
-    else
-        log_info "No audit reports found" "apt-layer"
-    fi
-
-    echo ""
-}
-
-# Clean up old audit logs
-cleanup_old_audit_logs() {
-    local max_age_days="${1:-90}"
-
-    log_info "Cleaning up audit logs older than $max_age_days days" "apt-layer"
-
-    local removed_count=0
-
-    # Clean up old log files
-    while IFS= read -r -d '' log_file; do
-        local file_age
-        file_age=$(find "$log_file" -mtime +$max_age_days 2>/dev/null | wc -l)
-
-        if [[ $file_age -gt 0 ]]; then
-            log_info "Removing old audit log: $(basename "$log_file")" "apt-layer"
-            rm -f "$log_file"
-            ((removed_count++))
-        fi
-    done < <(find "$AUDIT_LOGS_DIR" -name "*.log*" -print0 2>/dev/null)
-
-    # Clean up old exports
-    while IFS= read -r -d '' export_file; do
-        local file_age
-        file_age=$(find "$export_file" -mtime +$max_age_days 2>/dev/null | wc -l)
-
-        if [[ $file_age -gt 0 ]]; then
-            log_info "Removing old export: $(basename "$export_file")" "apt-layer"
-            rm -f "$export_file"
-            ((removed_count++))
-        fi
-    done < <(find "$AUDIT_EXPORTS_DIR" -name "*" -print0 2>/dev/null)
-
-    log_success "Cleaned up $removed_count old audit files" "apt-layer"
-    return 0
-}
-
-# Get audit system status
-get_audit_status() {
-
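-    # (Illustrative) the reporting surface above is typically driven as:
-    #   query_audit_logs table --severity=ERROR --limit=20
-    #   export_audit_logs csv "" --since=2025-01-01T00:00:00Z
-    #   generate_compliance_report SOX monthly html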
log_info "Getting audit system status" "apt-layer" - - echo "=== Audit System Status ===" - - # General status - echo "General:" - echo " Enabled: $AUDIT_ENABLED" - echo " Log Level: $AUDIT_LOG_LEVEL" - echo " Retention Days: $AUDIT_RETENTION_DAYS" - echo " Rotation Size: ${AUDIT_ROTATION_SIZE_MB}MB" - - # Remote shipping status - echo "" - echo "Remote Shipping:" - echo " Enabled: $AUDIT_REMOTE_SHIPPING" - echo " Syslog: $AUDIT_SYSLOG_ENABLED" - echo " HTTP Endpoint: ${AUDIT_HTTP_ENDPOINT:-not configured}" - - # Log statistics - echo "" - echo "Log Statistics:" - local audit_log="$AUDIT_LOGS_DIR/audit.log" - if [[ -f "$audit_log" ]]; then - local total_entries - total_entries=$(wc -l < "$audit_log" 2>/dev/null || echo "0") - echo " Total Entries: $total_entries" - - local recent_entries - recent_entries=$(tail -100 "$audit_log" 2>/dev/null | wc -l || echo "0") - echo " Recent Entries (last 100): $recent_entries" - - local log_size - log_size=$(du -h "$audit_log" | cut -f1 2>/dev/null || echo "unknown") - echo " Log Size: $log_size" - else - echo " Audit log: not available" - fi - - # Report statistics - echo "" - echo "Report Statistics:" - local report_count - report_count=$(find "$AUDIT_REPORTS_DIR" -name "*.html" -o -name "*.json" -o -name "*.csv" 2>/dev/null | wc -l || echo "0") - echo " Total Reports: $report_count" - - local export_count - export_count=$(find "$AUDIT_EXPORTS_DIR" -name "*" 2>/dev/null | wc -l || echo "0") - echo " Total Exports: $export_count" - - echo "" -} - -# ============================================================================= -# INTEGRATION FUNCTIONS -# ============================================================================= - -# Initialize audit reporting on script startup -init_audit_reporting_on_startup() { - # Only initialize if not already done - if [[ ! 
-d "$AUDIT_STATE_DIR" ]]; then - init_audit_reporting - fi -} - -# Cleanup audit reporting on script exit -cleanup_audit_reporting_on_exit() { - # Clean up temporary files - rm -f "$AUDIT_QUERIES_DIR"/temp-* 2>/dev/null || true - rm -f "$AUDIT_EXPORTS_DIR"/temp-* 2>/dev/null || true -} - -# Register cleanup function -trap cleanup_audit_reporting_on_exit EXIT - -# --- END OF SCRIPTLET: 12-audit-reporting.sh --- - -# ============================================================================ -# Automated Security Scanning (Enterprise Security) -# ============================================================================ - -# Ubuntu uBlue apt-layer Automated Security Scanning -# Provides enterprise-grade security scanning, CVE checking, and policy enforcement -# for comprehensive security monitoring and vulnerability management - -# ============================================================================= -# SECURITY SCANNING FUNCTIONS -# ============================================================================= - -# Security scanning configuration (with fallbacks for when particle-config.sh is not loaded) -SECURITY_CONFIG_DIR="${UBLUE_CONFIG_DIR:-/etc/ubuntu-ublue}/security" -SECURITY_STATE_DIR="${UBLUE_ROOT:-/var/lib/particle-os}/security" -SECURITY_SCANS_DIR="$SECURITY_STATE_DIR/scans" -SECURITY_REPORTS_DIR="$SECURITY_STATE_DIR/reports" -SECURITY_CACHE_DIR="$SECURITY_STATE_DIR/cache" -SECURITY_POLICIES_DIR="$SECURITY_STATE_DIR/policies" -SECURITY_CVE_DB_DIR="$SECURITY_STATE_DIR/cve-db" - -# Security configuration -SECURITY_ENABLED="${SECURITY_ENABLED:-true}" -SECURITY_SCAN_LEVEL="${SECURITY_SCAN_LEVEL:-standard}" -SECURITY_AUTO_SCAN="${SECURITY_AUTO_SCAN:-false}" -SECURITY_CVE_CHECKING="${SECURITY_CVE_CHECKING:-true}" -SECURITY_POLICY_ENFORCEMENT="${SECURITY_POLICY_ENFORCEMENT:-true}" -SECURITY_SCAN_INTERVAL_HOURS="${SECURITY_SCAN_INTERVAL_HOURS:-24}" -SECURITY_REPORT_RETENTION_DAYS="${SECURITY_REPORT_RETENTION_DAYS:-90}" - -# Initialize security scanning system -init_security_scanning() { - log_info "Initializing automated security scanning system" "apt-layer" - - # Create security scanning directories - mkdir -p "$SECURITY_CONFIG_DIR" "$SECURITY_STATE_DIR" "$SECURITY_SCANS_DIR" - mkdir -p "$SECURITY_REPORTS_DIR" "$SECURITY_CACHE_DIR" "$SECURITY_POLICIES_DIR" - mkdir -p "$SECURITY_CVE_DB_DIR" - - # Set proper permissions - chmod 755 "$SECURITY_CONFIG_DIR" "$SECURITY_STATE_DIR" - chmod 750 "$SECURITY_SCANS_DIR" "$SECURITY_REPORTS_DIR" "$SECURITY_CACHE_DIR" - chmod 700 "$SECURITY_POLICIES_DIR" "$SECURITY_CVE_DB_DIR" - - # Initialize security configuration - init_security_config - - # Initialize CVE database - init_cve_database - - # Initialize security policies - init_security_policies - - # Initialize scan cache - init_scan_cache - - log_success "Automated security scanning system initialized" "apt-layer" -} - -# Initialize security configuration -init_security_config() { - local config_file="$SECURITY_CONFIG_DIR/security-config.json" - - if [[ ! 
-f "$config_file" ]]; then - cat > "$config_file" << EOF -{ - "security": { - "enabled": true, - "scan_level": "standard", - "auto_scan": false, - "cve_checking": true, - "policy_enforcement": true, - "scan_interval_hours": 24, - "report_retention_days": 90 - }, - "scanning": { - "package_scanning": true, - "layer_scanning": true, - "system_scanning": true, - "dependency_scanning": true, - "vulnerability_scanning": true - }, - "cve": { - "database_url": "https://nvd.nist.gov/vuln/data-feeds", - "update_interval_hours": 6, - "severity_threshold": "MEDIUM", - "auto_update": true - }, - "policies": { - "critical_vulnerabilities": "BLOCK", - "high_vulnerabilities": "WARN", - "medium_vulnerabilities": "LOG", - "low_vulnerabilities": "LOG", - "unknown_severity": "WARN" - }, - "reporting": { - "auto_generate_reports": false, - "report_format": "html", - "include_recommendations": true, - "include_remediation": true - } -} -EOF - chmod 600 "$config_file" - fi -} - -# Initialize CVE database -init_cve_database() { - local cve_db_file="$SECURITY_CVE_DB_DIR/cve-database.json" - - if [[ ! -f "$cve_db_file" ]]; then - cat > "$cve_db_file" << EOF -{ - "metadata": { - "version": "1.0", - "last_updated": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "source": "NVD", - "total_cves": 0 - }, - "cves": {}, - "packages": {}, - "severity_levels": { - "CRITICAL": 4, - "HIGH": 3, - "MEDIUM": 2, - "LOW": 1, - "UNKNOWN": 0 - } -} -EOF - chmod 600 "$cve_db_file" - fi -} - -# Initialize security policies -init_security_policies() { - # Default security policy - local default_policy="$SECURITY_POLICIES_DIR/default-policy.json" - if [[ ! -f "$default_policy" ]]; then - cat > "$default_policy" << EOF -{ - "policy_name": "default", - "version": "1.0", - "description": "Default security policy for Ubuntu uBlue apt-layer", - "rules": { - "critical_vulnerabilities": { - "action": "BLOCK", - "description": "Block installation of packages with critical vulnerabilities" - }, - "high_vulnerabilities": { - "action": "WARN", - "description": "Warn about packages with high vulnerabilities" - }, - "medium_vulnerabilities": { - "action": "LOG", - "description": "Log packages with medium vulnerabilities" - }, - "low_vulnerabilities": { - "action": "LOG", - "description": "Log packages with low vulnerabilities" - }, - "unknown_severity": { - "action": "WARN", - "description": "Warn about packages with unknown vulnerability status" - } - }, - "exceptions": [], - "enabled": true -} -EOF - chmod 600 "$default_policy" - fi -} - -# Initialize scan cache -init_scan_cache() { - local cache_file="$SECURITY_CACHE_DIR/scan-cache.json" - - if [[ ! 
-f "$cache_file" ]]; then - cat > "$cache_file" << EOF -{ - "cache_metadata": { - "version": "1.0", - "created": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "last_cleaned": "$(date -u +%Y-%m-%dT%H:%M:%SZ)" - }, - "package_scans": {}, - "layer_scans": {}, - "system_scans": {}, - "cve_checks": {} -} -EOF - chmod 600 "$cache_file" - fi -} - -# Scan package for vulnerabilities -scan_package() { - local package_name="$1" - local package_version="${2:-}" - local scan_level="${3:-standard}" - - log_info "Scanning package: $package_name" "apt-layer" - - # Check cache first - local cache_key="${package_name}_${package_version}_${scan_level}" - local cached_result - cached_result=$(get_cached_scan_result "package_scans" "$cache_key") - - if [[ -n "$cached_result" ]]; then - log_info "Using cached scan result for $package_name" "apt-layer" - echo "$cached_result" - return 0 - fi - - # Perform package scan - local scan_result - scan_result=$(perform_package_scan "$package_name" "$package_version" "$scan_level") - - # Cache the result - cache_scan_result "package_scans" "$cache_key" "$scan_result" - - # Apply security policy - apply_security_policy "$package_name" "$scan_result" - - echo "$scan_result" -} - -# Perform package vulnerability scan -perform_package_scan() { - local package_name="$1" - local package_version="$2" - local scan_level="$3" - - # Create scan result structure - local scan_result - scan_result=$(cat << 'EOF' -{ - "package": "$package_name", - "version": "$package_version", - "scan_level": "$scan_level", - "scan_timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "vulnerabilities": [], - "security_score": 100, - "recommendations": [], - "status": "clean" -} -EOF -) - - # Check for known vulnerabilities - local vulnerabilities - vulnerabilities=$(check_package_vulnerabilities "$package_name" "$package_version") - - if [[ -n "$vulnerabilities" ]]; then - # Update scan result with vulnerabilities - scan_result=$(echo "$scan_result" | jq --argjson vulns "$vulnerabilities" '.vulnerabilities = $vulns') - - # Calculate security score - local security_score - security_score=$(calculate_security_score "$vulnerabilities") - scan_result=$(echo "$scan_result" | jq --arg score "$security_score" '.security_score = ($score | tonumber)') - - # Update status - scan_result=$(echo "$scan_result" | jq '.status = "vulnerable"') - - # Generate recommendations - local recommendations - recommendations=$(generate_security_recommendations "$vulnerabilities") - scan_result=$(echo "$scan_result" | jq --argjson recs "$recommendations" '.recommendations = $recs') - fi - - echo "$scan_result" -} - -# Check package for known vulnerabilities -check_package_vulnerabilities() { - local package_name="$1" - local package_version="$2" - - local cve_db_file="$SECURITY_CVE_DB_DIR/cve-database.json" - - if [[ ! 
-f "$cve_db_file" ]]; then - log_warning "CVE database not found, skipping vulnerability check" "apt-layer" - return 0 - fi - - # Search for package in CVE database - local vulnerabilities - vulnerabilities=$(jq -r --arg pkg "$package_name" '.packages[$pkg] // []' "$cve_db_file" 2>/dev/null || echo "[]") - - if [[ "$vulnerabilities" == "[]" ]]; then - # Try alternative package name formats - local alt_names=("${package_name}-dev" "${package_name}-common" "lib${package_name}") - - for alt_name in "${alt_names[@]}"; do - local alt_vulns - alt_vulns=$(jq -r --arg pkg "$alt_name" '.packages[$pkg] // []' "$cve_db_file" 2>/dev/null || echo "[]") - - if [[ "$alt_vulns" != "[]" ]]; then - vulnerabilities="$alt_vulns" - break - fi - done - fi - - echo "$vulnerabilities" -} - -# Calculate security score based on vulnerabilities -calculate_security_score() { - local vulnerabilities="$1" - - local score=100 - local critical_count=0 - local high_count=0 - local medium_count=0 - local low_count=0 - - # Count vulnerabilities by severity - critical_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "CRITICAL")] | length' 2>/dev/null || echo "0") - high_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "HIGH")] | length' 2>/dev/null || echo "0") - medium_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "MEDIUM")] | length' 2>/dev/null || echo "0") - low_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "LOW")] | length' 2>/dev/null || echo "0") - - # Calculate score (critical: -20, high: -10, medium: -5, low: -1) - score=$((score - (critical_count * 20) - (high_count * 10) - (medium_count * 5) - low_count)) - - # Ensure score doesn't go below 0 - if [[ $score -lt 0 ]]; then - score=0 - fi - - echo "$score" -} - -# Generate security recommendations -generate_security_recommendations() { - local vulnerabilities="$1" - - local recommendations="[]" - - # Check for critical vulnerabilities - local critical_count - critical_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "CRITICAL")] | length' 2>/dev/null || echo "0") - - if [[ $critical_count -gt 0 ]]; then - recommendations=$(echo "$recommendations" | jq '. += ["Do not install packages with critical vulnerabilities"]') - fi - - # Check for high vulnerabilities - local high_count - high_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.severity == "HIGH")] | length' 2>/dev/null || echo "0") - - if [[ $high_count -gt 0 ]]; then - recommendations=$(echo "$recommendations" | jq '. += ["Consider alternative packages or wait for security updates"]') - fi - - # Check for outdated packages - local outdated_count - outdated_count=$(echo "$vulnerabilities" | jq -r '[.[] | select(.type == "outdated")] | length' 2>/dev/null || echo "0") - - if [[ $outdated_count -gt 0 ]]; then - recommendations=$(echo "$recommendations" | jq '. += ["Update to latest version when available"]') - fi - - echo "$recommendations" -} - -# Apply security policy to scan result -apply_security_policy() { - local package_name="$1" - local scan_result="$2" - - local policy_file="$SECURITY_POLICIES_DIR/default-policy.json" - - if [[ ! 
-f "$policy_file" ]]; then - log_warning "Security policy not found, skipping policy enforcement" "apt-layer" - return 0 - fi - - # Get highest severity vulnerability - local highest_severity - highest_severity=$(echo "$scan_result" | jq -r '.vulnerabilities | map(.severity) | sort | reverse | .[0] // "UNKNOWN"' 2>/dev/null || echo "UNKNOWN") - - # Get policy action for this severity - local policy_action - policy_action=$(jq -r --arg sev "$highest_severity" '.rules[$sev + "_vulnerabilities"].action // "LOG"' "$policy_file" 2>/dev/null || echo "LOG") - - case "$policy_action" in - "BLOCK") - log_error "Security policy BLOCKED installation of $package_name (severity: $highest_severity)" "apt-layer" - log_audit_event "SECURITY_POLICY_BLOCK" "{\"package\": \"$package_name\", \"severity\": \"$highest_severity\", \"policy_action\": \"$policy_action\"}" "WARNING" - return 1 - ;; - "WARN") - log_warning "Security policy WARNING for $package_name (severity: $highest_severity)" "apt-layer" - log_audit_event "SECURITY_POLICY_WARN" "{\"package\": \"$package_name\", \"severity\": \"$highest_severity\", \"policy_action\": \"$policy_action\"}" "WARNING" - ;; - "LOG") - log_info "Security policy LOGGED $package_name (severity: $highest_severity)" "apt-layer" - log_audit_event "SECURITY_POLICY_LOG" "{\"package\": \"$package_name\", \"severity\": \"$highest_severity\", \"policy_action\": \"$policy_action\"}" "INFO" - ;; - *) - log_info "Security policy action $policy_action for $package_name (severity: $highest_severity)" "apt-layer" - ;; - esac - - return 0 -} - -# Scan layer for vulnerabilities -scan_layer() { - local layer_path="$1" - local scan_level="${2:-standard}" - - log_info "Scanning layer: $layer_path" "apt-layer" - - # Check cache first - local cache_key="${layer_path}_${scan_level}" - local cached_result - cached_result=$(get_cached_scan_result "layer_scans" "$cache_key") - - if [[ -n "$cached_result" ]]; then - log_info "Using cached scan result for layer" "apt-layer" - echo "$cached_result" - return 0 - fi - - # Extract packages from layer - local packages - packages=$(extract_packages_from_layer "$layer_path") - - # Scan each package - local layer_scan_result - layer_scan_result=$(cat << 'EOF' -{ - "layer": "$layer_path", - "scan_level": "$scan_level", - "scan_timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "packages": [], - "total_vulnerabilities": 0, - "security_score": 100, - "status": "clean" -} -EOF -) - - local total_vulnerabilities=0 - local total_score=0 - local package_count=0 - - while IFS= read -r package; do - if [[ -n "$package" ]]; then - local package_scan - package_scan=$(scan_package "$package" "" "$scan_level") - - # Add package to layer scan result - layer_scan_result=$(echo "$layer_scan_result" | jq --argjson pkg_scan "$package_scan" '.packages += [$pkg_scan]') - - # Count vulnerabilities - local vuln_count - vuln_count=$(echo "$package_scan" | jq -r '.vulnerabilities | length' 2>/dev/null || echo "0") - total_vulnerabilities=$((total_vulnerabilities + vuln_count)) - - # Accumulate score - local pkg_score - pkg_score=$(echo "$package_scan" | jq -r '.security_score' 2>/dev/null || echo "100") - total_score=$((total_score + pkg_score)) - package_count=$((package_count + 1)) - fi - done <<< "$packages" - - # Calculate average security score - if [[ $package_count -gt 0 ]]; then - local avg_score=$((total_score / package_count)) - layer_scan_result=$(echo "$layer_scan_result" | jq --arg score "$avg_score" '.security_score = ($score | tonumber)') - fi - - # Update total 
vulnerabilities - layer_scan_result=$(echo "$layer_scan_result" | jq --arg vulns "$total_vulnerabilities" '.total_vulnerabilities = ($vulns | tonumber)') - - # Update status - if [[ $total_vulnerabilities -gt 0 ]]; then - layer_scan_result=$(echo "$layer_scan_result" | jq '.status = "vulnerable"') - fi - - # Cache the result - cache_scan_result "layer_scans" "$cache_key" "$layer_scan_result" - - echo "$layer_scan_result" -} - -# Extract packages from layer -extract_packages_from_layer() { - local layer_path="$1" - - # This is a simplified implementation - # In a real implementation, you would extract the actual package list from the layer - local temp_dir - temp_dir=$(mktemp -d) - - # Mount layer and extract package information - if mount_layer "$layer_path" "$temp_dir"; then - # Extract package list (simplified) - local packages - packages=$(find "$temp_dir" -name "*.deb" -exec basename {} \; 2>/dev/null | sed 's/_.*$//' || echo "") - - # Cleanup - umount_layer "$temp_dir" - rmdir "$temp_dir" 2>/dev/null || true - - echo "$packages" - else - log_warning "Failed to mount layer for package extraction" "apt-layer" echo "" fi } -# Mount layer for scanning -mount_layer() { - local layer_path="$1" - local mount_point="$2" - - # Simplified mount implementation - # In a real implementation, you would use appropriate mounting for the layer format - if [[ -f "$layer_path" ]]; then - # For squashfs layers - mount -t squashfs "$layer_path" "$mount_point" 2>/dev/null || return 1 - elif [[ -d "$layer_path" ]]; then - # For directory layers - mount --bind "$layer_path" "$mount_point" 2>/dev/null || return 1 +# Get pending deployment +get_pending_deployment() { + if [[ -f "$PENDING_DEPLOYMENT_FILE" ]]; then + cat "$PENDING_DEPLOYMENT_FILE" 2>/dev/null || echo "" else + echo "" + fi +} + +# Set current deployment +set_current_deployment() { + local commit_id="$1" + echo "$commit_id" > "$CURRENT_DEPLOYMENT_FILE" + + # Update deployment database + jq --arg commit_id "$commit_id" '.current_deployment = $commit_id' \ + "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" + + log_info "Current deployment set to: $commit_id" "apt-layer" +} + +# Set pending deployment +set_pending_deployment() { + local commit_id="$1" + echo "$commit_id" > "$PENDING_DEPLOYMENT_FILE" + + # Update deployment database + jq --arg commit_id "$commit_id" '.pending_deployment = $commit_id' \ + "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" + + log_info "Pending deployment set to: $commit_id" "apt-layer" +} + +# Clear pending deployment +clear_pending_deployment() { + echo "" > "$PENDING_DEPLOYMENT_FILE" + + # Update deployment database + jq '.pending_deployment = null' \ + "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" + + log_info "Pending deployment cleared" "apt-layer" +} + +# Atomic deployment function +atomic_deploy() { + local commit_id="$1" + local deployment_dir="/var/lib/particle-os/deployments/${commit_id}" + local boot_dir="/boot/loader/entries" + + log_info "Performing atomic deployment: $commit_id" "apt-layer" + + # Validate commit exists + if ! 
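# Hardening sketch: the jq-to-temp-then-mv pattern used by the deployment
# setters is atomic per write, but two concurrent callers can still interleave
# their read-modify-write and lose an update. An flock(1) guard around the
# critical section closes that window (lock file path is illustrative):
(
    flock -x 9
    jq --arg c "$commit_id" '.pending_deployment = $c' "$DEPLOYMENT_DB" \
        > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB"
) 9>"${DEPLOYMENT_DB}.lock"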
jq -e ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then + log_error "Commit not found: $commit_id" "apt-layer" return 1 fi + # Get commit data + local commit_data + commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") + local composefs_image + composefs_image=$(echo "$commit_data" | jq -r '.composefs_image') + + # Create deployment directory + mkdir -p "$deployment_dir" + + # Mount the ComposeFS image + if ! composefs_mount "$composefs_image" "$deployment_dir"; then + log_error "Failed to mount ComposeFS image for deployment" "apt-layer" + return 1 + fi + + # Apply kernel arguments to deployment + apply_kernel_args_to_deployment "$commit_id" + + # Create bootloader entry + create_bootloader_entry "$commit_id" "$deployment_dir" + + # Set as pending deployment (will activate on next boot) + set_pending_deployment "$commit_id" + + log_success "Atomic deployment prepared: $commit_id" "apt-layer" + log_info "Reboot to activate deployment" "apt-layer" return 0 } -# Unmount layer -umount_layer() { - local mount_point="$1" +# True system upgrade (not package upgrade) +system_upgrade() { + local new_base_image="${1:-}" + local current_layers=() - umount "$mount_point" 2>/dev/null || true -} - -# Get cached scan result -get_cached_scan_result() { - local cache_type="$1" - local cache_key="$2" + log_info "Performing true system upgrade..." "apt-layer" - local cache_file="$SECURITY_CACHE_DIR/scan-cache.json" + # Get current deployment + local current_commit + current_commit=$(get_current_deployment) - if [[ ! -f "$cache_file" ]]; then - return 1 + if [[ -n "$current_commit" ]]; then + # Get current layers from deployment + current_layers=($(jq -r ".deployments[\"$current_commit\"].layers[]" "$DEPLOYMENT_DB" 2>/dev/null || true)) + log_info "Current layers: ${current_layers[*]}" "apt-layer" fi - # Check if cache entry exists and is not expired - local cached_result - cached_result=$(jq -r --arg type "$cache_type" --arg key "$cache_key" '.[$type][$key] // empty' "$cache_file" 2>/dev/null) - - if [[ -n "$cached_result" ]]; then - # Check if cache is still valid (24 hours) - local cache_timestamp - cache_timestamp=$(echo "$cached_result" | jq -r '.cache_timestamp' 2>/dev/null || echo "") - - if [[ -n "$cache_timestamp" ]]; then - local cache_age - cache_age=$(($(date +%s) - $(date -d "$cache_timestamp" +%s))) - - if [[ $cache_age -lt 86400 ]]; then # 24 hours - echo "$cached_result" - return 0 - fi + # If no new base specified, try to find one + if [[ -z "$new_base_image" ]]; then + new_base_image=$(find_newer_base_image) + if [[ -z "$new_base_image" ]]; then + log_info "No newer base image found" "apt-layer" + return 0 fi fi - return 1 -} - -# Cache scan result -cache_scan_result() { - local cache_type="$1" - local cache_key="$2" - local scan_result="$3" + log_info "Upgrading to base image: $new_base_image" "apt-layer" - local cache_file="$SECURITY_CACHE_DIR/scan-cache.json" - - # Add cache timestamp - local cached_result - cached_result=$(echo "$scan_result" | jq --arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" '.cache_timestamp = $timestamp') - - # Update cache file - jq --arg type "$cache_type" --arg key "$cache_key" --argjson result "$cached_result" '.[$type][$key] = $result' "$cache_file" > "$cache_file.tmp" && mv "$cache_file.tmp" "$cache_file" 2>/dev/null || true -} - -# Update CVE database -update_cve_database() { - log_info "Updating CVE database" "apt-layer" - - local cve_db_file="$SECURITY_CVE_DB_DIR/cve-database.json" - local 
config_file="$SECURITY_CONFIG_DIR/security-config.json" - - # Get database URL from config - local db_url - db_url=$(jq -r '.cve.database_url // "https://nvd.nist.gov/vuln/data-feeds"' "$config_file" 2>/dev/null || echo "https://nvd.nist.gov/vuln/data-feeds") - - # Download latest CVE data (simplified implementation) - local temp_file - temp_file=$(mktemp) - - if curl -s -L "$db_url" > "$temp_file" 2>/dev/null; then - # Process and update database (simplified) - log_success "CVE database updated successfully" "apt-layer" - log_audit_event "CVE_DATABASE_UPDATE" "{\"status\": \"success\", \"source\": \"$db_url\"}" "INFO" - else - log_error "Failed to update CVE database" "apt-layer" - log_audit_event "CVE_DATABASE_UPDATE" "{\"status\": \"failed\", \"source\": \"$db_url\"}" "ERROR" - return 1 - fi - - rm -f "$temp_file" - return 0 -} - -# Generate security report -generate_security_report() { - local report_type="$1" - local output_format="${2:-html}" - local scan_level="${3:-standard}" - - log_info "Generating security report: $report_type" "apt-layer" - - local report_file="$SECURITY_REPORTS_DIR/security-report-$(date +%Y%m%d-%H%M%S).$output_format" - - case "$report_type" in - "package") - generate_package_security_report "$output_format" "$scan_level" "$report_file" - ;; - "layer") - generate_layer_security_report "$output_format" "$scan_level" "$report_file" - ;; - "system") - generate_system_security_report "$output_format" "$scan_level" "$report_file" - ;; - *) - log_error "Unknown report type: $report_type" "apt-layer" - return 1 - ;; - esac - - log_success "Security report generated: $report_file" "apt-layer" - log_audit_event "GENERATE_SECURITY_REPORT" "{\"type\": \"$report_type\", \"format\": \"$output_format\", \"file\": \"$report_file\"}" - return 0 -} - -# Generate package security report -generate_package_security_report() { - local output_format="$1" - local scan_level="$2" - local report_file="$3" - - case "$output_format" in - "html") - generate_package_html_report "$scan_level" "$report_file" - ;; - "json") - generate_package_json_report "$scan_level" "$report_file" - ;; - *) - log_error "Unsupported output format for package report: $output_format" "apt-layer" - return 1 - ;; - esac -} - -# Generate package HTML report -generate_package_html_report() { - local scan_level="$1" - local report_file="$2" - - cat > "$report_file" << EOF - - - - Package Security Report - $scan_level - - - -
-<h1>Package Security Report</h1>
-<p>Scan Level: $scan_level</p>
-<p>Generated: $(date -u +%Y-%m-%dT%H:%M:%SZ)</p>
-<p>System: $(hostname)</p>
-<h2>Security Summary</h2>
-<p>This report provides a comprehensive security analysis of scanned packages.</p>
-<p>Scan level: $scan_level</p>
-<h2>Recommendations</h2>
- - -EOF -} - -# Generate package JSON report -generate_package_json_report() { - local scan_level="$1" - local report_file="$2" - - cat > "$report_file" << EOF -{ - "report_type": "package_security", - "scan_level": "$scan_level", - "generated_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", - "system": "$(hostname)", - "summary": { - "total_packages_scanned": 0, - "vulnerable_packages": 0, - "critical_vulnerabilities": 0, - "high_vulnerabilities": 0, - "medium_vulnerabilities": 0, - "low_vulnerabilities": 0 - }, - "packages": [], - "recommendations": [ - "Review all critical and high severity vulnerabilities", - "Update packages to latest secure versions", - "Consider alternative packages for persistent vulnerabilities", - "Implement security policies to prevent vulnerable package installation" - ] -} -EOF -} - -# Generate layer security report -generate_layer_security_report() { - local output_format="$1" - local scan_level="$2" - local report_file="$3" - - # Similar implementation to package report but for layers - log_info "Layer security report generation not yet implemented" "apt-layer" - return 1 -} - -# Generate system security report -generate_system_security_report() { - local output_format="$1" - local scan_level="$2" - local report_file="$3" - - # Similar implementation to package report but for system-wide analysis - log_info "System security report generation not yet implemented" "apt-layer" - return 1 -} - -# Get security scanning status -get_security_status() { - log_info "Getting security scanning system status" "apt-layer" - - echo "=== Security Scanning System Status ===" - - # General status - echo "General:" - echo " Enabled: $SECURITY_ENABLED" - echo " Scan Level: $SECURITY_SCAN_LEVEL" - echo " Auto Scan: $SECURITY_AUTO_SCAN" - echo " CVE Checking: $SECURITY_CVE_CHECKING" - echo " Policy Enforcement: $SECURITY_POLICY_ENFORCEMENT" - - # CVE database status - echo "" - echo "CVE Database:" - local cve_db_file="$SECURITY_CVE_DB_DIR/cve-database.json" - if [[ -f "$cve_db_file" ]]; then - local last_updated - last_updated=$(jq -r '.metadata.last_updated' "$cve_db_file" 2>/dev/null || echo "unknown") - local total_cves - total_cves=$(jq -r '.metadata.total_cves' "$cve_db_file" 2>/dev/null || echo "0") - echo " Last Updated: $last_updated" - echo " Total CVEs: $total_cves" - else - echo " Status: Not initialized" - fi - - # Scan statistics - echo "" - echo "Scan Statistics:" - local cache_file="$SECURITY_CACHE_DIR/scan-cache.json" - if [[ -f "$cache_file" ]]; then - local package_scans - package_scans=$(jq -r '.package_scans | keys | length' "$cache_file" 2>/dev/null || echo "0") - local layer_scans - layer_scans=$(jq -r '.layer_scans | keys | length' "$cache_file" 2>/dev/null || echo "0") - echo " Cached Package Scans: $package_scans" - echo " Cached Layer Scans: $layer_scans" - else - echo " Cache: Not initialized" - fi - - # Report statistics - echo "" - echo "Report Statistics:" - local report_count - report_count=$(find "$SECURITY_REPORTS_DIR" -name "*.html" -o -name "*.json" 2>/dev/null | wc -l || echo "0") - echo " Total Reports: $report_count" - - echo "" -} - -# Clean up old security reports -cleanup_old_security_reports() { - local max_age_days="${1:-90}" - - log_info "Cleaning up security reports older than $max_age_days days" "apt-layer" - - local removed_count=0 - - # Clean up old reports - while IFS= read -r report_file; do - local file_age - file_age=$(find "$report_file" -mtime +$max_age_days 2>/dev/null | wc -l) + # Rebase existing layers on new base + local 
rebased_layers=() + for layer in "${current_layers[@]}"; do + local new_layer="${layer}-rebased-$(date +%Y%m%d)" + log_info "Rebasing layer: $layer -> $new_layer" "apt-layer" - if [[ $file_age -gt 0 ]]; then - log_info "Removing old security report: $(basename "$report_file")" "apt-layer" - rm -f "$report_file" - ((removed_count++)) - fi - done < <(find "$SECURITY_REPORTS_DIR" -name "*.html" -o -name "*.json" 2>/dev/null) - - log_success "Cleaned up $removed_count old security reports" "apt-layer" - return 0 -} - -# ============================================================================= -# INTEGRATION FUNCTIONS -# ============================================================================= - -# Initialize security scanning on script startup -init_security_scanning_on_startup() { - # Only initialize if not already done - if [[ ! -d "$SECURITY_STATE_DIR" ]]; then - init_security_scanning - fi -} - -# Cleanup security scanning on script exit -cleanup_security_scanning_on_exit() { - # Clean up temporary files - rm -f "$SECURITY_CACHE_DIR"/temp-* 2>/dev/null || true - rm -f "$SECURITY_SCANS_DIR"/temp-* 2>/dev/null || true -} - -# Register cleanup function -trap cleanup_security_scanning_on_exit EXIT - -# --- END OF SCRIPTLET: 13-security-scanning.sh --- - -# ============================================================================ -# Admin Utilities (Health Monitoring, Analytics, Maintenance) -# ============================================================================ - -# 14-admin-utilities.sh - Admin Utilities for Particle-OS apt-layer -# Provides system health monitoring, performance analytics, and admin tools - -# --- Color and Symbols --- -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -RED='\033[0;31m' -CYAN='\033[0;36m' -NC='\033[0m' -CHECK="" -WARN=" " -CROSS="" -INFO=" " - -# --- Helper: Check for WSL --- -is_wsl() { - grep -qi microsoft /proc/version 2>/dev/null -} - -get_wsl_version() { - if is_wsl; then - if grep -q WSL2 /proc/version 2>/dev/null; then - echo "WSL2" + if "$0" --rebase "$layer" "$new_base_image" "$new_layer"; then + rebased_layers+=("$new_layer") else - echo "WSL1" + log_error "Failed to rebase layer: $layer" "apt-layer" + return 1 fi - fi -} - -# --- System Health Monitoring --- -health_check() { - local health_status=0 - echo -e "${CYAN}================= System Health Check =================${NC}" - echo -e "${INFO} Hostname: $(hostname 2>/dev/null || echo N/A)" - echo -e "${INFO} Uptime: $(uptime -p 2>/dev/null || echo N/A)" - echo -e "${INFO} Kernel: $(uname -r 2>/dev/null || echo N/A)" - if is_wsl; then - echo -e "${INFO} WSL: $(get_wsl_version)" - fi - echo -e "${INFO} Load Avg: $(awk '{print $1, $2, $3}' /proc/loadavg 2>/dev/null || echo N/A)" - # CPU Info - if command -v lscpu &>/dev/null; then - cpu_model=$(lscpu | grep 'Model name' | awk -F: '{print $2}' | xargs) - cpu_cores=$(lscpu | grep '^CPU(s):' | awk '{print $2}') - echo -e "${INFO} CPU: $cpu_model ($cpu_cores cores)" - else - echo -e "${WARN} CPU: lscpu not available" - health_status=1 - fi - # Memory - if command -v free &>/dev/null; then - mem_line=$(free -m | grep Mem) - mem_total=$(echo $mem_line | awk '{print $2}') - mem_used=$(echo $mem_line | awk '{print $3}') - mem_free=$(echo $mem_line | awk '{print $4}') - mem_perc=$((100 * mem_used / mem_total)) - echo -e "${INFO} Memory: ${mem_total}MiB total, ${mem_used}MiB used (${mem_perc}%)" - else - echo -e "${WARN} Memory: free not available" - health_status=1 - fi - # Disk - if command -v df &>/dev/null; then - disk_root=$(df -h / | tail -1) 
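# Sketch: GNU df can emit just the fields the health check needs, avoiding the
# positional awk parsing used below:
read -r disk_total disk_used disk_avail disk_perc < <(
    df -h --output=size,used,avail,pcent / | tail -n 1
)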
- disk_total=$(echo $disk_root | awk '{print $2}') - disk_used=$(echo $disk_root | awk '{print $3}') - disk_avail=$(echo $disk_root | awk '{print $4}') - disk_perc=$(echo $disk_root | awk '{print $5}') - echo -e "${INFO} Disk /: $disk_total total, $disk_used used, $disk_avail free ($disk_perc)" - if [ -d /var/lib/particle-os ]; then - disk_ublue=$(df -h /var/lib/particle-os 2>/dev/null | tail -1) - if [ -n "$disk_ublue" ]; then - ublue_total=$(echo $disk_ublue | awk '{print $2}') - ublue_used=$(echo $disk_ublue | awk '{print $3}') - ublue_avail=$(echo $disk_ublue | awk '{print $4}') - ublue_perc=$(echo $disk_ublue | awk '{print $5}') - echo -e "${INFO} Disk /var/lib/particle-os: $ublue_total total, $ublue_used used, $ublue_avail free ($ublue_perc)" - fi - fi - else - echo -e "${WARN} Disk: df not available" - health_status=1 - fi - # OverlayFS/ComposeFS - overlays=$(mount | grep overlay | wc -l) - composefs=$(mount | grep composefs | wc -l) - echo -e "${INFO} OverlayFS: $overlays overlays mounted" - echo -e "${INFO} ComposeFS: $composefs composefs mounted" - # Bootloader - if command -v bootctl &>/dev/null; then - boot_status=$(bootctl status 2>/dev/null | grep 'System:' | xargs) - echo -e "${INFO} Bootloader: ${boot_status:-N/A}" - else - echo -e "${WARN} Bootloader: bootctl not available" - fi - # Security - if command -v apparmor_status &>/dev/null; then - sec_status=$(apparmor_status | grep 'profiles are in enforce mode' || echo 'N/A') - echo -e "${INFO} Security: $sec_status" - else - echo -e "${WARN} Security: apparmor_status not available" - fi - # Layer Integrity/Deployment - echo -e "${CYAN}-----------------------------------------------------${NC}" - echo -e "${INFO} Layer Integrity: [Coming soon] (future: check layer hashes)" - echo -e "${INFO} Deployment Status: [Coming soon] (future: show active deployments)" - # Top processes - echo -e "${CYAN}---------------- Top 3 Processes ---------------------${NC}" - if command -v ps &>/dev/null; then - echo -e "${INFO} By CPU:" - ps -eo pid,comm,%cpu --sort=-%cpu | head -n 4 | tail -n 3 | awk '{printf " PID: %-6s %-20s CPU: %s%%\n", $1, $2, $3}' - echo -e "${INFO} By MEM:" - ps -eo pid,comm,%mem --sort=-%mem | head -n 4 | tail -n 3 | awk '{printf " PID: %-6s %-20s MEM: %s%%\n", $1, $2, $3}' - else - echo -e "${WARN} ps not available for process listing" - fi - echo -e "${CYAN}-----------------------------------------------------${NC}" - # Summary - if [ $health_status -eq 0 ]; then - echo -e "${GREEN}${CHECK} System health: OK${NC}" - else - echo -e "${YELLOW}${WARN} System health: WARNING (see above)${NC}" - fi - echo -e "${CYAN}=====================================================${NC}" -} - -# --- Performance Analytics --- -performance_report() { - echo -e "${CYAN}=============== Performance Analytics ===============${NC}" - echo -e "${INFO} Layer creation time (last 5): [Coming soon] (future: show timing logs)" - echo -e "${INFO} Resource usage (CPU/mem): [Coming soon] (future: show resource stats)" - if command -v iostat &>/dev/null; then - echo -e "${INFO} Disk I/O stats:" - iostat | grep -A1 Device | tail -n +2 - else - echo -e "${WARN} Disk I/O stats: iostat not available" - fi - echo -e "${INFO} Historical trends: [Coming soon] (future: show trends if data available)" - echo -e "${CYAN}=====================================================${NC}" -} - -# --- Automated Maintenance --- -admin_cleanup() { - # Defaults - local days=30 - local dry_run=false - local keep_recent=2 - local 
DEPLOYMENTS_DIR="/var/lib/particle-os/deployments" - local LOGS_DIR="/var/log/apt-layer" - local BACKUPS_DIR="/var/lib/particle-os/backups" - - # Load config from JSON if available - local config_file="$(dirname "${BASH_SOURCE[0]}")/../config/maintenance.json" - if [ -f "$config_file" ] && command -v jq &>/dev/null; then - days=$(jq -r '.retention_days // 30' "$config_file") - keep_recent=$(jq -r '.keep_recent // 2' "$config_file") - DEPLOYMENTS_DIR=$(jq -r '.deployments_dir // "/var/lib/particle-os/deployments"' "$config_file") - LOGS_DIR=$(jq -r '.logs_dir // "/var/log/apt-layer"' "$config_file") - BACKUPS_DIR=$(jq -r '.backups_dir // "/var/lib/particle-os/backups"' "$config_file") - fi - - # Parse arguments (override config) - while [[ $# -gt 0 ]]; do - case $1 in - --days|-d) - days="$2"; shift 2;; - --dry-run) - dry_run=true; shift;; - --keep-recent) - keep_recent="$2"; shift 2;; - --deployments-dir) - DEPLOYMENTS_DIR="$2"; shift 2;; - --logs-dir) - LOGS_DIR="$2"; shift 2;; - --backups-dir) - BACKUPS_DIR="$2"; shift 2;; - --schedule) - echo -e "${YELLOW}${WARN} Scheduled cleanup: Not yet implemented (will use systemd/cron)${NC}"; return;; - *) - shift;; - esac done - - echo -e "${CYAN}--- Automated Maintenance Cleanup ---${NC}" - echo -e "${INFO} Retention: $days days" - echo -e "${INFO} Keep recent: $keep_recent items" - echo -e "${INFO} Deployments dir: $DEPLOYMENTS_DIR" - echo -e "${INFO} Logs dir: $LOGS_DIR" - echo -e "${INFO} Backups dir: $BACKUPS_DIR" - if [ "$dry_run" = true ]; then - echo -e "${YELLOW}${WARN} DRY RUN MODE - No files will be deleted${NC}" - fi - - local total_deleted=0 - # Helper function to cleanup directory - cleanup_directory() { - local dir="$1" - local description="$2" - local deleted_count=0 - - if [ ! -d "$dir" ]; then - echo -e "${INFO} $description: Directory does not exist, skipping" - return - fi - - echo -e "${INFO} $description: Scanning $dir" - - # Get list of files/directories older than retention period - local old_items=() - if command -v find &>/dev/null; then - while IFS= read -r -d '' item; do - old_items+=("$item") - done < <(find "$dir" -maxdepth 1 -type f -o -type d -mtime +$days -print0 2>/dev/null) - fi - - # Remove the most recent items from deletion list - if [ ${#old_items[@]} -gt 0 ] && [ $keep_recent -gt 0 ]; then - # Sort by modification time (newest first) and keep the most recent - local sorted_items=($(printf '%s\n' "${old_items[@]}" | xargs -I {} stat -c '%Y %n' {} 2>/dev/null | sort -nr | tail -n +$((keep_recent + 1)) | awk '{print $2}')) - old_items=("${sorted_items[@]}") - fi - - if [ ${#old_items[@]} -eq 0 ]; then - echo -e "${INFO} $description: No items to delete" - return - fi - - echo -e "${INFO} $description: Found ${#old_items[@]} items to delete" - - for item in "${old_items[@]}"; do - if [ "$dry_run" = true ]; then - echo -e " ${YELLOW}Would delete: $item${NC}" - else - if rm -rf "$item" 2>/dev/null; then - echo -e " ${GREEN}Deleted: $item${NC}" - ((deleted_count++)) - else - echo -e " ${RED}Failed to delete: $item${NC}" - fi - fi - done - - if [ "$dry_run" = false ]; then - total_deleted=$((total_deleted + deleted_count)) - fi - } + # Create new deployment commit + local commit_id + commit_id=$(create_deployment_commit "$new_base_image" "${rebased_layers[@]}") - # Cleanup each directory - cleanup_directory "$DEPLOYMENTS_DIR" "Deployments" - cleanup_directory "$LOGS_DIR" "Logs" - cleanup_directory "$BACKUPS_DIR" "Backups" - - # Summary - if [ "$dry_run" = true ]; then - echo -e "${YELLOW}${WARN} Dry run completed - 
no files were deleted${NC}" + # Perform atomic deployment + if atomic_deploy "$commit_id"; then + log_success "System upgrade completed successfully" "apt-layer" + return 0 else - echo -e "${GREEN}${CHECK} Cleanup complete - $total_deleted items deleted${NC}" + log_error "System upgrade failed" "apt-layer" + return 1 fi - echo -e "${CYAN}-------------------------------------${NC}" } -# --- Backup/Restore (Stub) --- -admin_backup() { - echo -e "${YELLOW}${WARN} Backup: Not yet implemented${NC}" +# Find newer base image +find_newer_base_image() { + local current_base + current_base=$(jq -r ".deployments[\"$(get_current_deployment)\"].base_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") + + if [[ -z "$current_base" ]]; then + log_warning "No current base image found" "apt-layer" + return 1 + fi + + # List available base images and find newer ones + local available_bases + available_bases=($(composefs_list_images | grep "^ubuntu-ublue/base/" | sort -V)) + + for base in "${available_bases[@]}"; do + if [[ "$base" > "$current_base" ]]; then + echo "$base" + return 0 + fi + done + + return 1 } -admin_restore() { - echo -e "${YELLOW}${WARN} Restore: Not yet implemented${NC}" +# Create bootloader entry +create_bootloader_entry() { + local commit_id="$1" + local deployment_dir="$2" + + log_info "Creating bootloader entry for: $commit_id" "apt-layer" + + # Initialize bootloader system + init_bootloader_on_startup + + # Create bootloader entry using the comprehensive bootloader system + if create_bootloader_entry "$commit_id" "$deployment_dir" "Ubuntu uBlue ($commit_id)"; then + log_success "Bootloader entry created for: $commit_id" "apt-layer" + return 0 + else + log_error "Failed to create bootloader entry for: $commit_id" "apt-layer" + return 1 + fi } -# --- Command Dispatch --- -admin_utilities_main() { - case "${1:-}" in - health|health-check) - health_check - ;; - perf|performance|analytics) - performance_report - ;; - cleanup) - shift - admin_cleanup "$@" - ;; - backup) - admin_backup - ;; - restore) - admin_restore - ;; - help|--help|-h|"") - echo -e "${CYAN}Admin Utilities Commands:${NC}" - echo -e " ${GREEN}health${NC} - System health check" - echo -e " ${GREEN}perf${NC} - Performance analytics" - echo -e " ${GREEN}cleanup${NC} - Maintenance cleanup (--days N, --dry-run, --keep-recent N)" - echo -e " ${GREEN}backup${NC} - Backup configs/layers (stub)" - echo -e " ${GREEN}restore${NC} - Restore from backup (stub)" - echo -e " ${GREEN}help${NC} - Show this help message" - ;; - *) - echo -e "${RED}${CROSS} Unknown admin command: $1${NC}" - admin_utilities_main help - ;; - esac +# Show atomic deployment status +atomic_status() { + local current_deployment + current_deployment=$(get_current_deployment) + local pending_deployment + pending_deployment=$(get_pending_deployment) + + echo "=== Atomic Deployment Status ===" + echo "Current Deployment: ${current_deployment:-none}" + echo "Pending Deployment: ${pending_deployment:-none}" + + if [[ -n "$current_deployment" ]]; then + local commit_data + commit_data=$(jq -r ".deployments[\"$current_deployment\"]" "$DEPLOYMENT_DB" 2>/dev/null || echo "{}") + + if [[ "$commit_data" != "{}" ]]; then + echo "Deployment Type: $(echo "$commit_data" | jq -r '.commit_message')" + echo "Base Image: $(echo "$commit_data" | jq -r '.base_image')" + echo "Created: $(echo "$commit_data" | jq -r '.created')" + echo "Layers: $(echo "$commit_data" | jq -r '.layers | join(", ")')" + fi + fi + + if [[ -n "$pending_deployment" ]]; then + echo "�� Pending deployment will 
activate on next boot" + fi +} + +# List all deployments +list_deployments() { + echo "=== Deployment History ===" + + local deployments + deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r)) + + for commit_id in "${deployments[@]}"; do + local commit_data + commit_data=$(jq -r ".deployments[\"$commit_id\"]" "$DEPLOYMENT_DB") + + local status="" + if [[ "$commit_id" == "$(get_current_deployment)" ]]; then + status=" [CURRENT]" + elif [[ "$commit_id" == "$(get_pending_deployment)" ]]; then + status=" [PENDING]" + fi + + echo "$commit_id$status" + echo " Message: $(echo "$commit_data" | jq -r '.commit_message')" + echo " Created: $(echo "$commit_data" | jq -r '.created')" + echo " Base: $(echo "$commit_data" | jq -r '.base_image')" + echo "" + done +} + +# Rollback to specific commit +commit_rollback() { + local target_commit="$1" + + log_info "Rolling back to commit: $target_commit" "apt-layer" + + # Validate target commit exists + if ! jq -e ".deployments[\"$target_commit\"]" "$DEPLOYMENT_DB" >/dev/null 2>&1; then + log_error "Target commit not found: $target_commit" "apt-layer" + return 1 + fi + + # Perform atomic deployment to target commit + if atomic_deploy "$target_commit"; then + log_success "Rollback prepared to: $target_commit" "apt-layer" + log_info "Reboot to activate rollback" "apt-layer" + return 0 + else + log_error "Rollback failed" "apt-layer" + return 1 + fi } -# --- END OF SCRIPTLET: 14-admin-utilities.sh --- +# --- END OF SCRIPTLET: 09-atomic-deployment.sh --- # ============================================================================ -# Multi-Tenant Support (Enterprise Features) +# rpm-ostree Compatibility Layer # ============================================================================ +# rpm-ostree compatibility layer for Ubuntu uBlue apt-layer Tool +# Provides 1:1 command compatibility with rpm-ostree -# Multi-Tenant Support for apt-layer -# Enables enterprise deployments with multiple organizations, departments, or environments -# Provides tenant isolation, resource quotas, and cross-tenant management - -# Multi-tenant configuration -MULTI_TENANT_ENABLED="${MULTI_TENANT_ENABLED:-false}" -TENANT_ISOLATION_LEVEL="${TENANT_ISOLATION_LEVEL:-strict}" # strict, moderate, permissive -TENANT_RESOURCE_QUOTAS="${TENANT_RESOURCE_QUOTAS:-true}" -TENANT_CROSS_ACCESS="${TENANT_CROSS_ACCESS:-false}" - -# Tenant management functions -init_multi_tenant_system() { - log_info "Initializing multi-tenant system..." "multi-tenant" +# rpm-ostree install compatibility +rpm_ostree_install() { + local packages=("$@") - # Create tenant directories - local tenant_base="${WORKSPACE}/tenants" - mkdir -p "$tenant_base" - mkdir -p "$tenant_base/shared" - mkdir -p "$tenant_base/templates" + log_info "rpm-ostree install compatibility: ${packages[*]}" "apt-layer" - # Initialize tenant database - local tenant_db="$tenant_base/tenants.json" - if [[ ! 
-f "$tenant_db" ]]; then - cat > "$tenant_db" << 'EOF' -{ - "tenants": [], - "policies": { - "default_isolation": "strict", - "default_quotas": { - "max_layers": 100, - "max_storage_gb": 50, - "max_users": 10 - }, - "cross_tenant_access": false - }, - "metadata": { - "created": "", - "version": "1.0" - } -} -EOF - # Set creation timestamp - jq --arg created "$(date -Iseconds)" '.metadata.created = $created' "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db" - fi - - log_success "Multi-tenant system initialized" "multi-tenant" -} - -# Tenant creation and management -create_tenant() { - local tenant_name="$1" - local tenant_config="$2" - - if [[ -z "$tenant_name" ]]; then - log_error "Tenant name is required" "multi-tenant" - return 1 - fi - - # Validate tenant name - if [[ ! "$tenant_name" =~ ^[a-zA-Z0-9_-]+$ ]]; then - log_error "Invalid tenant name: $tenant_name (use alphanumeric, underscore, hyphen only)" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - local tenant_dir="$tenant_base/$tenant_name" - - # Check if tenant already exists - if jq -e ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db" > /dev/null 2>&1; then - log_error "Tenant '$tenant_name' already exists" "multi-tenant" - return 1 - fi - - # Create tenant directory structure - mkdir -p "$tenant_dir" - mkdir -p "$tenant_dir/layers" - mkdir -p "$tenant_dir/deployments" - mkdir -p "$tenant_dir/users" - mkdir -p "$tenant_dir/audit" - mkdir -p "$tenant_dir/backups" - mkdir -p "$tenant_dir/config" - - # Create tenant configuration - local tenant_config_file="$tenant_dir/config/tenant.json" - cat > "$tenant_config_file" << EOF -{ - "name": "$tenant_name", - "created": "$(date -Iseconds)", - "status": "active", - "isolation_level": "$TENANT_ISOLATION_LEVEL", - "quotas": { - "max_layers": 100, - "max_storage_gb": 50, - "max_users": 10, - "used_layers": 0, - "used_storage_gb": 0, - "used_users": 0 - }, - "policies": { - "allowed_packages": [], - "blocked_packages": [], - "security_level": "standard", - "audit_retention_days": 90 - }, - "integrations": { - "oci_registries": [], - "external_audit": null, - "monitoring": null - } -} -EOF - - # Merge custom configuration if provided - if [[ -n "$tenant_config" && -f "$tenant_config" ]]; then - if jq empty "$tenant_config" 2>/dev/null; then - jq -s '.[0] * .[1]' "$tenant_config_file" "$tenant_config" > "$tenant_config_file.tmp" && mv "$tenant_config_file.tmp" "$tenant_config_file" - else - log_warning "Invalid JSON in tenant configuration, using defaults" "multi-tenant" - fi - fi - - # Add tenant to database - local tenant_info - tenant_info=$(jq -r '.' "$tenant_config_file") - jq --arg name "$tenant_name" --argjson info "$tenant_info" '.tenants += [$info]' "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db" - - log_success "Tenant '$tenant_name' created successfully" "multi-tenant" - log_info "Tenant directory: $tenant_dir" "multi-tenant" -} - -# Tenant deletion -delete_tenant() { - local tenant_name="$1" - local force="${2:-false}" - - if [[ -z "$tenant_name" ]]; then - log_error "Tenant name is required" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - local tenant_dir="$tenant_base/$tenant_name" - - # Check if tenant exists - if ! 
jq -e ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db" > /dev/null 2>&1; then - log_error "Tenant '$tenant_name' does not exist" "multi-tenant" - return 1 - fi - - # Check for active resources - local active_layers=0 - local active_deployments=0 - - if [[ -d "$tenant_dir/layers" ]]; then - active_layers=$(find "$tenant_dir/layers" -name "*.squashfs" 2>/dev/null | wc -l) - fi - - if [[ -d "$tenant_dir/deployments" ]]; then - active_deployments=$(find "$tenant_dir/deployments" -name "*.json" 2>/dev/null | wc -l) - fi - - if [[ $active_layers -gt 0 || $active_deployments -gt 0 ]]; then - if [[ "$force" != "true" ]]; then - log_error "Tenant '$tenant_name' has active resources ($active_layers layers, $active_deployments deployments)" "multi-tenant" - log_error "Use --force to delete anyway" "multi-tenant" - return 1 - else - log_warning "Force deleting tenant with active resources" "multi-tenant" - fi - fi - - # Remove from database - jq --arg name "$tenant_name" 'del(.tenants[] | select(.name == $name))' "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db" - - # Remove tenant directory - if [[ -d "$tenant_dir" ]]; then - rm -rf "$tenant_dir" - fi - - log_success "Tenant '$tenant_name' deleted successfully" "multi-tenant" -} - -# Tenant listing and information -list_tenants() { - local format="${1:-table}" - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - if [[ ! -f "$tenant_db" ]]; then - log_error "Tenant database not found" "multi-tenant" - return 1 - fi - - case "$format" in - "json") - jq -r '.' "$tenant_db" - ;; - "csv") - echo "name,status,created,layers,storage_gb,users" - jq -r '.tenants[] | [.name, .status, .created, .quotas.used_layers, .quotas.used_storage_gb, .quotas.used_users] | @csv' "$tenant_db" - ;; - "table"|*) - echo "Tenants:" - echo "========" - jq -r '.tenants[] | "\(.name) (\(.status)) - Layers: \(.quotas.used_layers)/\(.quotas.max_layers), Storage: \(.quotas.used_storage_gb)GB/\(.quotas.max_storage_gb)GB"' "$tenant_db" - ;; - esac -} - -# Tenant information -get_tenant_info() { - local tenant_name="$1" - local format="${2:-json}" - - if [[ -z "$tenant_name" ]]; then - log_error "Tenant name is required" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - local tenant_info - tenant_info=$(jq -r ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db" 2>/dev/null) - - if [[ -z "$tenant_info" ]]; then - log_error "Tenant '$tenant_name' not found" "multi-tenant" - return 1 - fi - - case "$format" in - "json") - echo "$tenant_info" - ;; - "yaml") - echo "$tenant_info" | jq -r '.' 
| sed 's/^/ /' - ;; - "summary") - local name status created layers storage users - name=$(echo "$tenant_info" | jq -r '.name') - status=$(echo "$tenant_info" | jq -r '.status') - created=$(echo "$tenant_info" | jq -r '.created') - layers=$(echo "$tenant_info" | jq -r '.quotas.used_layers') - storage=$(echo "$tenant_info" | jq -r '.quotas.used_storage_gb') - users=$(echo "$tenant_info" | jq -r '.quotas.used_users') - - echo "Tenant: $name" - echo "Status: $status" - echo "Created: $created" - echo "Resources: $layers layers, ${storage}GB storage, $users users" - ;; - esac -} - -# Tenant quota management -update_tenant_quotas() { - local tenant_name="$1" - local quota_type="$2" - local value="$3" - - if [[ -z "$tenant_name" || -z "$quota_type" || -z "$value" ]]; then - log_error "Usage: update_tenant_quotas " "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - # Validate quota type - case "$quota_type" in - "max_layers"|"max_storage_gb"|"max_users") - ;; - *) - log_error "Invalid quota type: $quota_type" "multi-tenant" - log_error "Valid types: max_layers, max_storage_gb, max_users" "multi-tenant" - return 1 - ;; - esac - - # Update quota - jq --arg name "$tenant_name" --arg type "$quota_type" --arg value "$value" \ - '.tenants[] | select(.name == $name) | .quotas[$type] = ($value | tonumber)' "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db" - - log_success "Updated quota for tenant '$tenant_name': $quota_type = $value" "multi-tenant" -} - -# Tenant isolation and access control -check_tenant_access() { - local tenant_name="$1" - local user="$2" - local operation="$3" - - if [[ -z "$tenant_name" || -z "$user" || -z "$operation" ]]; then - log_error "Usage: check_tenant_access " "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - # Check if tenant exists - if ! jq -e ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db" > /dev/null 2>&1; then - log_error "Tenant '$tenant_name' not found" "multi-tenant" - return 1 - fi - - # Get tenant isolation level - local isolation_level - isolation_level=$(jq -r ".tenants[] | select(.name == \"$tenant_name\") | .isolation_level" "$tenant_db") - - # Check user access (simplified - in real implementation, this would check user roles) - local user_file="$tenant_base/$tenant_name/users/$user.json" - if [[ ! 
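# Caveat (sketch): '.tenants[] | select(...) | .quotas[$type] = ...' emits only
# the selected tenant object, so the database file written above would be
# replaced by that fragment. Parenthesizing the left-hand path keeps the whole
# document and updates it in place (update_tenant_usage has the same pattern):
jq --arg name "$tenant_name" --arg type "$quota_type" --arg value "$value" \
    '(.tenants[] | select(.name == $name) | .quotas[$type]) = ($value | tonumber)' \
    "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db"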
-f "$user_file" ]]; then - log_error "User '$user' not found in tenant '$tenant_name'" "multi-tenant" - return 1 - fi - - # Check operation permissions - local user_role - user_role=$(jq -r '.role' "$user_file" 2>/dev/null) - - case "$operation" in - "read") - [[ "$user_role" =~ ^(admin|package_manager|viewer)$ ]] && return 0 - ;; - "write") - [[ "$user_role" =~ ^(admin|package_manager)$ ]] && return 0 - ;; - "admin") - [[ "$user_role" == "admin" ]] && return 0 - ;; - *) - log_error "Unknown operation: $operation" "multi-tenant" - return 1 - ;; - esac - - log_error "Access denied: User '$user' with role '$user_role' cannot perform '$operation' operation" "multi-tenant" - return 1 -} - -# Tenant resource usage tracking -update_tenant_usage() { - local tenant_name="$1" - local resource_type="$2" - local amount="$3" - - if [[ -z "$tenant_name" || -z "$resource_type" || -z "$amount" ]]; then - log_error "Usage: update_tenant_usage " "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - # Update usage - jq --arg name "$tenant_name" --arg type "$resource_type" --arg amount "$amount" \ - '.tenants[] | select(.name == $name) | .quotas["used_" + $type] = (.quotas["used_" + $type] + ($amount | tonumber))' "$tenant_db" > "$tenant_db.tmp" && mv "$tenant_db.tmp" "$tenant_db" - - log_debug "Updated usage for tenant '$tenant_name': $resource_type += $amount" "multi-tenant" -} - -# Tenant quota enforcement -enforce_tenant_quotas() { - local tenant_name="$1" - local resource_type="$2" - local requested_amount="$3" - - if [[ -z "$tenant_name" || -z "$resource_type" || -z "$requested_amount" ]]; then - log_error "Usage: enforce_tenant_quotas " "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_db="$tenant_base/tenants.json" - - # Get current usage and quota - local current_usage max_quota - current_usage=$(jq -r ".tenants[] | select(.name == \"$tenant_name\") | .quotas.used_$resource_type" "$tenant_db") - max_quota=$(jq -r ".tenants[] | select(.name == \"$tenant_name\") | .quotas.max_$resource_type" "$tenant_db") - - # Check if request would exceed quota - local new_total=$((current_usage + requested_amount)) - if [[ $new_total -gt $max_quota ]]; then - log_error "Quota exceeded for tenant '$tenant_name': $resource_type" "multi-tenant" - log_error "Current: $current_usage, Requested: $requested_amount, Max: $max_quota" "multi-tenant" + # Use live overlay for package installation + if ! live_install "${packages[@]}"; then + log_error "rpm-ostree install failed" "apt-layer" return 1 fi + log_success "rpm-ostree install completed successfully" "apt-layer" return 0 } -# Cross-tenant operations (when enabled) -cross_tenant_operation() { - local source_tenant="$1" - local target_tenant="$2" - local operation="$3" - local user="$4" +# rpm-ostree upgrade compatibility +rpm_ostree_upgrade() { + log_info "rpm-ostree upgrade compatibility" "apt-layer" - if [[ "$TENANT_CROSS_ACCESS" != "true" ]]; then - log_error "Cross-tenant operations are disabled" "multi-tenant" + # Use true system upgrade (not package upgrade) + if ! 
system_upgrade; then + log_error "rpm-ostree upgrade failed" "apt-layer" return 1 fi - if [[ -z "$source_tenant" || -z "$target_tenant" || -z "$operation" || -z "$user" ]]; then - log_error "Usage: cross_tenant_operation <source_tenant> <target_tenant> <operation> <user>" "multi-tenant" + log_success "rpm-ostree upgrade completed successfully" "apt-layer" + return 0 +} + +# rpm-ostree rebase compatibility +rpm_ostree_rebase() { + local new_base="$1" + + log_info "rpm-ostree rebase compatibility: $new_base" "apt-layer" + + # Use intelligent rebase with conflict resolution + if ! intelligent_rebase "$new_base"; then + log_error "rpm-ostree rebase failed" "apt-layer" return 1 fi - # Check user has admin access to both tenants - if ! check_tenant_access "$source_tenant" "$user" "admin"; then - log_error "User '$user' lacks admin access to source tenant '$source_tenant'" "multi-tenant" + log_success "rpm-ostree rebase completed successfully" "apt-layer" + return 0 +} + +# rpm-ostree rollback compatibility +rpm_ostree_rollback() { + local target_commit="${1:-}" + + log_info "rpm-ostree rollback compatibility: ${target_commit:-latest}" "apt-layer" + + if [[ -z "$target_commit" ]]; then + # Rollback to previous deployment + target_commit=$(get_previous_deployment) + if [[ -z "$target_commit" ]]; then + log_error "No previous deployment found for rollback" "apt-layer" + return 1 + fi + fi + + # Use commit-based rollback + if ! commit_rollback "$target_commit"; then + log_error "rpm-ostree rollback failed" "apt-layer" return 1 fi - if ! check_tenant_access "$target_tenant" "$user" "admin"; then - log_error "User '$user' lacks admin access to target tenant '$target_tenant'" "multi-tenant" - return 1 + log_success "rpm-ostree rollback completed successfully" "apt-layer" + return 0 +} + +# rpm-ostree status compatibility +rpm_ostree_status() { + log_info "rpm-ostree status compatibility" "apt-layer" + + # Show atomic deployment status + atomic_status + + # Show live overlay status + echo "" + echo "=== Live Overlay Status ===" + get_live_overlay_status + + # Show package diff if pending deployment + local pending_deployment + pending_deployment=$(get_pending_deployment) + if [[ -n "$pending_deployment" ]]; then + echo "" + echo "=== Pending Changes ===" + show_package_diff "$(get_current_deployment)" "$pending_deployment" + fi +} + +# rpm-ostree diff compatibility +rpm_ostree_diff() { + local from_commit="${1:-}" + local to_commit="${2:-}" + + log_info "rpm-ostree diff compatibility: $from_commit -> $to_commit" "apt-layer" + + # If no commits specified, compare current to pending + if [[ -z "$from_commit" ]]; then + from_commit=$(get_current_deployment) + fi + if [[ -z "$to_commit" ]]; then + to_commit=$(get_pending_deployment) + if [[ -z "$to_commit" ]]; then + log_error "No target commit specified and no pending deployment" "apt-layer" + return 1 + fi fi - log_info "Cross-tenant operation: $operation from '$source_tenant' to '$target_tenant' by '$user'" "multi-tenant" + # Show package-level diff + show_package_diff "$from_commit" "$to_commit" +} + +# rpm-ostree db list compatibility +rpm_ostree_db_list() { + log_info "rpm-ostree db list compatibility" "apt-layer" - # Implement specific cross-tenant operations here - case "$operation" in - "copy_layer") - # Copy layer from source to target tenant - log_info "Copying layer between tenants..."
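# Minimal sketch (not authoritative) of how a rollback target is resolved when
# none is given: the parent commit of the current deployment is read from the
# deployment database (see get_previous_deployment further down in this patch).
current=$(get_current_deployment)
parent=$(jq -r ".deployments[\"$current\"].parent_commit // empty" "$DEPLOYMENT_DB")
[[ -n "$parent" ]] && echo "would roll back to: $parent"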
"multi-tenant" + # List all deployments + list_deployments +} + +# rpm-ostree db diff compatibility +rpm_ostree_db_diff() { + local from_commit="${1:-}" + local to_commit="${2:-}" + + log_info "rpm-ostree db diff compatibility: $from_commit -> $to_commit" "apt-layer" + + # If no commits specified, compare current to pending + if [[ -z "$from_commit" ]]; then + from_commit=$(get_current_deployment) + fi + if [[ -z "$to_commit" ]]; then + to_commit=$(get_pending_deployment) + if [[ -z "$to_commit" ]]; then + log_error "No target commit specified and no pending deployment" "apt-layer" + return 1 + fi + fi + + # Show detailed package diff + show_detailed_package_diff "$from_commit" "$to_commit" +} + +# rpm-ostree cleanup compatibility +rpm_ostree_cleanup() { + local purge="${1:-}" + + log_info "rpm-ostree cleanup compatibility: purge=$purge" "apt-layer" + + # Clean up old deployments + cleanup_old_deployments + + # Clean up old ComposeFS images + cleanup_old_composefs_images + + if [[ "$purge" == "--purge" ]]; then + # Also clean up old bootloader entries + cleanup_old_bootloader_entries + fi + + log_success "rpm-ostree cleanup completed successfully" "apt-layer" +} + +# rpm-ostree cancel compatibility +rpm_ostree_cancel() { + log_info "rpm-ostree cancel compatibility" "apt-layer" + + # Clear pending deployment + clear_pending_deployment + + # Clean up live overlay + stop_live_overlay + + log_success "rpm-ostree cancel completed successfully" "apt-layer" +} + +# rpm-ostree initramfs compatibility +rpm_ostree_initramfs() { + local action="${1:-}" + + log_info "rpm-ostree initramfs compatibility: $action" "apt-layer" + + case "$action" in + --enable) + enable_initramfs_rebuild ;; - "sync_config") - # Sync configuration between tenants - log_info "Syncing configuration between tenants..." "multi-tenant" + --disable) + disable_initramfs_rebuild + ;; + --rebuild) + rebuild_initramfs ;; *) - log_error "Unknown cross-tenant operation: $operation" "multi-tenant" + log_error "Invalid initramfs action: $action" "apt-layer" return 1 ;; esac } -# Tenant backup and restore -backup_tenant() { - local tenant_name="$1" - local backup_path="$2" - - if [[ -z "$tenant_name" ]]; then - log_error "Tenant name is required" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_dir="$tenant_base/$tenant_name" - - if [[ ! -d "$tenant_dir" ]]; then - log_error "Tenant directory not found: $tenant_dir" "multi-tenant" - return 1 - fi - - # Create backup - local backup_file - if [[ -n "$backup_path" ]]; then - backup_file="$backup_path" - else - backup_file="$tenant_dir/backups/tenant-${tenant_name}-$(date +%Y%m%d-%H%M%S).tar.gz" - fi - - mkdir -p "$(dirname "$backup_file")" - - tar -czf "$backup_file" -C "$tenant_base" "$tenant_name" - - log_success "Tenant '$tenant_name' backed up to: $backup_file" "multi-tenant" -} - -restore_tenant() { - local backup_file="$1" - local tenant_name="$2" - - if [[ -z "$backup_file" || -z "$tenant_name" ]]; then - log_error "Usage: restore_tenant " "multi-tenant" - return 1 - fi - - if [[ ! -f "$backup_file" ]]; then - log_error "Backup file not found: $backup_file" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_dir="$tenant_base/$tenant_name" - - # Check if tenant already exists - if [[ -d "$tenant_dir" ]]; then - log_error "Tenant '$tenant_name' already exists. Delete it first or use a different name." 
"multi-tenant" - return 1 - fi - - # Restore tenant - tar -xzf "$backup_file" -C "$tenant_base" - - log_success "Tenant '$tenant_name' restored from: $backup_file" "multi-tenant" -} - -# Tenant health check -check_tenant_health() { - local tenant_name="$1" - - if [[ -z "$tenant_name" ]]; then - log_error "Tenant name is required" "multi-tenant" - return 1 - fi - - local tenant_base="${WORKSPACE}/tenants" - local tenant_dir="$tenant_base/$tenant_name" - local tenant_db="$tenant_base/tenants.json" - - echo "Tenant Health Check: $tenant_name" - echo "================================" - - # Check tenant exists - if [[ ! -d "$tenant_dir" ]]; then - echo " Tenant directory not found" - return 1 - fi - - if ! jq -e ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db" > /dev/null 2>&1; then - echo " Tenant not found in database" - return 1 - fi - - echo " Tenant exists" - - # Check directory structure - local missing_dirs=() - for dir in layers deployments users audit backups config; do - if [[ ! -d "$tenant_dir/$dir" ]]; then - missing_dirs+=("$dir") - fi - done - - if [[ ${#missing_dirs[@]} -gt 0 ]]; then - echo " Missing directories: ${missing_dirs[*]}" - else - echo " Directory structure complete" - fi - - # Check quota usage - local tenant_info - tenant_info=$(jq -r ".tenants[] | select(.name == \"$tenant_name\")" "$tenant_db") - - local layers_used layers_max storage_used storage_max - layers_used=$(echo "$tenant_info" | jq -r '.quotas.used_layers') - layers_max=$(echo "$tenant_info" | jq -r '.quotas.max_layers') - storage_used=$(echo "$tenant_info" | jq -r '.quotas.used_storage_gb') - storage_max=$(echo "$tenant_info" | jq -r '.quotas.max_storage_gb') - - echo " Resource Usage:" - echo " Layers: $layers_used/$layers_max" - echo " Storage: ${storage_used}GB/${storage_max}GB" - - # Check for quota warnings - local layer_percent=$((layers_used * 100 / layers_max)) - local storage_percent=$((storage_used * 100 / storage_max)) - - if [[ $layer_percent -gt 80 ]]; then - echo " Layer quota usage high: ${layer_percent}%" - fi - - if [[ $storage_percent -gt 80 ]]; then - echo " Storage quota usage high: ${storage_percent}%" - fi - - echo " Tenant health check complete" -} - -# Multi-tenant command handler -handle_multi_tenant_command() { - local command="$1" +# rpm-ostree kargs compatibility +rpm_ostree_kargs() { + local action="${1:-}" shift - case "$command" in - "init") - init_multi_tenant_system + log_info "rpm-ostree kargs compatibility: $action" "apt-layer" + + case "$action" in + --get) + get_kernel_args ;; - "create") - local tenant_name="$1" - local config_file="$2" - create_tenant "$tenant_name" "$config_file" + --set) + set_kernel_args "$@" ;; - "delete") - local tenant_name="$1" - local force="$2" - delete_tenant "$tenant_name" "$force" + --append) + append_kernel_args "$@" ;; - "list") - local format="$1" - list_tenants "$format" + --delete) + delete_kernel_args "$@" ;; - "info") - local tenant_name="$1" - local format="$2" - get_tenant_info "$tenant_name" "$format" + --reset) + reset_kernel_args ;; - "quota") - local tenant_name="$1" - local quota_type="$2" - local value="$3" - update_tenant_quotas "$tenant_name" "$quota_type" "$value" - ;; - "backup") - local tenant_name="$1" - local backup_path="$2" - backup_tenant "$tenant_name" "$backup_path" - ;; - "restore") - local backup_file="$1" - local tenant_name="$2" - restore_tenant "$backup_file" "$tenant_name" - ;; - "health") - local tenant_name="$1" - check_tenant_health "$tenant_name" - ;; - "help"|*) - echo "Multi-Tenant 
Commands:" - echo "=====================" - echo " init - Initialize multi-tenant system" - echo " create [config_file] - Create new tenant" - echo " delete [--force] - Delete tenant" - echo " list [format] - List tenants (json|csv|table)" - echo " info [format] - Get tenant info (json|yaml|summary)" - echo " quota - Update tenant quota" - echo " backup [path] - Backup tenant" - echo " restore - Restore tenant" - echo " health - Check tenant health" - echo " help - Show this help" + *) + log_error "Invalid kargs action: $action" "apt-layer" + return 1 ;; esac +} + +# rpm-ostree usroverlay compatibility +rpm_ostree_usroverlay() { + local action="${1:-}" + + log_info "rpm-ostree usroverlay compatibility: $action" "apt-layer" + + case "$action" in + --mount) + mount_usr_overlay + ;; + --unmount) + unmount_usr_overlay + ;; + --status) + usr_overlay_status + ;; + *) + log_error "Invalid usroverlay action: $action" "apt-layer" + return 1 + ;; + esac +} + +# rpm-ostree composefs compatibility +rpm_ostree_composefs() { + local action="${1:-}" + shift + + log_info "rpm-ostree composefs compatibility: $action" "apt-layer" + + case "$action" in + --mount) + composefs_mount "$@" + ;; + --unmount) + composefs_unmount "$@" + ;; + --list) + composefs_list_images + ;; + --info) + composefs_image_info "$@" + ;; + *) + log_error "Invalid composefs action: $action" "apt-layer" + return 1 + ;; + esac +} + +# Helper functions for rpm-ostree compatibility + +# Get previous deployment +get_previous_deployment() { + local current_deployment + current_deployment=$(get_current_deployment) + + if [[ -n "$current_deployment" ]]; then + local parent_commit + parent_commit=$(jq -r ".deployments[\"$current_deployment\"].parent_commit" "$DEPLOYMENT_DB" 2>/dev/null || echo "") + echo "$parent_commit" + fi +} + +# Show package diff between commits +show_package_diff() { + local from_commit="$1" + local to_commit="$2" + + log_info "Showing package diff: $from_commit -> $to_commit" "apt-layer" + + # Get package lists from both commits + local from_packages=() + local to_packages=() + + if [[ -n "$from_commit" ]]; then + from_packages=($(get_packages_from_commit "$from_commit")) + fi + + if [[ -n "$to_commit" ]]; then + to_packages=($(get_packages_from_commit "$to_commit")) + fi + + # Calculate differences + local added_packages=() + local removed_packages=() + local updated_packages=() + + # Find added packages + for pkg in "${to_packages[@]}"; do + if [[ ! " ${from_packages[*]} " =~ " ${pkg} " ]]; then + added_packages+=("$pkg") + fi + done + + # Find removed packages + for pkg in "${from_packages[@]}"; do + if [[ ! 
" ${to_packages[*]} " =~ " ${pkg} " ]]; then + removed_packages+=("$pkg") + fi + done + + # Show results + if [[ ${#added_packages[@]} -gt 0 ]]; then + echo "Added packages:" + printf " %s\n" "${added_packages[@]}" + fi + + if [[ ${#removed_packages[@]} -gt 0 ]]; then + echo "Removed packages:" + printf " %s\n" "${removed_packages[@]}" + fi + + if [[ ${#added_packages[@]} -eq 0 ]] && [[ ${#removed_packages[@]} -eq 0 ]]; then + echo "No package changes detected" + fi +} + +# Get packages from commit +get_packages_from_commit() { + local commit_id="$1" + local composefs_image + + # Get ComposeFS image name + composefs_image=$(jq -r ".deployments[\"$commit_id\"].composefs_image" "$DEPLOYMENT_DB" 2>/dev/null || echo "") + + if [[ -z "$composefs_image" ]]; then + return 1 + fi + + # Mount and extract package list + local temp_mount="/tmp/apt-layer-commit-$$" + mkdir -p "$temp_mount" + + if composefs_mount "$composefs_image" "$temp_mount"; then + # Extract package list + chroot "$temp_mount" dpkg -l | grep '^ii' | awk '{print $2}' 2>/dev/null || true + + # Cleanup + composefs_unmount "$temp_mount" + rmdir "$temp_mount" + fi +} + +# Cleanup functions +cleanup_old_deployments() { + log_info "Cleaning up old deployments..." "apt-layer" + + # Keep last 5 deployments + local deployments + deployments=($(jq -r '.deployments | keys[]' "$DEPLOYMENT_DB" 2>/dev/null | sort -r | tail -n +6)) + + for commit_id in "${deployments[@]}"; do + log_info "Removing old deployment: $commit_id" "apt-layer" + + # Remove from database + jq --arg commit_id "$commit_id" 'del(.deployments[$commit_id])' \ + "$DEPLOYMENT_DB" > "${DEPLOYMENT_DB}.tmp" && mv "${DEPLOYMENT_DB}.tmp" "$DEPLOYMENT_DB" + + # Remove history file + rm -f "$DEPLOYMENT_HISTORY_DIR/$commit_id.json" + + # Remove deployment directory + rm -rf "/var/lib/particle-os/deployments/$commit_id" + done +} + +cleanup_old_composefs_images() { + log_info "Cleaning up old ComposeFS images..." "apt-layer" + + # Get list of images still referenced by deployments + local referenced_images + referenced_images=($(jq -r '.deployments[].composefs_image' "$DEPLOYMENT_DB" 2>/dev/null || true)) + + # Get all ComposeFS images + local all_images + all_images=($(composefs_list_images)) + + # Remove unreferenced images + for image in "${all_images[@]}"; do + if [[ ! " ${referenced_images[*]} " =~ " ${image} " ]]; then + log_info "Removing unreferenced image: $image" "apt-layer" + composefs_remove_image "$image" + fi + done +} + +cleanup_old_bootloader_entries() { + log_info "Cleaning up old bootloader entries..." 
"apt-layer" + + # Get current and pending deployments + local current_deployment + current_deployment=$(get_current_deployment) + local pending_deployment + pending_deployment=$(get_pending_deployment) + + # Remove old bootloader entries + local boot_dir="/boot/loader/entries" + for entry in "$boot_dir"/apt-layer-*.conf; do + if [[ -f "$entry" ]]; then + local commit_id + commit_id=$(basename "$entry" .conf | sed 's/apt-layer-//') + + # Keep current and pending deployments + if [[ "$commit_id" != "$current_deployment" ]] && [[ "$commit_id" != "$pending_deployment" ]]; then + log_info "Removing old bootloader entry: $entry" "apt-layer" + rm -f "$entry" + fi + fi + done } -# --- END OF SCRIPTLET: 15-multi-tenant.sh --- +# --- END OF SCRIPTLET: 10-rpm-ostree-compat.sh --- # ============================================================================ # OSTree Atomic Package Management @@ -11131,6316 +5732,6 @@ ostree_cleanup() { # --- END OF SCRIPTLET: 15-ostree-atomic.sh --- -# ============================================================================ -# Advanced Compliance Frameworks (Enterprise Features) -# ============================================================================ - -# Advanced Compliance Frameworks for apt-layer -# Provides comprehensive compliance capabilities for enterprise deployments -# Supports multiple compliance standards with automated reporting and validation - -# Compliance framework configuration -COMPLIANCE_ENABLED="${COMPLIANCE_ENABLED:-true}" -COMPLIANCE_LEVEL="${COMPLIANCE_LEVEL:-enterprise}" # basic, enterprise, strict -COMPLIANCE_AUTO_SCAN="${COMPLIANCE_AUTO_SCAN:-true}" -COMPLIANCE_REPORTING="${COMPLIANCE_REPORTING:-true}" - -# Supported compliance frameworks -SUPPORTED_FRAMEWORKS=( - "SOX" # Sarbanes-Oxley Act - "PCI-DSS" # Payment Card Industry Data Security Standard - "HIPAA" # Health Insurance Portability and Accountability Act - "GDPR" # General Data Protection Regulation - "ISO-27001" # Information Security Management - "NIST-CSF" # NIST Cybersecurity Framework - "CIS" # Center for Internet Security Controls - "FEDRAMP" # Federal Risk and Authorization Management Program - "SOC-2" # Service Organization Control 2 - "CMMC" # Cybersecurity Maturity Model Certification -) - -# Compliance framework initialization -init_compliance_frameworks() { - log_info "Initializing advanced compliance frameworks..." "compliance" - - # Create compliance directories - local compliance_base="${WORKSPACE}/compliance" - mkdir -p "$compliance_base" - mkdir -p "$compliance_base/frameworks" - mkdir -p "$compliance_base/reports" - mkdir -p "$compliance_base/templates" - mkdir -p "$compliance_base/evidence" - mkdir -p "$compliance_base/controls" - - # Initialize compliance database - local compliance_db="$compliance_base/compliance.json" - if [[ ! 
-f "$compliance_db" ]]; then - cat > "$compliance_db" << 'EOF' -{ - "frameworks": {}, - "controls": {}, - "evidence": {}, - "reports": {}, - "metadata": { - "created": "", - "version": "1.0", - "last_scan": null - } -} -EOF - # Set creation timestamp - jq --arg created "$(date -Iseconds)" '.metadata.created = $created' "$compliance_db" > "$compliance_db.tmp" && mv "$compliance_db.tmp" "$compliance_db" - fi - - # Initialize framework templates - init_framework_templates - - log_success "Advanced compliance frameworks initialized" "compliance" -} - -# Initialize framework templates -init_framework_templates() { - local templates_dir="${WORKSPACE}/compliance/templates" - - # SOX Template - cat > "$templates_dir/sox.json" << 'EOF' -{ - "name": "SOX", - "version": "2024", - "description": "Sarbanes-Oxley Act Compliance", - "controls": { - "SOX-001": { - "title": "Access Control", - "description": "Ensure proper access controls are in place", - "category": "Access Management", - "severity": "high", - "requirements": [ - "User authentication and authorization", - "Role-based access control", - "Access logging and monitoring" - ] - }, - "SOX-002": { - "title": "Change Management", - "description": "Implement proper change management procedures", - "category": "Change Management", - "severity": "high", - "requirements": [ - "Change approval process", - "Change documentation", - "Change testing and validation" - ] - }, - "SOX-003": { - "title": "Data Integrity", - "description": "Ensure data integrity and accuracy", - "category": "Data Management", - "severity": "critical", - "requirements": [ - "Data validation", - "Backup and recovery", - "Audit trails" - ] - } - } -} -EOF - - # PCI-DSS Template - cat > "$templates_dir/pci-dss.json" << 'EOF' -{ - "name": "PCI-DSS", - "version": "4.0", - "description": "Payment Card Industry Data Security Standard", - "controls": { - "PCI-001": { - "title": "Build and Maintain a Secure Network", - "description": "Install and maintain a firewall configuration", - "category": "Network Security", - "severity": "critical", - "requirements": [ - "Firewall configuration", - "Network segmentation", - "Security testing" - ] - }, - "PCI-002": { - "title": "Protect Cardholder Data", - "description": "Protect stored cardholder data", - "category": "Data Protection", - "severity": "critical", - "requirements": [ - "Data encryption", - "Key management", - "Data retention policies" - ] - }, - "PCI-003": { - "title": "Maintain Vulnerability Management", - "description": "Use and regularly update anti-virus software", - "category": "Vulnerability Management", - "severity": "high", - "requirements": [ - "Anti-virus software", - "Vulnerability scanning", - "Patch management" - ] - } - } -} -EOF - - # HIPAA Template - cat > "$templates_dir/hipaa.json" << 'EOF' -{ - "name": "HIPAA", - "version": "2024", - "description": "Health Insurance Portability and Accountability Act", - "controls": { - "HIPAA-001": { - "title": "Administrative Safeguards", - "description": "Implement administrative safeguards for PHI", - "category": "Administrative", - "severity": "critical", - "requirements": [ - "Security officer designation", - "Workforce training", - "Incident response procedures" - ] - }, - "HIPAA-002": { - "title": "Physical Safeguards", - "description": "Implement physical safeguards for PHI", - "category": "Physical", - "severity": "high", - "requirements": [ - "Facility access controls", - "Workstation security", - "Device and media controls" - ] - }, - "HIPAA-003": { - "title": 
"Technical Safeguards", - "description": "Implement technical safeguards for PHI", - "category": "Technical", - "severity": "critical", - "requirements": [ - "Access control", - "Audit controls", - "Transmission security" - ] - } - } -} -EOF - - # GDPR Template - cat > "$templates_dir/gdpr.json" << 'EOF' -{ - "name": "GDPR", - "version": "2018", - "description": "General Data Protection Regulation", - "controls": { - "GDPR-001": { - "title": "Data Protection by Design", - "description": "Implement data protection by design and by default", - "category": "Privacy by Design", - "severity": "high", - "requirements": [ - "Privacy impact assessments", - "Data minimization", - "Default privacy settings" - ] - }, - "GDPR-002": { - "title": "Data Subject Rights", - "description": "Ensure data subject rights are protected", - "category": "Data Subject Rights", - "severity": "critical", - "requirements": [ - "Right to access", - "Right to rectification", - "Right to erasure" - ] - }, - "GDPR-003": { - "title": "Data Breach Notification", - "description": "Implement data breach notification procedures", - "category": "Incident Response", - "severity": "high", - "requirements": [ - "Breach detection", - "Notification procedures", - "Documentation requirements" - ] - } - } -} -EOF - - # ISO-27001 Template - cat > "$templates_dir/iso-27001.json" << 'EOF' -{ - "name": "ISO-27001", - "version": "2022", - "description": "Information Security Management System", - "controls": { - "ISO-001": { - "title": "Information Security Policies", - "description": "Define information security policies", - "category": "Policies", - "severity": "high", - "requirements": [ - "Policy framework", - "Policy review", - "Policy communication" - ] - }, - "ISO-002": { - "title": "Organization of Information Security", - "description": "Establish information security organization", - "category": "Organization", - "severity": "high", - "requirements": [ - "Security roles", - "Segregation of duties", - "Contact with authorities" - ] - }, - "ISO-003": { - "title": "Human Resource Security", - "description": "Ensure security in human resources", - "category": "Human Resources", - "severity": "medium", - "requirements": [ - "Screening", - "Terms and conditions", - "Security awareness" - ] - } - } -} -EOF - - log_info "Framework templates initialized" "compliance" -} - -# Framework management functions -enable_framework() { - local framework_name="$1" - local config_file="$2" - - if [[ -z "$framework_name" ]]; then - log_error "Framework name is required" "compliance" - return 1 - fi - - # Validate framework name - local valid_framework=false - for framework in "${SUPPORTED_FRAMEWORKS[@]}"; do - if [[ "$framework" == "$framework_name" ]]; then - valid_framework=true - break - fi - done - - if [[ "$valid_framework" != "true" ]]; then - log_error "Unsupported framework: $framework_name" "compliance" - log_info "Supported frameworks: ${SUPPORTED_FRAMEWORKS[*]}" "compliance" - return 1 - fi - - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - local template_file="$compliance_base/templates/${framework_name,,}.json" - - # Check if framework template exists - if [[ ! -f "$template_file" ]]; then - log_error "Framework template not found: $template_file" "compliance" - return 1 - fi - - # Load template - local template_data - template_data=$(jq -r '.' 
"$template_file") - - # Merge custom configuration if provided - if [[ -n "$config_file" && -f "$config_file" ]]; then - if jq empty "$config_file" 2>/dev/null; then - template_data=$(jq -s '.[0] * .[1]' <(echo "$template_data") "$config_file") - else - log_warning "Invalid JSON in framework configuration, using template defaults" "compliance" - fi - fi - - # Add framework to database - jq --arg name "$framework_name" --argjson data "$template_data" \ - '.frameworks[$name] = $data' "$compliance_db" > "$compliance_db.tmp" && mv "$compliance_db.tmp" "$compliance_db" - - log_success "Framework '$framework_name' enabled successfully" "compliance" -} - -disable_framework() { - local framework_name="$1" - - if [[ -z "$framework_name" ]]; then - log_error "Framework name is required" "compliance" - return 1 - fi - - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - - # Remove framework from database - jq --arg name "$framework_name" 'del(.frameworks[$name])' "$compliance_db" > "$compliance_db.tmp" && mv "$compliance_db.tmp" "$compliance_db" - - log_success "Framework '$framework_name' disabled successfully" "compliance" -} - -list_frameworks() { - local format="${1:-table}" - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - - if [[ ! -f "$compliance_db" ]]; then - log_error "Compliance database not found" "compliance" - return 1 - fi - - case "$format" in - "json") - jq -r '.frameworks' "$compliance_db" - ;; - "csv") - echo "framework,version,description,controls_count" - jq -r '.frameworks | to_entries[] | [.key, .value.version, .value.description, (.value.controls | length)] | @csv' "$compliance_db" - ;; - "table"|*) - echo "Enabled Compliance Frameworks:" - echo "==============================" - jq -r '.frameworks | to_entries[] | "\(.key) (\(.value.version)) - \(.value.description)"' "$compliance_db" - ;; - esac -} - -# Compliance scanning and assessment -run_compliance_scan() { - local framework_name="$1" - local scan_level="${2:-standard}" # quick, standard, thorough - - if [[ -z "$framework_name" ]]; then - log_error "Framework name is required" "compliance" - return 1 - fi - - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - - # Check if framework is enabled - if ! 
jq -e ".frameworks[\"$framework_name\"]" "$compliance_db" > /dev/null 2>&1; then - log_error "Framework '$framework_name' is not enabled" "compliance" - return 1 - fi - - log_info "Running compliance scan for framework: $framework_name (level: $scan_level)" "compliance" - - # Create scan report - local scan_id="scan-$(date +%Y%m%d-%H%M%S)" - local report_file="$compliance_base/reports/${framework_name}-${scan_id}.json" - - # Initialize report structure - local report_data - report_data=$(cat << 'EOF' -{ - "scan_id": "$scan_id", - "framework": "$framework_name", - "scan_level": "$scan_level", - "timestamp": "$(date -Iseconds)", - "results": {}, - "summary": { - "total_controls": 0, - "passed": 0, - "failed": 0, - "warnings": 0, - "not_applicable": 0 - } -} -EOF -) - - # Get framework controls - local controls - controls=$(jq -r ".frameworks[\"$framework_name\"].controls" "$compliance_db") - - # Scan each control - local total_controls=0 - local passed_controls=0 - local failed_controls=0 - local warning_controls=0 - local na_controls=0 - - while IFS= read -r control_id; do - if [[ -n "$control_id" ]]; then - total_controls=$((total_controls + 1)) - - # Assess control compliance - local control_result - control_result=$(assess_control_compliance "$framework_name" "$control_id" "$scan_level") - - # Parse result - local status - status=$(echo "$control_result" | jq -r '.status') - - case "$status" in - "PASS") - passed_controls=$((passed_controls + 1)) - ;; - "FAIL") - failed_controls=$((failed_controls + 1)) - ;; - "WARNING") - warning_controls=$((warning_controls + 1)) - ;; - "N/A") - na_controls=$((na_controls + 1)) - ;; - esac - - # Add to report - report_data=$(echo "$report_data" | jq --arg id "$control_id" --argjson result "$control_result" '.results[$id] = $result') - fi - done < <(echo "$controls" | jq -r 'keys[]') - - # Update summary - report_data=$(echo "$report_data" | jq --argjson total $total_controls --argjson passed $passed_controls --argjson failed $failed_controls --argjson warnings $warning_controls --argjson na $na_controls \ - '.summary.total_controls = $total | .summary.passed = $passed | .summary.failed = $failed | .summary.warnings = $warnings | .summary.not_applicable = $na') - - # Save report - echo "$report_data" > "$report_file" - - # Update compliance database - jq --arg framework "$framework_name" --arg scan_id "$scan_id" --arg report_file "$report_file" \ - '.reports[$framework] = {"last_scan": $scan_id, "report_file": $report_file}' "$compliance_db" > "$compliance_db.tmp" && mv "$compliance_db.tmp" "$compliance_db" - - log_success "Compliance scan completed: $scan_id" "compliance" - log_info "Report saved to: $report_file" "compliance" - - # Print summary - echo "Compliance Scan Summary:" - echo "========================" - echo "Framework: $framework_name" - echo "Scan Level: $scan_level" - echo "Total Controls: $total_controls" - echo "Passed: $passed_controls" - echo "Failed: $failed_controls" - echo "Warnings: $warning_controls" - echo "Not Applicable: $na_controls" - - return 0 -} - -# Control assessment -assess_control_compliance() { - local framework_name="$1" - local control_id="$2" - local scan_level="$3" - - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - - # Get control details - local control_info - control_info=$(jq -r ".frameworks[\"$framework_name\"].controls[\"$control_id\"]" "$compliance_db") - - local control_title - control_title=$(echo "$control_info" | jq -r '.title') - local 
control_category - control_category=$(echo "$control_info" | jq -r '.category') - local control_severity - control_severity=$(echo "$control_info" | jq -r '.severity') - - # Perform control-specific assessment - local status="PASS" - local evidence="" - local findings="" - - case "$control_id" in - "SOX-001"|"PCI-001"|"HIPAA-003"|"ISO-002") - # Access Control assessment - if check_access_controls; then - status="PASS" - evidence="Access controls properly configured" - else - status="FAIL" - evidence="Access controls not properly configured" - findings="Missing role-based access control implementation" - fi - ;; - "SOX-002"|"PCI-003"|"ISO-001") - # Change Management assessment - if check_change_management; then - status="PASS" - evidence="Change management procedures in place" - else - status="WARNING" - evidence="Change management procedures need improvement" - findings="Documentation of change procedures incomplete" - fi - ;; - "SOX-003"|"PCI-002"|"HIPAA-002") - # Data Protection assessment - if check_data_protection; then - status="PASS" - evidence="Data protection measures implemented" - else - status="FAIL" - evidence="Data protection measures insufficient" - findings="Encryption not properly configured" - fi - ;; - "GDPR-001"|"GDPR-002"|"GDPR-003") - # Privacy assessment - if check_privacy_controls; then - status="PASS" - evidence="Privacy controls implemented" - else - status="WARNING" - evidence="Privacy controls need enhancement" - findings="Data minimization not fully implemented" - fi - ;; - "HIPAA-001") - # Administrative safeguards - if check_administrative_safeguards; then - status="PASS" - evidence="Administrative safeguards in place" - else - status="FAIL" - evidence="Administrative safeguards missing" - findings="Security officer not designated" - fi - ;; - *) - # Default assessment - status="N/A" - evidence="Control not implemented in assessment engine" - findings="Manual assessment required" - ;; - esac - - # Create result JSON - cat << 'EOF' -{ - "control_id": "$control_id", - "title": "$control_title", - "category": "$control_category", - "severity": "$control_severity", - "status": "$status", - "evidence": "$evidence", - "findings": "$findings", - "assessment_time": "$(date -Iseconds)" -} -EOF -} - -# Control check functions (stubs for now) -check_access_controls() { - # Check if access controls are properly configured - # This would check user management, role assignments, etc. - local user_count - user_count=$(jq -r '.users | length' "${WORKSPACE}/users.json" 2>/dev/null || echo "0") - - if [[ $user_count -gt 0 ]]; then - return 0 # Pass - else - return 1 # Fail - fi -} - -check_change_management() { - # Check if change management procedures are in place - # This would check for change logs, approval processes, etc. - local audit_logs - audit_logs=$(find "${WORKSPACE}/audit" -name "*.log" 2>/dev/null | wc -l) - - if [[ $audit_logs -gt 0 ]]; then - return 0 # Pass - else - return 1 # Fail - fi -} - -check_data_protection() { - # Check if data protection measures are implemented - # This would check encryption, backup procedures, etc. - local backup_count - backup_count=$(find "${WORKSPACE}/backups" -name "*.tar.gz" 2>/dev/null | wc -l) - - if [[ $backup_count -gt 0 ]]; then - return 0 # Pass - else - return 1 # Fail - fi -} - -check_privacy_controls() { - # Check if privacy controls are implemented - # This would check data minimization, consent management, etc. 
- # For now, return pass if audit system is enabled - if [[ "$COMPLIANCE_ENABLED" == "true" ]]; then - return 0 # Pass - else - return 1 # Fail - fi -} - -check_administrative_safeguards() { - # Check if administrative safeguards are in place - # This would check security officer designation, training, etc. - # For now, return pass if compliance system is initialized - local compliance_db="${WORKSPACE}/compliance/compliance.json" - if [[ -f "$compliance_db" ]]; then - return 0 # Pass - else - return 1 # Fail - fi -} - -# Compliance reporting -generate_compliance_report() { - local framework_name="$1" - local report_format="${2:-html}" - local report_period="${3:-monthly}" - - if [[ -z "$framework_name" ]]; then - log_error "Framework name is required" "compliance" - return 1 - fi - - local compliance_base="${WORKSPACE}/compliance" - local compliance_db="$compliance_base/compliance.json" - - # Check if framework is enabled - if ! jq -e ".frameworks[\"$framework_name\"]" "$compliance_db" > /dev/null 2>&1; then - log_error "Framework '$framework_name' is not enabled" "compliance" - return 1 - fi - - # Get latest scan report - local report_file - report_file=$(jq -r ".reports[\"$framework_name\"].report_file" "$compliance_db" 2>/dev/null) - - if [[ -z "$report_file" || "$report_file" == "null" ]]; then - log_error "No scan report found for framework '$framework_name'" "compliance" - log_info "Run a compliance scan first: compliance scan $framework_name" "compliance" - return 1 - fi - - if [[ ! -f "$report_file" ]]; then - log_error "Report file not found: $report_file" "compliance" - return 1 - fi - - # Generate report based on format - case "$report_format" in - "html") - generate_html_compliance_report "$framework_name" "$report_file" - ;; - "json") - generate_json_compliance_report "$framework_name" "$report_file" - ;; - "pdf") - generate_pdf_compliance_report "$framework_name" "$report_file" - ;; - *) - log_error "Unsupported report format: $report_format" "compliance" - return 1 - ;; - esac -} - -generate_html_compliance_report() { - local framework_name="$1" - local report_file="$2" - - local report_data - report_data=$(jq -r '.' "$report_file") - - local output_file="${WORKSPACE}/compliance/reports/${framework_name}-report-$(date +%Y%m%d).html" - - # Generate HTML report - cat > "$output_file" << 'EOF' - - - - Compliance Report - $framework_name - - - -
-<body>
-    <h1>Compliance Report - $framework_name</h1>
-    <p>Generated: $(date)</p>
-    <p>Scan ID: $(echo "$report_data" | jq -r '.scan_id')</p>
-
-    <h2>Summary</h2>
-    <p>Total Controls: $(echo "$report_data" | jq -r '.summary.total_controls')</p>
-    <p>Passed: $(echo "$report_data" | jq -r '.summary.passed')</p>
-    <p>Failed: $(echo "$report_data" | jq -r '.summary.failed')</p>
-    <p>Warnings: $(echo "$report_data" | jq -r '.summary.warnings')</p>
-    <p>Not Applicable: $(echo "$report_data" | jq -r '.summary.not_applicable')</p>
-
-    <h2>Control Results</h2>
-EOF
-
-    # Add control results
-    echo "$report_data" | jq -r '.results | to_entries[] | "\(.key): \(.value.status)"' | while IFS=':' read -r control_id status; do
-        local control_data
-        control_data=$(echo "$report_data" | jq -r ".results[\"$control_id\"]")
-        local title
-        title=$(echo "$control_data" | jq -r '.title')
-        local evidence
-        evidence=$(echo "$control_data" | jq -r '.evidence')
-        local findings
-        findings=$(echo "$control_data" | jq -r '.findings')
-
-        cat >> "$output_file" << 'EOF'
-    <div class="control">
-        <h3>$control_id - $title</h3>
-        <p>Status: $status</p>
-        <p>Evidence: $evidence</p>
-EOF
-
-        if [[ -n "$findings" && "$findings" != "null" ]]; then
-            cat >> "$output_file" << 'EOF'
-        <p>Findings: $findings</p>
-EOF
-        fi
-
-        cat >> "$output_file" << 'EOF'
-    </div>
-EOF
-    done
-
-    cat >> "$output_file" << 'EOF'
- - -EOF - - log_success "HTML compliance report generated: $output_file" "compliance" -} - -generate_json_compliance_report() { - local framework_name="$1" - local report_file="$2" - - local output_file="${WORKSPACE}/compliance/reports/${framework_name}-report-$(date +%Y%m%d).json" - - # Copy and enhance the report - jq --arg framework "$framework_name" --arg generated "$(date -Iseconds)" \ - '. + {"framework": $framework, "report_generated": $generated}' "$report_file" > "$output_file" - - log_success "JSON compliance report generated: $output_file" "compliance" -} - -generate_pdf_compliance_report() { - local framework_name="$1" - local report_file="$2" - - local output_file="${WORKSPACE}/compliance/reports/${framework_name}-report-$(date +%Y%m%d).pdf" - - # For now, generate HTML and suggest conversion - local html_file="${WORKSPACE}/compliance/reports/${framework_name}-report-$(date +%Y%m%d).html" - generate_html_compliance_report "$framework_name" "$report_file" - - log_warning "PDF generation not implemented" "compliance" - log_info "HTML report generated: $html_file" "compliance" - log_info "Convert to PDF manually or use tools like wkhtmltopdf" "compliance" -} - -# Compliance command handler -handle_compliance_command() { - local command="$1" - shift - - case "$command" in - "init") - init_compliance_frameworks - ;; - "enable") - local framework_name="$1" - local config_file="$2" - enable_framework "$framework_name" "$config_file" - ;; - "disable") - local framework_name="$1" - disable_framework "$framework_name" - ;; - "list") - local format="$1" - list_frameworks "$format" - ;; - "scan") - local framework_name="$1" - local scan_level="$2" - run_compliance_scan "$framework_name" "$scan_level" - ;; - "report") - local framework_name="$1" - local format="$2" - local period="$3" - generate_compliance_report "$framework_name" "$format" "$period" - ;; - "help"|*) - echo "Advanced Compliance Framework Commands:" - echo "======================================" - echo " init - Initialize compliance frameworks" - echo " enable [config_file] - Enable compliance framework" - echo " disable - Disable compliance framework" - echo " list [format] - List enabled frameworks (json|csv|table)" - echo " scan [level] - Run compliance scan (quick|standard|thorough)" - echo " report [format] [period] - Generate compliance report (html|json|pdf)" - echo " help - Show this help" - echo "" - echo "Supported Frameworks:" - echo " SOX, PCI-DSS, HIPAA, GDPR, ISO-27001, NIST-CSF, CIS, FEDRAMP, SOC-2, CMMC" - ;; - esac -} - -# --- END OF SCRIPTLET: 16-compliance-frameworks.sh --- - -# ============================================================================ -# Enterprise Integration (Enterprise Features) -# ============================================================================ - -# Enterprise Integration for apt-layer -# Provides hooks and integrations with enterprise tools and systems -# Supports SIEM, ticketing, monitoring, and other enterprise integrations - -# Enterprise integration configuration -ENTERPRISE_INTEGRATION_ENABLED="${ENTERPRISE_INTEGRATION_ENABLED:-true}" -ENTERPRISE_INTEGRATION_LEVEL="${ENTERPRISE_INTEGRATION_LEVEL:-basic}" # basic, standard, advanced -ENTERPRISE_INTEGRATION_TIMEOUT="${ENTERPRISE_INTEGRATION_TIMEOUT:-30}" -ENTERPRISE_INTEGRATION_RETRY="${ENTERPRISE_INTEGRATION_RETRY:-3}" - -# Supported enterprise integrations -SUPPORTED_INTEGRATIONS=( - "SIEM" # Security Information and Event Management - "TICKETING" # IT Service Management / Ticketing - "MONITORING" # System 
monitoring and alerting - "CMDB" # Configuration Management Database - "BACKUP" # Enterprise backup systems - "SECURITY" # Security tools and platforms - "COMPLIANCE" # Compliance and governance tools - "DEVOPS" # DevOps and CI/CD tools - "CLOUD" # Cloud platform integrations - "CUSTOM" # Custom enterprise integrations -) - -# Enterprise integration initialization -init_enterprise_integration() { - log_info "Initializing enterprise integration system..." "enterprise" - - # Create enterprise integration directories - local enterprise_base="${WORKSPACE}/enterprise" - mkdir -p "$enterprise_base" - mkdir -p "$enterprise_base/integrations" - mkdir -p "$enterprise_base/hooks" - mkdir -p "$enterprise_base/configs" - mkdir -p "$enterprise_base/logs" - mkdir -p "$enterprise_base/templates" - - # Initialize enterprise integration database - local enterprise_db="$enterprise_base/integrations.json" - if [[ ! -f "$enterprise_db" ]]; then - cat > "$enterprise_db" << 'EOF' -{ - "integrations": {}, - "hooks": {}, - "configs": {}, - "metadata": { - "created": "", - "version": "1.0", - "last_sync": null - } -} -EOF - # Set creation timestamp - jq --arg created "$(date -Iseconds)" '.metadata.created = $created' "$enterprise_db" > "$enterprise_db.tmp" && mv "$enterprise_db.tmp" "$enterprise_db" - fi - - # Initialize integration templates - init_integration_templates - - log_success "Enterprise integration system initialized" "enterprise" -} - -# Initialize integration templates -init_integration_templates() { - local templates_dir="${WORKSPACE}/enterprise/templates" - - # SIEM Integration Template - cat > "$templates_dir/siem.json" << 'EOF' -{ - "name": "SIEM", - "type": "security", - "description": "Security Information and Event Management Integration", - "endpoints": { - "events": "https://siem.example.com/api/v1/events", - "alerts": "https://siem.example.com/api/v1/alerts", - "incidents": "https://siem.example.com/api/v1/incidents" - }, - "authentication": { - "type": "api_key", - "header": "X-API-Key" - }, - "events": { - "layer_created": true, - "layer_deleted": true, - "security_scan": true, - "compliance_scan": true, - "user_action": true, - "system_event": true - }, - "format": "json", - "retry_policy": { - "max_retries": 3, - "backoff_multiplier": 2, - "timeout": 30 - } -} -EOF - - # Ticketing Integration Template - cat > "$templates_dir/ticketing.json" << 'EOF' -{ - "name": "TICKETING", - "type": "service_management", - "description": "IT Service Management / Ticketing System Integration", - "endpoints": { - "tickets": "https://ticketing.example.com/api/v2/tickets", - "incidents": "https://ticketing.example.com/api/v2/incidents", - "changes": "https://ticketing.example.com/api/v2/changes" - }, - "authentication": { - "type": "basic_auth", - "username": "service_account", - "password": "encrypted_password" - }, - "triggers": { - "security_incident": true, - "compliance_violation": true, - "system_failure": true, - "maintenance_required": true, - "user_request": true - }, - "format": "json", - "priority_mapping": { - "critical": "P1", - "high": "P2", - "medium": "P3", - "low": "P4" - } -} -EOF - - # Monitoring Integration Template - cat > "$templates_dir/monitoring.json" << 'EOF' -{ - "name": "MONITORING", - "type": "monitoring", - "description": "System Monitoring and Alerting Integration", - "endpoints": { - "metrics": "https://monitoring.example.com/api/v1/metrics", - "alerts": "https://monitoring.example.com/api/v1/alerts", - "health": "https://monitoring.example.com/api/v1/health" - }, - 
"authentication": { - "type": "bearer_token", - "token": "encrypted_token" - }, - "metrics": { - "layer_count": true, - "storage_usage": true, - "security_status": true, - "compliance_status": true, - "user_activity": true, - "system_performance": true - }, - "format": "json", - "collection_interval": 300 -} -EOF - - # CMDB Integration Template - cat > "$templates_dir/cmdb.json" << 'EOF' -{ - "name": "CMDB", - "type": "configuration_management", - "description": "Configuration Management Database Integration", - "endpoints": { - "assets": "https://cmdb.example.com/api/v1/assets", - "configurations": "https://cmdb.example.com/api/v1/configurations", - "relationships": "https://cmdb.example.com/api/v1/relationships" - }, - "authentication": { - "type": "oauth2", - "client_id": "apt_layer_client", - "client_secret": "encrypted_secret" - }, - "assets": { - "layers": true, - "deployments": true, - "users": true, - "configurations": true, - "dependencies": true - }, - "format": "json", - "sync_interval": 3600 -} -EOF - - # DevOps Integration Template - cat > "$templates_dir/devops.json" << 'EOF' -{ - "name": "DEVOPS", - "type": "devops", - "description": "DevOps and CI/CD Tools Integration", - "endpoints": { - "pipelines": "https://devops.example.com/api/v1/pipelines", - "deployments": "https://devops.example.com/api/v1/deployments", - "artifacts": "https://devops.example.com/api/v1/artifacts" - }, - "authentication": { - "type": "service_account", - "token": "encrypted_token" - }, - "triggers": { - "layer_ready": true, - "deployment_complete": true, - "security_approved": true, - "compliance_verified": true - }, - "format": "json", - "webhook_url": "https://devops.example.com/webhooks/apt-layer" -} -EOF - - log_info "Integration templates initialized" "enterprise" -} - -# Integration management functions -enable_integration() { - local integration_name="$1" - local config_file="$2" - - if [[ -z "$integration_name" ]]; then - log_error "Integration name is required" "enterprise" - return 1 - fi - - # Validate integration name - local valid_integration=false - for integration in "${SUPPORTED_INTEGRATIONS[@]}"; do - if [[ "$integration" == "$integration_name" ]]; then - valid_integration=true - break - fi - done - - if [[ "$valid_integration" != "true" ]]; then - log_error "Unsupported integration: $integration_name" "enterprise" - log_info "Supported integrations: ${SUPPORTED_INTEGRATIONS[*]}" "enterprise" - return 1 - fi - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - local template_file="$enterprise_base/templates/${integration_name,,}.json" - - # Check if integration template exists - if [[ ! -f "$template_file" ]]; then - log_error "Integration template not found: $template_file" "enterprise" - return 1 - fi - - # Load template - local template_data - template_data=$(jq -r '.' 
"$template_file") - - # Merge custom configuration if provided - if [[ -n "$config_file" && -f "$config_file" ]]; then - if jq empty "$config_file" 2>/dev/null; then - template_data=$(jq -s '.[0] * .[1]' <(echo "$template_data") "$config_file") - else - log_warning "Invalid JSON in integration configuration, using template defaults" "enterprise" - fi - fi - - # Add integration to database - jq --arg name "$integration_name" --argjson data "$template_data" \ - '.integrations[$name] = $data' "$enterprise_db" > "$enterprise_db.tmp" && mv "$enterprise_db.tmp" "$enterprise_db" - - # Test integration connectivity - test_integration_connectivity "$integration_name" - - log_success "Integration '$integration_name' enabled successfully" "enterprise" -} - -disable_integration() { - local integration_name="$1" - - if [[ -z "$integration_name" ]]; then - log_error "Integration name is required" "enterprise" - return 1 - fi - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Remove integration from database - jq --arg name "$integration_name" 'del(.integrations[$name])' "$enterprise_db" > "$enterprise_db.tmp" && mv "$enterprise_db.tmp" "$enterprise_db" - - log_success "Integration '$integration_name' disabled successfully" "enterprise" -} - -list_integrations() { - local format="${1:-table}" - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - if [[ ! -f "$enterprise_db" ]]; then - log_error "Enterprise integration database not found" "enterprise" - return 1 - fi - - case "$format" in - "json") - jq -r '.integrations' "$enterprise_db" - ;; - "csv") - echo "integration,type,description,status" - jq -r '.integrations | to_entries[] | [.key, .value.type, .value.description, "enabled"] | @csv' "$enterprise_db" - ;; - "table"|*) - echo "Enabled Enterprise Integrations:" - echo "===============================" - jq -r '.integrations | to_entries[] | "\(.key) (\(.value.type)) - \(.value.description)"' "$enterprise_db" - ;; - esac -} - -# Integration connectivity testing -test_integration_connectivity() { - local integration_name="$1" - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Get integration configuration - local integration_config - integration_config=$(jq -r ".integrations[\"$integration_name\"]" "$enterprise_db") - - if [[ "$integration_config" == "null" ]]; then - log_error "Integration '$integration_name' not found" "enterprise" - return 1 - fi - - log_info "Testing connectivity for integration: $integration_name" "enterprise" - - # Test primary endpoint - local primary_endpoint - primary_endpoint=$(echo "$integration_config" | jq -r '.endpoints | to_entries[0].value') - - if [[ -n "$primary_endpoint" && "$primary_endpoint" != "null" ]]; then - # Test HTTP connectivity - if curl -s --connect-timeout 10 --max-time 30 "$primary_endpoint" > /dev/null 2>&1; then - log_success "Connectivity test passed for $integration_name" "enterprise" - else - log_warning "Connectivity test failed for $integration_name" "enterprise" - fi - else - log_info "No primary endpoint configured for $integration_name" "enterprise" - fi -} - -# Event sending functions -send_enterprise_event() { - local integration_name="$1" - local event_type="$2" - local event_data="$3" - - if [[ -z "$integration_name" || -z "$event_type" ]]; then - log_error "Integration name and event type are required" "enterprise" - return 1 - fi - - local 
enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Get integration configuration - local integration_config - integration_config=$(jq -r ".integrations[\"$integration_name\"]" "$enterprise_db") - - if [[ "$integration_config" == "null" ]]; then - log_error "Integration '$integration_name' not found" "enterprise" - return 1 - fi - - # Check if event type is enabled - local event_enabled - event_enabled=$(echo "$integration_config" | jq -r ".events.$event_type // .triggers.$event_type // false") - - if [[ "$event_enabled" != "true" ]]; then - log_debug "Event type '$event_type' not enabled for integration '$integration_name'" "enterprise" - return 0 - fi - - # Get endpoint for event type - local endpoint - case "$event_type" in - "layer_created"|"layer_deleted"|"security_scan"|"compliance_scan") - endpoint=$(echo "$integration_config" | jq -r '.endpoints.events // .endpoints.alerts') - ;; - "security_incident"|"compliance_violation"|"system_failure") - endpoint=$(echo "$integration_config" | jq -r '.endpoints.incidents // .endpoints.alerts') - ;; - *) - endpoint=$(echo "$integration_config" | jq -r '.endpoints.events') - ;; - esac - - if [[ -z "$endpoint" || "$endpoint" == "null" ]]; then - log_error "No endpoint configured for event type '$event_type'" "enterprise" - return 1 - fi - - # Prepare event payload - local payload - payload=$(prepare_event_payload "$integration_name" "$event_type" "$event_data") - - # Send event - send_event_to_integration "$integration_name" "$endpoint" "$payload" -} - -prepare_event_payload() { - local integration_name="$1" - local event_type="$2" - local event_data="$3" - - # Base event structure - local base_event - base_event=$(cat << 'EOF' -{ - "source": "apt-layer", - "integration": "$integration_name", - "event_type": "$event_type", - "timestamp": "$(date -Iseconds)", - "version": "1.0" -} -EOF -) - - # Merge with event data if provided - if [[ -n "$event_data" ]]; then - if jq empty <(echo "$event_data") 2>/dev/null; then - echo "$base_event" | jq --argjson data "$event_data" '. + $data' - else - echo "$base_event" | jq --arg data "$event_data" '. 
+ {"message": $data}' - fi - else - echo "$base_event" - fi -} - -send_event_to_integration() { - local integration_name="$1" - local endpoint="$2" - local payload="$3" - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Get integration configuration - local integration_config - integration_config=$(jq -r ".integrations[\"$integration_name\"]" "$enterprise_db") - - # Get authentication details - local auth_type - auth_type=$(echo "$integration_config" | jq -r '.authentication.type') - - # Prepare curl command - local curl_cmd="curl -s --connect-timeout $ENTERPRISE_INTEGRATION_TIMEOUT --max-time $ENTERPRISE_INTEGRATION_TIMEOUT" - - # Add authentication - case "$auth_type" in - "api_key") - local api_key - api_key=$(echo "$integration_config" | jq -r '.authentication.header // "X-API-Key"') - local key_value - key_value=$(echo "$integration_config" | jq -r '.authentication.key') - curl_cmd="$curl_cmd -H \"$api_key: $key_value\"" - ;; - "basic_auth") - local username - username=$(echo "$integration_config" | jq -r '.authentication.username') - local password - password=$(echo "$integration_config" | jq -r '.authentication.password') - curl_cmd="$curl_cmd -u \"$username:$password\"" - ;; - "bearer_token") - local token - token=$(echo "$integration_config" | jq -r '.authentication.token') - curl_cmd="$curl_cmd -H \"Authorization: Bearer $token\"" - ;; - "oauth2") - local client_id - client_id=$(echo "$integration_config" | jq -r '.authentication.client_id') - local client_secret - client_secret=$(echo "$integration_config" | jq -r '.authentication.client_secret') - curl_cmd="$curl_cmd -H \"X-Client-ID: $client_id\" -H \"X-Client-Secret: $client_secret\"" - ;; - esac - - # Add headers and send - curl_cmd="$curl_cmd -H \"Content-Type: application/json\" -X POST -d '$payload' \"$endpoint\"" - - # Send with retry logic - local retry_count=0 - local max_retries - max_retries=$(echo "$integration_config" | jq -r '.retry_policy.max_retries // 3') - - while [[ $retry_count -lt $max_retries ]]; do - local response - response=$(eval "$curl_cmd") - local exit_code=$? 
- - if [[ $exit_code -eq 0 ]]; then - log_debug "Event sent successfully to $integration_name" "enterprise" - return 0 - else - retry_count=$((retry_count + 1)) - if [[ $retry_count -lt $max_retries ]]; then - local backoff - backoff=$(echo "$integration_config" | jq -r '.retry_policy.backoff_multiplier // 2') - local wait_time=$((retry_count * backoff)) - log_warning "Event send failed, retrying in ${wait_time}s (attempt $retry_count/$max_retries)" "enterprise" - sleep "$wait_time" - fi - fi - done - - log_error "Failed to send event to $integration_name after $max_retries attempts" "enterprise" - return 1 -} - -# Hook management functions -register_hook() { - local hook_name="$1" - local hook_script="$2" - local event_types="$3" - - if [[ -z "$hook_name" || -z "$hook_script" ]]; then - log_error "Hook name and script are required" "enterprise" - return 1 - fi - - local enterprise_base="${WORKSPACE}/enterprise" - local hooks_dir="$enterprise_base/hooks" - local enterprise_db="$enterprise_base/integrations.json" - - # Create hook file - local hook_file="$hooks_dir/$hook_name.sh" - cat > "$hook_file" << EOF -#!/bin/bash -# Enterprise Integration Hook: $hook_name -# Event Types: $event_types - -$hook_script -EOF - - chmod +x "$hook_file" - - # Register hook in database - jq --arg name "$hook_name" --arg script "$hook_file" --arg events "$event_types" \ - '.hooks[$name] = {"script": $script, "events": $events, "enabled": true}' "$enterprise_db" > "$enterprise_db.tmp" && mv "$enterprise_db.tmp" "$enterprise_db" - - log_success "Hook '$hook_name' registered successfully" "enterprise" -} - -unregister_hook() { - local hook_name="$1" - - if [[ -z "$hook_name" ]]; then - log_error "Hook name is required" "enterprise" - return 1 - fi - - local enterprise_base="${WORKSPACE}/enterprise" - local hooks_dir="$enterprise_base/hooks" - local enterprise_db="$enterprise_base/integrations.json" - - # Remove hook file - local hook_file="$hooks_dir/$hook_name.sh" - if [[ -f "$hook_file" ]]; then - rm -f "$hook_file" - fi - - # Remove from database - jq --arg name "$hook_name" 'del(.hooks[$name])' "$enterprise_db" > "$enterprise_db.tmp" && mv "$enterprise_db.tmp" "$enterprise_db" - - log_success "Hook '$hook_name' unregistered successfully" "enterprise" -} - -list_hooks() { - local format="${1:-table}" - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - if [[ ! 
-f "$enterprise_db" ]]; then - log_error "Enterprise integration database not found" "enterprise" - return 1 - fi - - case "$format" in - "json") - jq -r '.hooks' "$enterprise_db" - ;; - "csv") - echo "hook_name,script,events,enabled" - jq -r '.hooks | to_entries[] | [.key, .value.script, .value.events, .value.enabled] | @csv' "$enterprise_db" - ;; - "table"|*) - echo "Registered Enterprise Hooks:" - echo "============================" - jq -r '.hooks | to_entries[] | "\(.key) - \(.value.events) (\(.value.enabled))"' "$enterprise_db" - ;; - esac -} - -# Hook execution -execute_hooks() { - local event_type="$1" - local event_data="$2" - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Get hooks for this event type - local hooks - hooks=$(jq -r ".hooks | to_entries[] | select(.value.events | contains(\"$event_type\")) | .key" "$enterprise_db") - - if [[ -z "$hooks" ]]; then - log_debug "No hooks registered for event type: $event_type" "enterprise" - return 0 - fi - - while IFS= read -r hook_name; do - if [[ -n "$hook_name" ]]; then - execute_single_hook "$hook_name" "$event_type" "$event_data" - fi - done <<< "$hooks" -} - -execute_single_hook() { - local hook_name="$1" - local event_type="$2" - local event_data="$3" - - local enterprise_base="${WORKSPACE}/enterprise" - local enterprise_db="$enterprise_base/integrations.json" - - # Get hook configuration - local hook_config - hook_config=$(jq -r ".hooks[\"$hook_name\"]" "$enterprise_db") - - if [[ "$hook_config" == "null" ]]; then - log_error "Hook '$hook_name' not found" "enterprise" - return 1 - fi - - local enabled - enabled=$(echo "$hook_config" | jq -r '.enabled') - - if [[ "$enabled" != "true" ]]; then - log_debug "Hook '$hook_name' is disabled" "enterprise" - return 0 - fi - - local script_path - script_path=$(echo "$hook_config" | jq -r '.script') - - if [[ ! 
-f "$script_path" ]]; then - log_error "Hook script not found: $script_path" "enterprise" - return 1 - fi - - # Execute hook with environment variables - log_debug "Executing hook: $hook_name" "enterprise" - - export APT_LAYER_EVENT_TYPE="$event_type" - export APT_LAYER_EVENT_DATA="$event_data" - export APT_LAYER_WORKSPACE="$WORKSPACE" - - if bash "$script_path"; then - log_debug "Hook '$hook_name' executed successfully" "enterprise" - else - log_error "Hook '$hook_name' execution failed" "enterprise" - fi -} - -# Enterprise integration command handler -handle_enterprise_integration_command() { - local command="$1" - shift - - case "$command" in - "init") - init_enterprise_integration - ;; - "enable") - local integration_name="$1" - local config_file="$2" - enable_integration "$integration_name" "$config_file" - ;; - "disable") - local integration_name="$1" - disable_integration "$integration_name" - ;; - "list") - local format="$1" - list_integrations "$format" - ;; - "test") - local integration_name="$1" - test_integration_connectivity "$integration_name" - ;; - "hook") - local hook_command="$1" - shift - case "$hook_command" in - "register") - local hook_name="$1" - local hook_script="$2" - local event_types="$3" - register_hook "$hook_name" "$hook_script" "$event_types" - ;; - "unregister") - local hook_name="$1" - unregister_hook "$hook_name" - ;; - "list") - local format="$1" - list_hooks "$format" - ;; - *) - echo "Hook commands: register, unregister, list" - ;; - esac - ;; - "send") - local integration_name="$1" - local event_type="$2" - local event_data="$3" - send_enterprise_event "$integration_name" "$event_type" "$event_data" - ;; - "help"|*) - echo "Enterprise Integration Commands:" - echo "===============================" - echo " init - Initialize enterprise integration system" - echo " enable [config_file] - Enable enterprise integration" - echo " disable - Disable enterprise integration" - echo " list [format] - List enabled integrations (json|csv|table)" - echo " test - Test integration connectivity" - echo " hook register