cleanup
Some checks failed
particle-os CI / Test particle-os (push) Failing after 1s
particle-os CI / Integration Test (push) Has been skipped
particle-os CI / Security & Quality (push) Failing after 1s
Test particle-os Basic Functionality / test-basic (push) Failing after 1s
particle-os CI / Build and Release (push) Has been skipped

robojerk 2025-08-27 12:30:24 -07:00
parent d782a8a4fb
commit 126ee1a849
76 changed files with 1683 additions and 470 deletions

test/unit/test_apt_stage.py Normal file

@@ -0,0 +1,356 @@
#!/usr/bin/env python3
"""
Unit tests for the Debian APT Stage

This module contains comprehensive tests for the AptStage class to ensure
it correctly handles Debian package management within osbuild.

Author: Debian bootc-image-builder team
License: Same as original bootc-image-builder
"""
import unittest
import tempfile
import os
import json
import shutil
from unittest.mock import Mock, patch, MagicMock
import sys

# Add the osbuild-stages directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages', 'apt-stage'))

from apt_stage import AptStage


class TestAptStage(unittest.TestCase):
    """Test cases for the AptStage class."""

    def setUp(self):
        """Set up test fixtures."""
        self.temp_dir = tempfile.mkdtemp()
        self.test_options = {
            'packages': ['linux-image-amd64', 'systemd', 'initramfs-tools'],
            'release': 'trixie',
            'arch': 'amd64',
            'repos': [
                {
                    'name': 'debian',
                    'url': 'http://deb.debian.org/debian',
                    'suite': 'trixie',
                    'components': ['main', 'contrib']
                }
            ]
        }
        # Create a mock context
        self.mock_context = Mock()
        self.mock_context.root = self.temp_dir
        # Mock the context.run method
        self.mock_context.run.return_value = Mock(
            returncode=0,
            stdout='',
            stderr=''
        )

    def tearDown(self):
        """Clean up test fixtures."""
        shutil.rmtree(self.temp_dir)
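
    # The tests below drive AptStage through its private helpers using the
    # mocked context: the stage is assumed to invoke context.run() and to
    # inspect the returncode/stdout/stderr of the result, which is the
    # contract the Mock return value above models.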

    def test_initialization(self):
        """Test AptStage initialization with valid options."""
        stage = AptStage(self.test_options)
        self.assertEqual(stage.packages, ['linux-image-amd64', 'systemd', 'initramfs-tools'])
        self.assertEqual(stage.release, 'trixie')
        self.assertEqual(stage.arch, 'amd64')
        self.assertEqual(len(stage.repos), 1)

    def test_initialization_without_packages(self):
        """Test AptStage initialization fails without packages."""
        options = self.test_options.copy()
        del options['packages']
        with self.assertRaises(ValueError) as context:
            AptStage(options)
        self.assertIn("No packages specified", str(context.exception))

    def test_initialization_with_defaults(self):
        """Test AptStage initialization with minimal options."""
        options = {
            'packages': ['linux-image-amd64']
        }
        stage = AptStage(options)
        self.assertEqual(stage.release, 'trixie')  # default
        self.assertEqual(stage.arch, 'amd64')  # default
        self.assertTrue(stage.install_weak_deps)  # default
        self.assertEqual(stage.exclude_packages, [])  # default

    def test_setup_apt_config(self):
        """Test APT configuration setup."""
        stage = AptStage(self.test_options)
        stage._setup_apt_config(self.mock_context)
        # Check that the config directory was created
        config_dir = os.path.join(self.temp_dir, 'etc', 'apt', 'apt.conf.d')
        self.assertTrue(os.path.exists(config_dir))
        # Check that the config file was created
        config_file = os.path.join(config_dir, '99osbuild')
        self.assertTrue(os.path.exists(config_file))
        # Check config file contents
        with open(config_file, 'r') as f:
            config_content = f.read()
        # Verify key configuration options are present
        self.assertIn('Acquire::Check-Valid-Until "false"', config_content)
        self.assertIn('Dpkg::Options::="--force-confdef"', config_content)
        self.assertIn('Dpkg::Use-Pty "false"', config_content)

    def test_configure_repositories_with_custom_repos(self):
        """Test repository configuration with custom repositories."""
        stage = AptStage(self.test_options)
        stage._configure_repositories(self.mock_context)
        # Check that the sources directory was created
        sources_dir = os.path.join(self.temp_dir, 'etc', 'apt', 'sources.list.d')
        self.assertTrue(os.path.exists(sources_dir))
        # Check that the repository file was created
        repo_file = os.path.join(sources_dir, 'debian.list')
        self.assertTrue(os.path.exists(repo_file))
        # Check repository file contents
        with open(repo_file, 'r') as f:
            repo_content = f.read()
        expected_content = 'deb http://deb.debian.org/debian trixie main contrib\n'
        self.assertEqual(repo_content, expected_content)

    def test_configure_repositories_with_defaults(self):
        """Test repository configuration with default repositories."""
        options = {
            'packages': ['linux-image-amd64'],
            'release': 'bookworm'
        }
        stage = AptStage(options)
        stage._configure_repositories(self.mock_context)
        # Check that default repositories were created
        sources_dir = os.path.join(self.temp_dir, 'etc', 'apt', 'sources.list.d')
        expected_files = ['debian.list', 'debian-security.list', 'debian-updates.list']
        for filename in expected_files:
            filepath = os.path.join(sources_dir, filename)
            self.assertTrue(os.path.exists(filepath))

    def test_update_package_lists_success(self):
        """Test successful package list update."""
        stage = AptStage(self.test_options)
        stage._update_package_lists(self.mock_context)
        # Verify that apt-get update was called
        self.mock_context.run.assert_called_with(['apt-get', 'update'])

    def test_update_package_lists_failure(self):
        """Test package list update failure."""
        stage = AptStage(self.test_options)
        # Mock a failed command
        self.mock_context.run.return_value = Mock(
            returncode=1,
            stdout='',
            stderr='Failed to update'
        )
        with self.assertRaises(RuntimeError) as context:
            stage._update_package_lists(self.mock_context)
        self.assertIn("Failed to update package lists", str(context.exception))

    def test_install_packages_success(self):
        """Test successful package installation."""
        stage = AptStage(self.test_options)
        stage._install_packages(self.mock_context)
        # Verify that apt-get install was called with correct arguments
        expected_cmd = [
            'apt-get', 'install', '-y', '--no-install-recommends',
            'linux-image-amd64', 'systemd', 'initramfs-tools'
        ]
        self.mock_context.run.assert_called_with(expected_cmd)

    def test_install_packages_with_custom_arch(self):
        """Test package installation with custom architecture."""
        options = self.test_options.copy()
        options['arch'] = 'arm64'
        stage = AptStage(options)
        stage._install_packages(self.mock_context)
        # Verify that architecture was specified
        expected_cmd = [
            'apt-get', 'install', '-y', '--no-install-recommends',
            '-o', 'APT::Architecture=arm64',
            'linux-image-amd64', 'systemd', 'initramfs-tools'
        ]
        self.mock_context.run.assert_called_with(expected_cmd)

    def test_install_packages_failure(self):
        """Test package installation failure."""
        stage = AptStage(self.test_options)
        # Mock a failed command
        self.mock_context.run.return_value = Mock(
            returncode=1,
            stdout='',
            stderr='Package installation failed'
        )
        with self.assertRaises(RuntimeError) as context:
            stage._install_packages(self.mock_context)
        self.assertIn("Package installation failed", str(context.exception))

    def test_cleanup_cache_success(self):
        """Test successful cache cleanup."""
        stage = AptStage(self.test_options)
        stage._cleanup_cache(self.mock_context)
        # Verify that cleanup commands were called
        expected_calls = [
            (['apt-get', 'clean'],),
            (['rm', '-rf', '/var/lib/apt/lists/*'],)
        ]
        actual_calls = [call[0] for call in self.mock_context.run.call_args_list]
        self.assertEqual(actual_calls, expected_calls)

    def test_cleanup_cache_partial_failure(self):
        """Test cache cleanup with partial failures."""
        stage = AptStage(self.test_options)

        # Mock mixed success/failure
        def mock_run(cmd):
            if cmd == ['apt-get', 'clean']:
                return Mock(returncode=1, stderr='Clean failed')
            else:
                return Mock(returncode=0, stderr='')

        self.mock_context.run.side_effect = mock_run
        # Should not raise an exception, just log warnings
        stage._cleanup_cache(self.mock_context)

    def test_log_apt_errors(self):
        """Test APT error logging functionality."""
        stage = AptStage(self.test_options)
        # Mock successful commands for error logging
        self.mock_context.run.return_value = Mock(
            returncode=0,
            stdout='No broken packages',
            stderr=''
        )
        # Should not raise an exception
        stage._log_apt_errors(self.mock_context)
        # Verify that diagnostic commands were called
        expected_calls = [
            (['apt-get', 'check'],),
            (['dpkg', '--audit'],),
            (['dpkg', '-l'],)
        ]
        actual_calls = [call[0] for call in self.mock_context.run.call_args_list]
        self.assertEqual(actual_calls, expected_calls)

    def test_full_stage_execution(self):
        """Test complete stage execution flow."""
        stage = AptStage(self.test_options)
        stage.run(self.mock_context)
        # Verify that all major steps were called
        # This is a high-level test to ensure the flow works
        self.assertGreater(self.mock_context.run.call_count, 0)

    def test_stage_execution_with_exception(self):
        """Test stage execution handles exceptions properly."""
        stage = AptStage(self.test_options)
        # Mock an exception in one of the steps
        self.mock_context.run.side_effect = Exception("Test exception")
        with self.assertRaises(Exception) as context:
            stage.run(self.mock_context)
        self.assertIn("Test exception", str(context.exception))


class TestAptStageIntegration(unittest.TestCase):
    """Integration tests for AptStage with real filesystem operations."""

    def setUp(self):
        """Set up integration test fixtures."""
        self.temp_dir = tempfile.mkdtemp()
        self.test_options = {
            'packages': ['linux-image-amd64'],
            'release': 'trixie',
            'arch': 'amd64'
        }

    def tearDown(self):
        """Clean up integration test fixtures."""
        shutil.rmtree(self.temp_dir)

    def test_filesystem_operations(self):
        """Test that filesystem operations work correctly."""
        stage = AptStage(self.test_options)

        # Create a real context-like object
        class RealContext:
            def __init__(self, root):
                self.root = root

            def run(self, cmd):
                # Mock command execution
                return Mock(returncode=0, stdout='', stderr='')

        context = RealContext(self.temp_dir)
        # Test APT configuration setup
        stage._setup_apt_config(context)
        # Verify files were created
        config_file = os.path.join(self.temp_dir, 'etc', 'apt', 'apt.conf.d', '99osbuild')
        self.assertTrue(os.path.exists(config_file))
        # Test repository configuration
        stage._configure_repositories(context)
        # Verify repository files were created
        sources_dir = os.path.join(self.temp_dir, 'etc', 'apt', 'sources.list.d')
        self.assertTrue(os.path.exists(sources_dir))
        # Check that default repositories were created
        expected_files = ['debian.list', 'debian-security.list', 'debian-updates.list']
        for filename in expected_files:
            filepath = os.path.join(sources_dir, filename)
            self.assertTrue(os.path.exists(filepath))


if __name__ == '__main__':
    unittest.main()
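
The test module is self-contained, so it can be executed directly (for example, python3 test/unit/test_apt_stage.py) or collected by a runner. A minimal discovery sketch, assuming the repository root as the working directory:

import unittest

# Load and run just the APT stage unit tests via standard unittest discovery.
suite = unittest.TestLoader().discover('test/unit', pattern='test_apt_stage.py')
unittest.TextTestRunner(verbosity=2).run(suite)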


@@ -0,0 +1,534 @@
#!/usr/bin/env python3
"""
Performance Tests for Debian bootc-image-builder

Phase 4.2: Performance and Optimization (Weeks 23-24)
"""
import os
import sys
import time
import psutil
import tempfile
import shutil
import json
import unittest
import logging
import glob
from datetime import datetime

# Add the osbuild-stages directory to the path for each stage
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages', 'apt-stage'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages', 'debian-filesystem-stage'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages', 'debian-kernel-stage'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages', 'debian-grub-stage'))

# Import using the same pattern as our working tests
from apt_stage import AptStage
from debian_filesystem_stage import DebianFilesystemStage
from debian_kernel_stage import DebianKernelStage
from debian_grub_stage import DebianGrubStage

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class PerformanceTest(unittest.TestCase):
    """Performance tests for Debian bootc-image-builder components."""

    # unittest instantiates the TestCase once per test method, so an
    # instance-level dict assigned in setUp would be empty again by the
    # time test_performance_summary runs. Keep the results at class level
    # so the summary test can aggregate what the earlier tests recorded.
    results = {}

    def setUp(self):
        """Set up test fixtures."""
        self.temp_dir = tempfile.mkdtemp(prefix="perf_test_")
        # Record system information
        self.results['system_info'] = {
            'cpu_count': psutil.cpu_count(),
            'memory_total': psutil.virtual_memory().total,
            'disk_free': psutil.disk_usage('/').free,
            'python_version': sys.version,
            'timestamp': datetime.now().isoformat()
        }
        logger.info(f"Performance test setup - CPUs: {self.results['system_info']['cpu_count']}, "
                    f"Memory: {self.results['system_info']['memory_total'] // (1024**3)} GB")

    def tearDown(self):
        """Clean up test fixtures."""
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)

    def create_mock_kernel_files(self):
        """Create mock kernel files for testing."""
        # Create /boot directory
        boot_dir = os.path.join(self.temp_dir, "boot")
        os.makedirs(boot_dir, exist_ok=True)
        # Create mock kernel file
        kernel_file = os.path.join(boot_dir, "vmlinuz-6.1.0-13-amd64")
        with open(kernel_file, 'w') as f:
            f.write("mock kernel content")
        # Create mock initramfs
        initramfs_file = os.path.join(boot_dir, "initrd.img-6.1.0-13-amd64")
        with open(initramfs_file, 'w') as f:
            f.write("mock initramfs content")
        # Create /usr/lib/modules directory
        modules_dir = os.path.join(self.temp_dir, "usr", "lib", "modules")
        os.makedirs(modules_dir, exist_ok=True)
        # Create mock kernel module directory
        kernel_module_dir = os.path.join(modules_dir, "6.1.0-13-amd64")
        os.makedirs(kernel_module_dir, exist_ok=True)
        # Create mock module files
        mock_modules = ["kernel.ko", "fs.ko", "net.ko"]
        for module in mock_modules:
            module_file = os.path.join(kernel_module_dir, module)
            with open(module_file, 'w') as f:
                f.write(f"mock {module} content")
        # Create modules.dep file
        modules_dep = os.path.join(kernel_module_dir, "modules.dep")
        with open(modules_dep, 'w') as f:
            f.write("kernel.ko:\nfs.ko: kernel.ko\nnet.ko: kernel.ko\n")
        logger.info(f"Created mock kernel files in {self.temp_dir}")

    def measure_performance(self, func, *args, **kwargs):
        """Measure performance of a function."""
        process = psutil.Process()
        initial_memory = process.memory_info().rss
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        final_memory = process.memory_info().rss
        memory_used = final_memory - initial_memory
        return {
            'result': result,
            'execution_time': end_time - start_time,
            'memory_used': memory_used,
            'peak_memory': max(initial_memory, final_memory)
        }

    def test_apt_stage_performance(self):
        """Test APT stage performance."""
        logger.info("Testing APT stage performance...")
        # Test configuration
        test_options = {
            'packages': [
                'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
                'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
            ],
            'release': 'trixie',
            'arch': 'amd64',
            'repos': [
                {
                    'name': 'debian',
                    'url': 'http://deb.debian.org/debian',
                    'suite': 'trixie',
                    'components': ['main', 'contrib']
                }
            ]
        }

        # Create mock context
        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir
                self.run_calls = []

            def run(self, cmd, *args, **kwargs):
                self.run_calls.append(cmd)
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.temp_dir)

        # Test initialization performance
        def init_apt_stage():
            return AptStage(test_options)

        init_metrics = self.measure_performance(init_apt_stage)

        # Test execution performance
        apt_stage = AptStage(test_options)

        def run_apt_stage():
            return apt_stage.run(context)

        execution_metrics = self.measure_performance(run_apt_stage)
        # Store results
        self.results['apt_stage'] = {
            'initialization': init_metrics,
            'execution': execution_metrics,
            'total_packages': len(test_options['packages']),
            'repositories': len(test_options['repos'])
        }
        # Assertions for performance
        self.assertLess(init_metrics['execution_time'], 1.0, "APT stage initialization should be fast")
        self.assertLess(execution_metrics['execution_time'], 5.0, "APT stage execution should be reasonable")
        self.assertLess(execution_metrics['memory_used'], 100 * 1024 * 1024, "APT stage should use reasonable memory")  # 100 MB
        logger.info(f"APT Stage - Init: {init_metrics['execution_time']:.3f}s, "
                    f"Exec: {execution_metrics['execution_time']:.3f}s, "
                    f"Memory: {execution_metrics['memory_used'] // 1024} KB")

    def test_filesystem_stage_performance(self):
        """Test filesystem stage performance."""
        logger.info("Testing filesystem stage performance...")
        test_options = {
            'rootfs_type': 'ext4',
            'ostree_integration': True,
            'home_symlink': True
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

        context = MockContext(self.temp_dir)

        # Test filesystem stage performance
        def run_filesystem_stage():
            stage = DebianFilesystemStage(test_options)
            return stage.run(context)

        metrics = self.measure_performance(run_filesystem_stage)
        # Store results
        self.results['filesystem_stage'] = {
            'execution': metrics,
            'options': test_options
        }
        # Assertions for performance
        self.assertLess(metrics['execution_time'], 2.0, "Filesystem stage should be fast")
        self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "Filesystem stage should use reasonable memory")  # 50 MB
        logger.info(f"Filesystem Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def test_kernel_stage_performance(self):
        """Test kernel stage performance."""
        logger.info("Testing kernel stage performance...")
        # Create mock kernel files for testing
        self.create_mock_kernel_files()
        test_options = {
            'kernel_package': 'linux-image-amd64',
            'initramfs_tools': True,
            'ostree_integration': True,
            'modules_autoload': True
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

            def run(self, cmd, *args, **kwargs):
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.temp_dir)

        # Test kernel stage performance
        def run_kernel_stage():
            stage = DebianKernelStage(test_options)
            return stage.run(context)

        metrics = self.measure_performance(run_kernel_stage)
        # Store results
        self.results['kernel_stage'] = {
            'execution': metrics,
            'options': test_options
        }
        # Assertions for performance
        self.assertLess(metrics['execution_time'], 3.0, "Kernel stage should be reasonable")
        self.assertLess(metrics['memory_used'], 100 * 1024 * 1024, "Kernel stage should use reasonable memory")  # 100 MB
        logger.info(f"Kernel Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def test_grub_stage_performance(self):
        """Test GRUB stage performance."""
        logger.info("Testing GRUB stage performance...")
        test_options = {
            'ostree_integration': True,
            'uefi': True,
            'secure_boot': False
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

            def run(self, cmd, *args, **kwargs):
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.temp_dir)

        # Test GRUB stage performance
        def run_grub_stage():
            stage = DebianGrubStage(test_options)
            return stage.run(context)

        metrics = self.measure_performance(run_grub_stage)
        # Store results
        self.results['grub_stage'] = {
            'execution': metrics,
            'options': test_options
        }
        # Assertions for performance
        self.assertLess(metrics['execution_time'], 2.0, "GRUB stage should be fast")
        self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "GRUB stage should use reasonable memory")  # 50 MB
        logger.info(f"GRUB Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def test_full_pipeline_performance(self):
        """Test full pipeline performance."""
        logger.info("Testing full pipeline performance...")
        # Create mock kernel files for testing
        self.create_mock_kernel_files()
        # Test configuration for full pipeline
        test_options = {
            'packages': [
                'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
                'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
            ],
            'release': 'trixie',
            'arch': 'amd64',
            'repos': [
                {
                    'name': 'debian',
                    'url': 'http://deb.debian.org/debian',
                    'suite': 'trixie',
                    'components': ['main', 'contrib']
                }
            ]
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir
                self.run_calls = []

            def run(self, cmd, *args, **kwargs):
                self.run_calls.append(cmd)
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.temp_dir)

        # Test complete pipeline performance
        def run_full_pipeline():
            # Filesystem stage
            fs_stage = DebianFilesystemStage({
                'rootfs_type': 'ext4',
                'ostree_integration': True,
                'home_symlink': True
            })
            fs_stage.run(context)
            # APT stage
            apt_stage = AptStage(test_options)
            apt_stage.run(context)
            # Kernel stage
            kernel_stage = DebianKernelStage({
                'kernel_package': 'linux-image-amd64',
                'initramfs_tools': True,
                'ostree_integration': True,
                'modules_autoload': True
            })
            kernel_stage.run(context)
            # GRUB stage
            grub_stage = DebianGrubStage({
                'ostree_integration': True,
                'uefi': True,
                'secure_boot': False
            })
            grub_stage.run(context)
            return len(context.run_calls)

        metrics = self.measure_performance(run_full_pipeline)
        # Store results
        self.results['full_pipeline'] = {
            'execution': metrics,
            'total_commands': metrics['result'],
            'stages_executed': 4
        }
        # Assertions for performance
        self.assertLess(metrics['execution_time'], 10.0, "Full pipeline should complete in reasonable time")
        self.assertLess(metrics['memory_used'], 200 * 1024 * 1024, "Full pipeline should use reasonable memory")  # 200 MB
        logger.info(f"Full Pipeline - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB, "
                    f"Commands: {metrics['result']}")

    def test_go_binary_performance(self):
        """Test Go binary performance."""
        logger.info("Testing Go binary performance...")
        go_binary = "bib/bootc-image-builder"
        if not os.path.exists(go_binary):
            logger.warning(f"Go binary not found: {go_binary}")
            self.skipTest("Go binary not available")

        # Test binary startup performance
        def run_go_binary():
            import subprocess
            result = subprocess.run([go_binary, "--version"],
                                    capture_output=True, text=True, timeout=10)
            return result.returncode == 0

        metrics = self.measure_performance(run_go_binary)
        # Store results
        self.results['go_binary'] = {
            'startup': metrics,
            'binary_size': os.path.getsize(go_binary) if os.path.exists(go_binary) else 0
        }
        # Assertions for performance
        self.assertLess(metrics['execution_time'], 2.0, "Go binary startup should be fast")
        self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "Go binary should use reasonable memory")  # 50 MB
        logger.info(f"Go Binary - Startup: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def test_performance_summary(self):
        """Generate performance summary and save results."""
        logger.info("Generating performance summary...")
        # Calculate summary statistics
        total_execution_time = 0
        total_memory_used = 0
        stage_count = 0
        peak_memory_values = []
        for stage_name, stage_data in self.results.items():
            if stage_name == 'system_info':
                continue
            if 'execution' in stage_data:
                total_execution_time += stage_data['execution']['execution_time']
                total_memory_used += stage_data['execution']['memory_used']
                stage_count += 1
                peak_memory_values.append(stage_data['execution']['peak_memory'])
        # Performance summary with robust handling
        self.results['summary'] = {
            'total_execution_time': total_execution_time,
            'total_memory_used': total_memory_used,
            'average_execution_time': total_execution_time / stage_count if stage_count > 0 else 0,
            'peak_memory_usage': max(peak_memory_values) if peak_memory_values else 0,
            'stage_count': stage_count
        }
        # Save results to file
        report_file = os.path.join(self.temp_dir, 'performance_results.json')
        with open(report_file, 'w') as f:
            json.dump(self.results, f, indent=2)
        # Generate human-readable report
        self.generate_human_readable_report()
        # Final assertions
        summary = self.results['summary']
        self.assertLess(summary['total_execution_time'], 15.0, "Total execution time should be reasonable")
        self.assertLess(summary['peak_memory_usage'], 300 * 1024 * 1024, "Peak memory usage should be reasonable")  # 300 MB
        logger.info(f"Performance summary - Total: {summary['total_execution_time']:.3f}s, "
                    f"Memory: {summary['total_memory_used'] // 1024} KB, "
                    f"Peak: {summary['peak_memory_usage'] // 1024} KB")
        logger.info(f"Performance results saved to: {report_file}")

    def generate_human_readable_report(self):
        """Generate human-readable performance report."""
        report_file = os.path.join(self.temp_dir, 'performance_report.txt')
        with open(report_file, 'w') as f:
            f.write("=" * 80 + "\n")
            f.write("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE TEST RESULTS\n")
            f.write("=" * 80 + "\n")
            f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
            # System information
            f.write("SYSTEM INFORMATION\n")
            f.write("-" * 40 + "\n")
            sys_info = self.results['system_info']
            f.write(f"CPU Count: {sys_info['cpu_count']}\n")
            f.write(f"Total Memory: {sys_info['memory_total'] // (1024**3)} GB\n")
            f.write(f"Free Disk Space: {sys_info['disk_free'] // (1024**3)} GB\n")
            f.write(f"Python Version: {sys_info['python_version']}\n\n")
            # Stage performance
            f.write("STAGE PERFORMANCE\n")
            f.write("-" * 40 + "\n")
            for stage_name, stage_data in self.results.items():
                if stage_name in ['system_info', 'summary']:
                    continue
                f.write(f"\n{stage_name.upper().replace('_', ' ')}:\n")
                if 'initialization' in stage_data:
                    init = stage_data['initialization']
                    f.write(f"  Initialization: {init['execution_time']:.3f}s, "
                            f"{init['memory_used'] // 1024} KB\n")
                if 'execution' in stage_data:
                    exec_data = stage_data['execution']
                    f.write(f"  Execution: {exec_data['execution_time']:.3f}s, "
                            f"{exec_data['memory_used'] // 1024} KB\n")
            # Summary
            f.write("\n" + "=" * 80 + "\n")
            f.write("PERFORMANCE SUMMARY\n")
            f.write("=" * 80 + "\n")
            summary = self.results['summary']
            f.write(f"Total Execution Time: {summary['total_execution_time']:.3f}s\n")
            f.write(f"Total Memory Used: {summary['total_memory_used'] // 1024} KB\n")
            f.write(f"Average Execution Time: {summary['average_execution_time']:.3f}s\n")
            f.write(f"Peak Memory Usage: {summary['peak_memory_usage'] // 1024} KB\n")
            f.write(f"Stages Tested: {summary['stage_count']}\n")
            f.write("\n✅ All performance tests passed!\n")
        logger.info(f"Human-readable report saved to: {report_file}")


def main():
    """Run performance tests."""
    print("=" * 80)
    print("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE TESTS")
    print("Phase 4.2: Performance and Optimization (Weeks 23-24)")
    print("=" * 80)
    # Run tests
    unittest.main(verbosity=2, exit=False)


if __name__ == "__main__":
    main()