deb-bootc-image-builder/test/performance/test_performance.py
#!/usr/bin/env python3
"""
Performance Tests for Debian bootc-image-builder
Phase 4.2: Performance and Optimization (Weeks 23-24)
"""
import os
import sys
import time
import psutil
import tempfile
import shutil
import json
import unittest
import logging
from datetime import datetime
# Add the project root to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
# Add the osbuild-stages directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'osbuild-stages'))
# Import the stage implementations; this assumes the layout
# osbuild-stages/<stage_name>/<stage_name>.py implied by the imports below
import apt_stage.apt_stage as apt_module
import debian_filesystem_stage.debian_filesystem_stage as fs_module
import debian_kernel_stage.debian_kernel_stage as kernel_module
import debian_grub_stage.debian_grub_stage as grub_module
AptStage = apt_module.AptStage
DebianFilesystemStage = fs_module.DebianFilesystemStage
DebianKernelStage = kernel_module.DebianKernelStage
DebianGrubStage = grub_module.DebianGrubStage
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
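# The assertLess() time/memory budgets used throughout are coarse sanity
# thresholds for mocked stage runs, not calibrated benchmarks; they may need
# adjusting on slower runners or as the stages grow.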
class PerformanceTest(unittest.TestCase):
    """Performance tests for Debian bootc-image-builder components."""

    # Shared across test methods: unittest creates a new instance per test,
    # so instance-level state would not survive into test_performance_summary.
    results = {}

    def setUp(self):
        """Set up test fixtures."""
        self.temp_dir = tempfile.mkdtemp(prefix="perf_test_")
        # Record system information once for the whole run
        self.results.setdefault('system_info', {
            'cpu_count': psutil.cpu_count(),
            'memory_total': psutil.virtual_memory().total,
            'disk_free': psutil.disk_usage('/').free,
            'python_version': sys.version,
            'timestamp': datetime.now().isoformat()
        })
        logger.info(f"Performance test setup - CPUs: {self.results['system_info']['cpu_count']}, "
                    f"Memory: {self.results['system_info']['memory_total'] // (1024**3)} GB")
def tearDown(self):
"""Clean up test fixtures."""
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
    def measure_performance(self, func, *args, **kwargs):
        """Measure wall-clock time and RSS memory delta for a function call."""
        process = psutil.Process()
        initial_memory = process.memory_info().rss
        start_time = time.perf_counter()  # monotonic clock, unaffected by system time changes
        result = func(*args, **kwargs)
        end_time = time.perf_counter()
        final_memory = process.memory_info().rss
        # The RSS delta is an approximation: it can be negative if memory is
        # released, and 'peak_memory' is only the larger of the two samples,
        # not a true high-water mark.
        memory_used = final_memory - initial_memory
        return {
            'result': result,
            'execution_time': end_time - start_time,
            'memory_used': memory_used,
            'peak_memory': max(initial_memory, final_memory)
        }
def test_apt_stage_performance(self):
"""Test APT stage performance."""
logger.info("Testing APT stage performance...")
# Test configuration
test_options = {
'packages': [
'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
],
'release': 'trixie',
'arch': 'amd64',
'repos': [
{
'name': 'debian',
'url': 'http://deb.debian.org/debian',
'suite': 'trixie',
'components': ['main', 'contrib']
}
]
}
# Create mock context
class MockContext:
def __init__(self, root_dir):
self.root = root_dir
self.run_calls = []
def run(self, cmd, *args, **kwargs):
self.run_calls.append(cmd)
# Simulate successful command execution
return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()
context = MockContext(self.temp_dir)
# Test initialization performance
def init_apt_stage():
return AptStage(test_options)
init_metrics = self.measure_performance(init_apt_stage)
# Test execution performance
apt_stage = AptStage(test_options)
def run_apt_stage():
return apt_stage.run(context)
execution_metrics = self.measure_performance(run_apt_stage)
# Store results
self.results['apt_stage'] = {
'initialization': init_metrics,
'execution': execution_metrics,
'total_packages': len(test_options['packages']),
'repositories': len(test_options['repos'])
}
# Assertions for performance
self.assertLess(init_metrics['execution_time'], 1.0, "APT stage initialization should be fast")
self.assertLess(execution_metrics['execution_time'], 5.0, "APT stage execution should be reasonable")
self.assertLess(execution_metrics['memory_used'], 100 * 1024 * 1024, "APT stage should use reasonable memory") # 100 MB
logger.info(f"APT Stage - Init: {init_metrics['execution_time']:.3f}s, "
f"Exec: {execution_metrics['execution_time']:.3f}s, "
f"Memory: {execution_metrics['memory_used'] // 1024} KB")
def test_filesystem_stage_performance(self):
"""Test filesystem stage performance."""
logger.info("Testing filesystem stage performance...")
test_options = {
'rootfs_type': 'ext4',
'ostree_integration': True,
'home_symlink': True
}
class MockContext:
def __init__(self, root_dir):
self.root = root_dir
context = MockContext(self.temp_dir)
# Test filesystem stage performance
def run_filesystem_stage():
stage = DebianFilesystemStage(test_options)
return stage.run(context)
metrics = self.measure_performance(run_filesystem_stage)
# Store results
self.results['filesystem_stage'] = {
'execution': metrics,
'options': test_options
}
# Assertions for performance
self.assertLess(metrics['execution_time'], 2.0, "Filesystem stage should be fast")
self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "Filesystem stage should use reasonable memory") # 50 MB
logger.info(f"Filesystem Stage - Exec: {metrics['execution_time']:.3f}s, "
f"Memory: {metrics['memory_used'] // 1024} KB")
def test_kernel_stage_performance(self):
"""Test kernel stage performance."""
logger.info("Testing kernel stage performance...")
test_options = {
'kernel_package': 'linux-image-amd64',
'initramfs_tools': True,
'ostree_integration': True,
'modules_autoload': True
}
class MockContext:
def __init__(self, root_dir):
self.root = root_dir
def run(self, cmd, *args, **kwargs):
# Simulate successful command execution
return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()
context = MockContext(self.temp_dir)
# Test kernel stage performance
def run_kernel_stage():
stage = DebianKernelStage(test_options)
return stage.run(context)
metrics = self.measure_performance(run_kernel_stage)
# Store results
self.results['kernel_stage'] = {
'execution': metrics,
'options': test_options
}
# Assertions for performance
self.assertLess(metrics['execution_time'], 3.0, "Kernel stage should be reasonable")
self.assertLess(metrics['memory_used'], 100 * 1024 * 1024, "Kernel stage should use reasonable memory") # 100 MB
logger.info(f"Kernel Stage - Exec: {metrics['execution_time']:.3f}s, "
f"Memory: {metrics['memory_used'] // 1024} KB")
def test_grub_stage_performance(self):
"""Test GRUB stage performance."""
logger.info("Testing GRUB stage performance...")
test_options = {
'ostree_integration': True,
'uefi': True,
'secure_boot': False
}
class MockContext:
def __init__(self, root_dir):
self.root = root_dir
def run(self, cmd, *args, **kwargs):
# Simulate successful command execution
return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()
context = MockContext(self.temp_dir)
# Test GRUB stage performance
def run_grub_stage():
stage = DebianGrubStage(test_options)
return stage.run(context)
metrics = self.measure_performance(run_grub_stage)
# Store results
self.results['grub_stage'] = {
'execution': metrics,
'options': test_options
}
# Assertions for performance
self.assertLess(metrics['execution_time'], 2.0, "GRUB stage should be fast")
self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "GRUB stage should use reasonable memory") # 50 MB
logger.info(f"GRUB Stage - Exec: {metrics['execution_time']:.3f}s, "
f"Memory: {metrics['memory_used'] // 1024} KB")
def test_full_pipeline_performance(self):
"""Test full pipeline performance."""
logger.info("Testing full pipeline performance...")
# Test configuration for full pipeline
test_options = {
'packages': [
'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
],
'release': 'trixie',
'arch': 'amd64',
'repos': [
{
'name': 'debian',
'url': 'http://deb.debian.org/debian',
'suite': 'trixie',
'components': ['main', 'contrib']
}
]
}
class MockContext:
def __init__(self, root_dir):
self.root = root_dir
self.run_calls = []
def run(self, cmd, *args, **kwargs):
self.run_calls.append(cmd)
# Simulate successful command execution
return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()
context = MockContext(self.temp_dir)
# Test complete pipeline performance
def run_full_pipeline():
# Filesystem stage
fs_stage = DebianFilesystemStage({
'rootfs_type': 'ext4',
'ostree_integration': True,
'home_symlink': True
})
fs_stage.run(context)
# APT stage
apt_stage = AptStage(test_options)
apt_stage.run(context)
# Kernel stage
kernel_stage = DebianKernelStage({
'kernel_package': 'linux-image-amd64',
'initramfs_tools': True,
'ostree_integration': True,
'modules_autoload': True
})
kernel_stage.run(context)
# GRUB stage
grub_stage = DebianGrubStage({
'ostree_integration': True,
'uefi': True,
'secure_boot': False
})
grub_stage.run(context)
return len(context.run_calls)
metrics = self.measure_performance(run_full_pipeline)
# Store results
self.results['full_pipeline'] = {
'execution': metrics,
'total_commands': metrics['result'],
'stages_executed': 4
}
# Assertions for performance
self.assertLess(metrics['execution_time'], 10.0, "Full pipeline should complete in reasonable time")
self.assertLess(metrics['memory_used'], 200 * 1024 * 1024, "Full pipeline should use reasonable memory") # 200 MB
logger.info(f"Full Pipeline - Exec: {metrics['execution_time']:.3f}s, "
f"Memory: {metrics['memory_used'] // 1024} KB, "
f"Commands: {metrics['result']}")
def test_go_binary_performance(self):
"""Test Go binary performance."""
logger.info("Testing Go binary performance...")
        # Resolve the binary relative to the project root so the test does not
        # depend on the current working directory
        project_root = os.path.join(os.path.dirname(__file__), '..', '..')
        go_binary = os.path.join(project_root, "bib", "bootc-image-builder")
        if not os.path.exists(go_binary):
            logger.warning(f"Go binary not found: {go_binary}")
            self.skipTest("Go binary not available")
        # Test binary startup performance (includes subprocess fork/exec
        # overhead, so it overstates the binary's own initialization cost)
        def run_go_binary():
            import subprocess
            result = subprocess.run([go_binary, "--version"],
                                    capture_output=True, text=True, timeout=10)
            return result.returncode == 0
metrics = self.measure_performance(run_go_binary)
# Store results
self.results['go_binary'] = {
'startup': metrics,
'binary_size': os.path.getsize(go_binary) if os.path.exists(go_binary) else 0
}
# Assertions for performance
self.assertLess(metrics['execution_time'], 2.0, "Go binary startup should be fast")
self.assertLess(metrics['memory_used'], 50 * 1024 * 1024, "Go binary should use reasonable memory") # 50 MB
logger.info(f"Go Binary - Startup: {metrics['execution_time']:.3f}s, "
f"Memory: {metrics['memory_used'] // 1024} KB")
def test_performance_summary(self):
"""Generate performance summary and save results."""
logger.info("Generating performance summary...")
# Calculate summary statistics
total_execution_time = 0
total_memory_used = 0
stage_count = 0
        for stage_name, stage_data in self.results.items():
            if stage_name in ('system_info', 'summary'):
                continue
            if 'execution' in stage_data:
                total_execution_time += stage_data['execution']['execution_time']
                total_memory_used += stage_data['execution']['memory_used']
                stage_count += 1
        # Performance summary
        self.results['summary'] = {
            'total_execution_time': total_execution_time,
            'total_memory_used': total_memory_used,
            'average_execution_time': total_execution_time / stage_count if stage_count > 0 else 0,
            # default=0 keeps this from raising if no stage results were recorded
            'peak_memory_usage': max(
                (stage_data.get('execution', {}).get('peak_memory', 0)
                 for stage_name, stage_data in self.results.items()
                 if stage_name not in ('system_info', 'summary')),
                default=0
            ),
            'stage_count': stage_count
        }
        # Save results to file (note: self.temp_dir is removed in tearDown,
        # so copy the reports elsewhere if they need to outlive the test run)
        report_file = os.path.join(self.temp_dir, 'performance_results.json')
        with open(report_file, 'w') as f:
            json.dump(self.results, f, indent=2)
# Generate human-readable report
self.generate_human_readable_report()
# Final assertions
summary = self.results['summary']
self.assertLess(summary['total_execution_time'], 15.0, "Total execution time should be reasonable")
self.assertLess(summary['peak_memory_usage'], 300 * 1024 * 1024, "Peak memory usage should be reasonable") # 300 MB
logger.info(f"Performance summary - Total: {summary['total_execution_time']:.3f}s, "
f"Memory: {summary['total_memory_used'] // 1024} KB, "
f"Peak: {summary['peak_memory_usage'] // 1024} KB")
logger.info(f"Performance results saved to: {report_file}")
def generate_human_readable_report(self):
"""Generate human-readable performance report."""
report_file = os.path.join(self.temp_dir, 'performance_report.txt')
with open(report_file, 'w') as f:
f.write("=" * 80 + "\n")
f.write("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE TEST RESULTS\n")
f.write("=" * 80 + "\n")
f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
# System information
f.write("SYSTEM INFORMATION\n")
f.write("-" * 40 + "\n")
sys_info = self.results['system_info']
f.write(f"CPU Count: {sys_info['cpu_count']}\n")
f.write(f"Total Memory: {sys_info['memory_total'] // (1024**3)} GB\n")
f.write(f"Free Disk Space: {sys_info['disk_free'] // (1024**3)} GB\n")
f.write(f"Python Version: {sys_info['python_version']}\n\n")
# Stage performance
f.write("STAGE PERFORMANCE\n")
f.write("-" * 40 + "\n")
for stage_name, stage_data in self.results.items():
if stage_name in ['system_info', 'summary']:
continue
f.write(f"\n{stage_name.upper().replace('_', ' ')}:\n")
if 'initialization' in stage_data:
init = stage_data['initialization']
f.write(f" Initialization: {init['execution_time']:.3f}s, "
f"{init['memory_used'] // 1024} KB\n")
if 'execution' in stage_data:
exec_data = stage_data['execution']
f.write(f" Execution: {exec_data['execution_time']:.3f}s, "
f"{exec_data['memory_used'] // 1024} KB\n")
# Summary
f.write("\n" + "=" * 80 + "\n")
f.write("PERFORMANCE SUMMARY\n")
f.write("=" * 80 + "\n")
summary = self.results['summary']
f.write(f"Total Execution Time: {summary['total_execution_time']:.3f}s\n")
f.write(f"Total Memory Used: {summary['total_memory_used'] // 1024} KB\n")
f.write(f"Average Execution Time: {summary['average_execution_time']:.3f}s\n")
f.write(f"Peak Memory Usage: {summary['peak_memory_usage'] // 1024} KB\n")
f.write(f"Stages Tested: {summary['stage_count']}\n")
f.write("\n✅ All performance tests passed!\n")
logger.info(f"Human-readable report saved to: {report_file}")
def main():
"""Run performance tests."""
print("=" * 80)
print("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE TESTS")
print("Phase 4.2: Performance and Optimization (Weeks 23-24)")
print("=" * 80)
# Run tests
unittest.main(verbosity=2, exit=False)
if __name__ == "__main__":
main()