cleanup
Some checks failed
particle-os CI / Test particle-os (push) Failing after 1s
particle-os CI / Integration Test (push) Has been skipped
particle-os CI / Security & Quality (push) Failing after 1s
Test particle-os Basic Functionality / test-basic (push) Failing after 1s
particle-os CI / Build and Release (push) Has been skipped
This commit is contained in:
parent d782a8a4fb
commit 126ee1a849
76 changed files with 1683 additions and 470 deletions
@@ -1,528 +0,0 @@
#!/usr/bin/env python3
"""
Performance Benchmarking Script for Debian bootc-image-builder
Phase 4.2: Performance and Optimization (Weeks 23-24)
"""

import os
import sys
import time
import psutil
import subprocess
import tempfile
import shutil
import json
import logging
from pathlib import Path
from datetime import datetime

# Add the project root to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

# Add the osbuild-stages directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'osbuild-stages'))

# Import using the correct module paths
import apt_stage.apt_stage as apt_module
import debian_filesystem_stage.debian_filesystem_stage as fs_module
import debian_kernel_stage.debian_kernel_stage as kernel_module
import debian_grub_stage.debian_grub_stage as grub_module

AptStage = apt_module.AptStage
DebianFilesystemStage = fs_module.DebianFilesystemStage
DebianKernelStage = kernel_module.DebianKernelStage
DebianGrubStage = grub_module.DebianGrubStage

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class PerformanceBenchmark:
    """Comprehensive performance benchmarking for Debian bootc-image-builder."""

    def __init__(self):
        self.results = {}
        self.benchmark_dir = None
        self.start_time = None

    def setup_benchmark_environment(self):
        """Set up the benchmark environment."""
        logger.info("Setting up benchmark environment...")

        # Create temporary directory for benchmarking
        self.benchmark_dir = tempfile.mkdtemp(prefix="debian_benchmark_")
        logger.info(f"Benchmark directory: {self.benchmark_dir}")

        # Record system information
        self.results['system_info'] = {
            'cpu_count': psutil.cpu_count(),
            'memory_total': psutil.virtual_memory().total,
            'disk_free': psutil.disk_usage('/').free,
            'python_version': sys.version,
            'timestamp': datetime.now().isoformat()
        }

        logger.info(f"System: {self.results['system_info']['cpu_count']} CPUs, "
                    f"{self.results['system_info']['memory_total'] // (1024**3)} GB RAM")

    def measure_memory_usage(self, func, *args, **kwargs):
        """Measure memory usage of a function."""
        process = psutil.Process()
        initial_memory = process.memory_info().rss

        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()

        final_memory = process.memory_info().rss
        memory_used = final_memory - initial_memory

        return {
            'result': result,
            'execution_time': end_time - start_time,
            'memory_used': memory_used,
            'peak_memory': max(initial_memory, final_memory)
        }
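
    # Usage sketch for measure_memory_usage (illustrative only; the pipeline does
    # not call it this way). Note that 'memory_used' is the RSS delta of this
    # Python process, a rough proxy rather than a true peak:
    #
    #   bench = PerformanceBenchmark()
    #   metrics = bench.measure_memory_usage(sorted, range(1_000_000))
    #   print(f"{metrics['execution_time']:.3f}s, {metrics['memory_used'] // 1024} KB")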

    def benchmark_apt_stage(self):
        """Benchmark APT stage performance."""
        logger.info("Benchmarking APT stage...")

        # Test configuration
        test_options = {
            'packages': [
                'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
                'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
            ],
            'release': 'trixie',
            'arch': 'amd64',
            'repos': [
                {
                    'name': 'debian',
                    'url': 'http://deb.debian.org/debian',
                    'suite': 'trixie',
                    'components': ['main', 'contrib']
                }
            ]
        }

        # Create mock context
        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir
                self.run_calls = []

            def run(self, cmd, *args, **kwargs):
                self.run_calls.append(cmd)
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.benchmark_dir)

        # Benchmark APT stage initialization
        def init_apt_stage():
            return AptStage(test_options)

        init_metrics = self.measure_memory_usage(init_apt_stage)

        # Benchmark APT stage execution
        apt_stage = AptStage(test_options)

        def run_apt_stage():
            return apt_stage.run(context)

        execution_metrics = self.measure_memory_usage(run_apt_stage)

        self.results['apt_stage'] = {
            'initialization': init_metrics,
            'execution': execution_metrics,
            'total_packages': len(test_options['packages']),
            'repositories': len(test_options['repos'])
        }

        logger.info(f"APT Stage - Init: {init_metrics['execution_time']:.3f}s, "
                    f"Exec: {execution_metrics['execution_time']:.3f}s, "
                    f"Memory: {execution_metrics['memory_used'] // 1024} KB")

    def benchmark_filesystem_stage(self):
        """Benchmark filesystem stage performance."""
        logger.info("Benchmarking filesystem stage...")

        test_options = {
            'rootfs_type': 'ext4',
            'ostree_integration': True,
            'home_symlink': True
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

        context = MockContext(self.benchmark_dir)

        # Benchmark filesystem stage
        def run_filesystem_stage():
            stage = DebianFilesystemStage(test_options)
            return stage.run(context)

        metrics = self.measure_memory_usage(run_filesystem_stage)

        self.results['filesystem_stage'] = {
            'execution': metrics,
            'options': test_options
        }

        logger.info(f"Filesystem Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def benchmark_kernel_stage(self):
        """Benchmark kernel stage performance."""
        logger.info("Benchmarking kernel stage...")

        test_options = {
            'kernel_package': 'linux-image-amd64',
            'initramfs_tools': True,
            'ostree_integration': True,
            'modules_autoload': True
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

            def run(self, cmd, *args, **kwargs):
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.benchmark_dir)

        # Benchmark kernel stage
        def run_kernel_stage():
            stage = DebianKernelStage(test_options)
            return stage.run(context)

        metrics = self.measure_memory_usage(run_kernel_stage)

        self.results['kernel_stage'] = {
            'execution': metrics,
            'options': test_options
        }

        logger.info(f"Kernel Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def benchmark_grub_stage(self):
        """Benchmark GRUB stage performance."""
        logger.info("Benchmarking GRUB stage...")

        test_options = {
            'ostree_integration': True,
            'uefi': True,
            'secure_boot': False
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir

            def run(self, cmd, *args, **kwargs):
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.benchmark_dir)

        # Benchmark GRUB stage
        def run_grub_stage():
            stage = DebianGrubStage(test_options)
            return stage.run(context)

        metrics = self.measure_memory_usage(run_grub_stage)

        self.results['grub_stage'] = {
            'execution': metrics,
            'options': test_options
        }

        logger.info(f"GRUB Stage - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")

    def benchmark_full_pipeline(self):
        """Benchmark the complete pipeline."""
        logger.info("Benchmarking full pipeline...")

        # Test configuration for full pipeline
        test_options = {
            'packages': [
                'linux-image-amd64', 'systemd', 'initramfs-tools', 'grub-efi-amd64',
                'util-linux', 'parted', 'e2fsprogs', 'dosfstools', 'ostree'
            ],
            'release': 'trixie',
            'arch': 'amd64',
            'repos': [
                {
                    'name': 'debian',
                    'url': 'http://deb.debian.org/debian',
                    'suite': 'trixie',
                    'components': ['main', 'contrib']
                }
            ]
        }

        class MockContext:
            def __init__(self, root_dir):
                self.root = root_dir
                self.run_calls = []

            def run(self, cmd, *args, **kwargs):
                self.run_calls.append(cmd)
                # Simulate successful command execution
                return type('MockResult', (), {'returncode': 0, 'stdout': '', 'stderr': ''})()

        context = MockContext(self.benchmark_dir)

        # Benchmark complete pipeline
        def run_full_pipeline():
            # Filesystem stage
            fs_stage = DebianFilesystemStage({
                'rootfs_type': 'ext4',
                'ostree_integration': True,
                'home_symlink': True
            })
            fs_stage.run(context)

            # APT stage
            apt_stage = AptStage(test_options)
            apt_stage.run(context)

            # Kernel stage
            kernel_stage = DebianKernelStage({
                'kernel_package': 'linux-image-amd64',
                'initramfs_tools': True,
                'ostree_integration': True,
                'modules_autoload': True
            })
            kernel_stage.run(context)

            # GRUB stage
            grub_stage = DebianGrubStage({
                'ostree_integration': True,
                'uefi': True,
                'secure_boot': False
            })
            grub_stage.run(context)

            return len(context.run_calls)

        metrics = self.measure_memory_usage(run_full_pipeline)

        self.results['full_pipeline'] = {
            'execution': metrics,
            'total_commands': metrics['result'],
            'stages_executed': 4
        }

        logger.info(f"Full Pipeline - Exec: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB, "
                    f"Commands: {metrics['result']}")

    def benchmark_go_binary(self):
        """Benchmark Go binary performance."""
        logger.info("Benchmarking Go binary...")

        go_binary = "bib/bootc-image-builder"
        if not os.path.exists(go_binary):
            logger.warning(f"Go binary not found: {go_binary}")
            return

        # Benchmark binary startup time
        def run_go_binary():
            result = subprocess.run([go_binary, "--version"],
                                    capture_output=True, text=True, timeout=10)
            return result.returncode == 0

        metrics = self.measure_memory_usage(run_go_binary)

        self.results['go_binary'] = {
            'startup': metrics,
            'binary_size': os.path.getsize(go_binary) if os.path.exists(go_binary) else 0
        }

        logger.info(f"Go Binary - Startup: {metrics['execution_time']:.3f}s, "
                    f"Memory: {metrics['memory_used'] // 1024} KB")
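
    # Note: measure_memory_usage tracks the RSS of this Python process only, so
    # memory allocated by the Go child process spawned via subprocess.run is not
    # reflected in the figures above; startup latency is the meaningful number.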

    def generate_performance_report(self):
        """Generate comprehensive performance report."""
        logger.info("Generating performance report...")

        # Calculate summary statistics
        total_execution_time = 0
        total_memory_used = 0

        for stage_name, stage_data in self.results.items():
            if stage_name == 'system_info':
                continue

            if 'execution' in stage_data:
                total_execution_time += stage_data['execution']['execution_time']
                total_memory_used += stage_data['execution']['memory_used']

        # Performance summary, averaged over stages that actually report
        # execution metrics (the Go binary benchmark only records startup)
        executed_stages = [
            stage_data for stage_name, stage_data in self.results.items()
            if stage_name != 'system_info' and 'execution' in stage_data
        ]

        self.results['summary'] = {
            'total_execution_time': total_execution_time,
            'total_memory_used': total_memory_used,
            'average_execution_time': total_execution_time / len(executed_stages) if executed_stages else 0,
            'peak_memory_usage': max(
                (stage_data['execution'].get('peak_memory', 0) for stage_data in executed_stages),
                default=0
            )
        }

        # Save results outside the temporary benchmark directory so the report
        # survives cleanup(), which removes that directory at the end of the run
        report_file = os.path.join(os.getcwd(), 'performance_report.json')
        with open(report_file, 'w') as f:
            json.dump(self.results, f, indent=2)

        # Generate human-readable report
        self.generate_human_readable_report()

        logger.info(f"Performance report saved to: {report_file}")
        return report_file

    def generate_human_readable_report(self):
        """Generate human-readable performance report."""
        # Written to the current directory so it survives cleanup()
        report_file = os.path.join(os.getcwd(), 'performance_report.txt')

        with open(report_file, 'w') as f:
            f.write("=" * 80 + "\n")
            f.write("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE REPORT\n")
            f.write("=" * 80 + "\n")
            f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

            # System information
            f.write("SYSTEM INFORMATION\n")
            f.write("-" * 40 + "\n")
            sys_info = self.results['system_info']
            f.write(f"CPU Count: {sys_info['cpu_count']}\n")
            f.write(f"Total Memory: {sys_info['memory_total'] // (1024**3)} GB\n")
            f.write(f"Free Disk Space: {sys_info['disk_free'] // (1024**3)} GB\n")
            f.write(f"Python Version: {sys_info['python_version']}\n\n")

            # Stage performance
            f.write("STAGE PERFORMANCE\n")
            f.write("-" * 40 + "\n")

            for stage_name, stage_data in self.results.items():
                if stage_name in ['system_info', 'summary']:
                    continue

                f.write(f"\n{stage_name.upper().replace('_', ' ')}:\n")

                if 'initialization' in stage_data:
                    init = stage_data['initialization']
                    f.write(f"  Initialization: {init['execution_time']:.3f}s, "
                            f"{init['memory_used'] // 1024} KB\n")

                if 'execution' in stage_data:
                    exec_data = stage_data['execution']
                    f.write(f"  Execution: {exec_data['execution_time']:.3f}s, "
                            f"{exec_data['memory_used'] // 1024} KB\n")

            # Summary
            f.write("\n" + "=" * 80 + "\n")
            f.write("PERFORMANCE SUMMARY\n")
            f.write("=" * 80 + "\n")
            summary = self.results['summary']
            f.write(f"Total Execution Time: {summary['total_execution_time']:.3f}s\n")
            f.write(f"Total Memory Used: {summary['total_memory_used'] // 1024} KB\n")
            f.write(f"Average Execution Time: {summary['average_execution_time']:.3f}s\n")
            f.write(f"Peak Memory Usage: {summary['peak_memory_usage'] // 1024} KB\n")

            # Performance recommendations
            f.write("\n" + "=" * 80 + "\n")
            f.write("PERFORMANCE RECOMMENDATIONS\n")
            f.write("=" * 80 + "\n")

            if summary['total_execution_time'] > 5.0:
                f.write("⚠️ Total execution time is high. Consider:\n")
                f.write(" - Parallel stage execution\n")
                f.write(" - Caching mechanisms\n")
                f.write(" - Optimizing package installation\n")

            if summary['peak_memory_usage'] > 500 * 1024 * 1024:  # 500 MB (peak_memory is in bytes)
                f.write("⚠️ Peak memory usage is high. Consider:\n")
                f.write(" - Memory-efficient algorithms\n")
                f.write(" - Streaming processing\n")
                f.write(" - Garbage collection optimization\n")

            f.write("\n✅ Performance benchmarks completed successfully!\n")

        logger.info(f"Human-readable report saved to: {report_file}")

    def cleanup(self):
        """Clean up benchmark environment."""
        if self.benchmark_dir and os.path.exists(self.benchmark_dir):
            shutil.rmtree(self.benchmark_dir)
            logger.info("Benchmark environment cleaned up")

    def run_all_benchmarks(self):
        """Run all performance benchmarks."""
        try:
            self.setup_benchmark_environment()

            logger.info("Starting performance benchmarks...")
            self.start_time = time.time()

            # Run individual stage benchmarks
            self.benchmark_apt_stage()
            self.benchmark_filesystem_stage()
            self.benchmark_kernel_stage()
            self.benchmark_grub_stage()

            # Run full pipeline benchmark
            self.benchmark_full_pipeline()

            # Run Go binary benchmark
            self.benchmark_go_binary()

            # Generate reports
            report_file = self.generate_performance_report()

            total_time = time.time() - self.start_time
            logger.info(f"All benchmarks completed in {total_time:.2f} seconds")

            return report_file

        except Exception as e:
            logger.error(f"Benchmark failed: {e}")
            raise
        finally:
            self.cleanup()


def main():
    """Main function to run performance benchmarks."""
    print("=" * 80)
    print("DEBIAN BOOTC-IMAGE-BUILDER PERFORMANCE BENCHMARK")
    print("Phase 4.2: Performance and Optimization (Weeks 23-24)")
    print("=" * 80)

    benchmark = PerformanceBenchmark()

    try:
        report_file = benchmark.run_all_benchmarks()
        print("\n✅ Performance benchmarks completed successfully!")
        print(f"📊 Report saved to: {report_file}")

        # Display quick summary
        summary = benchmark.results.get('summary', {})
        print("\n📈 Quick Summary:")
        print(f" Total Execution Time: {summary.get('total_execution_time', 0):.3f}s")
        print(f" Total Memory Used: {summary.get('total_memory_used', 0) // 1024} KB")
        print(f" Peak Memory Usage: {summary.get('peak_memory_usage', 0) // 1024} KB")

    except Exception as e:
        print(f"\n❌ Benchmark failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
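
# Usage sketch (illustrative; the filename below is an assumption):
#
#   python3 benchmark_performance.py
#
# The osbuild-stages imports are resolved relative to this file, and the Go
# binary benchmark is skipped with a warning unless bib/bootc-image-builder
# exists relative to the working directory. The mock contexts mean no packages
# are actually installed; on success, performance_report.json and
# performance_report.txt are written to the current working directory.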