debian-forge/test-build-orchestration.py
robojerk 48c31fa24f
Some checks are pending
Checks / Spelling (push) Waiting to run
Checks / Python Linters (push) Waiting to run
Checks / Shell Linters (push) Waiting to run
Checks / 📦 Packit config lint (push) Waiting to run
Checks / 🔍 Check for valid snapshot urls (push) Waiting to run
Checks / 🔍 Check JSON files for formatting consistency (push) Waiting to run
Generate / Documentation (push) Waiting to run
Generate / Test Data (push) Waiting to run
Tests / Unittest (push) Waiting to run
Tests / Assembler test (legacy) (push) Waiting to run
Tests / Smoke run: unittest as normal user on default runner (push) Waiting to run
Implement enhanced build orchestration and artifact management
- Add build status tracking with state machine
- Implement build logging and monitoring system
- Add build progress tracking and cancellation support
- Create artifact management system with SQLite database
- Fix stage file extensions for proper Python imports
- Enhance resource allocation with actual resource tracking
- Add comprehensive testing for all components
2025-08-22 18:45:17 -07:00

301 lines
9.1 KiB
Python

#!/usr/bin/python3
"""
Test script for Debian Forge build orchestration system
This script tests the build queue, resource management, and OSBuild integration.
"""
import sys
import time
import tempfile
import os
from build_orchestrator import BuildOrchestrator, ResourceManager
def test_resource_manager():
    """Test ResourceManager resource reporting and allocation checks.

    Returns:
        bool: True when the resource report exposes the expected metrics and
        the allocation checks answer with booleans; False otherwise.
    """
    print("Testing ResourceManager...")
    rm = ResourceManager()

    # Test resource availability. Validate the report shape *before* using
    # the keys so a malformed report fails cleanly instead of raising KeyError.
    available = rm.get_available_resources()
    required_keys = ("cpu_percent", "memory_gb", "storage_gb")
    if not all(key in available for key in required_keys):
        print("❌ Resource report is missing expected keys")
        return False
    print(f"Available resources: CPU {available['cpu_percent']:.1f}%, "
          f"Memory {available['memory_gb']:.1f}GB, "
          f"Storage {available['storage_gb']:.1f}GB")

    # Test resource allocation with a moderate request.
    test_reqs = {
        "cpu_percent": 50,
        "memory_gb": 2,
        "storage_gb": 5
    }
    can_allocate = rm.can_allocate_resources(test_reqs)
    print(f"Can allocate resources for {test_reqs}: {can_allocate}")

    # Test with smaller requirements that should be easier to satisfy.
    small_reqs = {
        "cpu_percent": 10,
        "memory_gb": 1,
        "storage_gb": 1
    }
    can_allocate_small = rm.can_allocate_resources(small_reqs)
    print(f"Can allocate resources for {small_reqs}: {can_allocate_small}")

    # Previously this test returned True unconditionally and could never
    # fail; at minimum the allocation API must answer with booleans.
    # (Whether allocation succeeds depends on the host's live resources, so
    # we cannot assert a specific True/False outcome here.)
    if not isinstance(can_allocate, bool) or not isinstance(can_allocate_small, bool):
        print("❌ can_allocate_resources did not return a boolean")
        return False
    return True
def test_build_queue():
    """Exercise BuildQueue submission, listing, and priority ordering.

    Returns:
        bool: True when the pending queue is ordered highest-priority-first,
        False otherwise.
    """
    print("Testing build queue...")
    from build_orchestrator import BuildQueue

    queue = BuildQueue()

    # Submit three builds with deliberately out-of-order priorities.
    submitted = [
        queue.submit_build("manifest1.json", priority=5),
        queue.submit_build("manifest2.json", priority=3),
        queue.submit_build("manifest3.json", priority=7),
    ]
    print(f"Submitted builds: {submitted[0]}, {submitted[1]}, {submitted[2]}")

    # Report the queue's view of each build state.
    builds = queue.list_builds()
    for state in ("pending", "running", "completed"):
        print(f"{state.capitalize()} builds: {len(builds[state])}")

    # The pending queue must come out highest-priority-first (descending).
    priorities = [entry.priority for entry in builds["pending"]]
    print(f"Build priorities in queue: {priorities}")
    if priorities != sorted(priorities, reverse=True):
        print("❌ Priority ordering is incorrect")
        return False
    print("✅ Priority ordering is correct")
    return True
def test_build_orchestrator():
    """Smoke-test BuildOrchestrator submission and status reporting.

    Returns:
        bool: always True; failures surface as exceptions from the
        orchestrator calls themselves.
    """
    print("Testing build orchestrator...")
    orchestrator = BuildOrchestrator()

    # Submit two builds with distinct priorities and resource needs.
    first = orchestrator.submit_build(
        "test-manifest.json",
        priority=5,
        resource_requirements={"cpu_percent": 30, "memory_gb": 1, "storage_gb": 2},
    )
    second = orchestrator.submit_build(
        "test-manifest.json",
        priority=3,
        resource_requirements={"cpu_percent": 60, "memory_gb": 3, "storage_gb": 5},
    )
    print(f"Submitted builds: {first}, {second}")

    # Report current resource availability as seen by the orchestrator.
    print(f"Resource status: {orchestrator.get_resource_status()}")

    # Report the orchestrator's view of each build state.
    builds = orchestrator.list_builds()
    for state in ("pending", "running", "completed"):
        print(f"{state.capitalize()} builds: {len(builds[state])}")
    return True
def test_concurrent_builds():
    """Test concurrent build handling under the running orchestrator.

    Submits five builds with staggered priorities and resource needs, runs
    the orchestrator for ~10 seconds while printing resource status, then
    stops it.

    Returns:
        bool: always True; failures surface as exceptions.
    """
    print("Testing concurrent build handling...")
    orchestrator = BuildOrchestrator()

    # Submit multiple builds with different resource requirements.
    builds = []
    for i in range(5):
        build_id = orchestrator.submit_build(
            f"test-manifest-{i}.json",
            priority=5 - i,  # Higher priority for lower i
            resource_requirements={
                "cpu_percent": 20 + (i * 10),
                "memory_gb": 1 + i,
                "storage_gb": 2 + i
            }
        )
        builds.append(build_id)
        print(f"Submitted build {build_id}")

    # Start orchestrator and monitor for a short time.
    orchestrator.start()
    try:
        for _ in range(10):
            resource_status = orchestrator.get_resource_status()
            print(f"Resources: CPU {resource_status['available_resources']['cpu_percent']:.1f}% free, "
                  f"Memory {resource_status['available_resources']['memory_gb']:.1f}GB free, "
                  f"Queue: {resource_status['queue_length']} pending")
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always stop the orchestrator. Previously an unexpected exception
        # during monitoring skipped stop(), leaving the orchestrator running.
        orchestrator.stop()
    return True
def test_manifest_validation():
    """Validate the structure of a representative OSBuild manifest.

    Checks, in order: top-level keys, first-pipeline keys, stage count, and
    stage names. Guard clauses replace the original deeply nested if/else
    pyramid; the printed output and return values are unchanged.

    Returns:
        bool: True when every structural check passes, False otherwise.
    """
    print("Testing manifest validation...")

    # A minimal two-stage manifest in the shape the orchestrator consumes.
    test_manifest = {
        "version": "2",
        "pipelines": [
            {
                "name": "debian-base",
                "runner": "org.osbuild.linux",
                "stages": [
                    {
                        "name": "org.osbuild.debootstrap",
                        "options": {
                            "suite": "bookworm",
                            "target": "debian-base",
                            "apt_proxy": "192.168.1.101:3142"
                        }
                    },
                    {
                        "name": "org.osbuild.apt",
                        "options": {
                            "packages": ["curl", "wget"],
                            "apt_proxy": "192.168.1.101:3142"
                        }
                    }
                ]
            }
        ]
    }

    # Top-level structure.
    if "version" not in test_manifest or "pipelines" not in test_manifest:
        print("❌ Basic manifest structure is invalid")
        return False
    print("✅ Basic manifest structure is valid")

    # First pipeline must carry a name, a runner, and its stages.
    pipeline = test_manifest["pipelines"][0]
    if not all(key in pipeline for key in ("name", "runner", "stages")):
        print("❌ Pipeline structure is invalid")
        return False
    print("✅ Pipeline structure is valid")

    # Exactly the two stages defined above.
    stages = pipeline["stages"]
    if len(stages) != 2:
        print(f"❌ Expected 2 stages, got {len(stages)}")
        return False
    print("✅ Stage count is correct")

    # Stage names in submission order.
    stage_names = [s["name"] for s in stages]
    expected_names = ["org.osbuild.debootstrap", "org.osbuild.apt"]
    if stage_names != expected_names:
        print(f"❌ Stage names mismatch: expected {expected_names}, got {stage_names}")
        return False
    print("✅ Stage names are correct")
    return True
def test_apt_cacher_ng_integration():
    """Check that test manifests reference the apt-cacher-ng proxy.

    Scans the known manifest files in the current directory for the expected
    proxy address. Missing files are reported with a warning but do not fail
    the test (they are optional fixtures); present files without the proxy
    address, or unreadable files, do.

    Returns:
        bool: True when every manifest that exists contains the proxy
        address, False otherwise.
    """
    print("Testing apt-cacher-ng integration...")

    # The proxy address every manifest is expected to reference.
    expected_proxy = "192.168.1.101:3142"
    test_manifests = [
        "test-debian-manifest.json",
        "test-debian-atomic-manifest.json"
    ]

    all_have_proxy = True
    for manifest_file in test_manifests:
        if not os.path.exists(manifest_file):
            print(f"⚠️ {manifest_file} not found")
            continue
        try:
            with open(manifest_file, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            print(f"❌ Error reading {manifest_file}: {e}")
            all_have_proxy = False
            continue
        # ✅/❌ markers added for consistency with the other tests' output.
        if expected_proxy in content:
            print(f"✅ {manifest_file} has apt-cacher-ng configuration")
        else:
            print(f"❌ {manifest_file} missing apt-cacher-ng configuration")
            all_have_proxy = False
    return all_have_proxy
def main():
    """Run every orchestration test and report a pass/fail summary.

    Returns:
        int: 0 when all tests pass, 1 otherwise (suitable for sys.exit).
    """
    print("Debian Forge Build Orchestration Tests")
    print("=" * 50)

    # The full suite, run in dependency-free order.
    all_tests = (
        test_resource_manager,
        test_build_queue,
        test_build_orchestrator,
        test_concurrent_builds,
        test_manifest_validation,
        test_apt_cacher_ng_integration,
    )

    passed = 0
    for test_fn in all_tests:
        # A test failing by exception is reported distinctly from one that
        # returns False; either way the suite keeps going.
        try:
            print(f"\nRunning {test_fn.__name__}...")
            if test_fn():
                print(f"{test_fn.__name__} passed")
                passed += 1
            else:
                print(f"{test_fn.__name__} failed")
        except Exception as e:
            print(f"{test_fn.__name__} failed with exception: {e}")
        print()

    total = len(all_tests)
    print("=" * 50)
    print(f"Test Results: {passed}/{total} passed")
    if passed != total:
        print("⚠️ Some tests failed")
        return 1
    print("🎉 All build orchestration tests passed!")
    return 0
if __name__ == "__main__":
    # Script entry point: propagate the suite's exit code to the shell.
    sys.exit(main())