debian-forge/test/debian/test-build-lifecycle.py

#!/usr/bin/python3
"""
Test script for complete build lifecycle in Debian Forge
This script tests the entire build process from submission to completion,
including failure handling, retry mechanisms, and cleanup.
"""
import sys
import time
import tempfile
import os
import json

from build_orchestrator import BuildOrchestrator, BuildStatus
from artifact_manager import ArtifactManager


def test_build_submission_to_completion():
    """Test complete build lifecycle from submission to completion"""
    print("Testing build submission to completion...")

    # Create orchestrator and artifact manager
    orchestrator = BuildOrchestrator()
    artifact_manager = ArtifactManager()

    # Create a simple test manifest
    test_manifest = create_test_manifest()
    manifest_path = "test-lifecycle-manifest.json"
    with open(manifest_path, 'w') as f:
        json.dump(test_manifest, f, indent=2)

    try:
        # Submit build
        build_id = orchestrator.submit_build(
            manifest_path,
            priority=5,
            resource_requirements={"cpu_percent": 10, "memory_gb": 1, "storage_gb": 1}
        )
        print(f"Submitted build {build_id}")

        # Start orchestrator
        orchestrator.start()

        # Monitor build progress until completion, failure, or timeout
        start_time = time.time()
        max_wait_time = 30  # 30 seconds max
        while time.time() - start_time < max_wait_time:
            status = orchestrator.get_build_status(build_id)
            if status:
                print(f"Build {build_id}: {status.status.value} (Progress: {status.progress:.1%})")

                # Check for completion
                if status.status in [BuildStatus.COMPLETED, BuildStatus.FAILED]:
                    if status.status == BuildStatus.COMPLETED:
                        print(f"✅ Build {build_id} completed successfully")

                        # Verify artifacts were created
                        artifacts = artifact_manager.get_build_artifacts(build_id)
                        if artifacts:
                            print(f"✅ Build artifacts created: {len(artifacts)} artifacts")
                            return True
                        print("❌ No build artifacts found")
                        return False
                    print(f"❌ Build {build_id} failed: {status.error_message}")
                    return False
            time.sleep(2)

        print("❌ Build timed out")
        return False
    finally:
        orchestrator.stop()
        # Cleanup
        if os.path.exists(manifest_path):
            os.remove(manifest_path)


def test_build_failure_handling():
    """Test build failure handling and error reporting"""
    print("Testing build failure handling...")

    orchestrator = BuildOrchestrator()

    # Submit build with invalid manifest (should fail)
    invalid_manifest_path = "nonexistent-manifest.json"
    build_id = orchestrator.submit_build(
        invalid_manifest_path,
        priority=5,
        resource_requirements={"cpu_percent": 10, "memory_gb": 1, "storage_gb": 1}
    )
    print(f"Submitted build {build_id} with invalid manifest")

    # Start orchestrator
    orchestrator.start()
    try:
        # Monitor for the expected failure
        start_time = time.time()
        max_wait_time = 20
        while time.time() - start_time < max_wait_time:
            status = orchestrator.get_build_status(build_id)
            if status:
                print(f"Build {build_id}: {status.status.value}")
                if status.status == BuildStatus.FAILED:
                    print(f"✅ Build failed as expected: {status.error_message}")
                    return True
                elif status.status == BuildStatus.COMPLETED:
                    print("❌ Build should have failed but completed")
                    return False
            time.sleep(1)

        print("❌ Build did not fail within expected time")
        return False
    finally:
        orchestrator.stop()


def test_build_retry_mechanisms():
    """Test retry-related behavior indirectly, via queue handling of builds
    submitted at mixed priorities (see the retry sketch after this function)"""
    print("Testing build retry mechanisms...")

    orchestrator = BuildOrchestrator()

    # Submit multiple builds to test queue behavior
    build_ids = []
    for i in range(3):
        build_id = orchestrator.submit_build(
            "test-manifest.json",
            priority=5 - i,  # Different priorities
            resource_requirements={"cpu_percent": 10, "memory_gb": 1, "storage_gb": 1}
        )
        build_ids.append(build_id)
        print(f"Submitted build {build_id} with priority {5 - i}")

    # Start orchestrator
    orchestrator.start()
    try:
        # Monitor builds
        start_time = time.time()
        max_wait_time = 15
        while time.time() - start_time < max_wait_time:
            # Check status of all builds
            all_completed = True
            for build_id in build_ids:
                status = orchestrator.get_build_status(build_id)
                if status and status.status not in [BuildStatus.COMPLETED, BuildStatus.FAILED]:
                    all_completed = False
                    print(f"Build {build_id}: {status.status.value}")
            if all_completed:
                print("✅ All builds completed")
                return True
            time.sleep(1)

        print("❌ Not all builds completed within expected time")
        return False
    finally:
        orchestrator.stop()
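

# A direct retry test would verify that a failed build is re-queued and run
# again. The sketch below is illustrative only: it assumes a hypothetical
# `max_retries` keyword on submit_build() and a hypothetical `retry_count`
# field on the status object, neither of which is confirmed by this codebase.
def _sketch_retry_on_transient_failure(orchestrator):
    build_id = orchestrator.submit_build(
        "test-manifest.json",
        priority=5,
        resource_requirements={"cpu_percent": 10, "memory_gb": 1, "storage_gb": 1},
        max_retries=2,  # hypothetical parameter
    )
    deadline = time.time() + 30
    while time.time() < deadline:
        status = orchestrator.get_build_status(build_id)
        if status and status.status in [BuildStatus.COMPLETED, BuildStatus.FAILED]:
            # hypothetical field: number of times the build was re-queued
            return getattr(status, "retry_count", 0) > 0
        time.sleep(1)
    return False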


def test_build_cleanup():
    """Test build cleanup operations"""
    print("Testing build cleanup...")

    orchestrator = BuildOrchestrator()
    artifact_manager = ArtifactManager()

    # Submit a build
    build_id = orchestrator.submit_build(
        "test-debian-manifest.json",
        priority=5,
        resource_requirements={"cpu_percent": 10, "memory_gb": 1, "storage_gb": 1}
    )
    print(f"Submitted build {build_id} for cleanup test")

    # Start orchestrator briefly
    orchestrator.start()
    time.sleep(5)  # Let it run for a bit
    orchestrator.stop()

    # Test build cancellation
    if not orchestrator.cancel_build(build_id):
        print("❌ Failed to cancel build")
        return False
    print(f"✅ Build {build_id} cancelled successfully")

    # Verify build status
    status = orchestrator.get_build_status(build_id)
    if not status or status.status != BuildStatus.CANCELLED:
        print("❌ Build status not correctly updated after cancellation")
        return False
    print("✅ Build status correctly shows cancelled")

    # Test artifact cleanup
    artifacts = artifact_manager.get_build_artifacts(build_id)
    if not artifacts:
        print("✅ No artifacts to clean up")
        return True

    # Remove artifacts
    for artifact in artifacts:
        if artifact_manager.remove_artifact(artifact.id):
            print(f"✅ Removed artifact {artifact.id}")

    # Verify cleanup
    remaining_artifacts = artifact_manager.get_build_artifacts(build_id)
    if not remaining_artifacts:
        print("✅ All artifacts cleaned up successfully")
        return True
    print(f"❌ {len(remaining_artifacts)} artifacts still remain")
    return False


def test_environment_isolation():
    """Test build environment isolation"""
    print("Testing build environment isolation...")

    # Create temporary build directories
    with tempfile.TemporaryDirectory() as temp_dir:
        build_dir = os.path.join(temp_dir, "build-001")
        os.makedirs(build_dir, exist_ok=True)

        # Create isolated environment files
        env_files = [
            "etc/environment",
            "etc/hostname",
            "var/lib/osbuild"
        ]
        for env_file in env_files:
            full_path = os.path.join(build_dir, env_file)
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
            with open(full_path, 'w') as f:
                f.write(f"# Isolated environment file: {env_file}\n")

        # Verify isolation: every file should live under the build directory
        isolated_files = []
        for root, dirs, files in os.walk(build_dir):
            for file in files:
                isolated_files.append(os.path.join(root, file))

        if len(isolated_files) == len(env_files):
            print(f"✅ Environment isolation working: {len(isolated_files)} files created")
            return True
        print(f"❌ Environment isolation failed: expected {len(env_files)}, got {len(isolated_files)}")
        return False


def test_resource_cleanup():
    """Test resource cleanup after builds"""
    print("Testing resource cleanup...")

    orchestrator = BuildOrchestrator()

    # Submit builds to consume resources
    build_ids = []
    for _ in range(2):
        build_id = orchestrator.submit_build(
            "test-manifest.json",
            priority=5,
            resource_requirements={"cpu_percent": 20, "memory_gb": 2, "storage_gb": 2}
        )
        build_ids.append(build_id)

    # Start orchestrator
    orchestrator.start()
    try:
        # Let builds run briefly
        time.sleep(5)

        # Check resource allocation
        initial_status = orchestrator.get_resource_status()
        print(f"Initial allocated resources: {initial_status['allocated_resources']}")

        # Stop orchestrator (should trigger cleanup)
        orchestrator.stop()

        # Check resource cleanup
        final_status = orchestrator.get_resource_status()
        print(f"Final allocated resources: {final_status['allocated_resources']}")
        if final_status['allocated_resources'] == 0:
            print("✅ Resources cleaned up successfully")
            return True
        print("❌ Resources not properly cleaned up")
        return False
    finally:
        # stop() runs again here in case the body raised before stopping; it is
        # expected to be a no-op on an already-stopped orchestrator.
        orchestrator.stop()


def create_test_manifest():
    """Create a simple test manifest for lifecycle testing"""
    return {
        "version": "2",
        "pipelines": [
            {
                "name": "debian-lifecycle-test",
                "runner": "org.osbuild.linux",
                "stages": [
                    {
                        "name": "org.osbuild.mkdir",
                        "options": {
                            "paths": ["/tmp/test-lifecycle"]
                        }
                    },
                    {
                        "name": "org.osbuild.copy",
                        "options": {
                            "paths": [
                                {
                                    "from": "test-debian-manifest.json",
                                    "to": "/tmp/test-lifecycle/manifest.json"
                                }
                            ]
                        }
                    }
                ]
            }
        ]
    }
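

# The copy stage above reads "test-debian-manifest.json" from the working
# directory. A quick pre-submission sanity check like the sketch below can
# catch a missing or malformed manifest early; the helper is illustrative
# only and is not wired into main().
def _manifest_is_readable(path):
    try:
        with open(path) as f:
            json.load(f)
        return True
    except (OSError, json.JSONDecodeError):
        return False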


def main():
    """Run all build lifecycle tests"""
    print("Debian Forge Build Lifecycle Tests")
    print("=" * 50)

    tests = [
        test_build_submission_to_completion,
        test_build_failure_handling,
        test_build_retry_mechanisms,
        test_build_cleanup,
        test_environment_isolation,
        test_resource_cleanup
    ]

    passed = 0
    total = len(tests)
    for test in tests:
        try:
            print(f"\nRunning {test.__name__}...")
            if test():
                passed += 1
            else:
                print(f"{test.__name__} failed")
        except Exception as e:
            print(f"{test.__name__} failed with exception: {e}")
        print()

    print("=" * 50)
    print(f"Test Results: {passed}/{total} passed")
    if passed == total:
        print("🎉 All build lifecycle tests passed!")
        return 0
    print("⚠️ Some tests failed")
    return 1


if __name__ == "__main__":
    sys.exit(main())