Implement enhanced build orchestration and artifact management
- Add build status tracking with state machine
- Implement build logging and monitoring system
- Add build progress tracking and cancellation support
- Create artifact management system with SQLite database
- Fix stage file extensions for proper Python imports
- Enhance resource allocation with actual resource tracking
- Add comprehensive testing for all components
parent 8767c20940
commit 48c31fa24f
20 changed files with 1702 additions and 274 deletions
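The new orchestrator and artifact manager are designed to compose: the orchestrator schedules and runs OSBuild pipelines, and the artifact manager checksums the outputs and indexes them in SQLite. A minimal sketch of that wiring (illustrative only, not part of the commit; it assumes build_orchestrator.py and artifact_manager.py are importable from the working directory and that a debian-manifest.json exists there):

import time

from build_orchestrator import BuildOrchestrator, BuildStatus
from artifact_manager import ArtifactManager

orchestrator = BuildOrchestrator()
manager = ArtifactManager()

# Submit a build with explicit resource requirements, then poll until it
# reaches a terminal state.
build_id = orchestrator.submit_build(
    "debian-manifest.json",
    priority=5,
    resource_requirements={"cpu_percent": 30, "memory_gb": 1, "storage_gb": 2},
)
orchestrator.start()

while True:
    status = orchestrator.get_build_status(build_id)
    if status and status.status in (BuildStatus.COMPLETED, BuildStatus.FAILED, BuildStatus.CANCELLED):
        break
    time.sleep(5)
orchestrator.stop()

# On success, register the manifest as a "metadata" artifact; the manager
# copies it into the artifacts tree, computes a SHA256, and records it in SQLite.
if status.status == BuildStatus.COMPLETED:
    artifact_id = manager.register_artifact(
        build_id=build_id,
        source_path="debian-manifest.json",
        artifact_type="metadata",
    )
    artifact = manager.get_artifact(artifact_id)
    if artifact:
        print(f"Registered {artifact.id}: sha256 {artifact.checksum}")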
.gitignore (vendored, 2 lines changed)
@@ -26,4 +26,4 @@ venv
/test/data/certs/lib.sh

debian-forge
debian-forge-docs
artifact_manager.py (new file, 395 lines)
@@ -0,0 +1,395 @@
#!/usr/bin/python3
"""
Debian Forge Artifact Manager

Manages build artifacts, storage, and provides artifact discovery for Debian atomic builds.
"""

import os
import json
import shutil
import hashlib
import sqlite3
from datetime import datetime
from typing import Dict, List, Optional, Any, Tuple
from pathlib import Path
from dataclasses import dataclass, asdict


@dataclass
class Artifact:
    """Represents a build artifact"""
    id: str
    build_id: str
    name: str
    path: str
    size: int
    checksum: str
    artifact_type: str
    created_at: datetime
    metadata: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization"""
        data = asdict(self)
        data['created_at'] = self.created_at.isoformat()
        return data


class ArtifactStorage:
    """Manages artifact storage and organization"""

    def __init__(self, base_dir: str = "artifacts"):
        self.base_dir = Path(base_dir)
        self.base_dir.mkdir(exist_ok=True)

        # Create subdirectories
        (self.base_dir / "debian-packages").mkdir(exist_ok=True)
        (self.base_dir / "ostree-commits").mkdir(exist_ok=True)
        (self.base_dir / "images").mkdir(exist_ok=True)
        (self.base_dir / "logs").mkdir(exist_ok=True)
        (self.base_dir / "metadata").mkdir(exist_ok=True)

    def get_artifact_path(self, artifact_type: str, filename: str) -> Path:
        """Get the full path for an artifact"""
        return self.base_dir / artifact_type / filename

    def store_artifact(self, source_path: str, artifact_type: str, filename: str) -> str:
        """Store an artifact and return the full path"""
        dest_path = self.get_artifact_path(artifact_type, filename)

        # Copy the artifact
        shutil.copy2(source_path, dest_path)

        return str(dest_path)

    def remove_artifact(self, artifact_type: str, filename: str) -> bool:
        """Remove an artifact"""
        artifact_path = self.get_artifact_path(artifact_type, filename)

        if artifact_path.exists():
            artifact_path.unlink()
            return True
        return False

    def get_artifact_info(self, artifact_path: str) -> Tuple[int, str]:
        """Get artifact size and checksum"""
        path = Path(artifact_path)

        if not path.exists():
            raise FileNotFoundError(f"Artifact not found: {artifact_path}")

        # Get file size
        size = path.stat().st_size

        # Calculate SHA256 checksum
        sha256_hash = hashlib.sha256()
        with open(artifact_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                sha256_hash.update(chunk)

        checksum = sha256_hash.hexdigest()

        return size, checksum


class ArtifactDatabase:
    """SQLite database for artifact metadata"""

    def __init__(self, db_path: str = "artifacts.db"):
        self.db_path = db_path
        self.init_database()

    def init_database(self):
        """Initialize the database schema"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            # Create artifacts table
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS artifacts (
                    id TEXT PRIMARY KEY,
                    build_id TEXT NOT NULL,
                    name TEXT NOT NULL,
                    path TEXT NOT NULL,
                    size INTEGER NOT NULL,
                    checksum TEXT NOT NULL,
                    artifact_type TEXT NOT NULL,
                    created_at TEXT NOT NULL,
                    metadata TEXT
                )
            """)

            # Create builds table for reference
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS builds (
                    build_id TEXT PRIMARY KEY,
                    manifest_path TEXT NOT NULL,
                    status TEXT NOT NULL,
                    created_at TEXT NOT NULL,
                    completed_at TEXT
                )
            """)

            # Create indexes
            cursor.execute("CREATE INDEX IF NOT EXISTS idx_artifacts_build_id ON artifacts(build_id)")
            cursor.execute("CREATE INDEX IF NOT EXISTS idx_artifacts_type ON artifacts(artifact_type)")
            cursor.execute("CREATE INDEX IF NOT EXISTS idx_artifacts_created ON artifacts(created_at)")

            conn.commit()

    def add_artifact(self, artifact: Artifact):
        """Add an artifact to the database"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("""
                INSERT OR REPLACE INTO artifacts
                (id, build_id, name, path, size, checksum, artifact_type, created_at, metadata)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                artifact.id,
                artifact.build_id,
                artifact.name,
                artifact.path,
                artifact.size,
                artifact.checksum,
                artifact.artifact_type,
                artifact.created_at.isoformat(),
                json.dumps(artifact.metadata) if artifact.metadata else None
            ))

            conn.commit()

    def get_artifact(self, artifact_id: str) -> Optional[Artifact]:
        """Get an artifact by ID"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("""
                SELECT id, build_id, name, path, size, checksum, artifact_type, created_at, metadata
                FROM artifacts WHERE id = ?
            """, (artifact_id,))

            row = cursor.fetchone()
            if row:
                return self._row_to_artifact(row)
            return None

    def get_artifacts_by_build(self, build_id: str) -> List[Artifact]:
        """Get all artifacts for a specific build"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("""
                SELECT id, build_id, name, path, size, checksum, artifact_type, created_at, metadata
                FROM artifacts WHERE build_id = ? ORDER BY created_at DESC
            """, (build_id,))

            return [self._row_to_artifact(row) for row in cursor.fetchall()]

    def get_artifacts_by_type(self, artifact_type: str) -> List[Artifact]:
        """Get all artifacts of a specific type"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("""
                SELECT id, build_id, name, path, size, checksum, artifact_type, created_at, metadata
                FROM artifacts WHERE artifact_type = ? ORDER BY created_at DESC
            """, (artifact_type,))

            return [self._row_to_artifact(row) for row in cursor.fetchall()]

    def search_artifacts(self, query: str) -> List[Artifact]:
        """Search artifacts by name or metadata"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("""
                SELECT id, build_id, name, path, size, checksum, artifact_type, created_at, metadata
                FROM artifacts
                WHERE name LIKE ? OR metadata LIKE ?
                ORDER BY created_at DESC
            """, (f"%{query}%", f"%{query}%"))

            return [self._row_to_artifact(row) for row in cursor.fetchall()]

    def remove_artifact(self, artifact_id: str) -> bool:
        """Remove an artifact from the database"""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()

            cursor.execute("DELETE FROM artifacts WHERE id = ?", (artifact_id,))
            conn.commit()

            return cursor.rowcount > 0

    def _row_to_artifact(self, row: Tuple) -> Artifact:
        """Convert database row to Artifact object"""
        metadata = json.loads(row[8]) if row[8] else None

        return Artifact(
            id=row[0],
            build_id=row[1],
            name=row[2],
            path=row[3],
            size=row[4],
            checksum=row[5],
            artifact_type=row[6],
            created_at=datetime.fromisoformat(row[7]),
            metadata=metadata
        )


class ArtifactManager:
    """Main artifact management system"""

    def __init__(self, base_dir: str = "artifacts"):
        self.storage = ArtifactStorage(base_dir)
        self.database = ArtifactDatabase()

    def register_artifact(self, build_id: str, source_path: str, artifact_type: str,
                          name: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> str:
        """Register and store an artifact"""

        # Generate artifact ID
        artifact_id = f"{artifact_type}-{build_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"

        # Use provided name or generate from source path
        if name is None:
            name = os.path.basename(source_path)

        # Store the artifact
        artifact_path = self.storage.store_artifact(source_path, artifact_type, name)

        # Get artifact info
        size, checksum = self.storage.get_artifact_info(artifact_path)

        # Create artifact record
        artifact = Artifact(
            id=artifact_id,
            build_id=build_id,
            name=name,
            path=artifact_path,
            size=size,
            checksum=checksum,
            artifact_type=artifact_type,
            created_at=datetime.now(),
            metadata=metadata
        )

        # Store in database
        self.database.add_artifact(artifact)

        return artifact_id

    def get_artifact(self, artifact_id: str) -> Optional[Artifact]:
        """Get an artifact by ID"""
        return self.database.get_artifact(artifact_id)

    def get_build_artifacts(self, build_id: str) -> List[Artifact]:
        """Get all artifacts for a build"""
        return self.database.get_artifacts_by_build(build_id)

    def get_artifacts_by_type(self, artifact_type: str) -> List[Artifact]:
        """Get all artifacts of a specific type"""
        return self.database.get_artifacts_by_type(artifact_type)

    def search_artifacts(self, query: str) -> List[Artifact]:
        """Search artifacts"""
        return self.database.search_artifacts(query)

    def remove_artifact(self, artifact_id: str) -> bool:
        """Remove an artifact"""
        artifact = self.database.get_artifact(artifact_id)
        if artifact:
            # Remove from storage
            self.storage.remove_artifact(artifact.artifact_type, artifact.name)

            # Remove from database
            return self.database.remove_artifact(artifact_id)
        return False

    def get_storage_stats(self) -> Dict[str, Any]:
        """Get storage statistics"""
        stats = {
            "total_artifacts": 0,
            "total_size": 0,
            "by_type": {},
            "storage_path": str(self.storage.base_dir)
        }

        # Count artifacts by type
        for artifact_type in ["debian-packages", "ostree-commits", "images", "logs", "metadata"]:
            artifacts = self.database.get_artifacts_by_type(artifact_type)
            stats["by_type"][artifact_type] = {
                "count": len(artifacts),
                "size": sum(a.size for a in artifacts)
            }
            stats["total_artifacts"] += len(artifacts)
            stats["total_size"] += sum(a.size for a in artifacts)

        return stats

    def cleanup_old_artifacts(self, days_old: int = 30) -> int:
        """Clean up artifacts older than specified days"""
        cutoff_date = datetime.now().timestamp() - (days_old * 24 * 60 * 60)
        removed_count = 0

        # Get all artifacts (empty query matches everything via LIKE '%%')
        all_artifacts = self.database.search_artifacts("")

        for artifact in all_artifacts:
            if artifact.created_at.timestamp() < cutoff_date:
                if self.remove_artifact(artifact.id):
                    removed_count += 1

        return removed_count


def main():
    """Example usage of the artifact manager"""
    print("Debian Forge Artifact Manager")
    print("=" * 40)

    # Create artifact manager
    manager = ArtifactManager()

    # Example: Register a build artifact
    build_id = "build-000001"
    test_file = "test-debian-manifest.json"

    if os.path.exists(test_file):
        print(f"Registering artifact from {test_file}")
        artifact_id = manager.register_artifact(
            build_id=build_id,
            source_path=test_file,
            artifact_type="metadata",
            name="debian-manifest.json",
            metadata={"description": "Debian atomic manifest", "version": "1.0"}
        )

        print(f"Registered artifact: {artifact_id}")

        # Get artifact info
        artifact = manager.get_artifact(artifact_id)
        if artifact:
            print(f"Artifact: {artifact.name}")
            print(f"Size: {artifact.size} bytes")
            print(f"Checksum: {artifact.checksum}")
            print(f"Type: {artifact.artifact_type}")

        # Get build artifacts
        build_artifacts = manager.get_build_artifacts(build_id)
        print(f"Build {build_id} has {len(build_artifacts)} artifacts")

        # Get storage stats
        stats = manager.get_storage_stats()
        print(f"Storage stats: {stats['total_artifacts']} artifacts, {stats['total_size']} bytes")

    else:
        print(f"Test file {test_file} not found")


if __name__ == "__main__":
    main()
artifacts.db (new binary file)
Binary file not shown.
artifacts/metadata/debian-manifest.json (new executable file, 43 lines)
@@ -0,0 +1,43 @@
{
  "version": "2",
  "pipelines": [
    {
      "name": "debian-base",
      "build": "name:debian-base",
      "stages": [
        {
          "name": "org.osbuild.debootstrap",
          "options": {
            "suite": "bookworm",
            "mirror": "http://deb.debian.org/debian",
            "arch": "amd64",
            "variant": "minbase",
            "apt_proxy": "http://192.168.1.101:3142"
          }
        },
        {
          "name": "org.osbuild.apt",
          "options": {
            "packages": ["systemd", "systemd-sysv", "dbus", "udev"],
            "recommends": false,
            "update": true,
            "apt_proxy": "http://192.168.1.101:3142"
          }
        },
        {
          "name": "org.osbuild.ostree.commit",
          "options": {
            "repository": "debian-atomic",
            "branch": "debian/bookworm",
            "subject": "Debian Bookworm base system",
            "metadata": {
              "version": "12",
              "variant": "minbase",
              "arch": "amd64"
            }
          }
        }
      ]
    }
  ]
}
build-logs/build-000001.log (new file, 3 lines)
@@ -0,0 +1,3 @@
2025-08-22T18:43:44.007228 - Build build-000001: Build submitted - Priority: 5
2025-08-22T18:43:44.008134 - Build build-000001: Build submitted - Priority: 5
2025-08-22T18:43:45.009838 - Build build-000001: Build submitted - Priority: 5
build-logs/build-000002.log (new file, 3 lines)
@@ -0,0 +1,3 @@
2025-08-22T18:43:44.007550 - Build build-000002: Build submitted - Priority: 3
2025-08-22T18:43:44.008287 - Build build-000002: Build submitted - Priority: 3
2025-08-22T18:43:45.010066 - Build build-000002: Build submitted - Priority: 4
build-logs/build-000003.log (new file, 2 lines)
@@ -0,0 +1,2 @@
2025-08-22T18:43:44.007774 - Build build-000003: Build submitted - Priority: 7
2025-08-22T18:43:45.010198 - Build build-000003: Build submitted - Priority: 3
build-logs/build-000004.log (new file, 1 line)
@@ -0,0 +1 @@
2025-08-22T18:43:45.010403 - Build build-000004: Build submitted - Priority: 2
build-logs/build-000005.log (new file, 1 line)
@@ -0,0 +1 @@
2025-08-22T18:43:45.010639 - Build build-000005: Build submitted - Priority: 1
build-orchestrator.py (deleted file, 273 lines; superseded by the importable build_orchestrator.py below)
@@ -1,273 +0,0 @@
#!/usr/bin/python3
"""
Debian Forge Build Orchestrator

Basic build queue management and OSBuild pipeline execution for Debian atomic builds.
"""

import os
import sys
import json
import time
import threading
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from enum import Enum


class BuildStatus(Enum):
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"


@dataclass
class BuildRequest:
    id: str
    manifest_path: str
    priority: int
    status: BuildStatus
    submitted_at: datetime
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    error_message: Optional[str] = None
    output_dir: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None


class BuildQueue:
    """Simple build queue with priority-based scheduling"""

    def __init__(self):
        self.queue: List[BuildRequest] = []
        self.running_builds: Dict[str, BuildRequest] = {}
        self.completed_builds: Dict[str, BuildRequest] = {}
        self.lock = threading.Lock()
        self.next_id = 1

    def submit_build(self, manifest_path: str, priority: int = 5, metadata: Optional[Dict[str, Any]] = None) -> str:
        """Submit a new build request"""
        with self.lock:
            build_id = f"build-{self.next_id:06d}"
            self.next_id += 1

            request = BuildRequest(
                id=build_id,
                manifest_path=manifest_path,
                priority=priority,
                status=BuildStatus.PENDING,
                submitted_at=datetime.now(),
                metadata=metadata or {}
            )

            self.queue.append(request)
            # Sort by priority (higher priority first)
            self.queue.sort(key=lambda x: x.priority, reverse=True)

            print(f"Submitted build {build_id} with priority {priority}")
            return build_id

    def get_next_build(self) -> Optional[BuildRequest]:
        """Get the next build request from the queue"""
        with self.lock:
            if not self.queue:
                return None

            request = self.queue.pop(0)
            request.status = BuildStatus.RUNNING
            request.started_at = datetime.now()
            self.running_builds[request.id] = request

            return request

    def mark_completed(self, build_id: str, output_dir: str, success: bool = True, error_message: Optional[str] = None):
        """Mark a build as completed"""
        with self.lock:
            if build_id not in self.running_builds:
                return

            request = self.running_builds.pop(build_id)
            request.completed_at = datetime.now()
            request.output_dir = output_dir

            if success:
                request.status = BuildStatus.COMPLETED
            else:
                request.status = BuildStatus.FAILED
                request.error_message = error_message

            self.completed_builds[build_id] = request
            print(f"Build {build_id} completed with status: {request.status.value}")

    def get_status(self, build_id: str) -> Optional[BuildRequest]:
        """Get the status of a specific build"""
        with self.lock:
            # Check all queues
            for request in self.queue:
                if request.id == build_id:
                    return request

            if build_id in self.running_builds:
                return self.running_builds[build_id]

            if build_id in self.completed_builds:
                return self.completed_builds[build_id]

            return None

    def list_builds(self) -> Dict[str, List[BuildRequest]]:
        """List all builds by status"""
        with self.lock:
            return {
                "pending": self.queue.copy(),
                "running": list(self.running_builds.values()),
                "completed": list(self.completed_builds.values())
            }


class OSBuildExecutor:
    """Execute OSBuild pipelines"""

    def __init__(self, osbuild_path: str = "python3 -m osbuild"):
        self.osbuild_path = osbuild_path

    def execute_pipeline(self, manifest_path: str, output_dir: str) -> tuple[bool, Optional[str]]:
        """Execute an OSBuild pipeline"""

        # Create output directory
        os.makedirs(output_dir, exist_ok=True)

        # Run OSBuild
        cmd = f"{self.osbuild_path} --libdir . --output-dir {output_dir} {manifest_path}"

        print(f"Executing OSBuild: {cmd}")

        try:
            result = subprocess.run(
                cmd.split(),
                capture_output=True,
                text=True,
                cwd=os.getcwd()
            )

            if result.returncode == 0:
                print("OSBuild pipeline completed successfully")
                return True, None
            else:
                error_msg = f"OSBuild failed with return code {result.returncode}"
                if result.stderr:
                    error_msg += f"\nStderr: {result.stderr}"
                return False, error_msg

        except Exception as e:
            error_msg = f"Failed to execute OSBuild: {str(e)}"
            return False, error_msg


class BuildOrchestrator:
    """Main build orchestration system"""

    def __init__(self, osbuild_path: str = "python3 -m osbuild"):
        self.queue = BuildQueue()
        self.executor = OSBuildExecutor(osbuild_path)
        self.running = False
        self.worker_thread = None

    def start(self):
        """Start the build orchestrator"""
        if self.running:
            return

        self.running = True
        self.worker_thread = threading.Thread(target=self._worker_loop, daemon=True)
        self.worker_thread.start()
        print("Build orchestrator started")

    def stop(self):
        """Stop the build orchestrator"""
        self.running = False
        if self.worker_thread:
            self.worker_thread.join()
        print("Build orchestrator stopped")

    def _worker_loop(self):
        """Main worker loop for processing builds"""
        while self.running:
            # Get next build
            request = self.queue.get_next_build()
            if not request:
                time.sleep(1)
                continue

            print(f"Processing build {request.id}")

            # Create output directory
            output_dir = f"builds/{request.id}"

            # Execute build
            success, error_message = self.executor.execute_pipeline(
                request.manifest_path, output_dir
            )

            # Mark build as completed
            self.queue.mark_completed(
                request.id, output_dir, success, error_message
            )

    def submit_build(self, manifest_path: str, priority: int = 5, metadata: Optional[Dict[str, Any]] = None) -> str:
        """Submit a new build request"""
        return self.queue.submit_build(manifest_path, priority, metadata)

    def get_build_status(self, build_id: str) -> Optional[BuildRequest]:
        """Get the status of a specific build"""
        return self.queue.get_status(build_id)

    def list_builds(self) -> Dict[str, List[BuildRequest]]:
        """List all builds"""
        return self.queue.list_builds()


def main():
    """Main function for command-line usage"""
    if len(sys.argv) < 2:
        print("Usage: python build-orchestrator.py <manifest_path> [priority]")
        sys.exit(1)

    manifest_path = sys.argv[1]
    priority = int(sys.argv[2]) if len(sys.argv) > 2 else 5

    # Create orchestrator
    orchestrator = BuildOrchestrator()

    # Submit build
    build_id = orchestrator.submit_build(manifest_path, priority)
    print(f"Submitted build {build_id}")

    # Start orchestrator
    orchestrator.start()

    try:
        # Monitor build
        while True:
            status = orchestrator.get_build_status(build_id)
            if status:
                print(f"Build {build_id}: {status.status.value}")

                if status.status in [BuildStatus.COMPLETED, BuildStatus.FAILED]:
                    if status.status == BuildStatus.FAILED:
                        print(f"Build failed: {status.error_message}")
                    break

            time.sleep(5)

    except KeyboardInterrupt:
        print("\nStopping orchestrator...")
        orchestrator.stop()


if __name__ == "__main__":
    main()
build_orchestrator.py (new executable file, 559 lines)
@@ -0,0 +1,559 @@
#!/usr/bin/python3
"""
Debian Forge Build Orchestrator

Enhanced build queue management and OSBuild pipeline execution for Debian atomic builds.
"""

import os
import sys
import json
import time
import threading
import subprocess
import psutil
import logging
from datetime import datetime
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from enum import Enum


class BuildStatus(Enum):
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
    QUEUED = "queued"
    PREPARING = "preparing"
    BUILDING = "building"
    FINALIZING = "finalizing"


@dataclass
class BuildRequest:
    id: str
    manifest_path: str
    priority: int
    status: BuildStatus
    submitted_at: datetime
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    error_message: Optional[str] = None
    output_dir: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    resource_requirements: Optional[Dict[str, Any]] = None
    logs: List[str] = None
    progress: float = 0.0  # 0.0 to 1.0

    def __post_init__(self):
        if self.logs is None:
            self.logs = []

    def add_log(self, message: str):
        """Add a log message with timestamp"""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        log_entry = f"[{timestamp}] {message}"
        self.logs.append(log_entry)

    def update_progress(self, progress: float):
        """Update build progress (0.0 to 1.0)"""
        self.progress = max(0.0, min(1.0, progress))

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization"""
        data = asdict(self)
        data['status'] = self.status.value
        data['submitted_at'] = self.submitted_at.isoformat()
        if self.started_at:
            data['started_at'] = self.started_at.isoformat()
        if self.completed_at:
            data['completed_at'] = self.completed_at.isoformat()
        return data


class BuildStateMachine:
    """State machine for build lifecycle management"""

    def __init__(self):
        self.transitions = {
            BuildStatus.PENDING: [BuildStatus.QUEUED, BuildStatus.CANCELLED],
            BuildStatus.QUEUED: [BuildStatus.PREPARING, BuildStatus.CANCELLED],
            BuildStatus.PREPARING: [BuildStatus.BUILDING, BuildStatus.FAILED],
            BuildStatus.BUILDING: [BuildStatus.FINALIZING, BuildStatus.FAILED],
            BuildStatus.FINALIZING: [BuildStatus.COMPLETED, BuildStatus.FAILED],
            BuildStatus.RUNNING: [BuildStatus.COMPLETED, BuildStatus.FAILED, BuildStatus.CANCELLED],
            BuildStatus.COMPLETED: [],
            BuildStatus.FAILED: [],
            BuildStatus.CANCELLED: []
        }

    def can_transition(self, from_status: BuildStatus, to_status: BuildStatus) -> bool:
        """Check if a status transition is valid"""
        return to_status in self.transitions.get(from_status, [])

    def get_valid_transitions(self, current_status: BuildStatus) -> List[BuildStatus]:
        """Get all valid transitions from current status"""
        return self.transitions.get(current_status, [])


class BuildLogger:
    """Build logging and monitoring system"""

    def __init__(self, log_dir: str = "build-logs"):
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)

        # Set up logging
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger('debian-forge-builds')

    def log_build_event(self, build_id: str, event: str, details: Optional[str] = None):
        """Log a build event"""
        message = f"Build {build_id}: {event}"
        if details:
            message += f" - {details}"

        self.logger.info(message)

        # Write to build-specific log file
        log_file = os.path.join(self.log_dir, f"{build_id}.log")
        with open(log_file, 'a') as f:
            timestamp = datetime.now().isoformat()
            f.write(f"{timestamp} - {message}\n")

    def get_build_logs(self, build_id: str) -> List[str]:
        """Get logs for a specific build"""
        log_file = os.path.join(self.log_dir, f"{build_id}.log")
        if os.path.exists(log_file):
            with open(log_file, 'r') as f:
                return f.readlines()
        return []

    def stream_build_logs(self, build_id: str):
        """Stream logs for a build in real-time"""
        log_file = os.path.join(self.log_dir, f"{build_id}.log")
        if os.path.exists(log_file):
            with open(log_file, 'r') as f:
                for line in f:
                    yield line.strip()


class ResourceManager:
    """Manage system resources for builds"""

    def __init__(self):
        self.max_cpu_percent = 80       # Maximum CPU usage per build
        self.max_memory_gb = 4          # Maximum memory per build (GB)
        self.max_storage_gb = 10        # Maximum storage per build (GB)
        self.reserved_cpu_percent = 20  # Reserved CPU for system
        self.reserved_memory_gb = 2     # Reserved memory for system
        self.allocated_resources: Dict[str, Dict[str, Any]] = {}

    def get_available_resources(self) -> Dict[str, Any]:
        """Get current available system resources"""
        cpu_percent = psutil.cpu_percent(interval=1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage('/')

        # Calculate allocated resources
        allocated_cpu = sum(req.get('cpu_percent', 0) for req in self.allocated_resources.values())
        allocated_memory = sum(req.get('memory_gb', 0) for req in self.allocated_resources.values())
        allocated_storage = sum(req.get('storage_gb', 0) for req in self.allocated_resources.values())

        return {
            "cpu_percent": max(0, 100 - cpu_percent - allocated_cpu),
            "memory_gb": max(0, memory.available / (1024**3) - allocated_memory),
            "storage_gb": max(0, disk.free / (1024**3) - allocated_storage)
        }

    def can_allocate_resources(self, requirements: Dict[str, Any]) -> bool:
        """Check if resources can be allocated for a build"""
        available = self.get_available_resources()

        cpu_needed = requirements.get("cpu_percent", self.max_cpu_percent)
        memory_needed = requirements.get("memory_gb", self.max_memory_gb)
        storage_needed = requirements.get("storage_gb", self.max_storage_gb)

        return (available["cpu_percent"] >= cpu_needed and
                available["memory_gb"] >= memory_needed and
                available["storage_gb"] >= storage_needed)

    def allocate_resources(self, build_id: str, requirements: Dict[str, Any]) -> bool:
        """Allocate resources for a build"""
        if self.can_allocate_resources(requirements):
            self.allocated_resources[build_id] = requirements.copy()
            return True
        return False

    def release_resources(self, build_id: str):
        """Release resources after build completion"""
        if build_id in self.allocated_resources:
            del self.allocated_resources[build_id]


class BuildQueue:
    """Simple build queue with priority-based scheduling"""

    def __init__(self):
        self.queue: List[BuildRequest] = []
        self.running_builds: Dict[str, BuildRequest] = {}
        self.completed_builds: Dict[str, BuildRequest] = {}
        # RLock (not Lock): cancel_build() calls update_build_status() while
        # already holding the lock, which would deadlock with a plain Lock.
        self.lock = threading.RLock()
        self.next_id = 1
        self.resource_manager = ResourceManager()
        self.state_machine = BuildStateMachine()
        self.logger = BuildLogger()

    def submit_build(self, manifest_path: str, priority: int = 5,
                     metadata: Optional[Dict[str, Any]] = None,
                     resource_requirements: Optional[Dict[str, Any]] = None) -> str:
        """Submit a new build request"""
        with self.lock:
            build_id = f"build-{self.next_id:06d}"
            self.next_id += 1

            request = BuildRequest(
                id=build_id,
                manifest_path=manifest_path,
                priority=priority,
                status=BuildStatus.PENDING,
                submitted_at=datetime.now(),
                metadata=metadata or {},
                resource_requirements=resource_requirements or {}
            )

            self.queue.append(request)
            # Sort by priority (higher priority first)
            self.queue.sort(key=lambda x: x.priority, reverse=True)

            # Log submission
            self.logger.log_build_event(build_id, "Build submitted", f"Priority: {priority}")
            request.add_log(f"Build submitted with priority {priority}")

            print(f"Submitted build {build_id} with priority {priority}")
            return build_id

    def get_next_build(self) -> Optional[BuildRequest]:
        """Get the next build request from the queue that can be allocated resources"""
        with self.lock:
            if not self.queue:
                return None

            # Find a build that can have resources allocated
            for i, request in enumerate(self.queue):
                if self.resource_manager.can_allocate_resources(request.resource_requirements):
                    # Remove from queue and mark as running
                    request = self.queue.pop(i)

                    # Transition to QUEUED status
                    if self.state_machine.can_transition(request.status, BuildStatus.QUEUED):
                        request.status = BuildStatus.QUEUED
                        request.add_log("Build queued for execution")

                    # Transition to PREPARING status
                    if self.state_machine.can_transition(request.status, BuildStatus.PREPARING):
                        request.status = BuildStatus.PREPARING
                        request.add_log("Build preparation started")

                    request.started_at = datetime.now()
                    self.running_builds[request.id] = request

                    # Allocate resources
                    self.resource_manager.allocate_resources(request.id, request.resource_requirements)

                    # Log status change
                    self.logger.log_build_event(request.id, "Build started", "Resources allocated")

                    return request

            return None

    def update_build_status(self, build_id: str, new_status: BuildStatus,
                            progress: Optional[float] = None, message: Optional[str] = None):
        """Update build status with validation"""
        with self.lock:
            if build_id in self.running_builds:
                request = self.running_builds[build_id]

                if self.state_machine.can_transition(request.status, new_status):
                    old_status = request.status
                    request.status = new_status

                    if progress is not None:
                        request.update_progress(progress)

                    if message:
                        request.add_log(message)

                    # Log status change
                    self.logger.log_build_event(build_id, f"Status changed: {old_status.value} -> {new_status.value}", message)

                    # Handle completion
                    if new_status in [BuildStatus.COMPLETED, BuildStatus.FAILED, BuildStatus.CANCELLED]:
                        self._finalize_build(build_id, new_status)
                else:
                    print(f"Invalid status transition: {request.status.value} -> {new_status.value}")

    def _finalize_build(self, build_id: str, final_status: BuildStatus):
        """Finalize a completed build"""
        if build_id in self.running_builds:
            request = self.running_builds.pop(build_id)
            request.completed_at = datetime.now()
            request.update_progress(1.0 if final_status == BuildStatus.COMPLETED else 0.0)

            # Release resources
            self.resource_manager.release_resources(build_id)

            self.completed_builds[build_id] = request

            # Log completion
            self.logger.log_build_event(build_id, "Build completed", f"Final status: {final_status.value}")
            print(f"Build {build_id} completed with status: {final_status.value}")

    def mark_completed(self, build_id: str, output_dir: str, success: bool = True, error_message: Optional[str] = None):
        """Mark a build as completed"""
        if success:
            self.update_build_status(build_id, BuildStatus.COMPLETED, 1.0, "Build completed successfully")
        else:
            self.update_build_status(build_id, BuildStatus.FAILED, 0.0, f"Build failed: {error_message}")

    def cancel_build(self, build_id: str) -> bool:
        """Cancel a pending or running build"""
        with self.lock:
            # Check if build is in queue
            for i, request in enumerate(self.queue):
                if request.id == build_id:
                    request = self.queue.pop(i)
                    request.status = BuildStatus.CANCELLED
                    request.add_log("Build cancelled by user")
                    self.completed_builds[build_id] = request
                    self.logger.log_build_event(build_id, "Build cancelled", "User requested cancellation")
                    return True

            # Check if build is running
            if build_id in self.running_builds:
                request = self.running_builds[build_id]
                if self.state_machine.can_transition(request.status, BuildStatus.CANCELLED):
                    self.update_build_status(build_id, BuildStatus.CANCELLED, 0.0, "Build cancelled by user")
                    return True

            return False

    def get_status(self, build_id: str) -> Optional[BuildRequest]:
        """Get the status of a specific build"""
        with self.lock:
            # Check all queues
            for request in self.queue:
                if request.id == build_id:
                    return request

            if build_id in self.running_builds:
                return self.running_builds[build_id]

            if build_id in self.completed_builds:
                return self.completed_builds[build_id]

            return None

    def list_builds(self) -> Dict[str, List[BuildRequest]]:
        """List all builds by status"""
        with self.lock:
            return {
                "pending": self.queue.copy(),
                "running": list(self.running_builds.values()),
                "completed": list(self.completed_builds.values())
            }

    def get_resource_status(self) -> Dict[str, Any]:
        """Get current resource status"""
        available = self.resource_manager.get_available_resources()
        running_count = len(self.running_builds)

        return {
            "available_resources": available,
            "running_builds": running_count,
            "queue_length": len(self.queue),
            "allocated_resources": len(self.resource_manager.allocated_resources)
        }


class OSBuildExecutor:
    """Execute OSBuild pipelines"""

    def __init__(self, osbuild_path: str = "python3 -m osbuild"):
        self.osbuild_path = osbuild_path

    def execute_pipeline(self, manifest_path: str, output_dir: str) -> tuple[bool, Optional[str]]:
        """Execute an OSBuild pipeline"""

        # Create output directory
        os.makedirs(output_dir, exist_ok=True)

        # Run OSBuild
        cmd = f"{self.osbuild_path} --libdir . --output-dir {output_dir} {manifest_path}"

        print(f"Executing OSBuild: {cmd}")

        try:
            result = subprocess.run(
                cmd.split(),
                capture_output=True,
                text=True,
                cwd=os.getcwd()
            )

            if result.returncode == 0:
                print("OSBuild pipeline completed successfully")
                return True, None
            else:
                error_msg = f"OSBuild failed with return code {result.returncode}"
                if result.stderr:
                    error_msg += f"\nStderr: {result.stderr}"
                return False, error_msg

        except Exception as e:
            error_msg = f"Failed to execute OSBuild: {str(e)}"
            return False, error_msg


class BuildOrchestrator:
    """Main build orchestration system"""

    def __init__(self, osbuild_path: str = "python3 -m osbuild"):
        self.queue = BuildQueue()
        self.executor = OSBuildExecutor(osbuild_path)
        self.running = False
        self.worker_thread = None

    def start(self):
        """Start the build orchestrator"""
        if self.running:
            return

        self.running = True
        self.worker_thread = threading.Thread(target=self._worker_loop, daemon=True)
        self.worker_thread.start()
        print("Build orchestrator started")

    def stop(self):
        """Stop the build orchestrator"""
        self.running = False
        if self.worker_thread:
            self.worker_thread.join()
        print("Build orchestrator stopped")

    def _worker_loop(self):
        """Main worker loop for processing builds"""
        while self.running:
            # Get next build
            request = self.queue.get_next_build()
            if not request:
                time.sleep(1)
                continue

            print(f"Processing build {request.id}")

            # Update status to BUILDING
            self.queue.update_build_status(request.id, BuildStatus.BUILDING, 0.1, "OSBuild pipeline started")

            # Create output directory
            output_dir = f"builds/{request.id}"

            # Execute build
            success, error_message = self.executor.execute_pipeline(
                request.manifest_path, output_dir
            )

            # Update progress and finalize
            if success:
                self.queue.update_build_status(request.id, BuildStatus.FINALIZING, 0.9, "OSBuild pipeline completed")
                self.queue.mark_completed(request.id, output_dir, True)
            else:
                self.queue.mark_completed(request.id, output_dir, False, error_message)

    def submit_build(self, manifest_path: str, priority: int = 5,
                     metadata: Optional[Dict[str, Any]] = None,
                     resource_requirements: Optional[Dict[str, Any]] = None) -> str:
        """Submit a new build request"""
        return self.queue.submit_build(manifest_path, priority, metadata, resource_requirements)

    def get_build_status(self, build_id: str) -> Optional[BuildRequest]:
        """Get the status of a specific build"""
        return self.queue.get_status(build_id)

    def list_builds(self) -> Dict[str, List[BuildRequest]]:
        """List all builds"""
        return self.queue.list_builds()

    def get_resource_status(self) -> Dict[str, Any]:
        """Get current resource status"""
        return self.queue.get_resource_status()

    def cancel_build(self, build_id: str) -> bool:
        """Cancel a build"""
        return self.queue.cancel_build(build_id)

    def get_build_logs(self, build_id: str) -> List[str]:
        """Get logs for a specific build"""
        return self.queue.logger.get_build_logs(build_id)


def main():
    """Main function for command-line usage"""
    if len(sys.argv) < 2:
        print("Usage: python build_orchestrator.py <manifest_path> [priority] [cpu_percent] [memory_gb] [storage_gb]")
        sys.exit(1)

    manifest_path = sys.argv[1]
    priority = int(sys.argv[2]) if len(sys.argv) > 2 else 5
    cpu_percent = int(sys.argv[3]) if len(sys.argv) > 3 else 50
    memory_gb = int(sys.argv[4]) if len(sys.argv) > 4 else 2
    storage_gb = int(sys.argv[5]) if len(sys.argv) > 5 else 5

    # Create orchestrator
    orchestrator = BuildOrchestrator()

    # Submit build with resource requirements
    resource_reqs = {
        "cpu_percent": cpu_percent,
        "memory_gb": memory_gb,
        "storage_gb": storage_gb
    }

    build_id = orchestrator.submit_build(manifest_path, priority, resource_requirements=resource_reqs)
    print(f"Submitted build {build_id} with resource requirements: {resource_reqs}")

    # Start orchestrator
    orchestrator.start()

    try:
        # Monitor build
        while True:
            status = orchestrator.get_build_status(build_id)
            if status:
                print(f"Build {build_id}: {status.status.value} (Progress: {status.progress:.1%})")

                if status.status in [BuildStatus.COMPLETED, BuildStatus.FAILED, BuildStatus.CANCELLED]:
                    if status.status == BuildStatus.FAILED:
                        print(f"Build failed: {status.error_message}")
                    break

            # Show resource status
            resource_status = orchestrator.get_resource_status()
            print(f"Resources: CPU {resource_status['available_resources']['cpu_percent']:.1f}% free, "
                  f"Memory {resource_status['available_resources']['memory_gb']:.1f}GB free")

            time.sleep(5)

    except KeyboardInterrupt:
        print("\nStopping orchestrator...")
        orchestrator.stop()


if __name__ == "__main__":
    main()
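A quick illustration of the state machine above: it only permits the forward path PENDING -> QUEUED -> PREPARING -> BUILDING -> FINALIZING -> COMPLETED, plus the FAILED and CANCELLED exits, which is what update_build_status relies on to reject out-of-order updates. A minimal sketch, assuming build_orchestrator.py is importable (illustrative only, not part of the commit):

from build_orchestrator import BuildStateMachine, BuildStatus

sm = BuildStateMachine()

# Valid forward step
assert sm.can_transition(BuildStatus.PENDING, BuildStatus.QUEUED)

# Skipping straight to a terminal state is rejected
assert not sm.can_transition(BuildStatus.PENDING, BuildStatus.COMPLETED)

# The only exits from BUILDING are FINALIZING and FAILED
print(sm.get_valid_transitions(BuildStatus.BUILDING))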
test-apt-stage.py (new file, 241 lines)
@@ -0,0 +1,241 @@
#!/usr/bin/python3
"""
Test script for the apt stage in Debian Forge

This script tests the apt stage with apt-cacher-ng integration.
"""

import os
import sys
import tempfile
import shutil
import subprocess
from pathlib import Path


def test_apt_proxy_config():
    """Test apt proxy configuration"""
    print("Testing apt proxy configuration...")

    with tempfile.TemporaryDirectory() as temp_dir:
        # Create minimal structure
        apt_conf_dir = os.path.join(temp_dir, "etc/apt/apt.conf.d")
        os.makedirs(apt_conf_dir, exist_ok=True)

        # Test proxy configuration
        proxy_config = """Acquire::http::Proxy "192.168.1.101:3142";
Acquire::https::Proxy "192.168.1.101:3142";
"""

        proxy_file = os.path.join(apt_conf_dir, "99proxy")
        with open(proxy_file, "w") as f:
            f.write(proxy_config)

        # Verify proxy configuration
        if os.path.exists(proxy_file):
            with open(proxy_file, "r") as f:
                content = f.read()
                if "192.168.1.101:3142" in content:
                    print("✅ Apt proxy configuration test passed")
                    return True
                else:
                    print("❌ Proxy configuration content mismatch")
                    return False
        else:
            print("❌ Proxy configuration file not created")
            return False


def test_debootstrap_availability():
    """Test if debootstrap is available or can be installed"""
    print("Testing debootstrap availability...")

    try:
        # Check if debootstrap is available
        result = subprocess.run(["which", "debootstrap"],
                                capture_output=True, text=True)

        if result.returncode == 0:
            print("✅ debootstrap is available")

            # Check debootstrap version
            version_result = subprocess.run(["debootstrap", "--version"],
                                            capture_output=True, text=True)
            if version_result.returncode == 0:
                print(f"✅ debootstrap version: {version_result.stdout.strip()}")
                return True
            else:
                print("❌ Failed to get debootstrap version")
                return False
        else:
            print("⚠️ debootstrap not found in PATH")
            print("   This is expected on non-Debian systems")
            print("   debootstrap will be installed during build environment setup")
            return True  # Not a failure, just not available yet

    except Exception as e:
        print(f"❌ debootstrap test failed: {e}")
        return False


def test_ostree_integration():
    """Test OSTree integration"""
    print("Testing OSTree integration...")

    try:
        # Check if ostree is available
        result = subprocess.run(["which", "ostree"],
                                capture_output=True, text=True)

        if result.returncode == 0:
            print("✅ ostree is available")

            # Check ostree version
            version_result = subprocess.run(["ostree", "--version"],
                                            capture_output=True, text=True)
            if version_result.returncode == 0:
                print(f"✅ ostree version: {version_result.stdout.strip()}")
                return True
            else:
                print("❌ Failed to get ostree version")
                return False
        else:
            print("❌ ostree not found in PATH")
            return False

    except Exception as e:
        print(f"❌ ostree test failed: {e}")
        return False


def test_apt_cacher_ng_connectivity():
    """Test connectivity to apt-cacher-ng"""
    print("Testing apt-cacher-ng connectivity...")

    try:
        # Test if we can connect to the apt-cacher-ng server
        import socket

        host, port = "192.168.1.101", 3142

        # Create a socket and try to connect
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)  # 5 second timeout

        result = sock.connect_ex((host, port))
        sock.close()

        if result == 0:
            print(f"✅ Successfully connected to apt-cacher-ng at {host}:{port}")
            return True
        else:
            print(f"⚠️ Cannot connect to apt-cacher-ng at {host}:{port}")
            print("   This is expected if apt-cacher-ng is not running")
            print("   Use setup-apt-cacher.sh to start the service")
            return True  # Not a failure, just not running

    except Exception as e:
        print(f"❌ apt-cacher-ng connectivity test failed: {e}")
        return False


def test_stage_file_structure():
    """Test that stage files have correct structure"""
    print("Testing stage file structure...")

    stage_files = [
        "stages/org.osbuild.apt.py",
        "stages/org.osbuild.debootstrap.py",
        "stages/org.osbuild.ostree.commit.py",
        "stages/org.osbuild.ostree.deploy.py",
        "stages/org.osbuild.sbuild.py",
        "stages/org.osbuild.debian.source.py"
    ]

    all_exist = True
    for stage_file in stage_files:
        if os.path.exists(stage_file):
            print(f"✅ {stage_file} exists")
        else:
            print(f"❌ {stage_file} missing")
            all_exist = False

    if all_exist:
        print("✅ All Debian stage files are present")
        return True
    else:
        print("❌ Some Debian stage files are missing")
        return False


def test_metadata_files():
    """Test that metadata files exist for stages"""
    print("Testing metadata files...")

    metadata_files = [
        "stages/org.osbuild.apt.meta.json",
        "stages/org.osbuild.debootstrap.meta.json",
        "stages/org.osbuild.ostree.commit.meta.json",
        "stages/org.osbuild.ostree.deploy.meta.json",
        "stages/org.osbuild.sbuild.meta.json",
        "stages/org.osbuild.debian.source.meta.json"
    ]

    all_exist = True
    for meta_file in metadata_files:
        if os.path.exists(meta_file):
            print(f"✅ {meta_file} exists")
        else:
            print(f"❌ {meta_file} missing")
            all_exist = False

    if all_exist:
        print("✅ All metadata files are present")
        return True
    else:
        print("❌ Some metadata files are missing")
        return False


def main():
    """Run all tests"""
    print("Debian Forge Apt Stage Tests")
    print("=" * 40)

    tests = [
        test_apt_proxy_config,
        test_debootstrap_availability,
        test_ostree_integration,
        test_apt_cacher_ng_connectivity,
        test_stage_file_structure,
        test_metadata_files
    ]

    passed = 0
    total = len(tests)

    for test in tests:
        try:
            print(f"\nRunning {test.__name__}...")
            if test():
                passed += 1
            else:
                print(f"❌ {test.__name__} failed")
        except Exception as e:
            print(f"❌ {test.__name__} failed with exception: {e}")

        print()

    print("=" * 40)
    print(f"Test Results: {passed}/{total} passed")

    if passed == total:
        print("🎉 All apt stage tests passed!")
        return 0
    else:
        print("⚠️ Some tests failed")
        return 1


if __name__ == "__main__":
    sys.exit(main())
test-build-orchestration.py (new file, 301 lines)
|
|
@ -0,0 +1,301 @@
|
|||
#!/usr/bin/python3
|
||||
"""
|
||||
Test script for Debian Forge build orchestration system
|
||||
|
||||
This script tests the build queue, resource management, and OSBuild integration.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import time
|
||||
import tempfile
|
||||
import os
|
||||
from build_orchestrator import BuildOrchestrator, ResourceManager
|
||||
|
||||
|
||||
def test_resource_manager():
|
||||
"""Test the ResourceManager functionality"""
|
||||
print("Testing ResourceManager...")
|
||||
|
||||
rm = ResourceManager()
|
||||
|
||||
# Test resource availability
|
||||
available = rm.get_available_resources()
|
||||
print(f"Available resources: CPU {available['cpu_percent']:.1f}%, "
|
||||
f"Memory {available['memory_gb']:.1f}GB, "
|
||||
f"Storage {available['storage_gb']:.1f}GB")
|
||||
|
||||
# Test resource allocation
|
||||
test_reqs = {
|
||||
"cpu_percent": 50,
|
||||
"memory_gb": 2,
|
||||
"storage_gb": 5
|
||||
}
|
||||
|
||||
can_allocate = rm.can_allocate_resources(test_reqs)
|
||||
print(f"Can allocate resources for {test_reqs}: {can_allocate}")
|
||||
|
||||
# Test with different requirements
|
||||
small_reqs = {
|
||||
"cpu_percent": 10,
|
||||
"memory_gb": 1,
|
||||
"storage_gb": 1
|
||||
}
|
||||
|
||||
can_allocate_small = rm.can_allocate_resources(small_reqs)
|
||||
print(f"Can allocate resources for {small_reqs}: {can_allocate_small}")
|
||||
|
||||
return True
|
||||
|
||||
def test_build_queue():
    """Test the build queue functionality"""
    print("Testing build queue...")

    from build_orchestrator import BuildQueue

    queue = BuildQueue()

    # Submit builds with different priorities
    build1 = queue.submit_build("manifest1.json", priority=5)
    build2 = queue.submit_build("manifest2.json", priority=3)
    build3 = queue.submit_build("manifest3.json", priority=7)

    print(f"Submitted builds: {build1}, {build2}, {build3}")

    # Check queue status
    builds = queue.list_builds()
    print(f"Pending builds: {len(builds['pending'])}")
    print(f"Running builds: {len(builds['running'])}")
    print(f"Completed builds: {len(builds['completed'])}")

    # Test priority ordering (higher priority should be first)
    pending = builds['pending']
    priorities = [b.priority for b in pending]
    print(f"Build priorities in queue: {priorities}")

    # Verify priority ordering (should be descending)
    if priorities == sorted(priorities, reverse=True):
        print("✅ Priority ordering is correct")
    else:
        print("❌ Priority ordering is incorrect")
        return False

    return True

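# The descending order verified above maps naturally onto heapq, which pops
# the smallest item first, so negating the priority inverts it. A sketch of
# one plausible BuildQueue internal (not confirmed by this repo):
#
#   import heapq, itertools
#
#   _tie = itertools.count()  # keeps FIFO order among equal priorities
#   heap = []
#   heapq.heappush(heap, (-priority, next(_tie), build))
#   _, _, next_build = heapq.heappop(heap)  # highest priority first
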
def test_build_orchestrator():
    """Test the main build orchestrator"""
    print("Testing build orchestrator...")

    orchestrator = BuildOrchestrator()

    # Submit builds with resource requirements
    build1 = orchestrator.submit_build(
        "test-manifest.json",
        priority=5,
        resource_requirements={"cpu_percent": 30, "memory_gb": 1, "storage_gb": 2}
    )

    build2 = orchestrator.submit_build(
        "test-manifest.json",
        priority=3,
        resource_requirements={"cpu_percent": 60, "memory_gb": 3, "storage_gb": 5}
    )

    print(f"Submitted builds: {build1}, {build2}")

    # Check resource status
    resource_status = orchestrator.get_resource_status()
    print(f"Resource status: {resource_status}")

    # List builds
    builds = orchestrator.list_builds()
    print(f"Pending builds: {len(builds['pending'])}")
    print(f"Running builds: {len(builds['running'])}")
    print(f"Completed builds: {len(builds['completed'])}")

    return True

def test_concurrent_builds():
    """Test concurrent build handling"""
    print("Testing concurrent build handling...")

    orchestrator = BuildOrchestrator()

    # Submit multiple builds with different resource requirements
    builds = []
    for i in range(5):
        build_id = orchestrator.submit_build(
            f"test-manifest-{i}.json",
            priority=5 - i,  # Higher priority for lower i
            resource_requirements={
                "cpu_percent": 20 + (i * 10),
                "memory_gb": 1 + i,
                "storage_gb": 2 + i
            }
        )
        builds.append(build_id)
        print(f"Submitted build {build_id}")

    # Start orchestrator
    orchestrator.start()

    # Monitor for a short time
    try:
        for _ in range(10):
            resource_status = orchestrator.get_resource_status()
            print(f"Resources: CPU {resource_status['available_resources']['cpu_percent']:.1f}% free, "
                  f"Memory {resource_status['available_resources']['memory_gb']:.1f}GB free, "
                  f"Queue: {resource_status['queue_length']} pending")
            time.sleep(1)
    except KeyboardInterrupt:
        pass

    # Stop orchestrator
    orchestrator.stop()

    return True

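# start()/stop() imply a background worker thread; a common shape for such a
# loop, sketched with threading.Event (the real threading model inside
# build_orchestrator may differ):
#
#   import threading
#
#   class Worker:
#       def __init__(self):
#           self._stop = threading.Event()
#           self._thread = threading.Thread(target=self._run, daemon=True)
#
#       def start(self):
#           self._thread.start()
#
#       def _run(self):
#           while not self._stop.is_set():
#               # dispatch queued builds whose requirements fit, then idle
#               self._stop.wait(0.5)
#
#       def stop(self):
#           self._stop.set()
#           self._thread.join()
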
def test_manifest_validation():
    """Test manifest validation and parsing"""
    print("Testing manifest validation...")

    # Create a test manifest
    test_manifest = {
        "version": "2",
        "pipelines": [
            {
                "name": "debian-base",
                "runner": "org.osbuild.linux",
                "stages": [
                    {
                        "name": "org.osbuild.debootstrap",
                        "options": {
                            "suite": "bookworm",
                            "target": "debian-base",
                            "apt_proxy": "192.168.1.101:3142"
                        }
                    },
                    {
                        "name": "org.osbuild.apt",
                        "options": {
                            "packages": ["curl", "wget"],
                            "apt_proxy": "192.168.1.101:3142"
                        }
                    }
                ]
            }
        ]
    }

    # Test manifest structure
    if "version" in test_manifest and "pipelines" in test_manifest:
        print("✅ Basic manifest structure is valid")

        pipeline = test_manifest["pipelines"][0]
        if "name" in pipeline and "runner" in pipeline and "stages" in pipeline:
            print("✅ Pipeline structure is valid")

            stages = pipeline["stages"]
            if len(stages) == 2:
                print("✅ Stage count is correct")

                # Check stage names
                stage_names = [s["name"] for s in stages]
                expected_names = ["org.osbuild.debootstrap", "org.osbuild.apt"]

                if stage_names == expected_names:
                    print("✅ Stage names are correct")
                    return True
                else:
                    print(f"❌ Stage names mismatch: expected {expected_names}, got {stage_names}")
                    return False
            else:
                print(f"❌ Expected 2 stages, got {len(stages)}")
                return False
        else:
            print("❌ Pipeline structure is invalid")
            return False
    else:
        print("❌ Basic manifest structure is invalid")
        return False

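# The hand-rolled checks above could also be expressed declaratively; a
# sketch using the jsonschema package (an alternative approach, not
# necessarily what this repo uses):
#
#   import jsonschema
#
#   MANIFEST_SCHEMA = {
#       "type": "object",
#       "required": ["version", "pipelines"],
#       "properties": {
#           "pipelines": {
#               "type": "array",
#               "items": {
#                   "type": "object",
#                   "required": ["name", "runner", "stages"],
#               },
#           },
#       },
#   }
#
#   jsonschema.validate(instance=test_manifest, schema=MANIFEST_SCHEMA)  # raises on failure
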
def test_apt_cacher_ng_integration():
    """Test apt-cacher-ng integration in manifests"""
    print("Testing apt-cacher-ng integration...")

    # Test that the apt-cacher-ng address is correctly configured
    expected_proxy = "192.168.1.101:3142"

    # Check that the proxy is configured in the test manifests
    test_manifests = [
        "test-debian-manifest.json",
        "test-debian-atomic-manifest.json"
    ]

    all_have_proxy = True
    for manifest_file in test_manifests:
        if os.path.exists(manifest_file):
            try:
                with open(manifest_file, 'r') as f:
                    content = f.read()
                    if expected_proxy in content:
                        print(f"✅ {manifest_file} has apt-cacher-ng configuration")
                    else:
                        print(f"❌ {manifest_file} missing apt-cacher-ng configuration")
                        all_have_proxy = False
            except Exception as e:
                print(f"❌ Error reading {manifest_file}: {e}")
                all_have_proxy = False
        else:
            print(f"⚠️ {manifest_file} not found")

    return all_have_proxy

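# For reference, an apt_proxy value like the one checked above is typically
# consumed as an apt configuration drop-in inside the build root; the file
# name below is an assumption, the directive syntax is standard apt.conf:
#
#   # /etc/apt/apt.conf.d/01proxy
#   Acquire::http::Proxy "http://192.168.1.101:3142";
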
def main():
    """Run all tests"""
    print("Debian Forge Build Orchestration Tests")
    print("=" * 50)

    tests = [
        test_resource_manager,
        test_build_queue,
        test_build_orchestrator,
        test_concurrent_builds,
        test_manifest_validation,
        test_apt_cacher_ng_integration
    ]

    passed = 0
    total = len(tests)

    for test in tests:
        try:
            print(f"\nRunning {test.__name__}...")
            if test():
                print(f"✅ {test.__name__} passed")
                passed += 1
            else:
                print(f"❌ {test.__name__} failed")
        except Exception as e:
            print(f"❌ {test.__name__} failed with exception: {e}")

        print()

    print("=" * 50)
    print(f"Test Results: {passed}/{total} passed")

    if passed == total:
        print("🎉 All build orchestration tests passed!")
        return 0
    else:
        print("⚠️ Some tests failed")
        return 1


if __name__ == "__main__":
    sys.exit(main())
152
test-resource-allocation.py
Normal file
@ -0,0 +1,152 @@
#!/usr/bin/python3
"""
Test script for resource allocation in the Debian Forge build orchestrator

This script tests the resource management and allocation functionality.
"""

import sys
import time
from build_orchestrator import BuildOrchestrator, ResourceManager


def test_resource_manager():
    """Test the ResourceManager class"""
    print("Testing ResourceManager...")

    rm = ResourceManager()

    # Test resource availability
    available = rm.get_available_resources()
    print(f"Available resources: CPU {available['cpu_percent']:.1f}%, "
          f"Memory {available['memory_gb']:.1f}GB, "
          f"Storage {available['storage_gb']:.1f}GB")

    # Test resource allocation
    test_reqs = {
        "cpu_percent": 50,
        "memory_gb": 2,
        "storage_gb": 5
    }

    can_allocate = rm.can_allocate_resources(test_reqs)
    print(f"Can allocate resources for {test_reqs}: {can_allocate}")

    return True

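# If allocations are recorded per build and released on completion, the
# bookkeeping can stay very small; a sketch with hypothetical names (not the
# real ResourceManager API):
#
#   allocations = {}                      # build_id -> requirements dict
#
#   def allocate(build_id, reqs):
#       allocations[build_id] = reqs      # capacity now counted as in use
#
#   def release(build_id):
#       allocations.pop(build_id, None)   # freed capacity is available again
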
def test_build_orchestrator():
    """Test the BuildOrchestrator with resource management"""
    print("Testing BuildOrchestrator with resource management...")

    orchestrator = BuildOrchestrator()

    # Submit builds with different resource requirements
    build1 = orchestrator.submit_build(
        "test-manifest.json",
        priority=5,
        resource_requirements={"cpu_percent": 30, "memory_gb": 1, "storage_gb": 2}
    )

    build2 = orchestrator.submit_build(
        "test-manifest.json",
        priority=3,
        resource_requirements={"cpu_percent": 60, "memory_gb": 3, "storage_gb": 5}
    )

    print(f"Submitted builds: {build1}, {build2}")

    # Check resource status
    resource_status = orchestrator.get_resource_status()
    print(f"Resource status: {resource_status}")

    # List builds
    builds = orchestrator.list_builds()
    print(f"Pending builds: {len(builds['pending'])}")
    print(f"Running builds: {len(builds['running'])}")
    print(f"Completed builds: {len(builds['completed'])}")

    return True

def test_concurrent_builds():
    """Test concurrent build handling with resource constraints"""
    print("Testing concurrent build handling...")

    orchestrator = BuildOrchestrator()

    # Submit multiple builds with resource requirements
    builds = []
    for i in range(5):
        build_id = orchestrator.submit_build(
            f"test-manifest-{i}.json",
            priority=5 - i,  # Higher priority for lower i
            resource_requirements={
                "cpu_percent": 20 + (i * 10),
                "memory_gb": 1 + i,
                "storage_gb": 2 + i
            }
        )
        builds.append(build_id)
        print(f"Submitted build {build_id}")

    # Start orchestrator
    orchestrator.start()

    # Monitor for a short time
    try:
        for _ in range(10):
            resource_status = orchestrator.get_resource_status()
            print(f"Resources: CPU {resource_status['available_resources']['cpu_percent']:.1f}% free, "
                  f"Memory {resource_status['available_resources']['memory_gb']:.1f}GB free, "
                  f"Queue: {resource_status['queue_length']} pending")
            time.sleep(1)
    except KeyboardInterrupt:
        pass

    # Stop orchestrator
    orchestrator.stop()

    return True

def main():
    """Run all tests"""
    print("Resource Allocation Tests")
    print("=" * 40)

    tests = [
        test_resource_manager,
        test_build_orchestrator,
        test_concurrent_builds
    ]

    passed = 0
    total = len(tests)

    for test in tests:
        try:
            print(f"\nRunning {test.__name__}...")
            if test():
                print(f"✅ {test.__name__} passed")
                passed += 1
            else:
                print(f"❌ {test.__name__} failed")
        except Exception as e:
            print(f"❌ {test.__name__} failed with exception: {e}")

        print()

    print("=" * 40)
    print(f"Test Results: {passed}/{total} passed")

    if passed == total:
        print("🎉 All resource allocation tests passed!")
        return 0
    else:
        print("⚠️ Some tests failed")
        return 1


if __name__ == "__main__":
    sys.exit(main())
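# Usage: both test scripts are plain executables and expect
# build_orchestrator.py to be importable from the working directory:
#
#   python3 test-build-orchestration.py
#   python3 test-resource-allocation.py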