#!/bin/bash # Comprehensive Testing Script for debian-forge # This script runs all types of tests: unit, integration, performance, and error handling set -e echo "๐Ÿงช Debian Forge Comprehensive Testing Suite" echo "===========================================" # Configuration TEST_DIR="./comprehensive-tests" RESULTS_DIR="./comprehensive-results" MANIFESTS_DIR="./test/data/manifests/debian" # Create directories mkdir -p "$TEST_DIR" "$RESULTS_DIR" # Test results tracking declare -A TEST_RESULTS declare -A TEST_TIMES echo "" echo "๐Ÿš€ Starting Comprehensive Test Suite..." echo "======================================" # 1. Unit Tests echo "" echo "๐Ÿ“‹ Running Unit Tests..." echo "========================" start_time=$(date +%s.%N) if python3 -m pytest test/ --tb=short -v > "$RESULTS_DIR/unit-tests.log" 2>&1; then end_time=$(date +%s.%N) unit_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["unit"]="PASSED" TEST_TIMES["unit"]=$unit_time echo "โœ… Unit tests passed in $(printf "%.2f" $unit_time)s" else end_time=$(date +%s.%N) unit_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["unit"]="FAILED" TEST_TIMES["unit"]=$unit_time echo "โŒ Unit tests failed in $(printf "%.2f" $unit_time)s" fi # 2. Integration Tests echo "" echo "๐Ÿ”— Running Integration Tests..." echo "===============================" # Test all manifest files manifest_files=( "debian-trixie-minimal.json" "ubuntu-jammy-server.json" "debian-atomic-container.json" "debian-trixie-arm64.json" "test-apt-basic.json" ) integration_passed=0 integration_failed=0 integration_total=${#manifest_files[@]} for manifest in "${manifest_files[@]}"; do manifest_path="$MANIFESTS_DIR/$manifest" if [ ! -f "$manifest_path" ]; then echo "โŒ Manifest not found: $manifest" ((integration_failed++)) continue fi echo "๐Ÿงช Testing: $manifest" start_time=$(date +%s.%N) if python3 -m osbuild "$manifest_path" --output-dir "$TEST_DIR/${manifest%.json}" --libdir . --json > "$RESULTS_DIR/${manifest%.json}_integration.log" 2>&1; then end_time=$(date +%s.%N) test_time=$(echo "$end_time - $start_time" | bc -l) echo " โœ… PASSED in $(printf "%.2f" $test_time)s" ((integration_passed++)) else end_time=$(date +%s.%N) test_time=$(echo "$end_time - $start_time" | bc -l) echo " โŒ FAILED in $(printf "%.2f" $test_time)s" ((integration_failed++)) fi done if [ $integration_failed -eq 0 ]; then TEST_RESULTS["integration"]="PASSED" echo "โœ… All integration tests passed ($integration_passed/$integration_total)" else TEST_RESULTS["integration"]="FAILED" echo "โŒ Integration tests failed ($integration_passed/$integration_total passed, $integration_failed failed)" fi # 3. Performance Tests echo "" echo "โšก Running Performance Tests..." echo "===============================" start_time=$(date +%s.%N) if ./scripts/performance-test.sh > "$RESULTS_DIR/performance-tests.log" 2>&1; then end_time=$(date +%s.%N) perf_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["performance"]="PASSED" TEST_TIMES["performance"]=$perf_time echo "โœ… Performance tests passed in $(printf "%.2f" $perf_time)s" else end_time=$(date +%s.%N) perf_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["performance"]="FAILED" TEST_TIMES["performance"]=$perf_time echo "โŒ Performance tests failed in $(printf "%.2f" $perf_time)s" fi # 4. Error Handling Tests echo "" echo "๐Ÿ”ง Running Error Handling Tests..." echo "==================================" start_time=$(date +%s.%N) if ./scripts/error-handling-test.sh > "$RESULTS_DIR/error-handling-tests.log" 2>&1; then end_time=$(date +%s.%N) error_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["error_handling"]="PASSED" TEST_TIMES["error_handling"]=$error_time echo "โœ… Error handling tests passed in $(printf "%.2f" $error_time)s" else end_time=$(date +%s.%N) error_time=$(echo "$end_time - $start_time" | bc -l) TEST_RESULTS["error_handling"]="FAILED" TEST_TIMES["error_handling"]=$error_time echo "โŒ Error handling tests failed in $(printf "%.2f" $error_time)s" fi # 5. Code Quality Tests echo "" echo "๐Ÿ“Š Running Code Quality Tests..." echo "===============================" start_time=$(date +%s.%N) # Flake8 linting if command -v flake8 >/dev/null 2>&1; then if flake8 osbuild/ --output-file="$RESULTS_DIR/flake8.log" 2>&1; then echo "โœ… Flake8 linting passed" flake8_result="PASSED" else echo "โŒ Flake8 linting failed" flake8_result="FAILED" fi else echo "โš ๏ธ Flake8 not available, skipping linting" flake8_result="SKIPPED" fi # MyPy type checking if command -v mypy >/dev/null 2>&1; then if mypy osbuild/ --output-file="$RESULTS_DIR/mypy.log" 2>&1; then echo "โœ… MyPy type checking passed" mypy_result="PASSED" else echo "โŒ MyPy type checking failed" mypy_result="FAILED" fi else echo "โš ๏ธ MyPy not available, skipping type checking" mypy_result="SKIPPED" fi end_time=$(date +%s.%N) quality_time=$(echo "$end_time - $start_time" | bc -l) TEST_TIMES["code_quality"]=$quality_time if [ "$flake8_result" = "PASSED" ] && [ "$mypy_result" = "PASSED" ]; then TEST_RESULTS["code_quality"]="PASSED" elif [ "$flake8_result" = "SKIPPED" ] && [ "$mypy_result" = "SKIPPED" ]; then TEST_RESULTS["code_quality"]="SKIPPED" else TEST_RESULTS["code_quality"]="FAILED" fi # Generate comprehensive report echo "" echo "๐Ÿ“Š Generating Comprehensive Test Report..." echo "==========================================" cat > "$RESULTS_DIR/comprehensive-test-report.md" << EOF # Debian Forge Comprehensive Test Report Generated: $(date) ## Test Summary | Test Category | Result | Duration | Details | |---------------|--------|----------|---------| | Unit Tests | ${TEST_RESULTS["unit"]} | $(printf "%.2f" ${TEST_TIMES["unit"]})s | Python unit tests | | Integration Tests | ${TEST_RESULTS["integration"]} | N/A | Manifest validation tests | | Performance Tests | ${TEST_RESULTS["performance"]} | $(printf "%.2f" ${TEST_TIMES["performance"]})s | Build performance benchmarks | | Error Handling | ${TEST_RESULTS["error_handling"]} | $(printf "%.2f" ${TEST_TIMES["error_handling"]})s | Error scenario testing | | Code Quality | ${TEST_RESULTS["code_quality"]} | $(printf "%.2f" ${TEST_TIMES["code_quality"]})s | Linting and type checking | ## Detailed Results ### Unit Tests - **Status**: ${TEST_RESULTS["unit"]} - **Duration**: $(printf "%.2f" ${TEST_TIMES["unit"]})s - **Log**: [unit-tests.log](unit-tests.log) ### Integration Tests - **Status**: ${TEST_RESULTS["integration"]} - **Manifests Tested**: $integration_total - **Passed**: $integration_passed - **Failed**: $integration_failed ### Performance Tests - **Status**: ${TEST_RESULTS["performance"]} - **Duration**: $(printf "%.2f" ${TEST_TIMES["performance"]})s - **Log**: [performance-tests.log](performance-tests.log) ### Error Handling Tests - **Status**: ${TEST_RESULTS["error_handling"]} - **Duration**: $(printf "%.2f" ${TEST_TIMES["error_handling"]})s - **Log**: [error-handling-tests.log](error-handling-tests.log) ### Code Quality Tests - **Status**: ${TEST_RESULTS["code_quality"]} - **Duration**: $(printf "%.2f" ${TEST_TIMES["code_quality"]})s - **Flake8**: $flake8_result - **MyPy**: $mypy_result ## Overall Assessment EOF # Calculate overall status total_tests=0 passed_tests=0 failed_tests=0 skipped_tests=0 for test_type in "${!TEST_RESULTS[@]}"; do result="${TEST_RESULTS[$test_type]}" ((total_tests++)) case $result in "PASSED") ((passed_tests++)) ;; "FAILED") ((failed_tests++)) ;; "SKIPPED") ((skipped_tests++)) ;; esac done if [ $failed_tests -eq 0 ]; then overall_status="โœ… ALL TESTS PASSED" echo "- **Overall Status**: $overall_status" >> "$RESULTS_DIR/comprehensive-test-report.md" else overall_status="โŒ SOME TESTS FAILED" echo "- **Overall Status**: $overall_status" >> "$RESULTS_DIR/comprehensive-test-report.md" fi cat >> "$RESULTS_DIR/comprehensive-test-report.md" << EOF - **Total Test Categories**: $total_tests - **Passed**: $passed_tests - **Failed**: $failed_tests - **Skipped**: $skipped_tests ## Recommendations 1. **Fix Failed Tests**: Address any failing tests immediately 2. **Improve Coverage**: Add more test cases for better coverage 3. **Performance Optimization**: Focus on areas with slow performance 4. **Error Handling**: Enhance error handling based on test results 5. **Code Quality**: Address any linting or type checking issues ## Next Steps 1. Review detailed logs for failed tests 2. Implement fixes for identified issues 3. Add more comprehensive test cases 4. Set up automated testing in CI/CD 5. Monitor test results over time EOF echo "" echo "๐Ÿ“Š Comprehensive Test Report Generated" echo "======================================" echo "๐Ÿ“„ Report: $RESULTS_DIR/comprehensive-test-report.md" echo "๐Ÿ“ Results: $RESULTS_DIR/" echo "" echo "๐ŸŽฏ Test Summary:" echo "================" echo "โœ… Passed: $passed_tests" echo "โŒ Failed: $failed_tests" echo "โญ๏ธ Skipped: $skipped_tests" echo "๐Ÿ“Š Total: $total_tests" echo "" echo "๐Ÿ† Overall Status: $overall_status" echo "" echo "๐Ÿงช Comprehensive testing completed!"