debian-forge-composer/internal/monitoring/operations_manager.go
robojerk 4eeaa43c39
Some checks failed
Tests / 🛃 Unit tests (push) Failing after 13s
Tests / 🗄 DB tests (push) Failing after 19s
Tests / 🐍 Lint python scripts (push) Failing after 1s
Tests / ⌨ Golang Lint (push) Failing after 1s
Tests / 📦 Packit config lint (push) Failing after 1s
Tests / 🔍 Check source preparation (push) Failing after 1s
Tests / 🔍 Check for valid snapshot urls (push) Failing after 1s
Tests / 🔍 Check for missing or unused runner repos (push) Failing after 1s
Tests / 🐚 Shellcheck (push) Failing after 1s
Tests / 📦 RPMlint (push) Failing after 1s
Tests / Gitlab CI trigger helper (push) Failing after 1s
Tests / 🎀 kube-linter (push) Failing after 1s
Tests / 🧹 cloud-cleaner-is-enabled (push) Successful in 3s
Tests / 🔍 Check spec file osbuild/images dependencies (push) Failing after 1s
did stuff
2025-08-26 10:34:42 -07:00

890 lines
26 KiB
Go

package monitoring
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
)
// OperationsManager coordinates the backup, recovery, data-persistence,
// and recovery-testing subsystems for the monitoring stack.
type OperationsManager struct {
	logger      *logrus.Logger
	config      *OperationsConfig
	backup      *BackupManager
	recovery    *RecoveryManager
	persistence *DataPersistence
	testing     *RecoveryTesting
	// NOTE(review): mu is declared but no method in this file locks it —
	// confirm intended use before relying on it for concurrency safety.
	mu sync.RWMutex
}

// OperationsConfig holds the top-level settings used to construct an
// OperationsManager. BackupPath and RecoveryPath are handed to the
// backup and recovery managers respectively (see NewOperationsManager);
// the remaining fields are not read anywhere in this file.
type OperationsConfig struct {
	Enabled       bool              `json:"enabled"`
	BackupPath    string            `json:"backup_path"`   // root directory for backup artifacts
	RecoveryPath  string            `json:"recovery_path"` // root directory for recovery artifacts
	RetentionDays int               `json:"retention_days"`
	Compression   bool              `json:"compression"`
	Encryption    bool              `json:"encryption"`
	Metadata      map[string]string `json:"metadata"`
}
// BackupManager owns the backup schedules, strategies, and the on-disk
// storage used to persist backup job records.
type BackupManager struct {
	config     *BackupConfig
	schedules  map[string]BackupSchedule // keyed by schedule ID
	strategies map[string]BackupStrategy // keyed by strategy ID
	storage    *BackupStorage
	logger     *logrus.Logger
}

// BackupConfig holds backup-related settings.
// NOTE(review): NewBackupManager only installs the zero value — confirm
// where these fields are populated and read.
type BackupConfig struct {
	Enabled       bool              `json:"enabled"`
	AutoBackup    bool              `json:"auto_backup"`
	BackupPath    string            `json:"backup_path"`
	RetentionDays int               `json:"retention_days"`
	Compression   bool              `json:"compression"`
	Encryption    bool              `json:"encryption"`
	Metadata      map[string]string `json:"metadata"`
}

// BackupSchedule describes when a recurring backup should run.
type BackupSchedule struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Type        string                 `json:"type"` // "full" or "archival" (see initializeSchedules)
	Interval    time.Duration          `json:"interval"`
	LastRun     time.Time              `json:"last_run"` // zero value means never run
	NextRun     time.Time              `json:"next_run"`
	Enabled     bool                   `json:"enabled"`
	Metadata    map[string]interface{} `json:"metadata"`
}

// BackupStrategy describes what a backup covers and how the archive is
// produced.
type BackupStrategy struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Type        string                 `json:"type"`
	Paths       []string               `json:"paths"`   // directories included in the archive
	Exclude     []string               `json:"exclude"` // exclusion patterns (see shouldExclude)
	Compression bool                   `json:"compression"`
	Encryption  bool                   `json:"encryption"` // NOTE(review): not acted on by executeBackup — confirm
	Enabled     bool                   `json:"enabled"`
	Metadata    map[string]interface{} `json:"metadata"`
}

// BackupJob records a single backup execution: outcome, timing, archive
// size, checksum, and the final archive path.
type BackupJob struct {
	ID         string                 `json:"id"`
	ScheduleID string                 `json:"schedule_id"`
	StrategyID string                 `json:"strategy_id"`
	Status     string                 `json:"status"` // "running", "completed", or "failed"
	StartTime  time.Time              `json:"start_time"`
	EndTime    time.Time              `json:"end_time"`
	Duration   time.Duration          `json:"duration"`
	Size       int64                  `json:"size"`
	Checksum   string                 `json:"checksum"` // hex-encoded SHA-256 of the archive
	Path       string                 `json:"path"`
	Error      string                 `json:"error,omitempty"`
	Metadata   map[string]interface{} `json:"metadata"`
}

// BackupStorage persists backup job records as JSON files under a root
// directory.
type BackupStorage struct {
	path      string
	retention time.Duration // NOTE(review): stored but never enforced in this file
	mu        sync.RWMutex  // serializes writes to the storage directory
}
// RecoveryManager owns the recovery procedures and the plans that
// compose them.
type RecoveryManager struct {
	config     *RecoveryConfig
	procedures map[string]RecoveryProcedure // keyed by procedure ID
	plans      map[string]RecoveryPlan      // keyed by plan ID
	logger     *logrus.Logger
}

// RecoveryConfig holds recovery-related settings.
// NOTE(review): NewRecoveryManager only installs the zero value — confirm
// where these fields are populated and read.
type RecoveryConfig struct {
	Enabled      bool              `json:"enabled"`
	AutoRecovery bool              `json:"auto_recovery"`
	RecoveryPath string            `json:"recovery_path"`
	Testing      bool              `json:"testing"`
	Metadata     map[string]string `json:"metadata"`
}

// RecoveryProcedure is an ordered sequence of steps that restores part of
// the system, with prerequisites that must hold before it runs.
type RecoveryProcedure struct {
	ID            string                 `json:"id"`
	Name          string                 `json:"name"`
	Description   string                 `json:"description"`
	Type          string                 `json:"type"`
	Steps         []RecoveryStep         `json:"steps"` // executed in order; first failure aborts
	Prerequisites []string               `json:"prerequisites"`
	EstimatedTime time.Duration          `json:"estimated_time"`
	RiskLevel     string                 `json:"risk_level"`
	Enabled       bool                   `json:"enabled"`
	Metadata      map[string]interface{} `json:"metadata"`
}

// RecoveryStep is one command in a procedure, with a timeout and a
// rollback command intended to undo it.
type RecoveryStep struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Command     string                 `json:"command"`
	Args        []string               `json:"args"`
	Timeout     time.Duration          `json:"timeout"`
	Rollback    string                 `json:"rollback"`
	Metadata    map[string]interface{} `json:"metadata"`
}

// RecoveryPlan groups procedures and records the recovery objectives for
// the scenario it covers.
type RecoveryPlan struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Procedures  []string               `json:"procedures"` // procedure IDs, executed in order
	Priority    string                 `json:"priority"`
	RTO         time.Duration          `json:"rto"` // recovery time objective
	RPO         time.Duration          `json:"rpo"` // recovery point objective
	Enabled     bool                   `json:"enabled"`
	Metadata    map[string]interface{} `json:"metadata"`
}
// DataPersistence groups persistence configuration with the replication
// manager. No methods are defined on it in this file.
type DataPersistence struct {
	config      *PersistenceConfig
	replication *ReplicationManager
	mu          sync.RWMutex
}

// PersistenceConfig holds data-persistence settings.
// NOTE(review): NewDataPersistence only installs the zero value — confirm
// where these fields are populated and read.
type PersistenceConfig struct {
	Enabled      bool              `json:"enabled"`
	Replication  bool              `json:"replication"`
	ReplicaCount int               `json:"replica_count"`
	SyncMode     string            `json:"sync_mode"`
	Metadata     map[string]string `json:"metadata"`
}

// ReplicationManager tracks replicas and the strategies used to keep
// them in sync.
type ReplicationManager struct {
	replicas   map[string]Replica             // keyed by replica ID
	strategies map[string]ReplicationStrategy // keyed by strategy ID
	mu         sync.RWMutex
}

// Replica describes a single replication target and its sync state.
type Replica struct {
	ID         string                 `json:"id"`
	Name       string                 `json:"name"`
	Location   string                 `json:"location"`
	Status     string                 `json:"status"`
	LastSync   time.Time              `json:"last_sync"`
	SyncStatus string                 `json:"sync_status"`
	Metadata   map[string]interface{} `json:"metadata"`
}

// ReplicationStrategy describes how and how often replication runs.
type ReplicationStrategy struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Type        string                 `json:"type"`
	Interval    time.Duration          `json:"interval"`
	Enabled     bool                   `json:"enabled"`
	Metadata    map[string]interface{} `json:"metadata"`
}
// RecoveryTesting owns the test scenarios used to exercise recovery
// procedures and the results of previous runs.
type RecoveryTesting struct {
	config    *TestingConfig
	scenarios map[string]TestScenario // keyed by scenario ID
	results   map[string]TestResult   // keyed by result ID
	logger    *logrus.Logger
}

// TestingConfig holds recovery-testing settings.
// NOTE(review): NewRecoveryTesting only installs the zero value — confirm
// where these fields are populated and read.
type TestingConfig struct {
	Enabled      bool              `json:"enabled"`
	AutoTesting  bool              `json:"auto_testing"`
	TestInterval time.Duration     `json:"test_interval"`
	Metadata     map[string]string `json:"metadata"`
}

// TestScenario is an ordered list of test steps plus the outcomes the run
// is expected to produce.
type TestScenario struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Type        string                 `json:"type"`
	Steps       []TestStep             `json:"steps"`    // executed in order; first failure aborts
	Expected    map[string]interface{} `json:"expected"` // compared against TestResult.Results
	Enabled     bool                   `json:"enabled"`
	Metadata    map[string]interface{} `json:"metadata"`
}

// TestStep is one action in a scenario together with how to validate it.
type TestStep struct {
	ID          string                 `json:"id"`
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Action      string                 `json:"action"`
	Parameters  map[string]interface{} `json:"parameters"`
	Validation  string                 `json:"validation"`
	Metadata    map[string]interface{} `json:"metadata"`
}

// TestResult records the outcome of a single scenario run.
type TestResult struct {
	ID         string                 `json:"id"`
	ScenarioID string                 `json:"scenario_id"`
	Status     string                 `json:"status"` // "running", "completed", or "failed"
	StartTime  time.Time              `json:"start_time"`
	EndTime    time.Time              `json:"end_time"`
	Duration   time.Duration          `json:"duration"`
	Results    map[string]interface{} `json:"results"` // per-step outcomes keyed by step ID
	Error      string                 `json:"error,omitempty"`
	Metadata   map[string]interface{} `json:"metadata"`
}
// NewOperationsManager wires together the backup, recovery, persistence,
// and recovery-testing subsystems.
//
// A nil config is replaced with an empty OperationsConfig: the previous
// implementation dereferenced config.BackupPath/RecoveryPath
// unconditionally and panicked when callers passed nil.
func NewOperationsManager(config *OperationsConfig, logger *logrus.Logger) *OperationsManager {
	if config == nil {
		config = &OperationsConfig{}
	}
	return &OperationsManager{
		logger:      logger,
		config:      config,
		backup:      NewBackupManager(config.BackupPath, logger),
		recovery:    NewRecoveryManager(config.RecoveryPath, logger),
		persistence: NewDataPersistence(),
		testing:     NewRecoveryTesting(logger),
	}
}
// NewBackupManager builds a BackupManager rooted at backupPath with a
// 30-day storage retention and registers the built-in schedules and
// strategies.
func NewBackupManager(backupPath string, logger *logrus.Logger) *BackupManager {
	const defaultRetention = 30 * 24 * time.Hour

	bm := &BackupManager{
		config:     &BackupConfig{},
		schedules:  map[string]BackupSchedule{},
		strategies: map[string]BackupStrategy{},
		storage:    NewBackupStorage(backupPath, defaultRetention),
		logger:     logger,
	}
	bm.initializeSchedules()
	bm.initializeStrategies()
	return bm
}
// NewRecoveryManager builds a RecoveryManager and registers the built-in
// recovery procedures and plans.
//
// NOTE(review): recoveryPath is currently unused by the constructor —
// confirm whether it should seed RecoveryConfig.RecoveryPath.
func NewRecoveryManager(recoveryPath string, logger *logrus.Logger) *RecoveryManager {
	rm := &RecoveryManager{
		config:     &RecoveryConfig{},
		procedures: map[string]RecoveryProcedure{},
		plans:      map[string]RecoveryPlan{},
		logger:     logger,
	}
	rm.initializeProcedures()
	rm.initializePlans()
	return rm
}
// NewDataPersistence returns a DataPersistence with an empty config and a
// freshly initialized replication manager.
func NewDataPersistence() *DataPersistence {
	dp := new(DataPersistence)
	dp.config = &PersistenceConfig{}
	dp.replication = NewReplicationManager()
	return dp
}
// NewRecoveryTesting builds a RecoveryTesting instance and registers the
// built-in test scenarios.
func NewRecoveryTesting(logger *logrus.Logger) *RecoveryTesting {
	rt := &RecoveryTesting{
		config:    &TestingConfig{},
		scenarios: map[string]TestScenario{},
		results:   map[string]TestResult{},
		logger:    logger,
	}
	rt.initializeScenarios()
	return rt
}
// NewBackupStorage returns a BackupStorage rooted at path with the given
// retention window.
func NewBackupStorage(path string, retention time.Duration) *BackupStorage {
	bs := new(BackupStorage)
	bs.path = path
	bs.retention = retention
	return bs
}
// NewReplicationManager returns a ReplicationManager with empty replica
// and strategy registries.
func NewReplicationManager() *ReplicationManager {
	rm := new(ReplicationManager)
	rm.replicas = map[string]Replica{}
	rm.strategies = map[string]ReplicationStrategy{}
	return rm
}
// initializeSchedules registers the built-in daily, weekly, and monthly
// backup schedules. LastRun starts at the zero time (never run) and
// NextRun is seeded one interval from now.
func (bm *BackupManager) initializeSchedules() {
	now := time.Now()
	defaults := []BackupSchedule{
		{
			ID:          "daily",
			Name:        "Daily Backup",
			Description: "Daily backup of critical data",
			Type:        "full",
			Interval:    24 * time.Hour,
		},
		{
			ID:          "weekly",
			Name:        "Weekly Backup",
			Description: "Weekly full backup with retention",
			Type:        "full",
			Interval:    7 * 24 * time.Hour,
		},
		{
			ID:          "monthly",
			Name:        "Monthly Backup",
			Description: "Monthly archival backup",
			Type:        "archival",
			Interval:    30 * 24 * time.Hour,
		},
	}
	for _, schedule := range defaults {
		schedule.LastRun = time.Time{}
		schedule.NextRun = now.Add(schedule.Interval)
		schedule.Enabled = true
		bm.schedules[schedule.ID] = schedule
	}
}
// initializeStrategies registers the built-in backup strategies: a full
// system backup, an incremental data backup, and an encrypted
// configuration-only backup.
func (bm *BackupManager) initializeStrategies() {
	for _, strategy := range []BackupStrategy{
		{
			ID:          "full",
			Name:        "Full Backup",
			Description: "Complete backup of all data",
			Type:        "full",
			Paths:       []string{"/var/lib/debian-forge", "/etc/debian-forge", "/opt/debian-forge"},
			Exclude:     []string{"*.tmp", "*.log", "*.cache"},
			Compression: true,
			Encryption:  false,
			Enabled:     true,
		},
		{
			ID:          "incremental",
			Name:        "Incremental Backup",
			Description: "Backup of changed files only",
			Type:        "incremental",
			Paths:       []string{"/var/lib/debian-forge"},
			Exclude:     []string{"*.tmp", "*.log"},
			Compression: true,
			Encryption:  false,
			Enabled:     true,
		},
		{
			ID:          "config",
			Name:        "Configuration Backup",
			Description: "Backup of configuration files only",
			Type:        "config",
			Paths:       []string{"/etc/debian-forge"},
			Exclude:     []string{},
			Compression: true,
			Encryption:  true,
			Enabled:     true,
		},
	} {
		bm.strategies[strategy.ID] = strategy
	}
}
// initializeProcedures registers the built-in recovery procedures. Each
// procedure is an ordered list of steps with per-step rollback commands;
// actual step execution is currently a placeholder (see executeStep).
func (rm *RecoveryManager) initializeProcedures() {
	// Database recovery: stop dependent services, restore the database
	// with pg_restore, then restart the services.
	rm.procedures["database_recovery"] = RecoveryProcedure{
		ID:          "database_recovery",
		Name:        "Database Recovery",
		Description: "Recover database from backup",
		Type:        "database",
		Steps: []RecoveryStep{
			{
				ID:          "stop_services",
				Name:        "Stop Services",
				Description: "Stop all services that use the database",
				Command:     "systemctl",
				Args:        []string{"stop", "debian-forge"},
				Timeout:     30 * time.Second,
				Rollback:    "systemctl start debian-forge",
			},
			{
				// NOTE(review): no backup file argument is passed here —
				// presumably supplied at execution time; confirm.
				ID:          "restore_database",
				Name:        "Restore Database",
				Description: "Restore database from backup file",
				Command:     "pg_restore",
				Args:        []string{"--clean", "--if-exists", "--dbname=debian_forge"},
				Timeout:     300 * time.Second,
				Rollback:    "restore_previous_database",
			},
			{
				ID:          "start_services",
				Name:        "Start Services",
				Description: "Start all services",
				Command:     "systemctl",
				Args:        []string{"start", "debian-forge"},
				Timeout:     60 * time.Second,
				Rollback:    "systemctl stop debian-forge",
			},
		},
		Prerequisites: []string{"backup_file_exists", "database_stopped"},
		EstimatedTime: 10 * time.Minute,
		RiskLevel:     "medium",
		Enabled:       true,
	}
	// File system recovery: mount the backup volume and rsync its
	// contents over the live data directory.
	rm.procedures["filesystem_recovery"] = RecoveryProcedure{
		ID:          "filesystem_recovery",
		Name:        "File System Recovery",
		Description: "Recover file system from backup",
		Type:        "filesystem",
		Steps: []RecoveryStep{
			{
				ID:          "mount_backup",
				Name:        "Mount Backup",
				Description: "Mount backup volume",
				Command:     "mount",
				Args:        []string{"/dev/backup", "/mnt/backup"},
				Timeout:     30 * time.Second,
				Rollback:    "umount /mnt/backup",
			},
			{
				// --delete makes the target an exact mirror of the backup,
				// removing files that are not present in the backup.
				ID:          "restore_files",
				Name:        "Restore Files",
				Description: "Restore files from backup",
				Command:     "rsync",
				Args:        []string{"-av", "--delete", "/mnt/backup/", "/var/lib/debian-forge/"},
				Timeout:     600 * time.Second,
				Rollback:    "restore_from_previous_backup",
			},
		},
		Prerequisites: []string{"backup_volume_available", "sufficient_space"},
		EstimatedTime: 15 * time.Minute,
		RiskLevel:     "low",
		Enabled:       true,
	}
}
// initializePlans registers the built-in recovery plans. A plan names the
// procedures to execute (in order) and records its recovery objectives
// (RTO/RPO).
func (rm *RecoveryManager) initializePlans() {
	// Critical plan: full database plus filesystem recovery with tight
	// objectives.
	rm.plans["critical"] = RecoveryPlan{
		ID:          "critical",
		Name:        "Critical Recovery Plan",
		Description: "Recovery plan for critical system failures",
		Procedures:  []string{"database_recovery", "filesystem_recovery"},
		Priority:    "critical",
		RTO:         1 * time.Hour,
		RPO:         15 * time.Minute,
		Enabled:     true,
	}
	// Standard plan: filesystem recovery only, with relaxed objectives.
	rm.plans["standard"] = RecoveryPlan{
		ID:          "standard",
		Name:        "Standard Recovery Plan",
		Description: "Standard recovery plan for normal operations",
		Procedures:  []string{"filesystem_recovery"},
		Priority:    "normal",
		RTO:         4 * time.Hour,
		RPO:         1 * time.Hour,
		Enabled:     true,
	}
}
// initializeScenarios registers the built-in recovery test scenarios.
// Step actions are symbolic names; executeTestStep currently records a
// synthetic completion rather than performing them.
func (rt *RecoveryTesting) initializeScenarios() {
	// Database recovery test: seed data, simulate corruption, then run the
	// database_recovery procedure and verify the outcome.
	rt.scenarios["database_recovery_test"] = TestScenario{
		ID:          "database_recovery_test",
		Name:        "Database Recovery Test",
		Description: "Test database recovery procedure",
		Type:        "recovery",
		Steps: []TestStep{
			{
				ID:          "create_test_data",
				Name:        "Create Test Data",
				Description: "Create test data in database",
				Action:      "create_test_records",
				Parameters:  map[string]interface{}{"count": 100},
				Validation:  "verify_test_data_exists",
			},
			{
				ID:          "simulate_failure",
				Name:        "Simulate Failure",
				Description: "Simulate database failure",
				Action:      "corrupt_database",
				Parameters:  map[string]interface{}{"severity": "medium"},
				Validation:  "verify_database_corrupted",
			},
			{
				ID:          "execute_recovery",
				Name:        "Execute Recovery",
				Description: "Execute recovery procedure",
				Action:      "run_recovery_procedure",
				Parameters:  map[string]interface{}{"procedure": "database_recovery"},
				Validation:  "verify_database_recovered",
			},
		},
		// Expected outcomes compared by validateResults (currently a
		// placeholder that always passes).
		Expected: map[string]interface{}{
			"recovery_time":        "10m",
			"data_integrity":       "100%",
			"service_availability": "100%",
		},
		Enabled: true,
	}
}
// CreateBackup runs the named backup strategy and returns the resulting
// job record. On execution failure the job is still returned (status
// "failed", Error populated) alongside the error; a nil job is returned
// only when the strategy is unknown or disabled.
func (bm *BackupManager) CreateBackup(strategyID string) (*BackupJob, error) {
	bm.logger.Infof("Creating backup using strategy: %s", strategyID)

	strategy, ok := bm.strategies[strategyID]
	switch {
	case !ok:
		return nil, fmt.Errorf("backup strategy not found: %s", strategyID)
	case !strategy.Enabled:
		return nil, fmt.Errorf("backup strategy is disabled: %s", strategyID)
	}

	job := &BackupJob{
		ID:         generateBackupID(),
		StrategyID: strategyID,
		Status:     "running",
		StartTime:  time.Now(),
		Metadata:   map[string]interface{}{},
	}

	runErr := bm.executeBackup(job, strategy)
	job.EndTime = time.Now()
	job.Duration = job.EndTime.Sub(job.StartTime)

	if runErr != nil {
		job.Status = "failed"
		job.Error = runErr.Error()
		return job, fmt.Errorf("backup execution failed: %w", runErr)
	}

	job.Status = "completed"
	bm.logger.Infof("Backup completed successfully: %s", job.ID)
	return job, nil
}
// executeBackup materializes the archive for a single backup job,
// records size, checksum, and path on the job, and persists the job
// record.
//
// The previous implementation silently ignored os.Stat and checksum
// errors, which could record a "successful" job with no size or checksum.
// A stat failure is now logged (size is cosmetic metadata); a checksum
// failure fails the backup, since the checksum is what lets a restore
// verify archive integrity.
func (bm *BackupManager) executeBackup(job *BackupJob, strategy BackupStrategy) error {
	// Each job gets its own directory under the storage root.
	backupDir := filepath.Join(bm.storage.path, job.ID)
	if err := os.MkdirAll(backupDir, 0755); err != nil {
		return fmt.Errorf("failed to create backup directory: %w", err)
	}

	archivePath := filepath.Join(backupDir, "backup.tar")
	if strategy.Compression {
		archivePath += ".gz"
	}
	if err := bm.createArchive(archivePath, strategy.Paths, strategy.Exclude, strategy.Compression); err != nil {
		return fmt.Errorf("failed to create archive: %w", err)
	}

	// Archive size is informational; log rather than fail on a stat error.
	fileInfo, err := os.Stat(archivePath)
	if err != nil {
		bm.logger.Warnf("could not stat backup archive %s: %v", archivePath, err)
	} else {
		job.Size = fileInfo.Size()
	}

	checksum, err := bm.calculateChecksum(archivePath)
	if err != nil {
		return fmt.Errorf("failed to checksum archive %s: %w", archivePath, err)
	}
	job.Checksum = checksum
	job.Path = archivePath

	return bm.storage.storeBackupJob(job)
}
// createArchive writes an optionally gzip-compressed tar archive of the
// given paths to archivePath, skipping entries matched by exclude.
//
// Close errors on the tar and gzip writers are checked explicitly: both
// Close calls flush buffered data, so the previous deferred, unchecked
// closes could report a truncated archive as success.
func (bm *BackupManager) createArchive(archivePath string, paths []string, exclude []string, compression bool) error {
	file, err := os.Create(archivePath)
	if err != nil {
		return fmt.Errorf("failed to create archive file: %w", err)
	}
	defer file.Close()

	var writer io.Writer = file
	var gzipWriter *gzip.Writer
	if compression {
		gzipWriter = gzip.NewWriter(file)
		writer = gzipWriter
	}
	tarWriter := tar.NewWriter(writer)

	for _, path := range paths {
		if err := bm.addPathToArchive(tarWriter, path, exclude); err != nil {
			// Best-effort cleanup of the writer chain; the add error is
			// the one worth reporting.
			tarWriter.Close()
			if gzipWriter != nil {
				gzipWriter.Close()
			}
			return fmt.Errorf("failed to add path to archive: %w", err)
		}
	}

	// Close innermost-first so each layer flushes into the next.
	if err := tarWriter.Close(); err != nil {
		return fmt.Errorf("failed to finalize tar stream: %w", err)
	}
	if gzipWriter != nil {
		if err := gzipWriter.Close(); err != nil {
			return fmt.Errorf("failed to finalize gzip stream: %w", err)
		}
	}
	return nil
}
// addPathToArchive walks path and appends every non-excluded entry to
// tarWriter.
//
// Only regular files have their contents copied. The previous version
// copied bytes for every non-directory: a symlink's header declares size
// 0, so writing body bytes for it corrupts the archive ("write too
// long"). It also passed filePath as the link argument of
// tar.FileInfoHeader, producing bogus link targets; the real symlink
// target is now resolved with os.Readlink.
func (bm *BackupManager) addPathToArchive(tarWriter *tar.Writer, path string, exclude []string) error {
	return filepath.Walk(path, func(filePath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if bm.shouldExclude(filePath, exclude) {
			return nil
		}

		// For symlinks the header must carry the link target.
		link := ""
		if info.Mode()&os.ModeSymlink != 0 {
			if link, err = os.Readlink(filePath); err != nil {
				return err
			}
		}
		header, err := tar.FileInfoHeader(info, link)
		if err != nil {
			return err
		}
		// Store entries with relative names so extraction is relocatable.
		header.Name = strings.TrimPrefix(filePath, "/")
		if err := tarWriter.WriteHeader(header); err != nil {
			return err
		}

		// Directories, symlinks, and special files are header-only.
		if !info.Mode().IsRegular() {
			return nil
		}
		file, err := os.Open(filePath)
		if err != nil {
			return err
		}
		defer file.Close()
		_, err = io.Copy(tarWriter, file)
		return err
	})
}
// shouldExclude reports whether filePath matches any exclusion pattern.
//
// Glob patterns such as "*.tmp" (as configured in initializeStrategies)
// are matched against the file's base name with filepath.Match; the
// previous strings.Contains check could never match them because paths
// do not contain a literal '*'. Plain substring patterns keep the old
// Contains semantics for backward compatibility.
func (bm *BackupManager) shouldExclude(filePath string, exclude []string) bool {
	base := filepath.Base(filePath)
	for _, pattern := range exclude {
		if matched, err := filepath.Match(pattern, base); err == nil && matched {
			return true
		}
		if strings.Contains(filePath, pattern) {
			return true
		}
	}
	return false
}
// calculateChecksum returns the hex-encoded SHA-256 digest of the file at
// filePath.
func (bm *BackupManager) calculateChecksum(filePath string) (string, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	digest := sha256.New()
	if _, copyErr := io.Copy(digest, f); copyErr != nil {
		return "", copyErr
	}
	return fmt.Sprintf("%x", digest.Sum(nil)), nil
}
// ExecuteRecovery runs every procedure named by the given plan, in order,
// stopping at the first procedure failure. Unknown procedure IDs are
// logged and skipped rather than treated as fatal.
//
// NOTE(review): backupID is passed through to executeProcedure but not
// otherwise used in this file — confirm intended semantics.
func (rm *RecoveryManager) ExecuteRecovery(planID string, backupID string) error {
	rm.logger.Infof("Executing recovery plan: %s with backup: %s", planID, backupID)

	plan, ok := rm.plans[planID]
	if !ok {
		return fmt.Errorf("recovery plan not found: %s", planID)
	}
	if !plan.Enabled {
		return fmt.Errorf("recovery plan is disabled: %s", planID)
	}

	for _, procedureID := range plan.Procedures {
		proc, found := rm.procedures[procedureID]
		if !found {
			rm.logger.Warnf("Recovery procedure not found: %s", procedureID)
			continue
		}
		if err := rm.executeProcedure(proc, backupID); err != nil {
			return fmt.Errorf("recovery procedure failed: %w", err)
		}
	}

	rm.logger.Infof("Recovery plan completed successfully: %s", planID)
	return nil
}
// executeProcedure verifies prerequisites and then runs the procedure's
// steps in order, stopping at the first failure.
func (rm *RecoveryManager) executeProcedure(procedure RecoveryProcedure, backupID string) error {
	rm.logger.Infof("Executing recovery procedure: %s", procedure.ID)

	if err := rm.checkPrerequisites(procedure.Prerequisites); err != nil {
		return fmt.Errorf("prerequisites not met: %w", err)
	}

	steps := procedure.Steps
	for i := range steps {
		if err := rm.executeStep(steps[i]); err != nil {
			return fmt.Errorf("step failed: %s - %w", steps[i].ID, err)
		}
	}
	return nil
}
// checkPrerequisites validates that every named prerequisite holds before
// a procedure runs.
//
// NOTE(review): placeholder — always returns nil, so prerequisites such
// as "backup_file_exists" are never actually verified.
func (rm *RecoveryManager) checkPrerequisites(prerequisites []string) error {
	// This is a placeholder for prerequisite checking
	// In production, implement actual prerequisite validation
	return nil
}
// executeStep runs a single recovery step.
//
// NOTE(review): placeholder — the step's Command/Args are never executed;
// the function only logs and returns nil.
func (rm *RecoveryManager) executeStep(step RecoveryStep) error {
	rm.logger.Infof("Executing recovery step: %s", step.ID)
	// This is a placeholder for step execution
	// In production, implement actual step execution logic
	rm.logger.Infof("Step %s completed: %s", step.ID, step.Description)
	return nil
}
// RunTest executes the named test scenario and returns its result.
//
// Both failed and successful runs are now stored in rt.results; the
// previous implementation only stored successes, silently dropping
// failure history. On failure the result is returned (status "failed",
// Error populated) alongside the error.
func (rt *RecoveryTesting) RunTest(scenarioID string) (*TestResult, error) {
	rt.logger.Infof("Running recovery test scenario: %s", scenarioID)

	scenario, exists := rt.scenarios[scenarioID]
	if !exists {
		return nil, fmt.Errorf("test scenario not found: %s", scenarioID)
	}
	if !scenario.Enabled {
		return nil, fmt.Errorf("test scenario is disabled: %s", scenarioID)
	}

	result := &TestResult{
		ID:         generateTestID(),
		ScenarioID: scenarioID,
		Status:     "running",
		StartTime:  time.Now(),
		Results:    make(map[string]interface{}),
		Metadata:   make(map[string]interface{}),
	}

	runErr := rt.executeScenario(scenario, result)
	result.EndTime = time.Now()
	result.Duration = result.EndTime.Sub(result.StartTime)

	if runErr != nil {
		result.Status = "failed"
		result.Error = runErr.Error()
		// Keep failed runs so test history is auditable.
		rt.results[result.ID] = *result
		return result, fmt.Errorf("test scenario failed: %w", runErr)
	}

	result.Status = "completed"
	rt.results[result.ID] = *result
	rt.logger.Infof("Test scenario completed successfully: %s", scenarioID)
	return result, nil
}
// executeScenario runs each step of the scenario in order and then checks
// the accumulated results against the scenario's expected outcomes.
func (rt *RecoveryTesting) executeScenario(scenario TestScenario, result *TestResult) error {
	rt.logger.Infof("Executing test scenario: %s", scenario.ID)

	steps := scenario.Steps
	for i := range steps {
		if err := rt.executeTestStep(steps[i], result); err != nil {
			return fmt.Errorf("test step failed: %s - %w", steps[i].ID, err)
		}
	}

	if err := rt.validateResults(scenario.Expected, result.Results); err != nil {
		return fmt.Errorf("test validation failed: %w", err)
	}
	return nil
}
// executeTestStep runs one scenario step and records its outcome in
// result.Results under the step ID.
//
// Placeholder: the step's Action is not actually performed; a synthetic
// completion record is stored so validation has something to inspect.
func (rt *RecoveryTesting) executeTestStep(step TestStep, result *TestResult) error {
	rt.logger.Infof("Executing test step: %s", step.ID)
	outcome := map[string]interface{}{
		"status":  "completed",
		"message": step.Description,
	}
	result.Results[step.ID] = outcome
	return nil
}
// validateResults compares a scenario's expected outcomes against the
// actual per-step results.
//
// NOTE(review): placeholder — always returns nil, so every scenario
// "passes" validation regardless of its results.
func (rt *RecoveryTesting) validateResults(expected map[string]interface{}, actual map[string]interface{}) error {
	// This is a placeholder for result validation
	// In production, implement actual validation logic
	return nil
}
// BackupStorage methods

// storeBackupJob persists a backup job record as indented JSON named
// backup_job_<id>_<start-timestamp>.json under the storage root,
// creating the directory if needed. Writes are serialized by bs.mu.
func (bs *BackupStorage) storeBackupJob(job *BackupJob) error {
	bs.mu.Lock()
	defer bs.mu.Unlock()

	if err := os.MkdirAll(bs.path, 0755); err != nil {
		return fmt.Errorf("failed to create data directory: %w", err)
	}

	stamp := job.StartTime.Format("2006-01-02_15-04-05")
	target := filepath.Join(bs.path, fmt.Sprintf("backup_job_%s_%s.json", job.ID, stamp))

	payload, err := json.MarshalIndent(job, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal backup job: %w", err)
	}
	if err := os.WriteFile(target, payload, 0644); err != nil {
		return fmt.Errorf("failed to write backup job: %w", err)
	}
	return nil
}
// Helper functions

// generateBackupID derives a process-unique backup identifier from the
// current wall clock in nanoseconds.
func generateBackupID() string {
	return "backup-" + fmt.Sprint(time.Now().UnixNano())
}
// generateTestID derives a process-unique test-result identifier from the
// current wall clock in nanoseconds.
func generateTestID() string {
	return "test-" + fmt.Sprint(time.Now().UnixNano())
}