diff --git a/go.mod b/go.mod index 470a347..1916284 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/gorilla/websocket v1.5.3 github.com/lib/pq v1.10.9 github.com/mattn/go-sqlite3 v1.14.22 + github.com/robfig/cron/v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 087dfb5..316f30f 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZ github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/internal/api/backup_handlers.go b/internal/api/backup_handlers.go new file mode 100644 index 0000000..c8d1c69 --- /dev/null +++ b/internal/api/backup_handlers.go @@ -0,0 +1,256 @@ +package api + +import ( + "net/http" + "strconv" + + "github.com/flatrun/agent/internal/backup" + "github.com/flatrun/agent/pkg/models" + "github.com/gin-gonic/gin" +) + +func (s *Server) listBackups(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + filter := &backup.BackupListFilter{ + DeploymentName: c.Query("deployment"), + } + + if limit := c.Query("limit"); limit != "" { + if l, err := strconv.Atoi(limit); err == nil { + filter.Limit = l + } + } + + backups, err := s.backupManager.ListBackups(filter) + if err != nil { + 
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"backups": backups}) +} + +func (s *Server) getBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + backupID := c.Param("id") + b, err := s.backupManager.GetBackup(backupID) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"backup": b}) +} + +func (s *Server) createBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + var req backup.CreateBackupRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + deployment, err := s.manager.GetDeployment(req.DeploymentName) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Deployment not found: " + req.DeploymentName}) + return + } + + var spec *backup.BackupSpec + if deployment.Metadata != nil && deployment.Metadata.Backup != nil { + spec = deployment.Metadata.Backup + } + + jobID := s.backupManager.StartBackupJob(req.DeploymentName, spec) + c.JSON(http.StatusAccepted, gin.H{"job_id": jobID, "message": "Backup job started"}) +} + +func (s *Server) createDeploymentBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + deploymentName := c.Param("name") + deployment, err := s.manager.GetDeployment(deploymentName) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Deployment not found"}) + return + } + + var spec *backup.BackupSpec + if deployment.Metadata != nil && deployment.Metadata.Backup != nil { + spec = deployment.Metadata.Backup + } + + jobID := s.backupManager.StartBackupJob(deploymentName, spec) + c.JSON(http.StatusAccepted, 
gin.H{"job_id": jobID, "message": "Backup job started"}) +} + +func (s *Server) listDeploymentBackups(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + deploymentName := c.Param("name") + + filter := &backup.BackupListFilter{ + DeploymentName: deploymentName, + } + + if limit := c.Query("limit"); limit != "" { + if l, err := strconv.Atoi(limit); err == nil { + filter.Limit = l + } + } + + backups, err := s.backupManager.ListBackups(filter) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"backups": backups}) +} + +func (s *Server) deleteBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + backupID := c.Param("id") + if err := s.backupManager.DeleteBackup(backupID); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Backup deleted successfully"}) +} + +func (s *Server) downloadBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + backupID := c.Param("id") + backupPath, err := s.backupManager.GetBackupPath(backupID) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) + return + } + + c.Header("Content-Description", "File Transfer") + c.Header("Content-Disposition", "attachment; filename="+backupID+".tar.gz") + c.Header("Content-Type", "application/gzip") + c.File(backupPath) +} + +func (s *Server) getDeploymentBackupConfig(c *gin.Context) { + deploymentName := c.Param("name") + deployment, err := s.manager.GetDeployment(deploymentName) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Deployment not found"}) + return + } + + var spec *backup.BackupSpec + 
if deployment.Metadata != nil && deployment.Metadata.Backup != nil { + spec = deployment.Metadata.Backup + } + + c.JSON(http.StatusOK, gin.H{"backup_config": spec}) +} + +func (s *Server) updateDeploymentBackupConfig(c *gin.Context) { + deploymentName := c.Param("name") + deployment, err := s.manager.GetDeployment(deploymentName) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Deployment not found"}) + return + } + + var spec backup.BackupSpec + if err := c.ShouldBindJSON(&spec); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if deployment.Metadata == nil { + deployment.Metadata = &models.ServiceMetadata{} + } + deployment.Metadata.Backup = &spec + + if err := s.manager.SaveMetadata(deploymentName, deployment.Metadata); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"backup_config": spec}) +} + +func (s *Server) restoreBackup(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + backupID := c.Param("id") + + var req backup.RestoreBackupRequest + if err := c.ShouldBindJSON(&req); err != nil { + req = backup.RestoreBackupRequest{} + } + req.BackupID = backupID + + jobID := s.backupManager.StartRestoreJob(&req) + c.JSON(http.StatusAccepted, gin.H{"job_id": jobID, "message": "Restore job started"}) +} + +func (s *Server) getBackupJob(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager not enabled"}) + return + } + + jobID := c.Param("id") + job := s.backupManager.GetJob(jobID) + if job == nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Job not found"}) + return + } + + c.JSON(http.StatusOK, gin.H{"job": job}) +} + +func (s *Server) listBackupJobs(c *gin.Context) { + if s.backupManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Backup manager 
not enabled"}) + return + } + + deploymentName := c.Query("deployment") + limit := 50 + if l := c.Query("limit"); l != "" { + if parsed, err := strconv.Atoi(l); err == nil { + limit = parsed + } + } + + jobs := s.backupManager.ListJobs(deploymentName, limit) + c.JSON(http.StatusOK, gin.H{"jobs": jobs}) +} diff --git a/internal/api/scheduler_handlers.go b/internal/api/scheduler_handlers.go new file mode 100644 index 0000000..20360ad --- /dev/null +++ b/internal/api/scheduler_handlers.go @@ -0,0 +1,211 @@ +package api + +import ( + "net/http" + "strconv" + + "github.com/flatrun/agent/internal/scheduler" + "github.com/gin-gonic/gin" +) + +func (s *Server) listScheduledTasks(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + deploymentName := c.Query("deployment") + + var tasks []scheduler.ScheduledTask + var err error + + if deploymentName != "" { + tasks, err = s.schedulerManager.GetTasksByDeployment(deploymentName) + } else { + tasks, err = s.schedulerManager.GetAllTasks() + } + + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"tasks": tasks}) +} + +func (s *Server) getScheduledTask(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid task ID"}) + return + } + + task, err := s.schedulerManager.GetTask(id) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Task not found"}) + return + } + + c.JSON(http.StatusOK, gin.H{"task": task}) +} + +func (s *Server) createScheduledTask(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + var req scheduler.CreateTaskRequest + 
if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if err := s.schedulerManager.ValidateCronExpr(req.CronExpr); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid cron expression: " + err.Error()}) + return + } + + _, err := s.manager.GetDeployment(req.DeploymentName) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Deployment not found: " + req.DeploymentName}) + return + } + + task, err := s.schedulerManager.CreateTask(&req) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{"task": task}) +} + +func (s *Server) updateScheduledTask(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid task ID"}) + return + } + + var req scheduler.UpdateTaskRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if req.CronExpr != nil { + if err := s.schedulerManager.ValidateCronExpr(*req.CronExpr); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid cron expression: " + err.Error()}) + return + } + } + + task, err := s.schedulerManager.UpdateTask(id, &req) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"task": task}) +} + +func (s *Server) deleteScheduledTask(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid task ID"}) + return + } + + if err := 
s.schedulerManager.DeleteTask(id); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Task deleted successfully"}) +} + +func (s *Server) runTaskNow(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid task ID"}) + return + } + + if err := s.schedulerManager.RunTaskNow(id); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusAccepted, gin.H{"message": "Task execution started"}) +} + +func (s *Server) getTaskExecutions(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid task ID"}) + return + } + + limit := 50 + if l := c.Query("limit"); l != "" { + if parsed, err := strconv.Atoi(l); err == nil { + limit = parsed + } + } + + executions, err := s.schedulerManager.GetTaskExecutions(id, limit) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"executions": executions}) +} + +func (s *Server) getRecentExecutions(c *gin.Context) { + if s.schedulerManager == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Scheduler not enabled"}) + return + } + + limit := 50 + if l := c.Query("limit"); l != "" { + if parsed, err := strconv.Atoi(l); err == nil { + limit = parsed + } + } + + executions, err := s.schedulerManager.GetRecentExecutions(limit) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"executions": 
executions}) +} diff --git a/internal/api/server.go b/internal/api/server.go index fbaec7b..705e5c9 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -16,6 +16,7 @@ import ( "time" "github.com/flatrun/agent/internal/auth" + "github.com/flatrun/agent/internal/backup" "github.com/flatrun/agent/internal/certs" "github.com/flatrun/agent/internal/credentials" "github.com/flatrun/agent/internal/database" @@ -24,6 +25,7 @@ import ( "github.com/flatrun/agent/internal/infra" "github.com/flatrun/agent/internal/networks" "github.com/flatrun/agent/internal/proxy" + "github.com/flatrun/agent/internal/scheduler" "github.com/flatrun/agent/internal/security" "github.com/flatrun/agent/internal/system" "github.com/flatrun/agent/internal/traffic" @@ -55,6 +57,8 @@ type Server struct { credentialsManager *credentials.Manager securityManager *security.Manager trafficManager *traffic.Manager + backupManager *backup.Manager + schedulerManager *scheduler.Manager } func New(cfg *config.Config, configPath string) *Server { @@ -121,6 +125,12 @@ func New(cfg *config.Config, configPath string) *Server { log.Printf("Warning: Failed to initialize traffic manager: %v", err) } + var backupManager *backup.Manager + backupManager, err = backup.NewManager(cfg.DeploymentsPath) + if err != nil { + log.Printf("Warning: Failed to initialize backup manager: %v", err) + } + s := &Server{ config: cfg, configPath: configPath, @@ -138,6 +148,18 @@ func New(cfg *config.Config, configPath string) *Server { credentialsManager: credentialsManager, securityManager: securityManager, trafficManager: trafficManager, + backupManager: backupManager, + } + + if backupManager != nil { + executor := scheduler.NewExecutor(backupManager, manager) + schedulerManager, err := scheduler.NewManager(cfg.DeploymentsPath, executor) + if err != nil { + log.Printf("Warning: Failed to initialize scheduler manager: %v", err) + } else { + s.schedulerManager = schedulerManager + schedulerManager.Start() + } } 
s.setupRoutes() @@ -306,6 +328,30 @@ func (s *Server) setupRoutes() { protected.GET("/traffic/unknown-domains", s.getUnknownDomainStats) protected.POST("/traffic/cleanup", s.cleanupTrafficLogs) protected.GET("/deployments/:name/traffic", s.getDeploymentTrafficStats) + + // Backup endpoints + protected.GET("/backups", s.listBackups) + protected.GET("/backups/:id", s.getBackup) + protected.POST("/backups", s.createBackup) + protected.DELETE("/backups/:id", s.deleteBackup) + protected.GET("/backups/:id/download", s.downloadBackup) + protected.GET("/deployments/:name/backups", s.listDeploymentBackups) + protected.POST("/deployments/:name/backups", s.createDeploymentBackup) + protected.GET("/deployments/:name/backup-config", s.getDeploymentBackupConfig) + protected.PUT("/deployments/:name/backup-config", s.updateDeploymentBackupConfig) + protected.POST("/backups/:id/restore", s.restoreBackup) + protected.GET("/backup-jobs", s.listBackupJobs) + protected.GET("/backup-jobs/:id", s.getBackupJob) + + // Scheduler endpoints + protected.GET("/scheduler/tasks", s.listScheduledTasks) + protected.GET("/scheduler/tasks/:id", s.getScheduledTask) + protected.POST("/scheduler/tasks", s.createScheduledTask) + protected.PUT("/scheduler/tasks/:id", s.updateScheduledTask) + protected.DELETE("/scheduler/tasks/:id", s.deleteScheduledTask) + protected.POST("/scheduler/tasks/:id/run", s.runTaskNow) + protected.GET("/scheduler/tasks/:id/executions", s.getTaskExecutions) + protected.GET("/scheduler/executions", s.getRecentExecutions) } // Ingest endpoints (no auth - called by nginx Lua) diff --git a/internal/backup/jobs.go b/internal/backup/jobs.go new file mode 100644 index 0000000..840d5eb --- /dev/null +++ b/internal/backup/jobs.go @@ -0,0 +1,193 @@ +package backup + +import ( + "context" + "sync" + "time" +) + +type JobType string +type JobStatus string + +const ( + JobTypeBackup JobType = "backup" + JobTypeRestore JobType = "restore" + + JobStatusPending JobStatus = "pending" + 
JobStatusRunning JobStatus = "running" + JobStatusCompleted JobStatus = "completed" + JobStatusFailed JobStatus = "failed" +) + +type Job struct { + ID string `json:"id"` + Type JobType `json:"type"` + Status JobStatus `json:"status"` + DeploymentName string `json:"deployment_name"` + BackupID string `json:"backup_id,omitempty"` + Progress string `json:"progress,omitempty"` + Error string `json:"error,omitempty"` + StartedAt time.Time `json:"started_at"` + CompletedAt *time.Time `json:"completed_at,omitempty"` +} + +type JobTracker struct { + jobs map[string]*Job + mu sync.RWMutex +} + +func NewJobTracker() *JobTracker { + return &JobTracker{ + jobs: make(map[string]*Job), + } +} + +func (t *JobTracker) CreateJob(id string, jobType JobType, deploymentName string) *Job { + t.mu.Lock() + defer t.mu.Unlock() + + job := &Job{ + ID: id, + Type: jobType, + Status: JobStatusPending, + DeploymentName: deploymentName, + StartedAt: time.Now(), + } + t.jobs[id] = job + return job +} + +func (t *JobTracker) GetJob(id string) *Job { + t.mu.RLock() + defer t.mu.RUnlock() + return t.jobs[id] +} + +func (t *JobTracker) UpdateStatus(id string, status JobStatus, progress string) { + t.mu.Lock() + defer t.mu.Unlock() + + if job, ok := t.jobs[id]; ok { + job.Status = status + job.Progress = progress + if status == JobStatusCompleted || status == JobStatusFailed { + now := time.Now() + job.CompletedAt = &now + } + } +} + +func (t *JobTracker) SetError(id string, err error) { + t.mu.Lock() + defer t.mu.Unlock() + + if job, ok := t.jobs[id]; ok { + job.Status = JobStatusFailed + job.Error = err.Error() + now := time.Now() + job.CompletedAt = &now + } +} + +func (t *JobTracker) SetBackupID(id string, backupID string) { + t.mu.Lock() + defer t.mu.Unlock() + + if job, ok := t.jobs[id]; ok { + job.BackupID = backupID + } +} + +func (t *JobTracker) ListJobs(deploymentName string, limit int) []*Job { + t.mu.RLock() + defer t.mu.RUnlock() + + var jobs []*Job + for _, job := range t.jobs { + if 
deploymentName == "" || job.DeploymentName == deploymentName { + jobs = append(jobs, job) + } + } + + if limit > 0 && len(jobs) > limit { + jobs = jobs[:limit] + } + + return jobs +} + +func (t *JobTracker) Cleanup(olderThan time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + + cutoff := time.Now().Add(-olderThan) + for id, job := range t.jobs { + if job.CompletedAt != nil && job.CompletedAt.Before(cutoff) { + delete(t.jobs, id) + } + } +} + +func (m *Manager) StartBackupJob(deploymentName string, spec *BackupSpec) string { + jobID := generateJobID("backup", deploymentName) + m.jobs.CreateJob(jobID, JobTypeBackup, deploymentName) + + go func() { + m.jobs.UpdateStatus(jobID, JobStatusRunning, "Starting backup") + + backup, err := m.CreateBackup(context.Background(), deploymentName, spec) + if err != nil { + m.jobs.SetError(jobID, err) + return + } + + m.jobs.SetBackupID(jobID, backup.ID) + m.jobs.UpdateStatus(jobID, JobStatusCompleted, "Backup completed") + }() + + return jobID +} + +func (m *Manager) StartRestoreJob(req *RestoreBackupRequest) string { + backup, err := m.GetBackup(req.BackupID) + if err != nil { + jobID := generateJobID("restore", req.BackupID) + m.jobs.CreateJob(jobID, JobTypeRestore, "") + m.jobs.SetError(jobID, err) + return jobID + } + + deploymentName := backup.DeploymentName + if req.DeploymentName != "" { + deploymentName = req.DeploymentName + } + + jobID := generateJobID("restore", deploymentName) + m.jobs.CreateJob(jobID, JobTypeRestore, deploymentName) + m.jobs.SetBackupID(jobID, req.BackupID) + + go func() { + m.jobs.UpdateStatus(jobID, JobStatusRunning, "Starting restore") + + if err := m.RestoreBackup(context.Background(), req); err != nil { + m.jobs.SetError(jobID, err) + return + } + + m.jobs.UpdateStatus(jobID, JobStatusCompleted, "Restore completed") + }() + + return jobID +} + +func (m *Manager) GetJob(jobID string) *Job { + return m.jobs.GetJob(jobID) +} + +func (m *Manager) ListJobs(deploymentName string, limit int) []*Job { + 
return m.jobs.ListJobs(deploymentName, limit) +} + +func generateJobID(prefix, name string) string { + return prefix + "_" + name + "_" + time.Now().Format("20060102_150405") +} diff --git a/internal/backup/jobs_test.go b/internal/backup/jobs_test.go new file mode 100644 index 0000000..f50caac --- /dev/null +++ b/internal/backup/jobs_test.go @@ -0,0 +1,205 @@ +package backup + +import ( + "testing" + "time" +) + +func TestNewJobTracker(t *testing.T) { + tracker := NewJobTracker() + if tracker == nil { + t.Fatal("Expected tracker to be non-nil") + } + if tracker.jobs == nil { + t.Fatal("Expected jobs map to be initialized") + } +} + +func TestCreateJob(t *testing.T) { + tracker := NewJobTracker() + + job := tracker.CreateJob("test-job-1", JobTypeBackup, "my-deployment") + + if job == nil { + t.Fatal("Expected job to be non-nil") + } + if job.ID != "test-job-1" { + t.Errorf("Expected ID 'test-job-1', got: %s", job.ID) + } + if job.Type != JobTypeBackup { + t.Errorf("Expected type 'backup', got: %s", job.Type) + } + if job.DeploymentName != "my-deployment" { + t.Errorf("Expected deployment 'my-deployment', got: %s", job.DeploymentName) + } + if job.Status != JobStatusPending { + t.Errorf("Expected status 'pending', got: %s", job.Status) + } + if job.StartedAt.IsZero() { + t.Error("Expected StartedAt to be set") + } +} + +func TestGetJob(t *testing.T) { + tracker := NewJobTracker() + + tracker.CreateJob("test-job-1", JobTypeBackup, "my-deployment") + + job := tracker.GetJob("test-job-1") + if job == nil { + t.Fatal("Expected to find job") + } + if job.ID != "test-job-1" { + t.Errorf("Expected ID 'test-job-1', got: %s", job.ID) + } + + notFound := tracker.GetJob("nonexistent") + if notFound != nil { + t.Error("Expected nil for nonexistent job") + } +} + +func TestUpdateStatus(t *testing.T) { + tracker := NewJobTracker() + + tracker.CreateJob("test-job-1", JobTypeBackup, "my-deployment") + + tracker.UpdateStatus("test-job-1", JobStatusRunning, "Processing...") + + job := 
tracker.GetJob("test-job-1") + if job.Status != JobStatusRunning { + t.Errorf("Expected status 'running', got: %s", job.Status) + } + if job.Progress != "Processing..." { + t.Errorf("Expected progress 'Processing...', got: %s", job.Progress) + } + if job.CompletedAt != nil { + t.Error("Expected CompletedAt to be nil for running job") + } + + tracker.UpdateStatus("test-job-1", JobStatusCompleted, "Done") + + job = tracker.GetJob("test-job-1") + if job.Status != JobStatusCompleted { + t.Errorf("Expected status 'completed', got: %s", job.Status) + } + if job.CompletedAt == nil { + t.Error("Expected CompletedAt to be set for completed job") + } +} + +func TestSetError(t *testing.T) { + tracker := NewJobTracker() + + tracker.CreateJob("test-job-1", JobTypeRestore, "my-deployment") + tracker.UpdateStatus("test-job-1", JobStatusRunning, "Restoring...") + + testErr := &testError{msg: "restore failed: disk full"} + tracker.SetError("test-job-1", testErr) + + job := tracker.GetJob("test-job-1") + if job.Status != JobStatusFailed { + t.Errorf("Expected status 'failed', got: %s", job.Status) + } + if job.Error != "restore failed: disk full" { + t.Errorf("Expected error message, got: %s", job.Error) + } + if job.CompletedAt == nil { + t.Error("Expected CompletedAt to be set for failed job") + } +} + +func TestSetBackupID(t *testing.T) { + tracker := NewJobTracker() + + tracker.CreateJob("test-job-1", JobTypeBackup, "my-deployment") + tracker.SetBackupID("test-job-1", "backup-20240101-120000") + + job := tracker.GetJob("test-job-1") + if job.BackupID != "backup-20240101-120000" { + t.Errorf("Expected backup ID, got: %s", job.BackupID) + } +} + +func TestListJobs(t *testing.T) { + tracker := NewJobTracker() + + tracker.CreateJob("job-1", JobTypeBackup, "deployment-a") + tracker.CreateJob("job-2", JobTypeRestore, "deployment-a") + tracker.CreateJob("job-3", JobTypeBackup, "deployment-b") + + allJobs := tracker.ListJobs("", 0) + if len(allJobs) != 3 { + t.Errorf("Expected 3 jobs, 
got: %d", len(allJobs)) + } + + deploymentAJobs := tracker.ListJobs("deployment-a", 0) + if len(deploymentAJobs) != 2 { + t.Errorf("Expected 2 jobs for deployment-a, got: %d", len(deploymentAJobs)) + } + + limitedJobs := tracker.ListJobs("", 2) + if len(limitedJobs) != 2 { + t.Errorf("Expected 2 jobs with limit, got: %d", len(limitedJobs)) + } +} + +func TestCleanup(t *testing.T) { + tracker := NewJobTracker() + + job1 := tracker.CreateJob("old-job", JobTypeBackup, "deployment") + oldTime := time.Now().Add(-2 * time.Hour) + job1.CompletedAt = &oldTime + job1.Status = JobStatusCompleted + + job2 := tracker.CreateJob("new-job", JobTypeBackup, "deployment") + newTime := time.Now().Add(-10 * time.Minute) + job2.CompletedAt = &newTime + job2.Status = JobStatusCompleted + + tracker.CreateJob("running-job", JobTypeBackup, "deployment") + + tracker.Cleanup(1 * time.Hour) + + if tracker.GetJob("old-job") != nil { + t.Error("Expected old completed job to be cleaned up") + } + if tracker.GetJob("new-job") == nil { + t.Error("Expected recent completed job to remain") + } + if tracker.GetJob("running-job") == nil { + t.Error("Expected running job to remain") + } +} + +func TestJobStatusConstants(t *testing.T) { + if JobStatusPending != "pending" { + t.Errorf("Expected 'pending', got: %s", JobStatusPending) + } + if JobStatusRunning != "running" { + t.Errorf("Expected 'running', got: %s", JobStatusRunning) + } + if JobStatusCompleted != "completed" { + t.Errorf("Expected 'completed', got: %s", JobStatusCompleted) + } + if JobStatusFailed != "failed" { + t.Errorf("Expected 'failed', got: %s", JobStatusFailed) + } +} + +func TestJobTypeConstants(t *testing.T) { + if JobTypeBackup != "backup" { + t.Errorf("Expected 'backup', got: %s", JobTypeBackup) + } + if JobTypeRestore != "restore" { + t.Errorf("Expected 'restore', got: %s", JobTypeRestore) + } +} + +type testError struct { + msg string +} + +func (e *testError) Error() string { + return e.msg +} diff --git 
a/internal/backup/manager.go b/internal/backup/manager.go new file mode 100644 index 0000000..ed203cf --- /dev/null +++ b/internal/backup/manager.go @@ -0,0 +1,927 @@ +package backup + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/flatrun/agent/pkg/version" +) + +type Manager struct { + deploymentsPath string + backupsPath string + jobs *JobTracker +} + +func NewManager(deploymentsPath string) (*Manager, error) { + backupsPath := filepath.Join(deploymentsPath, ".flatrun", "backups") + if err := os.MkdirAll(backupsPath, 0755); err != nil { + return nil, fmt.Errorf("failed to create backups directory: %w", err) + } + + return &Manager{ + deploymentsPath: deploymentsPath, + backupsPath: backupsPath, + jobs: NewJobTracker(), + }, nil +} + +func (m *Manager) CreateBackup(ctx context.Context, deploymentName string, spec *BackupSpec) (*Backup, error) { + deploymentPath := filepath.Join(m.deploymentsPath, deploymentName) + if _, err := os.Stat(deploymentPath); os.IsNotExist(err) { + return nil, fmt.Errorf("deployment not found: %s", deploymentName) + } + + backupID := fmt.Sprintf("%s_%s", deploymentName, time.Now().Format("20060102_150405")) + backupDir := filepath.Join(m.backupsPath, deploymentName) + if err := os.MkdirAll(backupDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create backup directory: %w", err) + } + + backup := &Backup{ + ID: backupID, + DeploymentName: deploymentName, + Status: BackupStatusInProgress, + CreatedAt: time.Now(), + Components: []string{}, + } + + tempDir, err := os.MkdirTemp("", "flatrun-backup-*") + if err != nil { + return nil, fmt.Errorf("failed to create temp directory: %w", err) + } + defer os.RemoveAll(tempDir) + + metadata := BackupMetadata{ + ID: backupID, + DeploymentName: deploymentName, + DeploymentPath: deploymentPath, + CreatedAt: time.Now(), + AgentVersion: version.Version, + 
Components: BackupComponents{}, + } + + if spec != nil { + if err := m.executeHooks(ctx, deploymentName, spec.PreHooks); err != nil { + log.Printf("Backup: pre-hook warning: %v", err) + } + } + + if err := m.backupComposeFile(deploymentPath, tempDir, &metadata); err != nil { + log.Printf("Backup: compose file warning: %v", err) + } else { + backup.Components = append(backup.Components, "compose") + } + + if err := m.backupEnvFile(deploymentPath, tempDir, &metadata); err != nil { + log.Printf("Backup: env file warning: %v", err) + } else { + backup.Components = append(backup.Components, "env") + } + + if err := m.backupMetadataFile(deploymentPath, tempDir, &metadata); err != nil { + log.Printf("Backup: metadata file warning: %v", err) + } else { + backup.Components = append(backup.Components, "metadata") + } + + if err := m.backupMountedData(deploymentPath, tempDir, &metadata); err != nil { + log.Printf("Backup: mounted data warning: %v", err) + } + if len(metadata.Components.MountedData) > 0 { + backup.Components = append(backup.Components, "mounted_data") + } + + if spec != nil && len(spec.ContainerPaths) > 0 { + if err := m.backupContainerData(ctx, deploymentName, spec.ContainerPaths, tempDir, &metadata); err != nil { + log.Printf("Backup: container data warning: %v", err) + } + if len(metadata.Components.ContainerData) > 0 { + backup.Components = append(backup.Components, "container_data") + } + } + + if spec != nil && len(spec.Databases) > 0 { + if err := m.backupDatabases(ctx, deploymentName, spec.Databases, tempDir, &metadata); err != nil { + log.Printf("Backup: database warning: %v", err) + } + if len(metadata.Components.Databases) > 0 { + backup.Components = append(backup.Components, "databases") + } + } + + metadataJSON, _ := json.MarshalIndent(metadata, "", " ") + if err := os.WriteFile(filepath.Join(tempDir, "backup.json"), metadataJSON, 0644); err != nil { + return nil, fmt.Errorf("failed to write backup metadata: %w", err) + } + + archivePath := 
filepath.Join(backupDir, backupID+".tar.gz") + if err := m.createArchive(tempDir, archivePath); err != nil { + backup.Status = BackupStatusFailed + backup.Error = err.Error() + return backup, fmt.Errorf("failed to create backup archive: %w", err) + } + + if spec != nil { + if err := m.executeHooks(ctx, deploymentName, spec.PostHooks); err != nil { + log.Printf("Backup: post-hook warning: %v", err) + } + } + + info, _ := os.Stat(archivePath) + if info != nil { + backup.Size = info.Size() + } + + backup.Path = archivePath + backup.Status = BackupStatusCompleted + now := time.Now() + backup.CompletedAt = &now + + log.Printf("Backup completed: %s (%d bytes)", backupID, backup.Size) + return backup, nil +} + +func (m *Manager) backupComposeFile(deploymentPath, tempDir string, metadata *BackupMetadata) error { + composePath := filepath.Join(deploymentPath, "docker-compose.yml") + if _, err := os.Stat(composePath); err != nil { + composePath = filepath.Join(deploymentPath, "compose.yml") + if _, err := os.Stat(composePath); err != nil { + return fmt.Errorf("compose file not found") + } + } + + destPath := filepath.Join(tempDir, "docker-compose.yml") + if err := copyFile(composePath, destPath); err != nil { + return err + } + + metadata.Components.ComposeFile = true + return nil +} + +func (m *Manager) backupEnvFile(deploymentPath, tempDir string, metadata *BackupMetadata) error { + envFiles := []string{".env", ".env.flatrun"} + envDir := filepath.Join(tempDir, "env") + if err := os.MkdirAll(envDir, 0755); err != nil { + return fmt.Errorf("failed to create env backup directory: %w", err) + } + + found := false + for _, envFile := range envFiles { + envPath := filepath.Join(deploymentPath, envFile) + if _, err := os.Stat(envPath); err == nil { + destPath := filepath.Join(envDir, envFile) + if err := copyFile(envPath, destPath); err == nil { + found = true + } + } + } + + if found { + metadata.Components.EnvFile = true + } + return nil +} + +func (m *Manager) 
backupMetadataFile(deploymentPath, tempDir string, metadata *BackupMetadata) error { + metaPath := filepath.Join(deploymentPath, ".flatrun.yml") + if _, err := os.Stat(metaPath); err != nil { + return nil + } + + destPath := filepath.Join(tempDir, ".flatrun.yml") + if err := copyFile(metaPath, destPath); err != nil { + return err + } + + metadata.Components.Metadata = true + return nil +} + +func (m *Manager) backupMountedData(deploymentPath, tempDir string, metadata *BackupMetadata) error { + dataDir := filepath.Join(tempDir, "data") + if err := os.MkdirAll(dataDir, 0755); err != nil { + return fmt.Errorf("failed to create data backup directory: %w", err) + } + + commonDataDirs := []string{"data", "uploads", "storage", "config", "logs"} + for _, dir := range commonDataDirs { + srcPath := filepath.Join(deploymentPath, dir) + if info, err := os.Stat(srcPath); err == nil && info.IsDir() { + destPath := filepath.Join(dataDir, dir) + if err := copyDir(srcPath, destPath); err != nil { + log.Printf("Backup: failed to copy %s: %v", dir, err) + continue + } + metadata.Components.MountedData = append(metadata.Components.MountedData, dir) + } + } + + return nil +} + +func (m *Manager) backupContainerData(ctx context.Context, deploymentName string, paths []ContainerPath, tempDir string, metadata *BackupMetadata) error { + containerDir := filepath.Join(tempDir, "container_data") + if err := os.MkdirAll(containerDir, 0755); err != nil { + return fmt.Errorf("failed to create container data backup directory: %w", err) + } + + for _, path := range paths { + containerName := fmt.Sprintf("%s-%s", deploymentName, path.Service) + if path.Service == deploymentName || path.Service == "" { + containerName = deploymentName + } + + destPath := filepath.Join(containerDir, path.Service, filepath.Base(path.ContainerPath)) + if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return fmt.Errorf("failed to create directory for %s: %w", path.ContainerPath, err) + } + + cmd := 
exec.CommandContext(ctx, "docker", "cp", fmt.Sprintf("%s:%s", containerName, path.ContainerPath), destPath) + if err := cmd.Run(); err != nil { + if path.Required { + return fmt.Errorf("failed to copy %s from container %s: %w", path.ContainerPath, containerName, err) + } + log.Printf("Backup: optional container path %s not available: %v", path.ContainerPath, err) + continue + } + + metadata.Components.ContainerData = append(metadata.Components.ContainerData, fmt.Sprintf("%s:%s", path.Service, path.ContainerPath)) + } + + return nil +} + +func (m *Manager) backupDatabases(ctx context.Context, deploymentName string, databases []DatabaseSpec, tempDir string, metadata *BackupMetadata) error { + dbDir := filepath.Join(tempDir, "databases") + if err := os.MkdirAll(dbDir, 0755); err != nil { + return fmt.Errorf("failed to create databases backup directory: %w", err) + } + + for _, db := range databases { + var dumpPath string + var err error + + switch db.Type { + case "mysql", "mariadb": + dumpPath, err = m.dumpMySQL(ctx, deploymentName, &db, dbDir) + case "postgresql", "postgres": + dumpPath, err = m.dumpPostgres(ctx, deploymentName, &db, dbDir) + default: + log.Printf("Backup: unsupported database type: %s", db.Type) + continue + } + + if err != nil { + log.Printf("Backup: failed to dump database %s: %v", db.Service, err) + continue + } + + metadata.Components.Databases = append(metadata.Components.Databases, filepath.Base(dumpPath)) + } + + return nil +} + +func (m *Manager) dumpMySQL(ctx context.Context, deploymentName string, db *DatabaseSpec, dbDir string) (string, error) { + containerName := fmt.Sprintf("%s-%s", deploymentName, db.Service) + if db.Service == deploymentName || db.Service == "" { + containerName = deploymentName + } + + dumpFile := filepath.Join(dbDir, fmt.Sprintf("%s_mysql.sql", db.Service)) + + host := db.Host + if host == "" { + host = "localhost" + } + user := db.User + if user == "" { + user = "root" + } + database := db.Database + if database 
== "" { + database = deploymentName + } + + args := []string{"exec"} + + if db.Password != "" { + args = append(args, "-e", "MYSQL_PWD="+db.Password) + } + + args = append(args, containerName, "mysqldump", + "-h", host, + "-u", user, + "--single-transaction", "--routines", "--triggers", database) + + cmd := exec.CommandContext(ctx, "docker", args...) + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("mysqldump failed: %w", err) + } + + if err := os.WriteFile(dumpFile, output, 0644); err != nil { + return "", fmt.Errorf("failed to write dump file: %w", err) + } + + return dumpFile, nil +} + +func (m *Manager) dumpPostgres(ctx context.Context, deploymentName string, db *DatabaseSpec, dbDir string) (string, error) { + containerName := fmt.Sprintf("%s-%s", deploymentName, db.Service) + if db.Service == deploymentName || db.Service == "" { + containerName = deploymentName + } + + dumpFile := filepath.Join(dbDir, fmt.Sprintf("%s_postgres.sql", db.Service)) + + user := db.User + if user == "" { + user = "postgres" + } + database := db.Database + if database == "" { + database = deploymentName + } + + args := []string{ + "exec", + } + + if db.Password != "" { + args = append(args, "-e", fmt.Sprintf("PGPASSWORD=%s", db.Password)) + } + + args = append(args, containerName, "pg_dump", "-U", user, database) + + cmd := exec.CommandContext(ctx, "docker", args...) 
+ output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("pg_dump failed: %w", err) + } + + if err := os.WriteFile(dumpFile, output, 0644); err != nil { + return "", fmt.Errorf("failed to write dump file: %w", err) + } + + return dumpFile, nil +} + +func (m *Manager) executeHooks(ctx context.Context, deploymentName string, hooks []HookSpec) error { + for _, hook := range hooks { + containerName := fmt.Sprintf("%s-%s", deploymentName, hook.Service) + if hook.Service == deploymentName || hook.Service == "" { + containerName = deploymentName + } + + timeout := hook.Timeout + if timeout <= 0 { + timeout = 60 + } + + hookCtx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second) + cmd := exec.CommandContext(hookCtx, "docker", "exec", containerName, "sh", "-c", hook.Command) + if err := cmd.Run(); err != nil { + cancel() + return fmt.Errorf("hook failed for %s: %w", hook.Service, err) + } + cancel() + } + return nil +} + +func (m *Manager) createArchive(sourceDir, destPath string) error { + file, err := os.Create(destPath) + if err != nil { + return err + } + defer file.Close() + + gzWriter := gzip.NewWriter(file) + defer gzWriter.Close() + + tarWriter := tar.NewWriter(gzWriter) + defer tarWriter.Close() + + return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + if relPath == "." 
{ + return nil + } + + header, err := tar.FileInfoHeader(info, "") + if err != nil { + return err + } + + header.Name = relPath + + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + if !info.IsDir() { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + _, err = io.Copy(tarWriter, file) + return err + } + + return nil + }) +} + +func (m *Manager) ListBackups(filter *BackupListFilter) ([]Backup, error) { + var backups []Backup + + deploymentDirs, err := os.ReadDir(m.backupsPath) + if err != nil { + if os.IsNotExist(err) { + return backups, nil + } + return nil, err + } + + for _, deploymentDir := range deploymentDirs { + if !deploymentDir.IsDir() { + continue + } + + if filter.DeploymentName != "" && deploymentDir.Name() != filter.DeploymentName { + continue + } + + backupDir := filepath.Join(m.backupsPath, deploymentDir.Name()) + files, err := os.ReadDir(backupDir) + if err != nil { + continue + } + + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".tar.gz") { + continue + } + + info, err := file.Info() + if err != nil { + continue + } + + backupID := strings.TrimSuffix(file.Name(), ".tar.gz") + backup := Backup{ + ID: backupID, + DeploymentName: deploymentDir.Name(), + Status: BackupStatusCompleted, + Size: info.Size(), + Path: filepath.Join(backupDir, file.Name()), + CreatedAt: info.ModTime(), + } + + backups = append(backups, backup) + } + } + + sort.Slice(backups, func(i, j int) bool { + return backups[i].CreatedAt.After(backups[j].CreatedAt) + }) + + if filter.Limit > 0 && len(backups) > filter.Limit { + backups = backups[:filter.Limit] + } + + return backups, nil +} + +func (m *Manager) GetBackup(backupID string) (*Backup, error) { + parts := strings.SplitN(backupID, "_", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid backup ID format") + } + + deploymentName := parts[0] + backupPath := filepath.Join(m.backupsPath, deploymentName, backupID+".tar.gz") + + info, err := 
os.Stat(backupPath) + if err != nil { + return nil, fmt.Errorf("backup not found: %s", backupID) + } + + return &Backup{ + ID: backupID, + DeploymentName: deploymentName, + Status: BackupStatusCompleted, + Size: info.Size(), + Path: backupPath, + CreatedAt: info.ModTime(), + }, nil +} + +func (m *Manager) DeleteBackup(backupID string) error { + backup, err := m.GetBackup(backupID) + if err != nil { + return err + } + + return os.Remove(backup.Path) +} + +func (m *Manager) GetBackupPath(backupID string) (string, error) { + backup, err := m.GetBackup(backupID) + if err != nil { + return "", err + } + return backup.Path, nil +} + +func (m *Manager) CleanupOldBackups(deploymentName string, keepCount int) (int, error) { + backups, err := m.ListBackups(&BackupListFilter{DeploymentName: deploymentName}) + if err != nil { + return 0, err + } + + if len(backups) <= keepCount { + return 0, nil + } + + deleted := 0 + for _, backup := range backups[keepCount:] { + if err := m.DeleteBackup(backup.ID); err != nil { + log.Printf("Failed to delete old backup %s: %v", backup.ID, err) + continue + } + deleted++ + } + + return deleted, nil +} + +func (m *Manager) RestoreBackup(ctx context.Context, req *RestoreBackupRequest) error { + backup, err := m.GetBackup(req.BackupID) + if err != nil { + return err + } + + deploymentName := backup.DeploymentName + if req.DeploymentName != "" { + deploymentName = req.DeploymentName + } + + deploymentPath := filepath.Join(m.deploymentsPath, deploymentName) + + tempDir, err := os.MkdirTemp("", "flatrun-restore-*") + if err != nil { + return fmt.Errorf("failed to create temp directory: %w", err) + } + defer os.RemoveAll(tempDir) + + if err := m.extractArchive(backup.Path, tempDir); err != nil { + return fmt.Errorf("failed to extract backup: %w", err) + } + + metadataPath := filepath.Join(tempDir, "backup.json") + metadataBytes, err := os.ReadFile(metadataPath) + if err != nil { + return fmt.Errorf("failed to read backup metadata: %w", err) + } + + 
var metadata BackupMetadata + if err := json.Unmarshal(metadataBytes, &metadata); err != nil { + return fmt.Errorf("failed to parse backup metadata: %w", err) + } + + if req.StopFirst { + log.Printf("Restore: stopping deployment %s", deploymentName) + cmd := exec.CommandContext(ctx, "docker", "compose", "-f", + filepath.Join(deploymentPath, "docker-compose.yml"), "stop") + cmd.Dir = deploymentPath + if err := cmd.Run(); err != nil { + log.Printf("Restore: warning - failed to stop deployment: %v", err) + } + } + + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + return fmt.Errorf("failed to create deployment directory: %w", err) + } + + if metadata.Components.ComposeFile { + if err := m.restoreComposeFile(tempDir, deploymentPath); err != nil { + return fmt.Errorf("failed to restore compose file: %w", err) + } + log.Printf("Restore: restored compose file") + } + + if metadata.Components.EnvFile { + if err := m.restoreEnvFiles(tempDir, deploymentPath); err != nil { + log.Printf("Restore: warning - failed to restore env files: %v", err) + } else { + log.Printf("Restore: restored env files") + } + } + + if metadata.Components.Metadata { + if err := m.restoreMetadataFile(tempDir, deploymentPath); err != nil { + log.Printf("Restore: warning - failed to restore metadata file: %v", err) + } else { + log.Printf("Restore: restored metadata file") + } + } + + if req.RestoreData && len(metadata.Components.MountedData) > 0 { + if err := m.restoreMountedData(tempDir, deploymentPath, metadata.Components.MountedData); err != nil { + log.Printf("Restore: warning - failed to restore mounted data: %v", err) + } else { + log.Printf("Restore: restored mounted data") + } + } + + if req.StopFirst { + log.Printf("Restore: starting deployment %s", deploymentName) + cmd := exec.CommandContext(ctx, "docker", "compose", "-f", + filepath.Join(deploymentPath, "docker-compose.yml"), "up", "-d") + cmd.Dir = deploymentPath + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed 
to start deployment after restore: %w", err)
		}

		time.Sleep(5 * time.Second)
	}

	if req.RestoreData && len(metadata.Components.ContainerData) > 0 {
		if err := m.restoreContainerData(ctx, deploymentName, tempDir, metadata.Components.ContainerData); err != nil {
			log.Printf("Restore: warning - failed to restore container data: %v", err)
		} else {
			log.Printf("Restore: restored container data")
		}
	}

	if req.RestoreDB && len(metadata.Components.Databases) > 0 {
		if err := m.restoreDatabases(ctx, deploymentName, tempDir, metadata.Components.Databases); err != nil {
			log.Printf("Restore: warning - failed to restore databases: %v", err)
		} else {
			log.Printf("Restore: restored databases")
		}
	}

	log.Printf("Restore completed for %s from backup %s", deploymentName, req.BackupID)
	return nil
}

// extractArchive unpacks the gzip-compressed tar at archivePath into
// destDir. Entry names are validated so a crafted archive cannot write
// outside destDir ("zip slip" path traversal). Only directories and regular
// files are materialized; other entry types are silently skipped.
func (m *Manager) extractArchive(archivePath, destDir string) error {
	file, err := os.Open(archivePath)
	if err != nil {
		return err
	}
	defer file.Close()

	gzReader, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzReader.Close()

	tarReader := tar.NewReader(gzReader)
	cleanDest := filepath.Clean(destDir)

	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		// Reject entries such as "../../etc/passwd" whose joined path
		// would escape the destination directory.
		destPath := filepath.Join(destDir, header.Name)
		if destPath != cleanDest && !strings.HasPrefix(destPath, cleanDest+string(os.PathSeparator)) {
			return fmt.Errorf("archive entry %q escapes destination directory", header.Name)
		}

		switch header.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(destPath, 0755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
				return err
			}
			outFile, err := os.Create(destPath)
			if err != nil {
				return err
			}
			_, copyErr := io.Copy(outFile, tarReader)
			// Close unconditionally; a copy error takes precedence, but a
			// close error on an otherwise-successful copy still signals a
			// truncated restore and must not be swallowed.
			closeErr := outFile.Close()
			if copyErr != nil {
				return copyErr
			}
			if closeErr != nil {
				return closeErr
			}
		}
	}

	return nil
}

// restoreComposeFile copies the archived compose file back into the
// deployment directory (always written as docker-compose.yml).
func (m *Manager) restoreComposeFile(tempDir, deploymentPath string) error {
	srcPath := filepath.Join(tempDir, "docker-compose.yml")
	destPath := filepath.Join(deploymentPath, "docker-compose.yml")
	return copyFile(srcPath, destPath)
}

func (m *Manager) 
restoreEnvFiles(tempDir, deploymentPath string) error { + envDir := filepath.Join(tempDir, "env") + if _, err := os.Stat(envDir); os.IsNotExist(err) { + return nil + } + + files, err := os.ReadDir(envDir) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + continue + } + srcPath := filepath.Join(envDir, file.Name()) + destPath := filepath.Join(deploymentPath, file.Name()) + if err := copyFile(srcPath, destPath); err != nil { + return err + } + } + + return nil +} + +func (m *Manager) restoreMetadataFile(tempDir, deploymentPath string) error { + srcPath := filepath.Join(tempDir, ".flatrun.yml") + if _, err := os.Stat(srcPath); os.IsNotExist(err) { + return nil + } + destPath := filepath.Join(deploymentPath, ".flatrun.yml") + return copyFile(srcPath, destPath) +} + +func (m *Manager) restoreMountedData(tempDir, deploymentPath string, dataItems []string) error { + dataDir := filepath.Join(tempDir, "data") + if _, err := os.Stat(dataDir); os.IsNotExist(err) { + return nil + } + + for _, item := range dataItems { + srcPath := filepath.Join(dataDir, item) + destPath := filepath.Join(deploymentPath, item) + if _, err := os.Stat(srcPath); os.IsNotExist(err) { + continue + } + if err := os.RemoveAll(destPath); err != nil { + log.Printf("Restore: warning - failed to remove existing %s: %v", item, err) + } + if err := copyDir(srcPath, destPath); err != nil { + return fmt.Errorf("failed to restore %s: %w", item, err) + } + } + + return nil +} + +func (m *Manager) restoreContainerData(ctx context.Context, deploymentName, tempDir string, items []string) error { + containerDir := filepath.Join(tempDir, "container_data") + if _, err := os.Stat(containerDir); os.IsNotExist(err) { + return nil + } + + for _, item := range items { + parts := strings.SplitN(item, ":", 2) + if len(parts) != 2 { + continue + } + service := parts[0] + containerPath := parts[1] + + containerName := fmt.Sprintf("%s-%s", deploymentName, service) + srcPath := 
filepath.Join(containerDir, service, filepath.Base(containerPath)) + + if _, err := os.Stat(srcPath); os.IsNotExist(err) { + continue + } + + cmd := exec.CommandContext(ctx, "docker", "cp", srcPath, fmt.Sprintf("%s:%s", containerName, containerPath)) + if err := cmd.Run(); err != nil { + log.Printf("Restore: warning - failed to restore %s to container %s: %v", containerPath, containerName, err) + } + } + + return nil +} + +func (m *Manager) restoreDatabases(ctx context.Context, deploymentName, tempDir string, dbFiles []string) error { + dbDir := filepath.Join(tempDir, "databases") + if _, err := os.Stat(dbDir); os.IsNotExist(err) { + return nil + } + + for _, dbFile := range dbFiles { + dumpPath := filepath.Join(dbDir, dbFile) + if _, err := os.Stat(dumpPath); os.IsNotExist(err) { + continue + } + + if strings.Contains(dbFile, "_mysql.sql") { + service := strings.TrimSuffix(dbFile, "_mysql.sql") + if err := m.restoreMySQL(ctx, deploymentName, service, dumpPath); err != nil { + log.Printf("Restore: warning - failed to restore MySQL database %s: %v", service, err) + } + } else if strings.Contains(dbFile, "_postgres.sql") { + service := strings.TrimSuffix(dbFile, "_postgres.sql") + if err := m.restorePostgres(ctx, deploymentName, service, dumpPath); err != nil { + log.Printf("Restore: warning - failed to restore PostgreSQL database %s: %v", service, err) + } + } + } + + return nil +} + +func (m *Manager) restoreMySQL(ctx context.Context, deploymentName, service, dumpPath string) error { + containerName := fmt.Sprintf("%s-%s", deploymentName, service) + + dumpContent, err := os.ReadFile(dumpPath) + if err != nil { + return err + } + + cmd := exec.CommandContext(ctx, "docker", "exec", "-i", containerName, "mysql", "-u", "root", deploymentName) + cmd.Stdin = strings.NewReader(string(dumpContent)) + return cmd.Run() +} + +func (m *Manager) restorePostgres(ctx context.Context, deploymentName, service, dumpPath string) error { + containerName := fmt.Sprintf("%s-%s", 
deploymentName, service) + + dumpContent, err := os.ReadFile(dumpPath) + if err != nil { + return err + } + + cmd := exec.CommandContext(ctx, "docker", "exec", "-i", containerName, "psql", "-U", "postgres", deploymentName) + cmd.Stdin = strings.NewReader(string(dumpContent)) + return cmd.Run() +} + +func copyFile(src, dst string) error { + source, err := os.Open(src) + if err != nil { + return err + } + defer source.Close() + + dest, err := os.Create(dst) + if err != nil { + return err + } + defer dest.Close() + + _, err = io.Copy(dest, source) + return err +} + +func copyDir(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + + destPath := filepath.Join(dst, relPath) + + if info.IsDir() { + return os.MkdirAll(destPath, info.Mode()) + } + + return copyFile(path, destPath) + }) +} diff --git a/internal/backup/manager_test.go b/internal/backup/manager_test.go new file mode 100644 index 0000000..06c0277 --- /dev/null +++ b/internal/backup/manager_test.go @@ -0,0 +1,390 @@ +package backup + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func setupTestManager(t *testing.T) (*Manager, string) { + tmpDir, err := os.MkdirTemp("", "backup-test-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + + m, err := NewManager(tmpDir) + if err != nil { + os.RemoveAll(tmpDir) + t.Fatalf("Failed to create manager: %v", err) + } + + return m, tmpDir +} + +func TestNewManager(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + if m == nil { + t.Fatal("Expected manager to be non-nil") + } + if m.deploymentsPath != tmpDir { + t.Errorf("Expected deploymentsPath %s, got: %s", tmpDir, m.deploymentsPath) + } + + expectedBackupsPath := filepath.Join(tmpDir, ".flatrun", "backups") + if m.backupsPath != expectedBackupsPath { + t.Errorf("Expected 
backupsPath %s, got: %s", expectedBackupsPath, m.backupsPath) + } + + if _, err := os.Stat(m.backupsPath); os.IsNotExist(err) { + t.Error("Expected backups directory to be created") + } + + if m.jobs == nil { + t.Error("Expected job tracker to be initialized") + } +} + +func TestCreateBackup_DeploymentNotFound(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + ctx := context.Background() + _, err := m.CreateBackup(ctx, "nonexistent", nil) + if err == nil { + t.Error("Expected error for nonexistent deployment") + } +} + +func TestCreateBackup_Success(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + + composeContent := `version: '3' +services: + web: + image: nginx +` + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + envContent := "FOO=bar\n" + if err := os.WriteFile(filepath.Join(deploymentPath, ".env"), []byte(envContent), 0644); err != nil { + t.Fatalf("Failed to create env file: %v", err) + } + + ctx := context.Background() + backup, err := m.CreateBackup(ctx, "test-deployment", nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + + if backup.ID == "" { + t.Error("Expected backup ID to be set") + } + if backup.DeploymentName != "test-deployment" { + t.Errorf("Expected deployment name 'test-deployment', got: %s", backup.DeploymentName) + } + if backup.Status != BackupStatusCompleted { + t.Errorf("Expected status 'completed', got: %s", backup.Status) + } + if backup.Size <= 0 { + t.Error("Expected backup size to be greater than 0") + } + if backup.Path == "" { + t.Error("Expected backup path to be set") + } + + if _, err := os.Stat(backup.Path); os.IsNotExist(err) { + 
t.Error("Expected backup file to exist") + } + + hasCompose := false + hasEnv := false + for _, comp := range backup.Components { + if comp == "compose" { + hasCompose = true + } + if comp == "env" { + hasEnv = true + } + } + if !hasCompose { + t.Error("Expected 'compose' component") + } + if !hasEnv { + t.Error("Expected 'env' component") + } +} + +func TestListBackups_Empty(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + backups, err := m.ListBackups(&BackupListFilter{}) + if err != nil { + t.Fatalf("Failed to list backups: %v", err) + } + if len(backups) != 0 { + t.Errorf("Expected 0 backups, got: %d", len(backups)) + } +} + +func TestListBackups_WithBackups(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + ctx := context.Background() + _, err := m.CreateBackup(ctx, "test-deployment", nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + + backups, err := m.ListBackups(&BackupListFilter{}) + if err != nil { + t.Fatalf("Failed to list backups: %v", err) + } + if len(backups) != 1 { + t.Errorf("Expected 1 backup, got: %d", len(backups)) + } +} + +func TestListBackups_FilterByDeployment(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + for _, name := range []string{"deployment-a", "deployment-b"} { + deploymentPath := filepath.Join(tmpDir, name) + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + 
t.Fatalf("Failed to create compose file: %v", err) + } + _, err := m.CreateBackup(context.Background(), name, nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + } + + backups, err := m.ListBackups(&BackupListFilter{DeploymentName: "deployment-a"}) + if err != nil { + t.Fatalf("Failed to list backups: %v", err) + } + if len(backups) != 1 { + t.Errorf("Expected 1 backup for deployment-a, got: %d", len(backups)) + } + if backups[0].DeploymentName != "deployment-a" { + t.Errorf("Expected deployment-a, got: %s", backups[0].DeploymentName) + } +} + +func TestGetBackup(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + ctx := context.Background() + created, err := m.CreateBackup(ctx, "test-deployment", nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + + backup, err := m.GetBackup(created.ID) + if err != nil { + t.Fatalf("Failed to get backup: %v", err) + } + if backup.ID != created.ID { + t.Errorf("Expected ID %s, got: %s", created.ID, backup.ID) + } + + _, err = m.GetBackup("nonexistent_backup") + if err == nil { + t.Error("Expected error for nonexistent backup") + } +} + +func TestDeleteBackup(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + 
ctx := context.Background() + created, err := m.CreateBackup(ctx, "test-deployment", nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + + err = m.DeleteBackup(created.ID) + if err != nil { + t.Fatalf("Failed to delete backup: %v", err) + } + + if _, err := os.Stat(created.Path); !os.IsNotExist(err) { + t.Error("Expected backup file to be deleted") + } + + _, err = m.GetBackup(created.ID) + if err == nil { + t.Error("Expected error when getting deleted backup") + } +} + +func TestGetBackupPath(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + ctx := context.Background() + created, err := m.CreateBackup(ctx, "test-deployment", nil) + if err != nil { + t.Fatalf("Failed to create backup: %v", err) + } + + path, err := m.GetBackupPath(created.ID) + if err != nil { + t.Fatalf("Failed to get backup path: %v", err) + } + if path != created.Path { + t.Errorf("Expected path %s, got: %s", created.Path, path) + } +} + +func TestCleanupOldBackups(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + backupDir := filepath.Join(m.backupsPath, "test-deployment") + if err := os.MkdirAll(backupDir, 0755); err != nil { + t.Fatalf("Failed to create backup dir: %v", err) + } + for i := 
0; i < 5; i++ { + backupFile := filepath.Join(backupDir, "test-deployment_2024010"+string(rune('1'+i))+"_120000.tar.gz") + if err := os.WriteFile(backupFile, []byte("test backup content"), 0644); err != nil { + t.Fatalf("Failed to create backup file: %v", err) + } + } + + backups, _ := m.ListBackups(&BackupListFilter{DeploymentName: "test-deployment"}) + if len(backups) != 5 { + t.Fatalf("Expected 5 backups, got: %d", len(backups)) + } + + deleted, err := m.CleanupOldBackups("test-deployment", 2) + if err != nil { + t.Fatalf("Failed to cleanup backups: %v", err) + } + if deleted != 3 { + t.Errorf("Expected 3 deleted, got: %d", deleted) + } + + backups, _ = m.ListBackups(&BackupListFilter{DeploymentName: "test-deployment"}) + if len(backups) != 2 { + t.Errorf("Expected 2 backups remaining, got: %d", len(backups)) + } +} + +func TestStartBackupJob(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + deploymentPath := filepath.Join(tmpDir, "test-deployment") + if err := os.MkdirAll(deploymentPath, 0755); err != nil { + t.Fatalf("Failed to create deployment dir: %v", err) + } + if err := os.WriteFile(filepath.Join(deploymentPath, "docker-compose.yml"), []byte("version: '3'\n"), 0644); err != nil { + t.Fatalf("Failed to create compose file: %v", err) + } + + jobID := m.StartBackupJob("test-deployment", nil) + + if jobID == "" { + t.Error("Expected job ID to be returned") + } + + job := m.GetJob(jobID) + if job == nil { + t.Fatal("Expected to find job") + } + if job.Type != JobTypeBackup { + t.Errorf("Expected job type 'backup', got: %s", job.Type) + } +} + +func TestManagerGetJob(t *testing.T) { + m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + m.jobs.CreateJob("test-job", JobTypeBackup, "deployment") + + job := m.GetJob("test-job") + if job == nil { + t.Fatal("Expected to find job") + } + if job.ID != "test-job" { + t.Errorf("Expected ID 'test-job', got: %s", job.ID) + } +} + +func TestManagerListJobs(t *testing.T) { + 
m, tmpDir := setupTestManager(t) + defer os.RemoveAll(tmpDir) + + m.jobs.CreateJob("job-1", JobTypeBackup, "deployment-a") + m.jobs.CreateJob("job-2", JobTypeRestore, "deployment-b") + + jobs := m.ListJobs("", 0) + if len(jobs) != 2 { + t.Errorf("Expected 2 jobs, got: %d", len(jobs)) + } + + jobs = m.ListJobs("deployment-a", 0) + if len(jobs) != 1 { + t.Errorf("Expected 1 job for deployment-a, got: %d", len(jobs)) + } +} diff --git a/internal/backup/types.go b/internal/backup/types.go new file mode 100644 index 0000000..9d4d469 --- /dev/null +++ b/internal/backup/types.go @@ -0,0 +1,73 @@ +package backup + +import ( + "time" + + "github.com/flatrun/agent/pkg/models" +) + +type BackupStatus string + +const ( + BackupStatusPending BackupStatus = "pending" + BackupStatusInProgress BackupStatus = "in_progress" + BackupStatusCompleted BackupStatus = "completed" + BackupStatusFailed BackupStatus = "failed" +) + +type BackupSpec = models.BackupSpec +type ContainerPath = models.ContainerBackupPath +type DatabaseSpec = models.DatabaseBackupSpec +type HookSpec = models.BackupHookSpec + +type Backup struct { + ID string `json:"id"` + DeploymentName string `json:"deployment_name"` + Status BackupStatus `json:"status"` + Size int64 `json:"size"` + Path string `json:"path"` + Components []string `json:"components"` + Error string `json:"error,omitempty"` + CreatedAt time.Time `json:"created_at"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + ExpiresAt *time.Time `json:"expires_at,omitempty"` +} + +type BackupMetadata struct { + ID string `json:"id"` + DeploymentName string `json:"deployment_name"` + DeploymentPath string `json:"deployment_path"` + CreatedAt time.Time `json:"created_at"` + AgentVersion string `json:"agent_version"` + Components BackupComponents `json:"components"` + ContainerStates map[string]string `json:"container_states,omitempty"` +} + +type BackupComponents struct { + ComposeFile bool `json:"compose_file"` + EnvFile bool `json:"env_file"` + 
Metadata bool `json:"metadata"` + MountedData []string `json:"mounted_data,omitempty"` + ContainerData []string `json:"container_data,omitempty"` + Databases []string `json:"databases,omitempty"` +} + +type CreateBackupRequest struct { + DeploymentName string `json:"deployment_name" binding:"required"` + Description string `json:"description,omitempty"` +} + +type RestoreBackupRequest struct { + BackupID string `json:"backup_id" binding:"required"` + DeploymentName string `json:"deployment_name,omitempty"` + RestoreData bool `json:"restore_data"` + RestoreDB bool `json:"restore_db"` + StopFirst bool `json:"stop_first"` +} + +type BackupListFilter struct { + DeploymentName string + Status BackupStatus + Limit int + Offset int +} diff --git a/internal/scheduler/db.go b/internal/scheduler/db.go new file mode 100644 index 0000000..f3818dd --- /dev/null +++ b/internal/scheduler/db.go @@ -0,0 +1,423 @@ +package scheduler + +import ( + "database/sql" + "encoding/json" + "os" + "path/filepath" + "sync" + "time" + + _ "github.com/mattn/go-sqlite3" +) + +type DB struct { + conn *sql.DB + path string + mu sync.RWMutex +} + +func NewDB(deploymentsPath string) (*DB, error) { + dbDir := filepath.Join(deploymentsPath, ".flatrun") + if err := os.MkdirAll(dbDir, 0755); err != nil { + return nil, err + } + + dbPath := filepath.Join(dbDir, "scheduler.db") + conn, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_busy_timeout=5000") + if err != nil { + return nil, err + } + + conn.SetMaxOpenConns(1) + conn.SetMaxIdleConns(1) + conn.SetConnMaxLifetime(time.Hour) + + db := &DB{conn: conn, path: dbPath} + if err := db.migrate(); err != nil { + conn.Close() + return nil, err + } + + return db, nil +} + +func (db *DB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + return db.conn.Close() +} + +func (db *DB) migrate() error { + schema := ` + CREATE TABLE IF NOT EXISTS scheduled_tasks ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + type TEXT NOT NULL CHECK 
(type IN ('backup', 'command')), + deployment_name TEXT NOT NULL, + cron_expr TEXT NOT NULL, + enabled BOOLEAN DEFAULT TRUE, + config TEXT, + last_run DATETIME, + next_run DATETIME, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ); + + CREATE INDEX IF NOT EXISTS idx_tasks_deployment ON scheduled_tasks(deployment_name); + CREATE INDEX IF NOT EXISTS idx_tasks_enabled ON scheduled_tasks(enabled); + CREATE INDEX IF NOT EXISTS idx_tasks_next_run ON scheduled_tasks(next_run); + + CREATE TABLE IF NOT EXISTS task_executions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id INTEGER NOT NULL, + status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed')), + output TEXT, + error TEXT, + started_at DATETIME DEFAULT CURRENT_TIMESTAMP, + ended_at DATETIME, + duration_ms INTEGER, + FOREIGN KEY (task_id) REFERENCES scheduled_tasks(id) ON DELETE CASCADE + ); + + CREATE INDEX IF NOT EXISTS idx_executions_task ON task_executions(task_id); + CREATE INDEX IF NOT EXISTS idx_executions_started ON task_executions(started_at DESC); + ` + + _, err := db.conn.Exec(schema) + return err +} + +func (db *DB) CreateTask(task *ScheduledTask) (int64, error) { + db.mu.Lock() + defer db.mu.Unlock() + + configJSON, err := json.Marshal(task.Config) + if err != nil { + return 0, err + } + + result, err := db.conn.Exec(` + INSERT INTO scheduled_tasks (name, type, deployment_name, cron_expr, enabled, config, next_run) + VALUES (?, ?, ?, ?, ?, ?, ?)`, + task.Name, task.Type, task.DeploymentName, task.CronExpr, task.Enabled, string(configJSON), task.NextRun, + ) + if err != nil { + return 0, err + } + return result.LastInsertId() +} + +func (db *DB) UpdateTask(id int64, req *UpdateTaskRequest) error { + db.mu.Lock() + defer db.mu.Unlock() + + task, err := db.getTaskByIDLocked(id) + if err != nil { + return err + } + + if req.Name != nil { + task.Name = *req.Name + } + if req.CronExpr != nil { + task.CronExpr = *req.CronExpr + } + 
if req.Enabled != nil { + task.Enabled = *req.Enabled + } + if req.Config != nil { + task.Config = *req.Config + } + + configJSON, err := json.Marshal(task.Config) + if err != nil { + return err + } + + _, err = db.conn.Exec(` + UPDATE scheduled_tasks + SET name = ?, cron_expr = ?, enabled = ?, config = ?, updated_at = CURRENT_TIMESTAMP + WHERE id = ?`, + task.Name, task.CronExpr, task.Enabled, string(configJSON), id, + ) + return err +} + +func (db *DB) DeleteTask(id int64) error { + db.mu.Lock() + defer db.mu.Unlock() + + _, err := db.conn.Exec("DELETE FROM scheduled_tasks WHERE id = ?", id) + return err +} + +func (db *DB) GetTask(id int64) (*ScheduledTask, error) { + db.mu.RLock() + defer db.mu.RUnlock() + return db.getTaskByIDLocked(id) +} + +func (db *DB) getTaskByIDLocked(id int64) (*ScheduledTask, error) { + var task ScheduledTask + var configJSON string + var lastRun, nextRun sql.NullTime + + err := db.conn.QueryRow(` + SELECT id, name, type, deployment_name, cron_expr, enabled, config, last_run, next_run, created_at, updated_at + FROM scheduled_tasks WHERE id = ?`, id).Scan( + &task.ID, &task.Name, &task.Type, &task.DeploymentName, &task.CronExpr, + &task.Enabled, &configJSON, &lastRun, &nextRun, &task.CreatedAt, &task.UpdatedAt, + ) + if err != nil { + return nil, err + } + + if err := json.Unmarshal([]byte(configJSON), &task.Config); err != nil { + return nil, err + } + + if lastRun.Valid { + task.LastRun = &lastRun.Time + } + if nextRun.Valid { + task.NextRun = &nextRun.Time + } + + return &task, nil +} + +func (db *DB) GetTasksByDeployment(deploymentName string) ([]ScheduledTask, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + rows, err := db.conn.Query(` + SELECT id, name, type, deployment_name, cron_expr, enabled, config, last_run, next_run, created_at, updated_at + FROM scheduled_tasks + WHERE deployment_name = ? 
+ ORDER BY created_at DESC`, deploymentName) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanTasks(rows) +} + +func (db *DB) GetAllTasks() ([]ScheduledTask, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + rows, err := db.conn.Query(` + SELECT id, name, type, deployment_name, cron_expr, enabled, config, last_run, next_run, created_at, updated_at + FROM scheduled_tasks + ORDER BY created_at DESC`) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanTasks(rows) +} + +func (db *DB) GetEnabledTasks() ([]ScheduledTask, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + rows, err := db.conn.Query(` + SELECT id, name, type, deployment_name, cron_expr, enabled, config, last_run, next_run, created_at, updated_at + FROM scheduled_tasks + WHERE enabled = TRUE + ORDER BY next_run ASC`) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanTasks(rows) +} + +func (db *DB) GetDueTasks() ([]ScheduledTask, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + rows, err := db.conn.Query(` + SELECT id, name, type, deployment_name, cron_expr, enabled, config, last_run, next_run, created_at, updated_at + FROM scheduled_tasks + WHERE enabled = TRUE AND next_run <= datetime('now') + ORDER BY next_run ASC`) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanTasks(rows) +} + +func (db *DB) scanTasks(rows *sql.Rows) ([]ScheduledTask, error) { + var tasks []ScheduledTask + for rows.Next() { + var task ScheduledTask + var configJSON string + var lastRun, nextRun sql.NullTime + + if err := rows.Scan( + &task.ID, &task.Name, &task.Type, &task.DeploymentName, &task.CronExpr, + &task.Enabled, &configJSON, &lastRun, &nextRun, &task.CreatedAt, &task.UpdatedAt, + ); err != nil { + return nil, err + } + + if err := json.Unmarshal([]byte(configJSON), &task.Config); err != nil { + return nil, err + } + + if lastRun.Valid { + task.LastRun = &lastRun.Time + } + if nextRun.Valid { + 
task.NextRun = &nextRun.Time + } + + tasks = append(tasks, task) + } + return tasks, nil +} + +func (db *DB) UpdateTaskRun(id int64, lastRun, nextRun time.Time) error { + db.mu.Lock() + defer db.mu.Unlock() + + _, err := db.conn.Exec(` + UPDATE scheduled_tasks + SET last_run = ?, next_run = ?, updated_at = CURRENT_TIMESTAMP + WHERE id = ?`, + lastRun, nextRun, id, + ) + return err +} + +func (db *DB) UpdateTaskNextRun(id int64, nextRun time.Time) error { + db.mu.Lock() + defer db.mu.Unlock() + + _, err := db.conn.Exec(` + UPDATE scheduled_tasks + SET next_run = ?, updated_at = CURRENT_TIMESTAMP + WHERE id = ?`, + nextRun, id, + ) + return err +} + +func (db *DB) CreateExecution(exec *TaskExecution) (int64, error) { + db.mu.Lock() + defer db.mu.Unlock() + + result, err := db.conn.Exec(` + INSERT INTO task_executions (task_id, status, started_at) + VALUES (?, ?, ?)`, + exec.TaskID, exec.Status, exec.StartedAt, + ) + if err != nil { + return 0, err + } + return result.LastInsertId() +} + +func (db *DB) UpdateExecution(id int64, status TaskStatus, output, errMsg string, endedAt time.Time, durationMs int64) error { + db.mu.Lock() + defer db.mu.Unlock() + + _, err := db.conn.Exec(` + UPDATE task_executions + SET status = ?, output = ?, error = ?, ended_at = ?, duration_ms = ? + WHERE id = ?`, + status, output, errMsg, endedAt, durationMs, id, + ) + return err +} + +func (db *DB) GetExecutionsByTask(taskID int64, limit int) ([]TaskExecution, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + if limit <= 0 { + limit = 50 + } + + rows, err := db.conn.Query(` + SELECT id, task_id, status, output, error, started_at, ended_at, duration_ms + FROM task_executions + WHERE task_id = ? 
+ ORDER BY started_at DESC + LIMIT ?`, taskID, limit) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanExecutions(rows) +} + +func (db *DB) GetRecentExecutions(limit int) ([]TaskExecution, error) { + db.mu.RLock() + defer db.mu.RUnlock() + + if limit <= 0 { + limit = 50 + } + + rows, err := db.conn.Query(` + SELECT id, task_id, status, output, error, started_at, ended_at, duration_ms + FROM task_executions + ORDER BY started_at DESC + LIMIT ?`, limit) + if err != nil { + return nil, err + } + defer rows.Close() + + return db.scanExecutions(rows) +} + +func (db *DB) scanExecutions(rows *sql.Rows) ([]TaskExecution, error) { + var executions []TaskExecution + for rows.Next() { + var exec TaskExecution + var output, errMsg sql.NullString + var endedAt sql.NullTime + var durationMs sql.NullInt64 + + if err := rows.Scan( + &exec.ID, &exec.TaskID, &exec.Status, &output, &errMsg, + &exec.StartedAt, &endedAt, &durationMs, + ); err != nil { + return nil, err + } + + exec.Output = output.String + exec.Error = errMsg.String + if endedAt.Valid { + exec.EndedAt = &endedAt.Time + } + if durationMs.Valid { + exec.Duration = durationMs.Int64 + } + + executions = append(executions, exec) + } + return executions, nil +} + +func (db *DB) CleanupOldExecutions(olderThan time.Duration) (int64, error) { + db.mu.Lock() + defer db.mu.Unlock() + + cutoff := time.Now().Add(-olderThan) + result, err := db.conn.Exec("DELETE FROM task_executions WHERE started_at < ?", cutoff) + if err != nil { + return 0, err + } + return result.RowsAffected() +} diff --git a/internal/scheduler/executor.go b/internal/scheduler/executor.go new file mode 100644 index 0000000..163b029 --- /dev/null +++ b/internal/scheduler/executor.go @@ -0,0 +1,83 @@ +package scheduler + +import ( + "context" + "fmt" + "log" + "os/exec" + "time" + + "github.com/flatrun/agent/internal/backup" + "github.com/flatrun/agent/internal/docker" +) + +type Executor struct { + backupManager *backup.Manager + 
dockerManager *docker.Manager +} + +func NewExecutor(backupManager *backup.Manager, dockerManager *docker.Manager) *Executor { + return &Executor{ + backupManager: backupManager, + dockerManager: dockerManager, + } +} + +func (e *Executor) ExecuteBackup(ctx context.Context, deploymentName string, config *BackupTaskConfig) (string, error) { + if e.backupManager == nil { + return "", fmt.Errorf("backup manager not available") + } + + deployment, err := e.dockerManager.GetDeployment(deploymentName) + if err != nil { + return "", fmt.Errorf("deployment not found: %w", err) + } + + var spec *backup.BackupSpec + if deployment.Metadata != nil && deployment.Metadata.Backup != nil { + spec = deployment.Metadata.Backup + } + + b, err := e.backupManager.CreateBackup(ctx, deploymentName, spec) + if err != nil { + return "", err + } + + if config.RetentionCount > 0 { + deleted, err := e.backupManager.CleanupOldBackups(deploymentName, config.RetentionCount) + if err != nil { + log.Printf("Scheduler: failed to cleanup old backups: %v", err) + } else if deleted > 0 { + log.Printf("Scheduler: cleaned up %d old backups for %s", deleted, deploymentName) + } + } + + return fmt.Sprintf("Backup created: %s (%d bytes)", b.ID, b.Size), nil +} + +func (e *Executor) ExecuteCommand(ctx context.Context, deploymentName string, config *CommandTaskConfig) (string, error) { + if e.dockerManager == nil { + return "", fmt.Errorf("docker manager not available") + } + + containerName := deploymentName + if config.Service != "" && config.Service != deploymentName { + containerName = fmt.Sprintf("%s-%s", deploymentName, config.Service) + } + + timeout := config.Timeout + if timeout <= 0 { + timeout = 300 + } + + cmdCtx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second) + defer cancel() + + cmd := exec.CommandContext(cmdCtx, "docker", "exec", containerName, "sh", "-c", config.Command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), 
fmt.Errorf("command failed: %w", err) + } + + return string(output), nil +} diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go new file mode 100644 index 0000000..ae8bff5 --- /dev/null +++ b/internal/scheduler/scheduler.go @@ -0,0 +1,275 @@ +package scheduler + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "github.com/robfig/cron/v3" +) + +type TaskExecutor interface { + ExecuteBackup(ctx context.Context, deploymentName string, config *BackupTaskConfig) (string, error) + ExecuteCommand(ctx context.Context, deploymentName string, config *CommandTaskConfig) (string, error) +} + +type Manager struct { + db *DB + executor TaskExecutor + parser cron.Parser + stopCh chan struct{} + wg sync.WaitGroup + mu sync.RWMutex + running bool +} + +func NewManager(deploymentsPath string, executor TaskExecutor) (*Manager, error) { + db, err := NewDB(deploymentsPath) + if err != nil { + return nil, fmt.Errorf("failed to initialize scheduler database: %w", err) + } + + parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + + return &Manager{ + db: db, + executor: executor, + parser: parser, + stopCh: make(chan struct{}), + }, nil +} + +func (m *Manager) Start() { + m.mu.Lock() + if m.running { + m.mu.Unlock() + return + } + m.running = true + m.stopCh = make(chan struct{}) + m.mu.Unlock() + + m.wg.Add(1) + go m.runLoop() + + log.Println("Scheduler started") +} + +func (m *Manager) Stop() { + m.mu.Lock() + if !m.running { + m.mu.Unlock() + return + } + m.running = false + close(m.stopCh) + m.mu.Unlock() + + m.wg.Wait() + m.db.Close() + + log.Println("Scheduler stopped") +} + +func (m *Manager) runLoop() { + defer m.wg.Done() + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + m.checkAndRunTasks() + + for { + select { + case <-m.stopCh: + return + case <-ticker.C: + m.checkAndRunTasks() + } + } +} + +func (m *Manager) checkAndRunTasks() { + tasks, err := m.db.GetDueTasks() + 
if err != nil { + log.Printf("Scheduler: failed to get due tasks: %v", err) + return + } + + for _, task := range tasks { + go m.executeTask(task) + } +} + +func (m *Manager) executeTask(task ScheduledTask) { + exec := &TaskExecution{ + TaskID: task.ID, + Status: TaskStatusRunning, + StartedAt: time.Now(), + } + + execID, err := m.db.CreateExecution(exec) + if err != nil { + log.Printf("Scheduler: failed to create execution record for task %d: %v", task.ID, err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + var output string + var execErr error + + switch task.Type { + case TaskTypeBackup: + if task.Config.BackupConfig != nil { + output, execErr = m.executor.ExecuteBackup(ctx, task.DeploymentName, task.Config.BackupConfig) + } else { + execErr = fmt.Errorf("backup config is nil") + } + case TaskTypeCommand: + if task.Config.CommandConfig != nil { + output, execErr = m.executor.ExecuteCommand(ctx, task.DeploymentName, task.Config.CommandConfig) + } else { + execErr = fmt.Errorf("command config is nil") + } + default: + execErr = fmt.Errorf("unknown task type: %s", task.Type) + } + + endedAt := time.Now() + durationMs := endedAt.Sub(exec.StartedAt).Milliseconds() + status := TaskStatusCompleted + var errMsg string + + if execErr != nil { + status = TaskStatusFailed + errMsg = execErr.Error() + log.Printf("Scheduler: task %d (%s) failed: %v", task.ID, task.Name, execErr) + } else { + log.Printf("Scheduler: task %d (%s) completed successfully", task.ID, task.Name) + } + + if err := m.db.UpdateExecution(execID, status, output, errMsg, endedAt, durationMs); err != nil { + log.Printf("Scheduler: failed to update execution %d: %v", execID, err) + } + + nextRun, err := m.calculateNextRun(task.CronExpr) + if err != nil { + log.Printf("Scheduler: failed to calculate next run for task %d: %v", task.ID, err) + return + } + + if err := m.db.UpdateTaskRun(task.ID, endedAt, nextRun); err != nil { + 
log.Printf("Scheduler: failed to update task %d run times: %v", task.ID, err) + } +} + +func (m *Manager) calculateNextRun(cronExpr string) (time.Time, error) { + schedule, err := m.parser.Parse(cronExpr) + if err != nil { + return time.Time{}, fmt.Errorf("invalid cron expression: %w", err) + } + return schedule.Next(time.Now()), nil +} + +func (m *Manager) ValidateCronExpr(cronExpr string) error { + _, err := m.parser.Parse(cronExpr) + return err +} + +func (m *Manager) CreateTask(req *CreateTaskRequest) (*ScheduledTask, error) { + nextRun, err := m.calculateNextRun(req.CronExpr) + if err != nil { + return nil, err + } + + task := &ScheduledTask{ + Name: req.Name, + Type: req.Type, + DeploymentName: req.DeploymentName, + CronExpr: req.CronExpr, + Enabled: req.Enabled, + Config: req.Config, + NextRun: &nextRun, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + id, err := m.db.CreateTask(task) + if err != nil { + return nil, err + } + + task.ID = id + return task, nil +} + +func (m *Manager) UpdateTask(id int64, req *UpdateTaskRequest) (*ScheduledTask, error) { + if req.CronExpr != nil { + if err := m.ValidateCronExpr(*req.CronExpr); err != nil { + return nil, err + } + } + + if err := m.db.UpdateTask(id, req); err != nil { + return nil, err + } + + task, err := m.db.GetTask(id) + if err != nil { + return nil, err + } + + if req.CronExpr != nil || (req.Enabled != nil && *req.Enabled) { + nextRun, err := m.calculateNextRun(task.CronExpr) + if err == nil { + if err := m.db.UpdateTaskNextRun(id, nextRun); err != nil { + log.Printf("Scheduler: failed to update next run time: %v", err) + } + task.NextRun = &nextRun + } + } + + return task, nil +} + +func (m *Manager) DeleteTask(id int64) error { + return m.db.DeleteTask(id) +} + +func (m *Manager) GetTask(id int64) (*ScheduledTask, error) { + return m.db.GetTask(id) +} + +func (m *Manager) GetAllTasks() ([]ScheduledTask, error) { + return m.db.GetAllTasks() +} + +func (m *Manager) 
GetTasksByDeployment(deploymentName string) ([]ScheduledTask, error) { + return m.db.GetTasksByDeployment(deploymentName) +} + +func (m *Manager) GetTaskExecutions(taskID int64, limit int) ([]TaskExecution, error) { + return m.db.GetExecutionsByTask(taskID, limit) +} + +func (m *Manager) GetRecentExecutions(limit int) ([]TaskExecution, error) { + return m.db.GetRecentExecutions(limit) +} + +func (m *Manager) RunTaskNow(id int64) error { + task, err := m.db.GetTask(id) + if err != nil { + return err + } + + go m.executeTask(*task) + return nil +} + +func (m *Manager) Cleanup(olderThan time.Duration) (int64, error) { + return m.db.CleanupOldExecutions(olderThan) +} diff --git a/internal/scheduler/types.go b/internal/scheduler/types.go new file mode 100644 index 0000000..e08395a --- /dev/null +++ b/internal/scheduler/types.go @@ -0,0 +1,80 @@ +package scheduler + +import ( + "time" +) + +type TaskType string + +const ( + TaskTypeBackup TaskType = "backup" + TaskTypeCommand TaskType = "command" +) + +type TaskStatus string + +const ( + TaskStatusPending TaskStatus = "pending" + TaskStatusRunning TaskStatus = "running" + TaskStatusCompleted TaskStatus = "completed" + TaskStatusFailed TaskStatus = "failed" +) + +type ScheduledTask struct { + ID int64 `json:"id"` + Name string `json:"name"` + Type TaskType `json:"type"` + DeploymentName string `json:"deployment_name"` + CronExpr string `json:"cron_expr"` + Enabled bool `json:"enabled"` + Config TaskConfig `json:"config"` + LastRun *time.Time `json:"last_run,omitempty"` + NextRun *time.Time `json:"next_run,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type TaskConfig struct { + // For backup tasks + BackupConfig *BackupTaskConfig `json:"backup_config,omitempty"` + // For command tasks + CommandConfig *CommandTaskConfig `json:"command_config,omitempty"` +} + +type BackupTaskConfig struct { + RetentionCount int `json:"retention_count"` + StoragePath string 
`json:"storage_path,omitempty"` +} + +type CommandTaskConfig struct { + Service string `json:"service"` + Command string `json:"command"` + Timeout int `json:"timeout"` +} + +type TaskExecution struct { + ID int64 `json:"id"` + TaskID int64 `json:"task_id"` + Status TaskStatus `json:"status"` + Output string `json:"output,omitempty"` + Error string `json:"error,omitempty"` + StartedAt time.Time `json:"started_at"` + EndedAt *time.Time `json:"ended_at,omitempty"` + Duration int64 `json:"duration_ms,omitempty"` +} + +type CreateTaskRequest struct { + Name string `json:"name" binding:"required"` + Type TaskType `json:"type" binding:"required"` + DeploymentName string `json:"deployment_name" binding:"required"` + CronExpr string `json:"cron_expr" binding:"required"` + Enabled bool `json:"enabled"` + Config TaskConfig `json:"config"` +} + +type UpdateTaskRequest struct { + Name *string `json:"name,omitempty"` + CronExpr *string `json:"cron_expr,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Config *TaskConfig `json:"config,omitempty"` +} diff --git a/internal/traffic/models.go b/internal/traffic/models.go index 83933f3..b1f4498 100644 --- a/internal/traffic/models.go +++ b/internal/traffic/models.go @@ -87,10 +87,10 @@ type IngestTrafficLog struct { } type UnknownDomainStats struct { - TotalRequests int64 `json:"total_requests"` - TopDomains []UnknownDomainEntry `json:"top_domains"` - TopIPs []UnknownDomainIPEntry `json:"top_ips"` - RecentLogs []TrafficLog `json:"recent_logs"` + TotalRequests int64 `json:"total_requests"` + TopDomains []UnknownDomainEntry `json:"top_domains"` + TopIPs []UnknownDomainIPEntry `json:"top_ips"` + RecentLogs []TrafficLog `json:"recent_logs"` } type UnknownDomainEntry struct { diff --git a/pkg/models/deployment.go b/pkg/models/deployment.go index 6624bb8..d32def1 100644 --- a/pkg/models/deployment.go +++ b/pkg/models/deployment.go @@ -31,6 +31,43 @@ type ServiceMetadata struct { HealthCheck HealthCheckConfig `yaml:"healthcheck" 
json:"healthcheck"` QuickActions []QuickAction `yaml:"quick_actions,omitempty" json:"quick_actions,omitempty"` Security *DeploymentSecurityConfig `yaml:"security,omitempty" json:"security,omitempty"` + Backup *BackupSpec `yaml:"backup,omitempty" json:"backup,omitempty"` +} + +type BackupSpec struct { + ContainerPaths []ContainerBackupPath `yaml:"container_paths,omitempty" json:"container_paths,omitempty"` + Databases []DatabaseBackupSpec `yaml:"databases,omitempty" json:"databases,omitempty"` + PreHooks []BackupHookSpec `yaml:"pre_hooks,omitempty" json:"pre_hooks,omitempty"` + PostHooks []BackupHookSpec `yaml:"post_hooks,omitempty" json:"post_hooks,omitempty"` + ExcludePatterns []string `yaml:"exclude_patterns,omitempty" json:"exclude_patterns,omitempty"` +} + +type ContainerBackupPath struct { + Service string `yaml:"service" json:"service"` + ContainerPath string `yaml:"container_path" json:"container_path"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Required bool `yaml:"required" json:"required"` +} + +type DatabaseBackupSpec struct { + Service string `yaml:"service" json:"service"` + Type string `yaml:"type" json:"type"` + HostEnv string `yaml:"host_env,omitempty" json:"host_env,omitempty"` + PortEnv string `yaml:"port_env,omitempty" json:"port_env,omitempty"` + UserEnv string `yaml:"user_env,omitempty" json:"user_env,omitempty"` + PasswordEnv string `yaml:"password_env,omitempty" json:"password_env,omitempty"` + DatabaseEnv string `yaml:"database_env,omitempty" json:"database_env,omitempty"` + Host string `yaml:"host,omitempty" json:"host,omitempty"` + Port int `yaml:"port,omitempty" json:"port,omitempty"` + User string `yaml:"user,omitempty" json:"user,omitempty"` + Password string `yaml:"password,omitempty" json:"password,omitempty"` + Database string `yaml:"database,omitempty" json:"database,omitempty"` +} + +type BackupHookSpec struct { + Service string `yaml:"service" json:"service"` + Command string `yaml:"command" 
json:"command"` + Timeout int `yaml:"timeout,omitempty" json:"timeout,omitempty"` } type QuickAction struct { diff --git a/templates/laravel/metadata.yml b/templates/laravel/metadata.yml index fb41141..e159b5e 100644 --- a/templates/laravel/metadata.yml +++ b/templates/laravel/metadata.yml @@ -63,3 +63,29 @@ files: CACHE_DRIVER=file SESSION_DRIVER=file QUEUE_CONNECTION=sync +backup: + container_paths: + - service: app + container_path: /app/storage + description: Storage directory (logs, cache, uploads) + required: true + - service: app + container_path: /app/.env + description: Environment configuration + required: true + databases: + - service: app + type: mysql + host_env: DB_HOST + port_env: DB_PORT + user_env: DB_USERNAME + password_env: DB_PASSWORD + database_env: DB_DATABASE + pre_hooks: + - service: app + command: php artisan down --retry=60 + timeout: 30 + post_hooks: + - service: app + command: php artisan up + timeout: 30 diff --git a/templates/wordpress/metadata.yml b/templates/wordpress/metadata.yml index 3f494f2..3fa9489 100644 --- a/templates/wordpress/metadata.yml +++ b/templates/wordpress/metadata.yml @@ -24,3 +24,32 @@ mounts: description: Bind mount for WordPress plugins type: file required: false +backup: + container_paths: + - service: wordpress + container_path: /var/www/html/wp-content/plugins + description: WordPress plugins + required: false + - service: wordpress + container_path: /var/www/html/wp-content/themes + description: WordPress themes + required: false + - service: wordpress + container_path: /var/www/html/wp-content/uploads + description: Media uploads + required: true + databases: + - service: wordpress + type: mysql + host_env: WORDPRESS_DB_HOST + user_env: WORDPRESS_DB_USER + password_env: WORDPRESS_DB_PASSWORD + database_env: WORDPRESS_DB_NAME + pre_hooks: + - service: wordpress + command: wp maintenance-mode activate --allow-root || true + timeout: 30 + post_hooks: + - service: wordpress + command: wp maintenance-mode 
deactivate --allow-root || true + timeout: 30