diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..b15f395 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,113 @@ +name: Tests + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main, develop ] + +jobs: + test: + name: Run Tests + runs-on: ubuntu-latest + + strategy: + matrix: + go-version: [1.21, 1.22] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install libvips + run: | + sudo apt-get update + sudo apt-get install -y libvips-dev + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go-version }} + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ matrix.go-version }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ matrix.go-version }}- + + - name: Download dependencies + run: go mod download + + - name: Verify dependencies + run: go mod verify + + - name: Run tests + run: make test + + - name: Run tests with coverage + run: make test-coverage + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + file: ./coverage.out + flags: unittests + name: codecov-umbrella + fail_ci_if_error: false + + lint: + name: Lint + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install libvips + run: | + sudo apt-get update + sudo apt-get install -y libvips-dev + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.22 + + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: latest + args: --timeout=5m + + build: + name: Build + runs-on: ubuntu-latest + needs: [test, lint] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install libvips + run: | + sudo apt-get update + sudo apt-get install -y libvips-dev + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.22 + + - name: Build application + run: make build + + - name: Upload build artifact + uses: actions/upload-artifact@v4 + with: + name: mediaflow-binary + path: mediaflow + retention-days: 7 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 139cef9..f8f4573 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,4 @@ go.work.sum # .vscode/ tmp/* -mediaflow \ No newline at end of file +mediaflow.env.local diff --git a/Makefile b/Makefile index 6bfb91b..6ebaf4e 100644 --- a/Makefile +++ b/Makefile @@ -59,6 +59,24 @@ run-image: @echo "Running image ๐Ÿš€" @set -a && . ./.env && docker run -p 8080:8080 --replace -n mediaflow-server --rm $(IMAGE_FULL_NAME) +test: + @echo "Running tests ๐Ÿงช" + @go test -v ./internal/... + +test-coverage: + @echo "Running tests with coverage ๐Ÿ“Š" + @go test -v -coverprofile=coverage.out ./internal/... 
+ @go tool cover -html=coverage.out -o coverage.html + @echo "Coverage report generated: coverage.html" + +test-upload: + @echo "Running upload module tests ๐Ÿ”„" + @go test -v ./internal/upload + +test-auth: + @echo "Running auth module tests ๐Ÿ”" + @go test -v ./internal/auth + clean: @echo "Cleaning up ๐Ÿงน" - @rm -f mediaflow + @rm -f mediaflow coverage.out coverage.html diff --git a/examples/storage-config.yaml b/examples/storage-config.yaml index 0a4f732..1132741 100644 --- a/examples/storage-config.yaml +++ b/examples/storage-config.yaml @@ -29,4 +29,63 @@ storage_options: sizes: ["256", "512"] default_size: "256" quality: 90 - convert_to: "webp" \ No newline at end of file + convert_to: "webp" + + +upload_options: + avatar: + kind: "image" + allowed_mimes: ["image/jpeg", "image/png", "image/webp"] + size_max_bytes: 5242880 # 5MB + multipart_threshold_mb: 15 + part_size_mb: 8 + token_ttl_seconds: 900 # 15 minutes + path_template: "raw/{shard?}/{key_base}.{ext}" + enable_sharding: true + + photo: + kind: "image" + allowed_mimes: ["image/jpeg", "image/png", "image/webp"] + size_max_bytes: 20971520 # 20MB + multipart_threshold_mb: 15 + part_size_mb: 8 + token_ttl_seconds: 900 + path_template: "raw/{shard?}/{key_base}.{ext}" + enable_sharding: true + + video: + kind: "video" + allowed_mimes: ["video/mp4", "video/quicktime", "video/webm"] + size_max_bytes: 104857600 # 100MB + multipart_threshold_mb: 15 + part_size_mb: 8 + token_ttl_seconds: 1800 # 30 minutes + path_template: "raw/{shard?}/{key_base}.{ext}" + enable_sharding: true + + default: + kind: "image" + allowed_mimes: ["image/jpeg", "image/png"] + size_max_bytes: 10485760 # 10MB + multipart_threshold_mb: 15 + part_size_mb: 8 + token_ttl_seconds: 900 + path_template: "raw/{shard?}/{key_base}.{ext}" + enable_sharding: true + +videos: + product: + origin_folder: "originals/videos/products" + constraints: + max_fps: 30 + max_size_mb: 500 + proxy: + enabled: true + folder: "proxies/videos/products" + height: 240 + preview_seconds: 4 + posters: + folder: "posters/videos" + time_percent: 10 + format: "jpg" + quality: 90 \ No newline at end of file diff --git a/features/mediaflow_rolling_batch_parts.MD b/features/mediaflow_rolling_batch_parts.MD new file mode 100644 index 0000000..7217727 --- /dev/null +++ b/features/mediaflow_rolling_batch_parts.MD @@ -0,0 +1,251 @@ +# MediaFlow | Rolling Batch Parts v1.1 (Stateless Part Batching) + +## 1) Problem Statement + +Current implementation pre-generates up to 100 part URLs upfront, which: +- Wastes resources for smaller files that don't need all parts +- Limits maximum file size to 100 ร— part_size (e.g., 800MB with 8MB parts) +- Creates very large response payloads for large files +- Doesn't follow the "rolling batch" pattern from original spec + +## 2) Solution: Rolling Batch Parts + +Implement on-demand part URL generation in configurable batches. + +### Initial Presign Response +- Generate only first **N parts** (configurable, default 10-20) +- Include metadata to indicate more parts available +- Client can request additional batches as upload progresses + +### Rolling Batch Endpoint +New endpoint for requesting additional part batches as needed. 
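+To make the rolling-batch flow concrete, below is a minimal client-side sketch of how a consumer could drive the proposed `/v1/uploads/presign/parts` endpoint, assuming the request/response shapes described in section 3.2. The `fetchAllParts` helper, its type names, and the use of a Bearer token against this endpoint are illustrative assumptions for this spec, not part of the implementation; a real client would upload parts between batch requests rather than collecting every URL up front.
+
+```go
+package uploadclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// Illustrative subset of the request/response shapes from section 3.2.
+type partBatchRequest struct {
+	UploadID  string `json:"upload_id"`
+	ObjectKey string `json:"object_key"`
+	StartPart int    `json:"start_part"`
+	Count     int    `json:"count"`
+}
+
+type presignedPart struct {
+	PartNumber int    `json:"part_number"`
+	Method     string `json:"method"`
+	URL        string `json:"url"`
+}
+
+type partBatchResponse struct {
+	Parts     []presignedPart `json:"parts"`
+	BatchInfo struct {
+		NextPartNumber int  `json:"next_part_number"`
+		HasMoreParts   bool `json:"has_more_parts"`
+	} `json:"batch_info"`
+}
+
+// fetchAllParts keeps requesting part-URL batches until the server reports
+// has_more_parts == false, starting from the batch info returned by the
+// initial presign call (hypothetical helper, shown only to illustrate the
+// rolling-batch loop).
+func fetchAllParts(baseURL, apiKey, uploadID, objectKey string, nextPart, batchSize int) ([]presignedPart, error) {
+	var all []presignedPart
+	for {
+		payload, err := json.Marshal(partBatchRequest{
+			UploadID:  uploadID,
+			ObjectKey: objectKey,
+			StartPart: nextPart,
+			Count:     batchSize,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		req, err := http.NewRequest(http.MethodPost, baseURL+"/v1/uploads/presign/parts", bytes.NewReader(payload))
+		if err != nil {
+			return nil, err
+		}
+		req.Header.Set("Content-Type", "application/json")
+		req.Header.Set("Authorization", "Bearer "+apiKey)
+
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return nil, err
+		}
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			return nil, fmt.Errorf("batch request failed with status %d", resp.StatusCode)
+		}
+
+		var batch partBatchResponse
+		if err := json.NewDecoder(resp.Body).Decode(&batch); err != nil {
+			resp.Body.Close()
+			return nil, err
+		}
+		resp.Body.Close()
+
+		// Collect this batch and stop once the server says there are no more parts.
+		all = append(all, batch.Parts...)
+		if !batch.BatchInfo.HasMoreParts {
+			return all, nil
+		}
+		nextPart = batch.BatchInfo.NextPartNumber
+	}
+}
+```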
+ +## 3) API Changes + +### 3.1 Enhanced Multipart Response +```json +{ + "object_key": "raw/43/large-video.mp4", + "upload": { + "multipart": { + "upload_id": "abc123...", + "part_size": 8388608, + "total_parts_estimated": 125, + "parts": [ + {"part_number": 1, "method": "PUT", "url": "...", "expires_at": "..."}, + {"part_number": 2, "method": "PUT", "url": "...", "expires_at": "..."} + // ... up to initial_batch_size parts + ], + "batch_info": { + "parts_in_batch": 10, + "next_part_number": 11, + "has_more_parts": true, + "batch_endpoint": "/v1/uploads/presign/parts" + } + } + } +} +``` + +### 3.2 New Endpoint: POST /v1/uploads/presign/parts + +**Request:** +```json +{ + "upload_id": "abc123...", + "object_key": "raw/43/large-video.mp4", + "start_part": 11, + "count": 10, + "expires_seconds": 1800 +} +``` + +**Response:** +```json +{ + "parts": [ + {"part_number": 11, "method": "PUT", "url": "...", "expires_at": "..."}, + {"part_number": 12, "method": "PUT", "url": "...", "expires_at": "..."} + // ... up to 'count' parts + ], + "batch_info": { + "parts_in_batch": 10, + "next_part_number": 21, + "has_more_parts": true + } +} +``` + +## 4) Configuration + +### 4.1 Upload Config Extensions +```yaml +upload_options: + video: + # ... existing config + initial_batch_size: 10 # Parts in first response + max_batch_size: 20 # Max parts per batch request + part_url_ttl_seconds: 1800 # Individual part URL expiry +``` + +### 4.2 Batch Size Logic +- **Small files** (< 10 parts): Generate all parts in initial response +- **Medium files** (10-50 parts): Generate initial batch, allow additional requests +- **Large files** (50+ parts): Rolling batches required + +## 5) Implementation Details + +### 5.1 Service Layer Changes +```go +type BatchInfo struct { + PartsInBatch int `json:"parts_in_batch"` + NextPartNumber int `json:"next_part_number,omitempty"` + HasMoreParts bool `json:"has_more_parts"` + BatchEndpoint string `json:"batch_endpoint,omitempty"` +} + +type MultipartUpload struct { + UploadID string `json:"upload_id"` + PartSize int64 `json:"part_size"` + TotalPartsEst int `json:"total_parts_estimated"` + Parts []PartUpload `json:"parts"` + BatchInfo *BatchInfo `json:"batch_info,omitempty"` +} +``` + +### 5.2 Batch Request Validation +- Validate `upload_id` exists and is not expired +- Validate `object_key` matches the upload +- Validate `start_part` is sequential (no gaps) +- Validate `count` โ‰ค `max_batch_size` +- Rate limiting per upload_id + +### 5.3 Error Handling +```json +{ + "code": "invalid_upload_id", + "message": "Upload ID not found or expired", + "hint": "Start a new upload" +} + +{ + "code": "invalid_part_range", + "message": "Part 25 requested but only up to part 20 uploaded", + "hint": "Request sequential parts only" +} + +{ + "code": "batch_size_exceeded", + "message": "Requested 50 parts, maximum 20 allowed", + "hint": "Reduce count parameter" +} +``` + +## 6) Client Workflow + +### 6.1 Upload Flow +1. **Initial presign**: Get first batch of part URLs + upload metadata +2. **Start uploading**: Use provided part URLs +3. **Monitor progress**: When approaching end of current batch +4. **Request next batch**: Call `/presign/parts` for more URLs +5. **Continue upload**: Use new part URLs +6. 
**Complete upload**: Client calls S3 CompleteMultipartUpload + +### 6.2 Batch Request Timing +- **Proactive**: Request next batch when 80% through current batch +- **Lazy**: Request next batch only when current batch exhausted +- **Configurable**: Client can tune based on upload speed + +## 7) Benefits + +### 7.1 Efficiency +- Smaller initial response payloads +- Generate URLs only as needed +- Reduce wasted presigned URLs for failed uploads + +### 7.2 Scalability +- Support unlimited file sizes +- Configurable batch sizes per use case +- Better resource utilization + +### 7.3 Flexibility +- Client controls batch timing +- Different strategies for different file types +- Graceful handling of network interruptions + +## 8) Configuration Examples + +### 8.1 Small Files (Images, Documents) +```yaml +avatar: + initial_batch_size: 5 + max_batch_size: 10 + part_size_mb: 5 +``` + +### 8.2 Large Files (Videos) +```yaml +video: + initial_batch_size: 10 + max_batch_size: 20 + part_size_mb: 8 +``` + +### 8.3 Massive Files (Raw Video, Datasets) +```yaml +raw_video: + initial_batch_size: 15 + max_batch_size: 25 + part_size_mb: 16 +``` + +## 9) Migration Path + +### 9.1 Phase 1: Add Batch Info +- Add `batch_info` to existing multipart responses +- Maintain current behavior (generate all parts up to 100) +- `has_more_parts: false` for backward compatibility + +### 9.2 Phase 2: Implement Rolling Batches +- Add `/presign/parts` endpoint +- Reduce initial batch sizes +- Enable `has_more_parts: true` for large files + +### 9.3 Phase 3: Optimize +- Add rate limiting and abuse prevention +- Implement batch size auto-tuning +- Add metrics and monitoring + +## 10) Security Considerations + +### 10.1 Upload ID Validation +- Verify upload_id is valid and not expired +- Prevent unauthorized part generation +- Log all batch requests for audit + +### 10.2 Rate Limiting +- Limit batch requests per upload_id per minute +- Prevent abuse of part generation endpoint +- Implement exponential backoff hints + +### 10.3 Resource Protection +- Maximum total parts per upload (e.g., 10,000) +- Maximum concurrent uploads per client +- Cleanup expired upload sessions + +## 11) Future Enhancements + +### 11.1 Adaptive Batching +- Auto-adjust batch size based on upload speed +- Larger batches for fast connections +- Smaller batches for slow/unreliable connections + +### 11.2 Part Prediction +- Pre-generate next batch based on upload progress +- Cache commonly requested part ranges +- Optimize for typical upload patterns + +### 11.3 Resume Support +- Track which parts have been uploaded +- Support resume from any part number +- Handle partial uploads gracefully \ No newline at end of file diff --git a/go.mod b/go.mod index 249a562..c79e139 100644 --- a/go.mod +++ b/go.mod @@ -26,4 +26,5 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 // indirect github.com/aws/smithy-go v1.22.5 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect ) diff --git a/go.sum b/go.sum index 8a8f92a..d0ea0f3 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 h1:iF4Xxkc0H9c/K2dS0zZw3SCkj0Z7 github.com/aws/aws-sdk-go-v2/service/sts v1.35.1/go.mod h1:0bxIatfN0aLq4mjoLDeBpOjOke68OsFlXPDFJ7V0MYw= github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/golang-jwt/jwt/v5 v5.3.0 
h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/h2non/bimg.v1 v1.1.9 h1:wZIUbeOnwr37Ta4aofhIv8OI8v4ujpjXC9mXnAGpQjM= diff --git a/internal/api/image.go b/internal/api/image.go index 5f82d5b..7127de0 100644 --- a/internal/api/image.go +++ b/internal/api/image.go @@ -95,7 +95,7 @@ func (h *ImageAPI) HandleThumbnailType(w http.ResponseWriter, r *http.Request, i w.Header().Set("Content-Type", "image/"+so.ConvertTo) w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", cd)) w.Header().Set("ETag", fmt.Sprintf(`"%s/%s_%s"`, thumbType, baseName, size)) - w.Write(imageData) + w.Write(imageData) //nolint:errcheck } } @@ -115,7 +115,7 @@ func (h *ImageAPI) HandleOriginals(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "image/"+so.ConvertTo) w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", so.CacheDuration)) w.Header().Set("ETag", fmt.Sprintf(`"%s/%s"`, thumbType, baseName)) - w.Write(imageData) + w.Write(imageData) //nolint:errcheck } } diff --git a/internal/auth/middleware.go b/internal/auth/middleware.go new file mode 100644 index 0000000..d240468 --- /dev/null +++ b/internal/auth/middleware.go @@ -0,0 +1,64 @@ +package auth + +import ( + "encoding/json" + "net/http" + "strings" +) + +type Config struct { + APIKey string +} + +type ErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` + Hint string `json:"hint,omitempty"` +} + +// APIKeyMiddleware validates API key authentication +func APIKeyMiddleware(config *Config) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Skip auth if no API key configured (for development) + if config.APIKey == "" { + next.ServeHTTP(w, r) + return + } + + // Check Authorization header (Bearer token) + authHeader := r.Header.Get("Authorization") + if authHeader != "" { + if strings.HasPrefix(authHeader, "Bearer ") { + token := strings.TrimPrefix(authHeader, "Bearer ") + if token == config.APIKey { + next.ServeHTTP(w, r) + return + } + } + } + + // Check X-API-Key header + apiKeyHeader := r.Header.Get("X-API-Key") + if apiKeyHeader == config.APIKey { + next.ServeHTTP(w, r) + return + } + + // No valid authentication found + writeUnauthorized(w) + }) + } +} + +func writeUnauthorized(w http.ResponseWriter) { + errorResp := ErrorResponse{ + Code: "unauthorized", + Message: "Invalid or missing API key", + Hint: "Provide API key via Authorization: Bearer or X-API-Key: ", + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusUnauthorized) + _ = json.NewEncoder(w).Encode(errorResp) +} \ No newline at end of file diff --git a/internal/auth/middleware_test.go b/internal/auth/middleware_test.go new file mode 100644 index 0000000..a04d582 --- /dev/null +++ b/internal/auth/middleware_test.go @@ -0,0 +1,199 @@ +package auth + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" +) + +func TestAPIKeyMiddleware(t *testing.T) { + // Create a test handler that returns "OK" if auth passes + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("OK")) + }) + + 
tests := []struct { + name string + apiKey string + authHeader string + apiKeyHeader string + expectedStatus int + expectedBody string + }{ + { + name: "No API key configured - should pass", + apiKey: "", + authHeader: "", + apiKeyHeader: "", + expectedStatus: http.StatusOK, + expectedBody: "OK", + }, + { + name: "Valid Bearer token", + apiKey: "test-secret-key", + authHeader: "Bearer test-secret-key", + apiKeyHeader: "", + expectedStatus: http.StatusOK, + expectedBody: "OK", + }, + { + name: "Valid X-API-Key header", + apiKey: "test-secret-key", + authHeader: "", + apiKeyHeader: "test-secret-key", + expectedStatus: http.StatusOK, + expectedBody: "OK", + }, + { + name: "Invalid Bearer token", + apiKey: "test-secret-key", + authHeader: "Bearer wrong-key", + apiKeyHeader: "", + expectedStatus: http.StatusUnauthorized, + expectedBody: `{"code":"unauthorized","message":"Invalid or missing API key","hint":"Provide API key via Authorization: Bearer or X-API-Key: "}`, + }, + { + name: "Invalid X-API-Key", + apiKey: "test-secret-key", + authHeader: "", + apiKeyHeader: "wrong-key", + expectedStatus: http.StatusUnauthorized, + expectedBody: `{"code":"unauthorized","message":"Invalid or missing API key","hint":"Provide API key via Authorization: Bearer or X-API-Key: "}`, + }, + { + name: "No auth headers provided", + apiKey: "test-secret-key", + authHeader: "", + apiKeyHeader: "", + expectedStatus: http.StatusUnauthorized, + expectedBody: `{"code":"unauthorized","message":"Invalid or missing API key","hint":"Provide API key via Authorization: Bearer or X-API-Key: "}`, + }, + { + name: "Malformed Bearer token", + apiKey: "test-secret-key", + authHeader: "InvalidFormat test-secret-key", + apiKeyHeader: "", + expectedStatus: http.StatusUnauthorized, + expectedBody: `{"code":"unauthorized","message":"Invalid or missing API key","hint":"Provide API key via Authorization: Bearer or X-API-Key: "}`, + }, + { + name: "Empty Bearer token", + apiKey: "test-secret-key", + authHeader: "Bearer ", + apiKeyHeader: "", + expectedStatus: http.StatusUnauthorized, + expectedBody: `{"code":"unauthorized","message":"Invalid or missing API key","hint":"Provide API key via Authorization: Bearer or X-API-Key: "}`, + }, + { + name: "Both headers provided - Bearer wins", + apiKey: "test-secret-key", + authHeader: "Bearer test-secret-key", + apiKeyHeader: "wrong-key", + expectedStatus: http.StatusOK, + expectedBody: "OK", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup + config := &Config{APIKey: tt.apiKey} + middleware := APIKeyMiddleware(config) + handler := middleware(testHandler) + + // Create request + req := httptest.NewRequest("POST", "/test", nil) + if tt.authHeader != "" { + req.Header.Set("Authorization", tt.authHeader) + } + if tt.apiKeyHeader != "" { + req.Header.Set("X-API-Key", tt.apiKeyHeader) + } + + // Execute + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + // Assert status + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d", tt.expectedStatus, rr.Code) + } + + // Assert body + if tt.expectedStatus == http.StatusUnauthorized { + // For error responses, check JSON structure + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + if errorResp.Code != "unauthorized" { + t.Errorf("Expected error code 'unauthorized', got '%s'", errorResp.Code) + } + if errorResp.Message != "Invalid or missing API key" { + t.Errorf("Expected error 
message 'Invalid or missing API key', got '%s'", errorResp.Message) + } + } else { + // For success responses + if rr.Body.String() != tt.expectedBody { + t.Errorf("Expected body '%s', got '%s'", tt.expectedBody, rr.Body.String()) + } + } + }) + } +} + +func TestAPIKeyMiddleware_ContentType(t *testing.T) { + config := &Config{APIKey: "test-key"} + middleware := APIKeyMiddleware(config) + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("OK")) + }) + handler := middleware(testHandler) + + req := httptest.NewRequest("POST", "/test", nil) + // No auth headers - should fail + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + // Check Content-Type header is set correctly + contentType := rr.Header().Get("Content-Type") + if contentType != "application/json" { + t.Errorf("Expected Content-Type 'application/json', got '%s'", contentType) + } +} + +func TestWriteUnauthorized(t *testing.T) { + rr := httptest.NewRecorder() + writeUnauthorized(rr) + + // Check status + if rr.Code != http.StatusUnauthorized { + t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rr.Code) + } + + // Check Content-Type + contentType := rr.Header().Get("Content-Type") + if contentType != "application/json" { + t.Errorf("Expected Content-Type 'application/json', got '%s'", contentType) + } + + // Check response body structure + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != "unauthorized" { + t.Errorf("Expected code 'unauthorized', got '%s'", errorResp.Code) + } + + if errorResp.Message == "" { + t.Error("Expected non-empty message") + } + + if errorResp.Hint == "" { + t.Error("Expected non-empty hint") + } +} \ No newline at end of file diff --git a/internal/config/config.go b/internal/config/config.go index 5200a40..40fb874 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -18,6 +18,8 @@ type Config struct { AWSAccessKey string AWSSecretKey string CacheMaxAge string + // API authentication + APIKey string } func Load() *Config { @@ -29,6 +31,8 @@ func Load() *Config { AWSAccessKey: getEnv("AWS_ACCESS_KEY_ID", ""), AWSSecretKey: getEnv("AWS_SECRET_ACCESS_KEY", ""), CacheMaxAge: getEnv("CACHE_MAX_AGE", "86400"), + // API authentication + APIKey: getEnv("API_KEY", ""), } } @@ -44,6 +48,18 @@ type StorageOptions struct { type StorageConfig struct { StorageOptions map[string]StorageOptions `yaml:"storage_options"` + UploadOptions map[string]UploadOptions `yaml:"upload_options,omitempty"` +} + +type UploadOptions struct { + Kind string `yaml:"kind"` + AllowedMimes []string `yaml:"allowed_mimes"` + SizeMaxBytes int64 `yaml:"size_max_bytes"` + MultipartThresholdMB int64 `yaml:"multipart_threshold_mb"` + PartSizeMB int64 `yaml:"part_size_mb"` + TokenTTLSeconds int64 `yaml:"token_ttl_seconds"` + PathTemplate string `yaml:"path_template"` + EnableSharding bool `yaml:"enable_sharding"` } func LoadStorageConfig(s3 *s3.Client, config *Config) (*StorageConfig, error) { @@ -97,6 +113,20 @@ func (sc *StorageConfig) GetStorageOptions(imageType string) *StorageOptions { return DefaultStorageOptions() } +func (sc *StorageConfig) GetUploadOptions(profile string) *UploadOptions { + if options, exists := sc.UploadOptions[profile]; exists { + return &options + } + + // Return default if type not found + if defaultOptions, exists := sc.UploadOptions["default"]; exists { + return &defaultOptions + } + + // Return 
nil if no upload options configured + return nil +} + func DefaultStorageOptions() *StorageOptions { return &StorageOptions{ OriginFolder: "originals", diff --git a/internal/response/response.go b/internal/response/response.go index 55faf86..d345899 100644 --- a/internal/response/response.go +++ b/internal/response/response.go @@ -17,13 +17,13 @@ type JSONResponse struct { func (r *JSONResponse) Write(w http.ResponseWriter) { w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(r) + _ = json.NewEncoder(w).Encode(r) } func (r *JSONResponse) WriteError(w http.ResponseWriter, status int) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) - json.NewEncoder(w).Encode(r) + _ = json.NewEncoder(w).Encode(r) } type PlainResponse struct { @@ -32,13 +32,13 @@ type PlainResponse struct { func (r *PlainResponse) Write(w http.ResponseWriter) { w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(r.Message)) + _, _ = w.Write([]byte(r.Message)) } func (r *PlainResponse) WriteError(w http.ResponseWriter, status int) { w.Header().Set("Content-Type", "text/plain") w.WriteHeader(status) - w.Write([]byte(r.Message)) + _, _ = w.Write([]byte(r.Message)) } func NewJSONResponse(message string) ResponseWriter { diff --git a/internal/s3/client.go b/internal/s3/client.go index e614b41..b79144a 100644 --- a/internal/s3/client.go +++ b/internal/s3/client.go @@ -6,6 +6,7 @@ import ( "io" utils "mediaflow/internal" "os" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" @@ -14,8 +15,9 @@ import ( ) type Client struct { - s3Client *s3.Client - bucket string + s3Client *s3.Client + bucket string + presigner *s3.PresignClient } func NewClient(ctx context.Context, region, bucket, accessKey, secretKey, endpoint string) (*Client, error) { @@ -44,9 +46,12 @@ func NewClient(ctx context.Context, region, bucket, accessKey, secretKey, endpoi } }) + presigner := s3.NewPresignClient(s3Client) + return &Client{ - s3Client: s3Client, - bucket: bucket, + s3Client: s3Client, + bucket: bucket, + presigner: presigner, }, nil } @@ -71,3 +76,69 @@ func (c *Client) PutObject(ctx context.Context, key string, body io.Reader) erro }) return err } + +// PresignPutObject generates a presigned URL for PUT operations +func (c *Client) PresignPutObject(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) { + input := &s3.PutObjectInput{ + Bucket: aws.String(c.bucket), + Key: aws.String(key), + } + + // Add required headers to the input + if contentType, ok := headers["Content-Type"]; ok { + input.ContentType = aws.String(contentType) + } + // Note: SSE removed for MinIO compatibility + + request, err := c.presigner.PresignPutObject(ctx, input, func(opts *s3.PresignOptions) { + opts.Expires = expires + }) + if err != nil { + return "", err + } + + return request.URL, nil +} + +// CreateMultipartUpload creates a multipart upload and returns the upload ID +func (c *Client) CreateMultipartUpload(ctx context.Context, key string, headers map[string]string) (string, error) { + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(c.bucket), + Key: aws.String(key), + } + + // Add required headers + if contentType, ok := headers["Content-Type"]; ok { + input.ContentType = aws.String(contentType) + } + // Note: SSE removed for MinIO compatibility + + result, err := c.s3Client.CreateMultipartUpload(ctx, input) + if err != nil { + return "", err + } + + return *result.UploadId, nil +} + +// PresignUploadPart generates a 
presigned URL for uploading a part +func (c *Client) PresignUploadPart(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) { + input := &s3.UploadPartInput{ + Bucket: aws.String(c.bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + PartNumber: aws.Int32(partNumber), + } + + request, err := c.presigner.PresignUploadPart(ctx, input, func(opts *s3.PresignOptions) { + opts.Expires = expires + }) + if err != nil { + return "", err + } + + return request.URL, nil +} + +// Note: AWS SDK doesn't support presigning CompleteMultipartUpload and AbortMultipartUpload +// These operations must be done server-side or using direct API calls diff --git a/internal/service/image.go b/internal/service/image.go index 42e5509..d135a1f 100644 --- a/internal/service/image.go +++ b/internal/service/image.go @@ -197,7 +197,7 @@ func DetermineMimeType(file multipart.File) (string, error) { } // Reset the file pointer to the beginning - file.Seek(0, io.SeekStart) + _, _ = file.Seek(0, io.SeekStart) contentType := http.DetectContentType(buf[:n]) return contentType, nil diff --git a/internal/upload/handlers.go b/internal/upload/handlers.go new file mode 100644 index 0000000..2bbb5e1 --- /dev/null +++ b/internal/upload/handlers.go @@ -0,0 +1,116 @@ +package upload + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "mediaflow/internal/config" +) + +type Handler struct { + uploadService *Service + storageConfig *config.StorageConfig + ctx context.Context +} + +func NewHandler(ctx context.Context, uploadService *Service, storageConfig *config.StorageConfig) *Handler { + return &Handler{ + uploadService: uploadService, + storageConfig: storageConfig, + ctx: ctx, + } +} + +// HandlePresign handles POST /v1/uploads/presign +func (h *Handler) HandlePresign(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + h.writeError(w, http.StatusMethodNotAllowed, ErrBadRequest, "Method not allowed", "") + return + } + + // Parse request body + var req PresignRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "Invalid request body", "") + return + } + + // Validate required fields + if req.KeyBase == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "key_base is required", "") + return + } + if req.Ext == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "ext is required", "") + return + } + if req.Mime == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "mime is required", "") + return + } + if req.SizeBytes <= 0 { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "size_bytes must be greater than 0", "") + return + } + if req.Kind == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "kind is required", "") + return + } + if req.Profile == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "profile is required", "") + return + } + + // Get upload options for the profile + uploadOptions := h.storageConfig.GetUploadOptions(req.Profile) + if uploadOptions == nil { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, fmt.Sprintf("No upload configuration for profile: %s", req.Profile), "Configure upload_options in your storage config") + return + } + + // Validate kind matches upload options + if uploadOptions.Kind != req.Kind { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, fmt.Sprintf("Kind mismatch: expected %s, got %s", uploadOptions.Kind, req.Kind), "") + return + } + + // Generate 
presigned upload + presignResp, err := h.uploadService.PresignUpload(h.ctx, &req, uploadOptions) + if err != nil { + if err.Error() == fmt.Sprintf("mime type not allowed: %s", req.Mime) { + h.writeError(w, http.StatusBadRequest, ErrMimeNotAllowed, err.Error(), "Check allowed_mimes in upload configuration") + return + } + if err.Error() == fmt.Sprintf("file size exceeds maximum: %d > %d", req.SizeBytes, uploadOptions.SizeMaxBytes) { + h.writeError(w, http.StatusBadRequest, ErrSizeTooLarge, err.Error(), "Reduce file size or check size_max_bytes in configuration") + return + } + // Log the actual error for debugging + fmt.Printf("Upload error: %v\n", err) + h.writeError(w, http.StatusInternalServerError, ErrBadRequest, fmt.Sprintf("Failed to generate presigned upload: %v", err), "") + return + } + + // Return presigned response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(presignResp) +} + +// Note: Part presigning is now handled via batch presigning in the main endpoint +// No separate part endpoint needed for stateless design + +// writeError writes a standardized error response +func (h *Handler) writeError(w http.ResponseWriter, statusCode int, code, message, hint string) { + errorResp := ErrorResponse{ + Code: code, + Message: message, + Hint: hint, + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + _ = json.NewEncoder(w).Encode(errorResp) +} diff --git a/internal/upload/handlers_test.go b/internal/upload/handlers_test.go new file mode 100644 index 0000000..75b7276 --- /dev/null +++ b/internal/upload/handlers_test.go @@ -0,0 +1,537 @@ +package upload + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "mediaflow/internal/config" +) + +// Create a test handler that uses an interface for the upload service +type TestHandler struct { + uploadService UploadService + storageConfig *config.StorageConfig + ctx context.Context +} + +func (h *TestHandler) HandlePresign(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + h.writeError(w, http.StatusMethodNotAllowed, ErrBadRequest, "Method not allowed", "") + return + } + + // Parse request body + var req PresignRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "Invalid request body", "") + return + } + + // Validate required fields + if req.KeyBase == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "key_base is required", "") + return + } + if req.Ext == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "ext is required", "") + return + } + if req.Mime == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "mime is required", "") + return + } + if req.SizeBytes <= 0 { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "size_bytes must be greater than 0", "") + return + } + if req.Kind == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "kind is required", "") + return + } + if req.Profile == "" { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, "profile is required", "") + return + } + + // Get upload options for the profile + uploadOptions := h.storageConfig.GetUploadOptions(req.Profile) + if uploadOptions == nil { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, fmt.Sprintf("No upload configuration for profile: %s", req.Profile), "Configure upload_options in your storage config") + return + } + + 
// Validate kind matches upload options + if uploadOptions.Kind != req.Kind { + h.writeError(w, http.StatusBadRequest, ErrBadRequest, fmt.Sprintf("Kind mismatch: expected %s, got %s", uploadOptions.Kind, req.Kind), "") + return + } + + // Generate presigned upload + presignResp, err := h.uploadService.PresignUpload(h.ctx, &req, uploadOptions) + if err != nil { + errStr := err.Error() + if strings.Contains(errStr, "mime type not allowed:") { + h.writeError(w, http.StatusBadRequest, ErrMimeNotAllowed, err.Error(), "Check allowed_mimes in upload configuration") + return + } + if strings.Contains(errStr, "file size exceeds maximum:") { + h.writeError(w, http.StatusBadRequest, ErrSizeTooLarge, err.Error(), "Reduce file size or check size_max_bytes in configuration") + return + } + h.writeError(w, http.StatusInternalServerError, ErrBadRequest, fmt.Sprintf("Failed to generate presigned upload: %v", err), "") + return + } + + // Return presigned response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(presignResp) +} + +func (h *TestHandler) writeError(w http.ResponseWriter, statusCode int, code, message, hint string) { + errorResp := ErrorResponse{ + Code: code, + Message: message, + Hint: hint, + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + _ = json.NewEncoder(w).Encode(errorResp) +} + +// UploadService interface for dependency injection +type UploadService interface { + PresignUpload(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) (*PresignResponse, error) +} + +// MockUploadService implements the upload service interface for testing +type MockUploadService struct { + presignUploadFunc func(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) (*PresignResponse, error) +} + +func (m *MockUploadService) PresignUpload(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) (*PresignResponse, error) { + if m.presignUploadFunc != nil { + return m.presignUploadFunc(ctx, req, uploadOptions) + } + + // Default mock response + return &PresignResponse{ + ObjectKey: "raw/ab/test-key.jpg", + Upload: &UploadDetails{ + Single: &SingleUpload{ + Method: "PUT", + URL: "https://test.s3.amazonaws.com/bucket/raw/ab/test-key.jpg", + Headers: map[string]string{"Content-Type": "image/jpeg"}, + ExpiresAt: time.Now().Add(15 * time.Minute), + }, + }, + }, nil +} + +func TestHandler_HandlePresign_Success(t *testing.T) { + // Setup + mockService := &MockUploadService{} + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + AllowedMimes: []string{"image/jpeg", "image/png"}, + SizeMaxBytes: 5 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + handler := &TestHandler{ + uploadService: mockService, + storageConfig: storageConfig, + ctx: context.Background(), + } + + // Create request + requestBody := PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + } + + body, _ := json.Marshal(requestBody) + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + + // Execute + rr := httptest.NewRecorder() + handler.HandlePresign(rr, req) + + // Assert + if rr.Code != 
http.StatusOK { + t.Errorf("Expected status %d, got %d. Body: %s", http.StatusOK, rr.Code, rr.Body.String()) + } + + var response PresignResponse + if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil { + t.Errorf("Failed to parse response: %v", err) + } + + if response.ObjectKey == "" { + t.Errorf("Expected non-empty ObjectKey") + } + + if response.Upload == nil { + t.Errorf("Expected Upload details") + } +} + +func TestHandler_HandlePresign_ValidationErrors(t *testing.T) { + // Setup + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + AllowedMimes: []string{"image/jpeg", "image/png"}, + SizeMaxBytes: 5 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + handler := &TestHandler{ + uploadService: &MockUploadService{}, + storageConfig: storageConfig, + ctx: context.Background(), + } + + tests := []struct { + name string + method string + requestBody interface{} + expectedStatus int + expectedCode string + }{ + { + name: "Invalid method", + method: "GET", + requestBody: map[string]interface{}{}, + expectedStatus: http.StatusMethodNotAllowed, + expectedCode: ErrBadRequest, + }, + { + name: "Missing key_base", + method: "POST", + requestBody: map[string]interface{}{ + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 1024000, + "kind": "image", + "profile": "avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Missing ext", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "mime": "image/jpeg", + "size_bytes": 1024000, + "kind": "image", + "profile": "avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Missing mime", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "size_bytes": 1024000, + "kind": "image", + "profile": "avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Invalid size_bytes", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 0, + "kind": "image", + "profile": "avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Missing kind", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 1024000, + "profile": "avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Missing profile", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 1024000, + "kind": "image", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Invalid profile", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 1024000, + "kind": "image", + "profile": "nonexistent", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + { + name: "Kind mismatch", + method: "POST", + requestBody: map[string]interface{}{ + "key_base": "test-key", + "ext": "jpg", + "mime": "image/jpeg", + "size_bytes": 1024000, + "kind": "video", // Profile is configured for "image" + "profile": 
"avatar", + }, + expectedStatus: http.StatusBadRequest, + expectedCode: ErrBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var body []byte + if tt.requestBody != nil { + body, _ = json.Marshal(tt.requestBody) + } + + req := httptest.NewRequest(tt.method, "/v1/uploads/presign", bytes.NewReader(body)) + if tt.method == "POST" { + req.Header.Set("Content-Type", "application/json") + } + + rr := httptest.NewRecorder() + handler.HandlePresign(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d. Body: %s", tt.expectedStatus, rr.Code, rr.Body.String()) + } + + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != tt.expectedCode { + t.Errorf("Expected error code '%s', got '%s'", tt.expectedCode, errorResp.Code) + } + }) + } +} + +func TestHandler_HandlePresign_ServiceErrors(t *testing.T) { + // Setup + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + AllowedMimes: []string{"image/jpeg", "image/png"}, + SizeMaxBytes: 5 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + tests := []struct { + name string + serviceError error + expectedStatus int + expectedCode string + }{ + { + name: "MIME not allowed", + serviceError: fmt.Errorf("mime type not allowed: text/plain"), + expectedStatus: http.StatusBadRequest, + expectedCode: ErrMimeNotAllowed, + }, + { + name: "File too large", + serviceError: fmt.Errorf("file size exceeds maximum: 10485760 > 5242880"), + expectedStatus: http.StatusBadRequest, + expectedCode: ErrSizeTooLarge, + }, + { + name: "Generic service error", + serviceError: fmt.Errorf("some other error"), + expectedStatus: http.StatusInternalServerError, + expectedCode: ErrBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockService := &MockUploadService{ + presignUploadFunc: func(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) (*PresignResponse, error) { + return nil, tt.serviceError + }, + } + + handler := &TestHandler{ + uploadService: mockService, + storageConfig: storageConfig, + ctx: context.Background(), + } + + requestBody := PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + } + + body, _ := json.Marshal(requestBody) + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + handler.HandlePresign(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d. 
Body: %s", tt.expectedStatus, rr.Code, rr.Body.String()) + } + + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != tt.expectedCode { + t.Errorf("Expected error code '%s', got '%s'", tt.expectedCode, errorResp.Code) + } + }) + } +} + +func TestHandler_HandlePresign_InvalidJSON(t *testing.T) { + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + }, + }, + } + + handler := &TestHandler{ + uploadService: &MockUploadService{}, + storageConfig: storageConfig, + ctx: context.Background(), + } + + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader([]byte("invalid json"))) + req.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + handler.HandlePresign(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rr.Code) + } + + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != ErrBadRequest { + t.Errorf("Expected error code '%s', got '%s'", ErrBadRequest, errorResp.Code) + } +} + +func TestHandler_writeError(t *testing.T) { + handler := &TestHandler{} + + rr := httptest.NewRecorder() + handler.writeError(rr, http.StatusBadRequest, ErrBadRequest, "Test error", "Test hint") + + // Check status + if rr.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rr.Code) + } + + // Check Content-Type + contentType := rr.Header().Get("Content-Type") + if contentType != "application/json" { + t.Errorf("Expected Content-Type 'application/json', got '%s'", contentType) + } + + // Check response body + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != ErrBadRequest { + t.Errorf("Expected code '%s', got '%s'", ErrBadRequest, errorResp.Code) + } + + if errorResp.Message != "Test error" { + t.Errorf("Expected message 'Test error', got '%s'", errorResp.Message) + } + + if errorResp.Hint != "Test hint" { + t.Errorf("Expected hint 'Test hint', got '%s'", errorResp.Hint) + } +} diff --git a/internal/upload/integration_test.go b/internal/upload/integration_test.go new file mode 100644 index 0000000..039b09d --- /dev/null +++ b/internal/upload/integration_test.go @@ -0,0 +1,444 @@ +package upload + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "mediaflow/internal/auth" + "mediaflow/internal/config" +) + +// Integration tests that test the complete upload flow with authentication +func TestUploadIntegration_WithAuth(t *testing.T) { + // Setup configuration + cfg := &config.Config{ + APIKey: "test-api-key", + } + + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + AllowedMimes: []string{"image/jpeg", "image/png"}, + SizeMaxBytes: 5 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + // Create a mock upload service + mockService := &MockUploadService{ + presignUploadFunc: func(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) 
(*PresignResponse, error) { + return &PresignResponse{ + ObjectKey: "raw/ab/test-key.jpg", + Upload: &UploadDetails{ + Single: &SingleUpload{ + Method: "PUT", + URL: "https://test.s3.amazonaws.com/bucket/raw/ab/test-key.jpg", + Headers: map[string]string{"Content-Type": "image/jpeg"}, + ExpiresAt: time.Now().Add(15 * time.Minute), + }, + }, + }, nil + }, + } + + // Create handler with auth middleware + handler := &TestHandler{ + uploadService: mockService, + storageConfig: storageConfig, + ctx: context.Background(), + } + + // Wrap with auth middleware + authConfig := &auth.Config{APIKey: cfg.APIKey} + middleware := auth.APIKeyMiddleware(authConfig) + authenticatedHandler := middleware(http.HandlerFunc(handler.HandlePresign)) + + tests := []struct { + name string + authHeader string + apiKeyHeader string + requestBody PresignRequest + expectedStatus int + }{ + { + name: "Valid Bearer token", + authHeader: "Bearer test-api-key", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusOK, + }, + { + name: "Valid X-API-Key header", + apiKeyHeader: "test-api-key", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusOK, + }, + { + name: "Invalid Bearer token", + authHeader: "Bearer wrong-key", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusUnauthorized, + }, + { + name: "No authentication", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusUnauthorized, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + body, _ := json.Marshal(tt.requestBody) + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + + if tt.authHeader != "" { + req.Header.Set("Authorization", tt.authHeader) + } + if tt.apiKeyHeader != "" { + req.Header.Set("X-API-Key", tt.apiKeyHeader) + } + + rr := httptest.NewRecorder() + authenticatedHandler.ServeHTTP(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d. 
Body: %s", tt.expectedStatus, rr.Code, rr.Body.String()) + } + + // For successful requests, verify response structure + if tt.expectedStatus == http.StatusOK { + var response PresignResponse + if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil { + t.Errorf("Failed to parse response: %v", err) + } + + if response.ObjectKey == "" { + t.Errorf("Expected non-empty ObjectKey") + } + + if response.Upload == nil { + t.Errorf("Expected Upload details") + } + + if response.Upload.Single == nil { + t.Errorf("Expected Single upload details") + } + } + + // For error requests, verify error structure + if tt.expectedStatus != http.StatusOK { + var errorResp auth.ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code == "" { + t.Errorf("Expected non-empty error code") + } + } + }) + } +} + +func TestUploadIntegration_ValidationFlow(t *testing.T) { + // Setup configuration for validation tests + cfg := &config.Config{ + APIKey: "test-api-key", + } + + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "avatar": { + Kind: "image", + AllowedMimes: []string{"image/jpeg", "image/png"}, + SizeMaxBytes: 1024 * 1024, // 1MB limit for testing + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + // Create real service with mock S3 client for more realistic testing + mockS3 := &MockS3Client{ + presignPutObjectFunc: func(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) { + return "https://test.s3.amazonaws.com/bucket/" + key, nil + }, + } + + realService := NewService(mockS3, &config.Config{S3Bucket: "test-bucket"}) + + handler := &Handler{ + uploadService: realService, + storageConfig: storageConfig, + ctx: context.Background(), + } + + // Wrap with auth middleware + authConfig := &auth.Config{APIKey: cfg.APIKey} + middleware := auth.APIKeyMiddleware(authConfig) + authenticatedHandler := middleware(http.HandlerFunc(handler.HandlePresign)) + + tests := []struct { + name string + requestBody PresignRequest + expectedStatus int + expectedError string + }{ + { + name: "Valid request", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 500000, // 500KB - within limit + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusOK, + }, + { + name: "File too large", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 2 * 1024 * 1024, // 2MB - exceeds 1MB limit + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusBadRequest, + expectedError: "size_too_large", + }, + { + name: "Invalid MIME type", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "txt", + Mime: "text/plain", + SizeBytes: 500000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectedStatus: http.StatusBadRequest, + expectedError: "mime_not_allowed", + }, + { + name: "Invalid profile", + requestBody: PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 500000, + Kind: "image", + Profile: "nonexistent", + Multipart: "auto", + }, + expectedStatus: http.StatusBadRequest, + expectedError: "bad_request", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + body, _ := 
json.Marshal(tt.requestBody) + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer test-api-key") + + rr := httptest.NewRecorder() + authenticatedHandler.ServeHTTP(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d. Body: %s", tt.expectedStatus, rr.Code, rr.Body.String()) + } + + if tt.expectedStatus == http.StatusOK { + var response PresignResponse + if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil { + t.Errorf("Failed to parse response: %v", err) + } + + // Verify response has required fields + if response.ObjectKey == "" { + t.Errorf("Expected non-empty ObjectKey") + } + + if response.Upload == nil || response.Upload.Single == nil { + t.Errorf("Expected Single upload details") + } + + // Verify object key follows template pattern + if !strings.Contains(response.ObjectKey, "raw/") { + t.Errorf("Expected object key to contain 'raw/', got: %s", response.ObjectKey) + } + + if !strings.Contains(response.ObjectKey, ".jpg") { + t.Errorf("Expected object key to contain extension, got: %s", response.ObjectKey) + } + } else { + var errorResp ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &errorResp); err != nil { + t.Errorf("Failed to parse error response: %v", err) + } + + if errorResp.Code != tt.expectedError { + t.Errorf("Expected error code '%s', got '%s'", tt.expectedError, errorResp.Code) + } + } + }) + } +} + +func TestUploadIntegration_MultipartStrategy(t *testing.T) { + // Setup configuration for multipart testing + cfg := &config.Config{ + APIKey: "test-api-key", + } + + storageConfig := &config.StorageConfig{ + UploadOptions: map[string]config.UploadOptions{ + "video": { + Kind: "video", + AllowedMimes: []string{"video/mp4"}, + SizeMaxBytes: 100 * 1024 * 1024, // 100MB + MultipartThresholdMB: 15, // 15MB threshold + PartSizeMB: 8, // 8MB parts + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + }, + }, + } + + // Create mock S3 client for multipart testing + mockS3 := &MockS3Client{ + createMultipartUploadFunc: func(ctx context.Context, key string, headers map[string]string) (string, error) { + return "test-upload-id", nil + }, + presignUploadPartFunc: func(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) { + return "https://test.s3.amazonaws.com/bucket/" + key + "?partNumber=" + string(rune(partNumber+'0')), nil + }, + } + + realService := NewService(mockS3, &config.Config{S3Bucket: "test-bucket"}) + + handler := &Handler{ + uploadService: realService, + storageConfig: storageConfig, + ctx: context.Background(), + } + + // Wrap with auth middleware + authConfig := &auth.Config{APIKey: cfg.APIKey} + middleware := auth.APIKeyMiddleware(authConfig) + authenticatedHandler := middleware(http.HandlerFunc(handler.HandlePresign)) + + // Test multipart upload for large file + requestBody := PresignRequest{ + KeyBase: "large-video", + Ext: "mp4", + Mime: "video/mp4", + SizeBytes: 50 * 1024 * 1024, // 50MB - above 15MB threshold + Kind: "video", + Profile: "video", + Multipart: "auto", + } + + body, _ := json.Marshal(requestBody) + req := httptest.NewRequest("POST", "/v1/uploads/presign", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer test-api-key") + + rr := httptest.NewRecorder() + authenticatedHandler.ServeHTTP(rr, req) + + if rr.Code != 
http.StatusOK { + t.Errorf("Expected status %d, got %d. Body: %s", http.StatusOK, rr.Code, rr.Body.String()) + } + + var response PresignResponse + if err := json.Unmarshal(rr.Body.Bytes(), &response); err != nil { + t.Errorf("Failed to parse response: %v", err) + } + + // Verify multipart response + if response.Upload.Multipart == nil { + t.Errorf("Expected multipart upload details") + } + + if response.Upload.Single != nil { + t.Errorf("Expected no single upload details for large file") + } + + if response.Upload.Multipart.UploadID != "test-upload-id" { + t.Errorf("Expected upload ID 'test-upload-id', got '%s'", response.Upload.Multipart.UploadID) + } + + if len(response.Upload.Multipart.Parts) == 0 { + t.Errorf("Expected part URLs to be generated") + } + + // Verify part numbers are sequential + for i, part := range response.Upload.Multipart.Parts { + expectedPartNumber := i + 1 + if part.PartNumber != expectedPartNumber { + t.Errorf("Expected part number %d, got %d", expectedPartNumber, part.PartNumber) + } + if part.Method != "PUT" { + t.Errorf("Expected PUT method for part, got %s", part.Method) + } + if part.URL == "" { + t.Errorf("Expected non-empty URL for part %d", part.PartNumber) + } + } +} \ No newline at end of file diff --git a/internal/upload/interfaces.go b/internal/upload/interfaces.go new file mode 100644 index 0000000..3ab10c5 --- /dev/null +++ b/internal/upload/interfaces.go @@ -0,0 +1,13 @@ +package upload + +import ( + "context" + "time" +) + +// S3Client interface for dependency injection and testing +type S3Client interface { + CreateMultipartUpload(ctx context.Context, key string, headers map[string]string) (string, error) + PresignPutObject(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) + PresignUploadPart(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) +} \ No newline at end of file diff --git a/internal/upload/service.go b/internal/upload/service.go new file mode 100644 index 0000000..bc305cf --- /dev/null +++ b/internal/upload/service.go @@ -0,0 +1,201 @@ +package upload + +import ( + "context" + "crypto/sha1" + "fmt" + "math" + "strings" + "time" + + "mediaflow/internal/config" +) + +type Service struct { + s3Client S3Client + config *config.Config +} + +func NewService(s3Client S3Client, config *config.Config) *Service { + return &Service{ + s3Client: s3Client, + config: config, + } +} + +// PresignUpload generates presigned URLs for upload based on the request +func (s *Service) PresignUpload(ctx context.Context, req *PresignRequest, uploadOptions *config.UploadOptions) (*PresignResponse, error) { + // Validate MIME type + if !s.isMimeAllowed(req.Mime, uploadOptions.AllowedMimes) { + return nil, fmt.Errorf("mime type not allowed: %s", req.Mime) + } + + // Validate file size + if req.SizeBytes > uploadOptions.SizeMaxBytes { + return nil, fmt.Errorf("file size exceeds maximum: %d > %d", req.SizeBytes, uploadOptions.SizeMaxBytes) + } + + // Generate shard if not provided and sharding is enabled + shard := req.Shard + if shard == "" && uploadOptions.EnableSharding { + shard = GenerateShard(req.KeyBase) + } + + // Build object key from template + objectKey := s.buildObjectKey(uploadOptions.PathTemplate, req.KeyBase, req.Ext, shard) + + // Determine upload strategy + strategy := s.determineStrategy(req.Multipart, req.SizeBytes, uploadOptions.MultipartThresholdMB) + + // Create required headers + headers := s.buildRequiredHeaders(req.Mime) + + // Create 
presigned URLs based on strategy + expiresAt := time.Now().Add(time.Duration(uploadOptions.TokenTTLSeconds) * time.Second) + uploadDetails, err := s.createUploadDetails(ctx, strategy, objectKey, headers, expiresAt, uploadOptions.PartSizeMB, req.SizeBytes) + if err != nil { + return nil, fmt.Errorf("failed to create upload details: %w", err) + } + + return &PresignResponse{ + ObjectKey: objectKey, + Upload: uploadDetails, + }, nil +} + +// Helper methods + +func (s *Service) isMimeAllowed(mime string, allowedMimes []string) bool { + for _, allowed := range allowedMimes { + if mime == allowed { + return true + } + } + return false +} + +func (s *Service) buildObjectKey(template, keyBase, ext, shard string) string { + objectKey := template + + // Replace placeholders in template + objectKey = strings.ReplaceAll(objectKey, "{key_base}", keyBase) + objectKey = strings.ReplaceAll(objectKey, "{ext}", ext) + + // Handle optional shard + if shard != "" { + objectKey = strings.ReplaceAll(objectKey, "{shard?}", shard) + objectKey = strings.ReplaceAll(objectKey, "{shard}", shard) + } else { + // Remove shard placeholders if no shard + objectKey = strings.ReplaceAll(objectKey, "/{shard?}", "") + objectKey = strings.ReplaceAll(objectKey, "{shard?}/", "") + objectKey = strings.ReplaceAll(objectKey, "{shard?}", "") + } + + return objectKey +} + +func (s *Service) determineStrategy(multipart string, sizeBytes int64, thresholdMB int64) string { + thresholdBytes := thresholdMB * 1024 * 1024 + + switch multipart { + case "force": + return "multipart" + case "off": + return "single" + case "auto": + fallthrough + default: + if sizeBytes > thresholdBytes { + return "multipart" + } + return "single" + } +} + +func (s *Service) buildRequiredHeaders(mime string) map[string]string { + headers := map[string]string{ + "Content-Type": mime, + } + + // Note: Server-side encryption disabled for MinIO compatibility + // In production, configure proper SSE based on your storage backend + + return headers +} + +func (s *Service) createUploadDetails(ctx context.Context, strategy, objectKey string, headers map[string]string, expiresAt time.Time, partSizeMB int64, totalSizeBytes int64) (*UploadDetails, error) { + expires := time.Until(expiresAt) + + if strategy == "single" { + // Add If-None-Match header for overwrite prevention + singleHeaders := make(map[string]string) + for k, v := range headers { + singleHeaders[k] = v + } + singleHeaders["If-None-Match"] = "*" + + url, err := s.s3Client.PresignPutObject(ctx, objectKey, expires, singleHeaders) + if err != nil { + return nil, err + } + + return &UploadDetails{ + Single: &SingleUpload{ + Method: "PUT", + URL: url, + Headers: singleHeaders, + ExpiresAt: expiresAt, + }, + }, nil + } + + // For multipart uploads, create the multipart upload and generate part URLs + uploadID, err := s.s3Client.CreateMultipartUpload(ctx, objectKey, headers) + if err != nil { + return nil, fmt.Errorf("failed to create multipart upload: %w", err) + } + + // Calculate number of parts needed + partSizeBytes := partSizeMB * 1024 * 1024 + numParts := int(math.Ceil(float64(totalSizeBytes) / float64(partSizeBytes))) + + // Generate presigned URLs for each part (limit to reasonable number) + maxParts := 100 // Reasonable limit for batch presigning + if numParts > maxParts { + numParts = maxParts + } + + parts := make([]PartUpload, numParts) + for i := 0; i < numParts; i++ { + partNumber := i + 1 + partURL, err := s.s3Client.PresignUploadPart(ctx, objectKey, uploadID, int32(partNumber), expires) + if err 
!= nil { + return nil, fmt.Errorf("failed to presign part %d: %w", partNumber, err) + } + + parts[i] = PartUpload{ + PartNumber: partNumber, + Method: "PUT", + URL: partURL, + Headers: headers, + ExpiresAt: expiresAt, + } + } + + return &UploadDetails{ + Multipart: &MultipartUpload{ + UploadID: uploadID, + PartSize: partSizeBytes, + Parts: parts, + // Note: Complete and Abort operations aren't presignable, + // client must handle these via direct API calls + }, + }, nil +} + +// GenerateShard creates a shard from key_base using SHA1 hash +func GenerateShard(keyBase string) string { + hash := sha1.Sum([]byte(keyBase)) + return fmt.Sprintf("%02x", hash[:1]) // First 2 hex characters +} \ No newline at end of file diff --git a/internal/upload/service_test.go b/internal/upload/service_test.go new file mode 100644 index 0000000..efae285 --- /dev/null +++ b/internal/upload/service_test.go @@ -0,0 +1,463 @@ +package upload + +import ( + "context" + "testing" + "time" + + "mediaflow/internal/config" +) + +// MockS3Client implements S3Client interface for testing +type MockS3Client struct { + createMultipartUploadFunc func(ctx context.Context, key string, headers map[string]string) (string, error) + presignPutObjectFunc func(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) + presignUploadPartFunc func(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) +} + +func (m *MockS3Client) CreateMultipartUpload(ctx context.Context, key string, headers map[string]string) (string, error) { + if m.createMultipartUploadFunc != nil { + return m.createMultipartUploadFunc(ctx, key, headers) + } + return "test-upload-id", nil +} + +func (m *MockS3Client) PresignPutObject(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) { + if m.presignPutObjectFunc != nil { + return m.presignPutObjectFunc(ctx, key, expires, headers) + } + return "https://test.s3.amazonaws.com/bucket/" + key, nil +} + +func (m *MockS3Client) PresignUploadPart(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) { + if m.presignUploadPartFunc != nil { + return m.presignUploadPartFunc(ctx, key, uploadID, partNumber, expires) + } + return "https://test.s3.amazonaws.com/bucket/" + key + "?partNumber=" + string(rune(partNumber)), nil +} + +func TestGenerateShard(t *testing.T) { + tests := []struct { + keyBase string + expected string + }{ + {"test-key-1", "1a"}, + {"test-key-2", "0d"}, + {"different-key", "af"}, + {"", "da"}, // SHA1 of empty string + } + + for _, tt := range tests { + t.Run(tt.keyBase, func(t *testing.T) { + result := GenerateShard(tt.keyBase) + if result != tt.expected { + t.Errorf("GenerateShard(%s) = %s, expected %s", tt.keyBase, result, tt.expected) + } + // Verify it's always 2 hex characters + if len(result) != 2 { + t.Errorf("Expected 2 characters, got %d", len(result)) + } + }) + } +} + +func TestService_isMimeAllowed(t *testing.T) { + service := &Service{} + + tests := []struct { + name string + mime string + allowedMimes []string + expected bool + }{ + { + name: "Allowed MIME type", + mime: "image/jpeg", + allowedMimes: []string{"image/jpeg", "image/png"}, + expected: true, + }, + { + name: "Not allowed MIME type", + mime: "text/plain", + allowedMimes: []string{"image/jpeg", "image/png"}, + expected: false, + }, + { + name: "Empty allowed list", + mime: "image/jpeg", + allowedMimes: []string{}, + expected: false, + }, + { + name: "Case 
sensitive", + mime: "IMAGE/JPEG", + allowedMimes: []string{"image/jpeg"}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := service.isMimeAllowed(tt.mime, tt.allowedMimes) + if result != tt.expected { + t.Errorf("isMimeAllowed(%s, %v) = %t, expected %t", tt.mime, tt.allowedMimes, result, tt.expected) + } + }) + } +} + +func TestService_determineStrategy(t *testing.T) { + service := &Service{} + + tests := []struct { + name string + multipart string + sizeBytes int64 + thresholdMB int64 + expected string + }{ + { + name: "Force multipart", + multipart: "force", + sizeBytes: 1000000, + thresholdMB: 15, + expected: "multipart", + }, + { + name: "Force single", + multipart: "off", + sizeBytes: 50000000, + thresholdMB: 15, + expected: "single", + }, + { + name: "Auto - below threshold", + multipart: "auto", + sizeBytes: 10 * 1024 * 1024, // 10MB + thresholdMB: 15, + expected: "single", + }, + { + name: "Auto - above threshold", + multipart: "auto", + sizeBytes: 20 * 1024 * 1024, // 20MB + thresholdMB: 15, + expected: "multipart", + }, + { + name: "Empty multipart (defaults to auto)", + multipart: "", + sizeBytes: 20 * 1024 * 1024, // 20MB + thresholdMB: 15, + expected: "multipart", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := service.determineStrategy(tt.multipart, tt.sizeBytes, tt.thresholdMB) + if result != tt.expected { + t.Errorf("determineStrategy(%s, %d, %d) = %s, expected %s", tt.multipart, tt.sizeBytes, tt.thresholdMB, result, tt.expected) + } + }) + } +} + +func TestService_buildRequiredHeaders(t *testing.T) { + service := &Service{} + + tests := []struct { + name string + mime string + expected map[string]string + }{ + { + name: "Image MIME type", + mime: "image/jpeg", + expected: map[string]string{ + "Content-Type": "image/jpeg", + }, + }, + { + name: "Video MIME type", + mime: "video/mp4", + expected: map[string]string{ + "Content-Type": "video/mp4", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := service.buildRequiredHeaders(tt.mime) + + for key, expectedValue := range tt.expected { + if result[key] != expectedValue { + t.Errorf("Expected header %s = %s, got %s", key, expectedValue, result[key]) + } + } + }) + } +} + +func TestService_buildObjectKey(t *testing.T) { + service := &Service{} + + tests := []struct { + name string + template string + keyBase string + ext string + shard string + expected string + }{ + { + name: "With shard", + template: "raw/{shard?}/{key_base}.{ext}", + keyBase: "test-key", + ext: "jpg", + shard: "ab", + expected: "raw/ab/test-key.jpg", + }, + { + name: "Without shard", + template: "raw/{shard?}/{key_base}.{ext}", + keyBase: "test-key", + ext: "jpg", + shard: "", + expected: "raw/test-key.jpg", + }, + { + name: "Simple template", + template: "{key_base}.{ext}", + keyBase: "test-key", + ext: "mp4", + shard: "", + expected: "test-key.mp4", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := service.buildObjectKey(tt.template, tt.keyBase, tt.ext, tt.shard) + if result != tt.expected { + t.Errorf("buildObjectKey(%s, %s, %s, %s) = %s, expected %s", tt.template, tt.keyBase, tt.ext, tt.shard, result, tt.expected) + } + }) + } +} + +func TestService_PresignUpload_Validation(t *testing.T) { + mockS3 := &MockS3Client{} + cfg := &config.Config{S3Bucket: "test-bucket"} + service := NewService(mockS3, cfg) + + uploadOptions := &config.UploadOptions{ + AllowedMimes: 
[]string{"image/jpeg", "image/png"}, + SizeMaxBytes: 5 * 1024 * 1024, // 5MB + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{shard?}/{key_base}.{ext}", + EnableSharding: true, + } + + tests := []struct { + name string + request *PresignRequest + expectError bool + errorMsg string + }{ + { + name: "Valid request", + request: &PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectError: false, + }, + { + name: "Invalid MIME type", + request: &PresignRequest{ + KeyBase: "test-key", + Ext: "txt", + Mime: "text/plain", + SizeBytes: 1024000, + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectError: true, + errorMsg: "mime type not allowed: text/plain", + }, + { + name: "File too large", + request: &PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 10 * 1024 * 1024, // 10MB > 5MB limit + Kind: "image", + Profile: "avatar", + Multipart: "auto", + }, + expectError: true, + errorMsg: "file size exceeds maximum: 10485760 > 5242880", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + result, err := service.PresignUpload(ctx, tt.request, uploadOptions) + + if tt.expectError { + if err == nil { + t.Errorf("Expected error but got none") + } else if err.Error() != tt.errorMsg { + t.Errorf("Expected error '%s', got '%s'", tt.errorMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result == nil { + t.Errorf("Expected result but got nil") + } else { + // Validate response structure + if result.ObjectKey == "" { + t.Errorf("Expected non-empty ObjectKey") + } + if result.Upload == nil { + t.Errorf("Expected Upload details") + } + } + } + }) + } +} + +func TestService_PresignUpload_SingleStrategy(t *testing.T) { + mockS3 := &MockS3Client{ + presignPutObjectFunc: func(ctx context.Context, key string, expires time.Duration, headers map[string]string) (string, error) { + return "https://test.s3.amazonaws.com/bucket/" + key, nil + }, + } + cfg := &config.Config{S3Bucket: "test-bucket"} + service := NewService(mockS3, cfg) + + uploadOptions := &config.UploadOptions{ + AllowedMimes: []string{"image/jpeg"}, + SizeMaxBytes: 5 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{key_base}.{ext}", + EnableSharding: false, + } + + request := &PresignRequest{ + KeyBase: "test-key", + Ext: "jpg", + Mime: "image/jpeg", + SizeBytes: 1024000, // 1MB - below threshold + Kind: "image", + Profile: "avatar", + Multipart: "auto", + } + + ctx := context.Background() + result, err := service.PresignUpload(ctx, request, uploadOptions) + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if result.Upload.Single == nil { + t.Errorf("Expected single upload details") + } + + if result.Upload.Multipart != nil { + t.Errorf("Expected no multipart upload details") + } + + if result.Upload.Single.Method != "PUT" { + t.Errorf("Expected PUT method, got %s", result.Upload.Single.Method) + } + + if result.ObjectKey != "raw/test-key.jpg" { + t.Errorf("Expected object key 'raw/test-key.jpg', got '%s'", result.ObjectKey) + } +} + +func TestService_PresignUpload_MultipartStrategy(t *testing.T) { + mockS3 := &MockS3Client{ + createMultipartUploadFunc: func(ctx context.Context, key string, headers map[string]string) (string, error) { + return "test-upload-id", nil + 
}, + presignUploadPartFunc: func(ctx context.Context, key, uploadID string, partNumber int32, expires time.Duration) (string, error) { + return "https://test.s3.amazonaws.com/bucket/" + key + "?partNumber=" + string(rune(partNumber+'0')), nil + }, + } + cfg := &config.Config{S3Bucket: "test-bucket"} + service := NewService(mockS3, cfg) + + uploadOptions := &config.UploadOptions{ + AllowedMimes: []string{"video/mp4"}, + SizeMaxBytes: 100 * 1024 * 1024, + MultipartThresholdMB: 15, + PartSizeMB: 8, + TokenTTLSeconds: 900, + PathTemplate: "raw/{key_base}.{ext}", + EnableSharding: false, + } + + request := &PresignRequest{ + KeyBase: "test-video", + Ext: "mp4", + Mime: "video/mp4", + SizeBytes: 50 * 1024 * 1024, // 50MB - above threshold + Kind: "video", + Profile: "video", + Multipart: "auto", + } + + ctx := context.Background() + result, err := service.PresignUpload(ctx, request, uploadOptions) + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if result.Upload.Multipart == nil { + t.Errorf("Expected multipart upload details") + } + + if result.Upload.Single != nil { + t.Errorf("Expected no single upload details") + } + + if result.Upload.Multipart.UploadID != "test-upload-id" { + t.Errorf("Expected upload ID 'test-upload-id', got '%s'", result.Upload.Multipart.UploadID) + } + + if len(result.Upload.Multipart.Parts) == 0 { + t.Errorf("Expected part URLs to be generated") + } + + // Check that part numbers are sequential + for i, part := range result.Upload.Multipart.Parts { + expectedPartNumber := i + 1 + if part.PartNumber != expectedPartNumber { + t.Errorf("Expected part number %d, got %d", expectedPartNumber, part.PartNumber) + } + if part.Method != "PUT" { + t.Errorf("Expected PUT method for part, got %s", part.Method) + } + } +} \ No newline at end of file diff --git a/internal/upload/types.go b/internal/upload/types.go new file mode 100644 index 0000000..f618a0d --- /dev/null +++ b/internal/upload/types.go @@ -0,0 +1,103 @@ +package upload + +import "time" + +// PresignRequest represents the request to generate presigned URLs +type PresignRequest struct { + KeyBase string `json:"key_base" validate:"required"` + Ext string `json:"ext" validate:"required"` + Mime string `json:"mime" validate:"required"` + SizeBytes int64 `json:"size_bytes" validate:"required,min=1"` + Kind string `json:"kind" validate:"required,oneof=image video"` + Profile string `json:"profile" validate:"required"` + Multipart string `json:"multipart" validate:"oneof=auto force off"` + Shard string `json:"shard,omitempty"` +} + +// PresignResponse represents the response containing presigned URLs +type PresignResponse struct { + ObjectKey string `json:"object_key"` + Upload *UploadDetails `json:"upload"` +} + +// UploadDetails contains the upload strategy details +type UploadDetails struct { + Single *SingleUpload `json:"single,omitempty"` + Multipart *MultipartUpload `json:"multipart,omitempty"` +} + +// SingleUpload contains details for single PUT upload +type SingleUpload struct { + Method string `json:"method"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + ExpiresAt time.Time `json:"expires_at"` +} + +// MultipartUpload contains details for multipart upload +type MultipartUpload struct { + UploadID string `json:"upload_id"` + PartSize int64 `json:"part_size"` + Create *UploadAction `json:"create"` + Parts []PartUpload `json:"parts"` // Pre-generated part URLs + Complete *UploadAction `json:"complete"` + Abort *UploadAction `json:"abort"` +} + +// UploadAction represents an 
upload action (create, complete, abort) +type UploadAction struct { + Method string `json:"method"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + ExpiresAt time.Time `json:"expires_at"` +} + +// PartUpload represents individual part upload details +type PartUpload struct { + PartNumber int `json:"part_number"` + Method string `json:"method"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + ExpiresAt time.Time `json:"expires_at"` +} + + +// UploadPolicy defines upload constraints for different kinds and profiles +type UploadPolicy struct { + Kind string `yaml:"kind"` + Profile string `yaml:"profile"` + AllowedMimes []string `yaml:"allowed_mimes"` + SizeMaxBytes int64 `yaml:"size_max_bytes"` + MultipartThresh int64 `yaml:"multipart_threshold_bytes"` +} + +// UploadConfig contains upload-related configuration +type UploadConfig struct { + MultipartThresholdMB int64 `yaml:"multipart_threshold_mb"` + PartSizeMB int64 `yaml:"part_size_mb"` + TokenTTLSeconds int64 `yaml:"token_ttl_seconds"` + SigningAlgorithm string `yaml:"signing_alg"` + ActiveKeyID string `yaml:"active_kid"` + PathTemplateRaw string `yaml:"path_template_raw"` + EnableSharding bool `yaml:"enable_sharding"` + Policies []UploadPolicy `yaml:"policies"` +} + +// ErrorResponse represents error responses from the upload API +type ErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` + Hint string `json:"hint,omitempty"` + RetryAfterSeconds int `json:"retry_after_seconds,omitempty"` +} + +// Standard error codes +const ( + ErrUnauthorized = "unauthorized" + ErrMimeNotAllowed = "mime_not_allowed" + ErrSizeTooLarge = "size_too_large" + ErrSignatureInvalid = "signature_invalid" + ErrStorageDenied = "storage_denied" + ErrBadRequest = "bad_request" + ErrRateLimited = "rate_limited" +) \ No newline at end of file diff --git a/internal/utils.go b/internal/utils.go index a43e72c..582e662 100644 --- a/internal/utils.go +++ b/internal/utils.go @@ -20,7 +20,7 @@ func GracefulExit(reason string) { fmt.Printf("๐Ÿšจ %s", reason) process, err := os.FindProcess(os.Getpid()) if err == nil { - process.Signal(syscall.SIGTERM) + _ = process.Signal(syscall.SIGTERM) } } diff --git a/main.go b/main.go index 7230538..fffefa9 100644 --- a/main.go +++ b/main.go @@ -12,9 +12,11 @@ import ( utils "mediaflow/internal" "mediaflow/internal/api" + "mediaflow/internal/auth" "mediaflow/internal/config" "mediaflow/internal/response" "mediaflow/internal/service" + "mediaflow/internal/upload" ) func main() { @@ -28,11 +30,24 @@ func main() { } imageAPI := api.NewImageAPI(ctx, imageService, storageConfig) + // Setup upload service and handlers + uploadService := upload.NewService(imageService.S3Client, cfg) + uploadHandler := upload.NewHandler(ctx, uploadService, storageConfig) + + // Setup authentication middleware + authConfig := &auth.Config{APIKey: cfg.APIKey} + authMiddleware := auth.APIKeyMiddleware(authConfig) + mux := http.NewServeMux() - // APIs + // Image APIs (no auth required) mux.HandleFunc("/thumb/{type}/{image_id}", imageAPI.HandleThumbnailTypes) mux.HandleFunc("/originals/{type}/{image_id}", imageAPI.HandleOriginals) + + // Upload APIs (auth required) + mux.Handle("/v1/uploads/presign", authMiddleware(http.HandlerFunc(uploadHandler.HandlePresign))) + + // Health check mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { response.JSON("OK").Write(w) }) diff --git a/mediaflow-test b/mediaflow-test new file mode 100755 index 0000000..876beb2 Binary files 
/dev/null and b/mediaflow-test differ
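
Usage note (not part of the patch): below is a minimal client sketch for the new presign endpoint introduced in this PR. It assumes the server is reachable at http://localhost:8080 and that the API key is available in a MEDIAFLOW_API_KEY environment variable; both, along with the file name and key_base, are placeholders. The request and response field names follow the JSON tags in internal/upload/types.go, the endpoint path and Bearer authentication follow main.go and the integration tests, and the sketch only handles the single-PUT branch of the response.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// Mirrors the JSON shape of upload.PresignRequest from this PR.
type presignRequest struct {
	KeyBase   string `json:"key_base"`
	Ext       string `json:"ext"`
	Mime      string `json:"mime"`
	SizeBytes int64  `json:"size_bytes"`
	Kind      string `json:"kind"`
	Profile   string `json:"profile"`
	Multipart string `json:"multipart"`
}

// Mirrors the parts of upload.PresignResponse needed for a single PUT upload.
type presignResponse struct {
	ObjectKey string `json:"object_key"`
	Upload    struct {
		Single *struct {
			Method  string            `json:"method"`
			URL     string            `json:"url"`
			Headers map[string]string `json:"headers"`
		} `json:"single"`
	} `json:"upload"`
}

func main() {
	data, err := os.ReadFile("avatar.jpg") // placeholder local file
	if err != nil {
		panic(err)
	}

	reqBody, _ := json.Marshal(presignRequest{
		KeyBase:   "user-123-avatar",
		Ext:       "jpg",
		Mime:      "image/jpeg",
		SizeBytes: int64(len(data)),
		Kind:      "image",
		Profile:   "avatar",
		Multipart: "auto",
	})

	// Ask MediaFlow for a presigned upload; auth matches the Bearer scheme used in the tests.
	req, _ := http.NewRequest("POST", "http://localhost:8080/v1/uploads/presign", bytes.NewReader(reqBody))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+os.Getenv("MEDIAFLOW_API_KEY"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var presign presignResponse
	if err := json.NewDecoder(resp.Body).Decode(&presign); err != nil {
		panic(err)
	}
	if presign.Upload.Single == nil {
		panic("expected a single-PUT upload for a file below the multipart threshold")
	}

	// PUT the file to storage, echoing back the headers the service requires
	// (Content-Type plus If-None-Match for overwrite prevention).
	put, _ := http.NewRequest(presign.Upload.Single.Method, presign.Upload.Single.URL, bytes.NewReader(data))
	for k, v := range presign.Upload.Single.Headers {
		put.Header.Set(k, v)
	}
	putResp, err := http.DefaultClient.Do(put)
	if err != nil {
		panic(err)
	}
	defer putResp.Body.Close()

	fmt.Println("uploaded", presign.ObjectKey, "status", putResp.Status)
}

For files above the configured multipart threshold the response instead carries upload.multipart with an upload_id and pre-signed part URLs; as noted in service.go, CompleteMultipartUpload and AbortMultipartUpload are not presignable, so a real client would finish or abort the upload through direct S3 API calls.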