diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..36236ae --- /dev/null +++ b/.dockerignore @@ -0,0 +1,18 @@ +# .dockerignore: exclude files/directories from build context +.git +.gitignore +.dockerignore +Dockerfile +vendor/ +*.md +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.swp +*~ +.DS_Store +db/ +*.conf +*.md \ No newline at end of file diff --git a/.env b/.env index e69de29..dd7ef35 100644 --- a/.env +++ b/.env @@ -0,0 +1,19 @@ +# Manager +MANAGER_IMAGE=ntask_manager:latest +MANAGER_HTTP_PORT=8080 +MANAGER_HTTPS_PORT=8443 + +# Worker +WORKER_IMAGE=ntask_worker:latest +WORKER_REPLICAS=2 +WORKER_REPLICAS_DEV=2 +WORKER_REPLICAS_TEST=100 + +# Database +MYSQL_VERSION=8.0 +DB_PORT=3306 +MYSQL_ROOT_PASSWORD=your_password_root +MYSQL_USER=your_username +MYSQL_PASSWORD=your_password +MYSQL_DATABASE=manager +MYSQL_ROOT_HOST=0.0.0.0 diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml new file mode 100644 index 0000000..f165848 --- /dev/null +++ b/.github/workflows/build-and-release.yml @@ -0,0 +1,67 @@ +name: Build and Release + +on: + push: + tags: + - 'v*' + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + go-version: [1.21.6] # Add more versions if needed + os: [windows, linux, darwin] + arch: [amd64, arm64, 386, arm] + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go-version }} + # Set other Go-related options as needed + + - name: Build binary + run: | + if [ "${{ runner.os }}" = "Windows" ]; then + go build -o ${GITHUB_REPOSITORY}-${{ matrix.os }}-${{ matrix.arch }}.exe + else + go build -o ${GITHUB_REPOSITORY}-${{ matrix.os }}-${{ matrix.arch }} + fi + + release: + needs: build + runs-on: ubuntu-latest + + steps: + - name: Create Release Draft + if: startsWith(github.ref, 'refs/tags/v') + id: create_release + uses: actions/create-release@v1 + with: + tag_name: ${{ github.ref }} + 
release_name: Release ${{ github.ref }} + draft: true + body: | + Release notes for version ${{ github.ref }} + + - name: Upload Release Assets + if: startsWith(github.ref, 'refs/tags/v') + id: upload-release-assets + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: | + ${GITHUB_REPOSITORY}-windows-amd64.exe + ${GITHUB_REPOSITORY}-windows-386.exe + ${GITHUB_REPOSITORY}-linux-amd64 + ${GITHUB_REPOSITORY}-linux-arm64 + ${GITHUB_REPOSITORY}-linux-386 + ${GITHUB_REPOSITORY}-darwin-amd64 + ${GITHUB_REPOSITORY}-darwin-arm64 + asset_name: ${GITHUB_REPOSITORY}-${{ matrix.arch }} diff --git a/.github/workflows/build-manager-dev.yml b/.github/workflows/build-manager-dev.yml index 6688ffa..19a3d34 100644 --- a/.github/workflows/build-manager-dev.yml +++ b/.github/workflows/build-manager-dev.yml @@ -10,46 +10,47 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Checkout uses: actions/checkout@v3 - # manager docker + - name: Set lowercase repository name + id: set_lowercase_repo + run: | + LOWERCASE_REPO=$(echo "${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]') + echo "::set-output name=lowercase_repo::${LOWERCASE_REPO}" + + # Docker - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5 with: - images: ${{ github.actor }}/ntask-manager + images: ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-manager - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ github.actor }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker - id: docker_build - uses: docker/build-push-action@v2 - with: - context: . 
- file: ./manager/Dockerfile - platforms: linux/amd64 - push: true - tags: ${{ secrets.DOCKERHUB_USERNAME }}/ntask-manager:dev - name: Login to GitHub Container Registry run: echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Build and push docker - id: docker_build_ghcr + # Push + - name: Build and push + id: docker_build uses: docker/build-push-action@v5 with: context: . file: ./manager/Dockerfile - platforms: linux/amd64 + platforms: linux/amd64,linux/arm64 push: true - tags: ghcr.io/${{ github.actor }}/ntask-manager:dev + tags: | + ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-manager:dev + ghcr.io/${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-manager:dev + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/build-manager.yml b/.github/workflows/build-manager.yml index ec0bf1a..6e51674 100644 --- a/.github/workflows/build-manager.yml +++ b/.github/workflows/build-manager.yml @@ -2,8 +2,6 @@ name: Docker Build and Publish Manager on: push: - branches: - - 'main' tags: - 'v*' @@ -12,47 +10,47 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Checkout uses: actions/checkout@v3 + - name: Set lowercase repository name + id: set_lowercase_repo + run: | + LOWERCASE_REPO=$(echo "${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]') + echo "::set-output name=lowercase_repo::${LOWERCASE_REPO}" + # manager docker - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5 with: - images: ${{ github.actor }}/ntask-manager + images: ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-manager - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ github.actor }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - 
name: Build and push docker - id: docker_build - uses: docker/build-push-action@v2 - with: - context: . - file: ./manager/Dockerfile - platforms: linux/amd64 - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - name: Login to GitHub Container Registry run: echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Build and push docker - id: docker_build_ghcr + # Push + - name: Build and push + id: docker_build uses: docker/build-push-action@v5 with: context: . file: ./manager/Dockerfile - platforms: linux/amd64 + platforms: linux/amd64,linux/arm64 push: true - tags: ghcr.io/${{ github.actor }}/ntask-manager:latest, ghcr.io/${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} + tags: | + ${{ steps.meta.outputs.tags }} + ghcr.io/${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-manager:latest + ghcr.io/${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/build-worker-dev.yml b/.github/workflows/build-worker-dev.yml new file mode 100644 index 0000000..caf7065 --- /dev/null +++ b/.github/workflows/build-worker-dev.yml @@ -0,0 +1,56 @@ +name: Docker Build and Publish Worker DEV + +on: + push: + branches: + - 'dev' + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set lowercase repository name + id: set_lowercase_repo + run: | + LOWERCASE_REPO=$(echo "${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]') + echo "::set-output name=lowercase_repo::${LOWERCASE_REPO}" + + # Docker + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-worker + + - name: Login to DockerHub + uses: 
docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + run: echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin + + # Push + - name: Build and push + id: docker_build + uses: docker/build-push-action@v5 + with: + context: . + file: ./worker/Dockerfile + platforms: linux/amd64,linux/arm64 + push: true + tags: | + ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-worker:dev + ghcr.io/${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-worker:dev + labels: ${{ steps.meta.outputs.labels }} + diff --git a/.github/workflows/build-worker.yml b/.github/workflows/build-worker.yml index 0c27a54..827194b 100644 --- a/.github/workflows/build-worker.yml +++ b/.github/workflows/build-worker.yml @@ -2,8 +2,6 @@ name: Docker Build and Publish Worker on: push: - branches: - - 'main' tags: - 'v*' @@ -12,47 +10,47 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Checkout uses: actions/checkout@v3 + - name: Set lowercase repository name + id: set_lowercase_repo + run: | + LOWERCASE_REPO=$(echo "${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]') + echo "::set-output name=lowercase_repo::${LOWERCASE_REPO}" + # worker docker - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5 with: - images: ${{ github.actor }}/ntask-worker + images: ${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-worker - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ github.actor }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker - id: docker_build - uses: docker/build-push-action@v2 - with: - context: . 
- file: ./worker/Dockerfile - platforms: linux/amd64 - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - name: Login to GitHub Container Registry run: echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Build and push docker - id: docker_build_ghcr + # Push + - name: Build and push + id: docker_build uses: docker/build-push-action@v5 with: context: . file: ./worker/Dockerfile - platforms: linux/amd64 + platforms: linux/amd64,linux/arm64 push: true - tags: ghcr.io/${{ github.actor }}/ntask-worker:latest, ghcr.io/${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} + tags: | + ${{ steps.meta.outputs.tags }} + ghcr.io/${{ steps.set_lowercase_repo.outputs.lowercase_repo }}-worker:latest + ghcr.io/${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index eae41f5..a96ab9c 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ go.work db/* output/* +nTask +.vscode/* diff --git a/.vscode/config.json b/.vscode/config.json deleted file mode 100644 index 14e8724..0000000 --- a/.vscode/config.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "go.useLanguageServer": true, - "go.lintTool": "golangci-lint", - "go.lintOnSave": "package", - "editor.codeActionsOnSave": { - "source.organizeImports": true, - "source.fixAll": true - }, - "go.formatTool": "goimports", - "go.gopath": "${workspaceFolder}", - "go.goroot": "", - "go.toolsEnvVars": {"GO111MODULE": "on"}, - "go.testFlags": ["-v"], - "go.toolsManagement.autoUpdate": true, - "go.autocompleteUnimportedPackages": true, - "go.inferGopath": true, - "go.buildOnSave": "workspace" - } - \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 316881f..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "go.useLanguageServer": true, - "go.lintTool": 
"golangci-lint", - "go.lintOnSave": "package", - "editor.codeActionsOnSave": { - "source.organizeImports": true, - "source.fixAll": true - }, - "go.formatTool": "goimports", - "go.gopath": "${workspaceFolder}", - "go.goroot": "", - "go.toolsEnvVars": {"GO111MODULE": "on"}, - "go.testFlags": ["-v"], - "go.toolsManagement.autoUpdate": true, - "go.autocompleteUnimportedPackages": true, - "go.inferGopath": true, - "go.buildOnSave": "workspace" -} diff --git a/README.md b/README.md index 2d11892..c6b0f8b 100644 --- a/README.md +++ b/README.md @@ -7,22 +7,22 @@ GitHub releases - GitHub stars + GitHub stars - GitHub forks + GitHub forks - GitHub issues + GitHub issues CodeFactor - - LoC + + - GitHub license + GitHub license
@@ -32,6 +32,13 @@ ntask-worker Docker Image +
+ + ntask-worker Docker Image + + + ntask-worker Docker Image +

@@ -79,6 +86,8 @@ You can connect another API, Telegram bot ot a simple bash script to the manager - **Logging**: Output logging to file is supported, facilitating easy tracking and analysis of task execution. +- **Timeout**: Timeout on tasks. + - **Documentation and Web Interface**: nTask provides Swagger documentation for easy integration and interaction with the API, along with an optional web interface for a more user-friendly experience. ## Installation @@ -140,7 +149,8 @@ The manager requires a configuration file named `manager.conf` to be present in }, "statusCheckSeconds": 10, "StatusCheckDown": 360, - "port": "8080", + "httpPort": 8080, + "httpsPort": 8443, "dbUsername": "your_username", "dbPassword": "your_password", "dbHost": "db", @@ -155,7 +165,8 @@ The manager requires a configuration file named `manager.conf` to be present in - `workers`: A map of worker names and their corresponding tokens for authentication. (In this case all workers use the same token called workers) - `statusCheckSeconds`: The interval in seconds between status check requests from the manager to the workers. - `StatusCheckDown`: The number of seconds after which a worker is marked as down if the status check request fails. -- `port`: The port on which the manager should listen for incoming connections. +- `httpPort`: The port on which the manager should listen for incoming connections without TLS. +- `httpsPort`: The port on which the manager should listen for incoming connections with TLS. - `dbUsername`: The username for the database connection. - `dbPassword`: The password for the database connection. - `dbHost`: The hostname of the database server. 
@@ -173,7 +184,7 @@ The worker requires a configuration file named `workerouter.conf` to be present "name": "", "iddleThreads": 2, "managerIP": "127.0.0.1", - "managerPort": "8080", + "managerPort": 8443, "managerOauthToken": "IeH0vpYFz2Yol6RdLvYZz62TFMv5FF", "CA": "./certs/ca-cert.pem", "insecureModules": true, @@ -381,6 +392,54 @@ ssh -L local_port:remote_server:remote_port -R remote_port:localhost:local_port This command establishes a tunnel between the manager and the worker, allowing secure communication without exposing the API to the internet. +## Using Cloud + +### Digital Ocean + +You can use Digital Ocean as cloud for the workers. + +#### Configure workers and snapshot + +- Install tool in workers +- Start worker as a service +- Shutdown and create snapshot + +#### Configure manager + +cloud.conf + +``` bash +{ + "provider": "digitalocean", + "apiKey": "", + "snapshotName": "ntask-worker", + "servers": 4, + "region": "", + "size": "", + "sshKeys": "a3:32:a3:f3:4a:d4:dd:33:c2:87:98:33:aa:a1:a1:dd", + "sshPort": 22, + "recreate": false +} +``` + +- Replace "ntask-worker" with your snapshot name + + +#### Usage + + Add the following flag to the worker `--configCloudFile ./cloud.conf` + + +You can use my referral link to get 200$ in credit over 60 days: + +DigitalOcean Referral Badge + +WARNING: If you run too many very intensive tasks on the same droplet, the worker may start to crash and become unstable, as tasks are closed and the connection is cut off. Don't run a large number of tasks on the same worker simultaneously causing the CPU usage to reach 100%. (In s-2vcpu-2gb-90gb-intel max 6 threads to execute big tasks) + +### Other clouds + +TODO + ## Global flags The nTask Manager supports the following global flags: @@ -442,11 +501,15 @@ To generate the Swagger documentation, follow these steps: The diagram above illustrates the architecture of the nTask Manager and its interactions with the workers.
## TODO -- Code tests -- DigitalOcean API - - Get list of droplets to SSH connect - - Dinamic number of droplets between min and max -- Optimize small tasks +- [ ] Code tests +- [x] Timeouts +- [x] Notes in tasks +- [x] DigitalOcean API + - [x] Get list of droplets to SSH connect + - [x] Create droplets from snapshot + - [ ] Dinamic number of droplets between min and max +- [ ] Oracle cloud +- [ ] Optimize small tasks ## Author diff --git a/certs/ca-cert.pem b/certs/ca-cert.pem index 0e065e3..8de5bc2 100644 --- a/certs/ca-cert.pem +++ b/certs/ca-cert.pem @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDIzCCAgugAwIBAgIUc3Jh+/9Xusdd/DpNTBkSNFjpWkkwDQYJKoZIhvcNAQEL -BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNDAxMzAx -OTM0MDNaFw0yNDAyMjkxOTM0MDNaMCExDjAMBgNVBAMMBW5UYXNrMQ8wDQYDVQQK -DAZyNHVsY2wwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHD7yHomPx -V3ASuaGT+5s75Yza2HmnpbZ4VvOGI32NHSn7OkLJWSr6IHjzlqrBrk4JnJce6uyk -/cN5EdzU+f8fgYMqFIf6JZRc+/H6ZPvYH/rH9nyuQhHs/oIQgxZTbvitYZ5g1kpC -U2dh8Yu7x2EAW57Qq3q35/06iUYS1oueW8WQ6OILQZS3Vv6GuBfoXNXa8hFKIBHj -503HK2fuyUEKU6uKX5bKRVhhEiVplNTRxFE3il+uOOdWi6naG/Bseid1VP8j2ihB -feK4r9n/HCNmmTJBdW+MObpG2OmmiiPyrgUaRiK3I0nqh2ucsPYzgSCbTb3YcewL -5L66qQ5haqY1AgMBAAGjUzBRMB0GA1UdDgQWBBTeuWAvF4bTXyok6li/DtuOZorb -KTAfBgNVHSMEGDAWgBTeuWAvF4bTXyok6li/DtuOZorbKTAPBgNVHRMBAf8EBTAD -AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCqdGO4Q9d+Bns/S94bMJ/5ybhZ3UjDqkM8 -O20Afqxjv53mPVVEU91Sz+xBCmpkCChnnGY3RdEhXX383GGNRq/2jUYkbk+gILVd -mkVnBRUBySvcHm7Asoj6xVAsml9NJS7xmP9OYXnXJCJ5jUGpqGM/XzvGiMzqhumG -U0C0Pjfs7eUBKnUDz7mVS+ucagATd1tSIxupqpIiRXAnlDe0pkBkHVhhEz55SZ7r -l78Jxf9y5AenyXngf/2f5+8npzoDU73pt3N2gSsa6P3d3E9Rfpyh1az69/uCu8Kx -CLhMAJLGOL3jAtQ9pESQVAT+wTod47NMN7d6oKqYvlwuKRnaynVl +MIIDIzCCAgugAwIBAgIUPnSZ0kOK7WIvRcOjphjoUZcRcNowDQYJKoZIhvcNAQEL +BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNTA3MDgx +NTIxMzhaFw0yNTA4MDcxNTIxMzhaMCExDjAMBgNVBAMMBW5UYXNrMQ8wDQYDVQQK +DAZyNHVsY2wwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCOUUOEe8/u 
+DAhsMqTdichiisLcviOqjzVUWxRGLIBEpP+06ODwKEItPNfZ/3FXPYd1/5WteSVw +rcNucALi9UVLRw1/St3cSVl/n2Iy6if5g3Mn8nprn2f+ucUqcAbNBeeipjIjoaUY +U/ykuQW81ele8COkXZBdsJ6UGOi/Cy5YFy5Vf83JYibM9l4oO97JfWrHweEqVO38 +IsfCXkxgszy4tJBiqP+38uWjovqkUoJ1JYSepZWppfrnF0kYT4HpgKzbnFgSYNVL +1ACmlMCy1KMTX64cnMdRjsRPHoyb8IdgCVTFJJ9MDRe5/t/tHkuyZMWWUb3YBVuL +XpuvUDUiWyGzAgMBAAGjUzBRMB0GA1UdDgQWBBTFaO4SV96B/IzqJ6Kyi7Os2v0u +6TAfBgNVHSMEGDAWgBTFaO4SV96B/IzqJ6Kyi7Os2v0u6TAPBgNVHRMBAf8EBTAD +AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCDHpQK2mN93PGMpiPw3GCoDka1WSYgjbBB +MmiLETBF2dbTZVlXIjAsNQTSSIQomctPCvkqWVRNs6Y4P5BgRWbIQj4v+ADsNK1V +V0IE148L/eJ/csKjY6pppn/Fn0S2EPWVFJ41/KbANV8drlQjNNZa545reb2p4NAS +HjNTCjI58Eu8o7nfsN9OvkCv2HgWOqrZ5Rdyx9zP3bVSFnI9GY/Rx3kqFj1rbnoE +dSqwU4SngM6Riuj6CFRc8E1r0VHbZGIUhUnkcxIIgEZUiGyJo6rx5IBfYNojM2uJ +ExbRM+ZVPiE7vksSDV8ZLVmDH45L66eHedlR9H/7Ogp1EqVYdH4i -----END CERTIFICATE----- diff --git a/certs/ca-cert.srl b/certs/ca-cert.srl index 257266b..19c6fc7 100644 --- a/certs/ca-cert.srl +++ b/certs/ca-cert.srl @@ -1 +1 @@ -360BEDFD841BFB41D34056C05D30992B5F9823D7 +360BEDFD841BFB41D34056C05D30992B5F9823DB diff --git a/certs/ca-key.pem b/certs/ca-key.pem index f84d80e..be3f429 100644 --- a/certs/ca-key.pem +++ b/certs/ca-key.pem @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHD7yHomPxV3AS -uaGT+5s75Yza2HmnpbZ4VvOGI32NHSn7OkLJWSr6IHjzlqrBrk4JnJce6uyk/cN5 -EdzU+f8fgYMqFIf6JZRc+/H6ZPvYH/rH9nyuQhHs/oIQgxZTbvitYZ5g1kpCU2dh -8Yu7x2EAW57Qq3q35/06iUYS1oueW8WQ6OILQZS3Vv6GuBfoXNXa8hFKIBHj503H -K2fuyUEKU6uKX5bKRVhhEiVplNTRxFE3il+uOOdWi6naG/Bseid1VP8j2ihBfeK4 -r9n/HCNmmTJBdW+MObpG2OmmiiPyrgUaRiK3I0nqh2ucsPYzgSCbTb3YcewL5L66 -qQ5haqY1AgMBAAECggEALwl65RnsP9UHeIVAtvUXQ1oEpJnOdVzk9x6kwKeWPUgM -6X5k1asqSpxtuDF7+/QyIHdOBlJAxOPp0qvz4KeKL2mtEr5zOxqyKh6mmSJPmExG -OAX4hDy8e6HQHhK7rc9lF6Mfh4ZbWbzXiv9Go4KDW1BLAMfkYZyB69kQI9dqemqN -wVqqjR/0NNSaUIPommCfjl5fxgO+T62cqQxIT1QG73Mml4gXcoAE5xBGeKf+hFxF -9OckYVRL7KVrN+I8SFu7qbHUV8WQfUC0EtFYrvbsfH5dmddlTj73JprZdtap3JEQ 
-zvnt/NCYF/SY3NKEkO0eU8Ni+lHdmAXvYwX5xcWOwQKBgQDKu+OeQ53YgDAxPWBL -AvywfNsWBGAPmUFuerVF6qYMp+BTtBa8CHcEA0gSYjaGuagce/hppjMFLivNM9WP -kN9PuvSKfElQz8i4Fl7RXHv8h6zGXnu5gVnJ/r8wcG6kA6Kl4qiKiNYCU1GV199B -UcO8NI0507B4uhC2GQkAfVxdWQKBgQD7XNUxqk4diY/jMC3TeHtS+hP4dKxU4+zZ -3ZatNtDxFCh066GkqFth52hRPH3TsT2o3EiTM2wftCwF0skabPqOoxwEN4URMLAW -C1TYGEzYKNYaWh1mNBDQe6TyafrSooX6A25uk4/Ap7JmSv7Ysn/PT0hbDzjwOuyK -4Vixa5aoPQKBgEQ0Mb9swA22EoB+RYb22kwFtS8TCb41sO2aGqIK7xIS6EVAsOVR -c7jF4dlNcUqh6wyqKEhiwYdcoR/H8HD8LCSGoP52EbQ+Myi7XerRUmUCv/18i+M8 -wRhTu75wFMjY8D8eodT5dAYUQb5HgbRX7aHDjD+IGDaFYlng0kZ35jsBAoGBAMxG -aUvvZ4RBoxmysctGApMwgMJNry9d+8Iifq1N+wewpiA+ziKOX5V1BiXezzMWu1Fb -k+9svtYVCiHBZ4V+QzFgBQi4Rf/uXWvM0aq8NNcGeNj5myLP9Uo48Ze/4QME6XSB -DWH3sb+TiTvwfqOEjLHhcJ/wAwnYGRvUfsvQ76LRAoGAEiARGYR70ciGCYfKVVSI -25bu5yKmfL4xDRKKwslww+vwxbZXD4W92ymxGRTNTD1zEP8LRnZUzT0FX0DiLJlu -2mZ4w6WWAsNodY/rDXaWHXQ08839i5x0fdIVdYhwTsnKJ8/+NTZxV27bGasrj5Cy -5U0ohG7Xv5iEq4T+or4hVWQ= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCOUUOEe8/uDAhs +MqTdichiisLcviOqjzVUWxRGLIBEpP+06ODwKEItPNfZ/3FXPYd1/5WteSVwrcNu +cALi9UVLRw1/St3cSVl/n2Iy6if5g3Mn8nprn2f+ucUqcAbNBeeipjIjoaUYU/yk +uQW81ele8COkXZBdsJ6UGOi/Cy5YFy5Vf83JYibM9l4oO97JfWrHweEqVO38IsfC +Xkxgszy4tJBiqP+38uWjovqkUoJ1JYSepZWppfrnF0kYT4HpgKzbnFgSYNVL1ACm +lMCy1KMTX64cnMdRjsRPHoyb8IdgCVTFJJ9MDRe5/t/tHkuyZMWWUb3YBVuLXpuv +UDUiWyGzAgMBAAECggEAKyicu5gaifuvMDS5h4+Jq8yd5Hhq84Xja9YFE3TaXzbU +buXylRH4Y6SgZvVOx2Ca7F9aYlWrU3z2l86zKWWtInOFNdWb63p5ADly+7LD9ah1 +6ybfFjUswfRPRRvdDB7ezJXu/Zw/J14M711GzqnmUEqt3GIdbjQqGwpkxs7auo61 +SoflqM/6LLZbXvJt+kcyKb2jqLBTZyuTFFe4OAEcpO7ia9XvKjOdtvStelLB+IWx +L1g1byY5YzwaLyRi5uqrawTnr+K0Fjw8GIy/sLv8S8cUiku5tRgaXGPcqHNv6n5Q +PerA8hWElTFWc10bydKSnmuZGgsV9dua4m2kZEX6NQKBgQDDc3n1Si61sW18LZuu +EhN4iNv9QHIlGlz6Q8JNzq/jq0H3DCOSsQ4wwthGKEnD/lAD5ZPYy8MgAXACI2co +IF9iFbwTh+B+5dUrdTetQHCWqgO4Z0K96HxnKQhPM6DssvsMTByiB32KFMgLBzmt +4zQaSNo7+MaOdJ/+Y69MLQUvfQKBgQC6Z/LlQlMLi9X15THSaCScCqSeNG1re6xO 
+72CMcX9jaHuG+t6Zj4Xsj5yeRyfDgOECWUGyKH1nuuo7lhCvvEsa+ekRslfXLN8L +ijEXXohiiSuGF1h6Z0OxCqxxTN+iBNrhDlvTciPZHiwwsnPjTwi7n2lvSiW3NiQI +8k5dk3S87wKBgB+Y9Jd+Ja0EASI5MVjj3Tf4UDBSEeIDsBJZj/kKOA9DaYep87pM +W3HwngxBiSNhYsgufkdnMaVj9hqyZIPtL95oix5RMi7xsxOygIzGxSzmUXGnZ/fR +tQLsdEZxn0uE5+cmCt76/QWstGZsBZwiBhNVwco+PsNbAV6b+QQEpzj1AoGAT338 +y1/ZYlcjy1UORhg7ZH/1a7IX53o6JP9YQQw1swLfitR5EStvhU5JEAZwIqNXUVUW ++w7hS9ceBIMeYpL9xGdGyVEwvIZaJ+WqwbiCmOK+ONTD2o8nAtz2ZOrPg5xmawtg +hW9rci+30JyJZypRIxH8OIV6SANCor4SRT2YzukCgYEAh5GRm2QbsJt6iWB+m2MF +Hr+3r8a12OcKUhYKaBhscTuw7t5lC3xo19iNyV1mCJIoAqq4iM+uaBB1qIt3RgwQ +lknJ+CTNTArzcHEaabyPMvuAwltyU57AqblPjdO5jBzQfC2EPIw840G8dIR61wOj +ZWvHh/mrLV97ormONZIuTT0= -----END PRIVATE KEY----- diff --git a/certs/manager/ca-cert.pem b/certs/manager/ca-cert.pem index 0e065e3..8de5bc2 100644 --- a/certs/manager/ca-cert.pem +++ b/certs/manager/ca-cert.pem @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDIzCCAgugAwIBAgIUc3Jh+/9Xusdd/DpNTBkSNFjpWkkwDQYJKoZIhvcNAQEL -BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNDAxMzAx -OTM0MDNaFw0yNDAyMjkxOTM0MDNaMCExDjAMBgNVBAMMBW5UYXNrMQ8wDQYDVQQK -DAZyNHVsY2wwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHD7yHomPx -V3ASuaGT+5s75Yza2HmnpbZ4VvOGI32NHSn7OkLJWSr6IHjzlqrBrk4JnJce6uyk -/cN5EdzU+f8fgYMqFIf6JZRc+/H6ZPvYH/rH9nyuQhHs/oIQgxZTbvitYZ5g1kpC -U2dh8Yu7x2EAW57Qq3q35/06iUYS1oueW8WQ6OILQZS3Vv6GuBfoXNXa8hFKIBHj -503HK2fuyUEKU6uKX5bKRVhhEiVplNTRxFE3il+uOOdWi6naG/Bseid1VP8j2ihB -feK4r9n/HCNmmTJBdW+MObpG2OmmiiPyrgUaRiK3I0nqh2ucsPYzgSCbTb3YcewL -5L66qQ5haqY1AgMBAAGjUzBRMB0GA1UdDgQWBBTeuWAvF4bTXyok6li/DtuOZorb -KTAfBgNVHSMEGDAWgBTeuWAvF4bTXyok6li/DtuOZorbKTAPBgNVHRMBAf8EBTAD -AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCqdGO4Q9d+Bns/S94bMJ/5ybhZ3UjDqkM8 -O20Afqxjv53mPVVEU91Sz+xBCmpkCChnnGY3RdEhXX383GGNRq/2jUYkbk+gILVd -mkVnBRUBySvcHm7Asoj6xVAsml9NJS7xmP9OYXnXJCJ5jUGpqGM/XzvGiMzqhumG -U0C0Pjfs7eUBKnUDz7mVS+ucagATd1tSIxupqpIiRXAnlDe0pkBkHVhhEz55SZ7r -l78Jxf9y5AenyXngf/2f5+8npzoDU73pt3N2gSsa6P3d3E9Rfpyh1az69/uCu8Kx 
-CLhMAJLGOL3jAtQ9pESQVAT+wTod47NMN7d6oKqYvlwuKRnaynVl +MIIDIzCCAgugAwIBAgIUPnSZ0kOK7WIvRcOjphjoUZcRcNowDQYJKoZIhvcNAQEL +BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNTA3MDgx +NTIxMzhaFw0yNTA4MDcxNTIxMzhaMCExDjAMBgNVBAMMBW5UYXNrMQ8wDQYDVQQK +DAZyNHVsY2wwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCOUUOEe8/u +DAhsMqTdichiisLcviOqjzVUWxRGLIBEpP+06ODwKEItPNfZ/3FXPYd1/5WteSVw +rcNucALi9UVLRw1/St3cSVl/n2Iy6if5g3Mn8nprn2f+ucUqcAbNBeeipjIjoaUY +U/ykuQW81ele8COkXZBdsJ6UGOi/Cy5YFy5Vf83JYibM9l4oO97JfWrHweEqVO38 +IsfCXkxgszy4tJBiqP+38uWjovqkUoJ1JYSepZWppfrnF0kYT4HpgKzbnFgSYNVL +1ACmlMCy1KMTX64cnMdRjsRPHoyb8IdgCVTFJJ9MDRe5/t/tHkuyZMWWUb3YBVuL +XpuvUDUiWyGzAgMBAAGjUzBRMB0GA1UdDgQWBBTFaO4SV96B/IzqJ6Kyi7Os2v0u +6TAfBgNVHSMEGDAWgBTFaO4SV96B/IzqJ6Kyi7Os2v0u6TAPBgNVHRMBAf8EBTAD +AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCDHpQK2mN93PGMpiPw3GCoDka1WSYgjbBB +MmiLETBF2dbTZVlXIjAsNQTSSIQomctPCvkqWVRNs6Y4P5BgRWbIQj4v+ADsNK1V +V0IE148L/eJ/csKjY6pppn/Fn0S2EPWVFJ41/KbANV8drlQjNNZa545reb2p4NAS +HjNTCjI58Eu8o7nfsN9OvkCv2HgWOqrZ5Rdyx9zP3bVSFnI9GY/Rx3kqFj1rbnoE +dSqwU4SngM6Riuj6CFRc8E1r0VHbZGIUhUnkcxIIgEZUiGyJo6rx5IBfYNojM2uJ +ExbRM+ZVPiE7vksSDV8ZLVmDH45L66eHedlR9H/7Ogp1EqVYdH4i -----END CERTIFICATE----- diff --git a/certs/manager/cert.pem b/certs/manager/cert.pem index 76dcf27..2b440c7 100644 --- a/certs/manager/cert.pem +++ b/certs/manager/cert.pem @@ -1,20 +1,20 @@ -----BEGIN CERTIFICATE----- -MIIDNDCCAhygAwIBAgIUE0IqD/pf7nIBEpWUjkdfS+QQjrowDQYJKoZIhvcNAQEL -BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNDAxMzAx -OTM0MDNaFw0yNTAxMjkxOTM0MDNaMCMxEDAOBgNVBAMMB01hbmFnZXIxDzANBgNV -BAoMBnI0dWxjbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALzpBMjG -KO7CNOSBJr+JYjZF5tNGU2GWiGwg8YTpxiu/wd9hM7laroZhIXGtiBQecqC5JFyR -BGji5s7FjTI0mOGkettwX3YZGLrokIJu4tG7qBYmeSctvPtybcMgdTQSXsOpgcKP -MEWfEUrL5Hl4RWiPOM57o79wbh15A+AMgOjCp15lJXZI6DxC3F5/Nh5X9SQ6Fz5H -uKvG7cRll2j5f8eq2Zup2m6/vaRt3AgqQKuWoJJBYO4NIyKKyuWlyEnNmsraNSXw -GQp/+GWJgUnEZUso9hPZWCIOfNFQW0WvbDd7x9BoLL1sa1j3g1h9m5UJO1J6mgPU 
-ltwi23jxNyM4RuUCAwEAAaNiMGAwHgYDVR0RBBcwFYcEfwAAAYINbWFuYWdlci5s -b2NhbDAdBgNVHQ4EFgQUri9jEXH/otROYllQJE+VXpgQbZMwHwYDVR0jBBgwFoAU -3rlgLxeG018qJOpYvw7bjmaK2ykwDQYJKoZIhvcNAQELBQADggEBAFVP9Q/vCaND -JAO5qW5Lqv30H/3xERe2noLR+WnAZTjvdU2OMeDKRKbIKFJfan89+rsGc7IwjhZH -u36sMs8Ql9zKgfdRx+a5z/LkuInID121A8C/nukXnJij2Cm3yNyHo531UYmZfT0p -IwsxljLq60lBDvGipHsfVlbIPfIVYW1Aj68xfOdHxdn0WnCwdTsgg9m1mnRdJOG/ -tbWw1QdECgMOeIfDXTyScdlzrOWiw0liFxcfrtvzwYAHrVPld1Jd5Js+0bms7SWp -zCzFbzt/Kf0t6iDc488tpZHlFj+0svWs7MbvOwHi0bNe8l8OgApCFsheeTjGO8IX -V/gY6zSWf+M= +MIIDNDCCAhygAwIBAgIUNgvt/YQb+0HTQFbAXTCZK1+YI9swDQYJKoZIhvcNAQEL +BQAwITEOMAwGA1UEAwwFblRhc2sxDzANBgNVBAoMBnI0dWxjbDAeFw0yNTA3MDgx +NTIxMzhaFw0zNTA3MDYxNTIxMzhaMCMxEDAOBgNVBAMMB01hbmFnZXIxDzANBgNV +BAoMBnI0dWxjbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMNyLtkP +//VjygtvcGDsnqvGGOZIFJgJ7BhL5FUzaRigZMHrZ2RO08v/5nO3cFksHJVIfL91 +UugC1PyX5JwWBR3JTRZsAnHQ8CpD+DTFhjA7u23foyKHXbQhuSNurDhq+nPYBJ7R +1e9H4aCEWXZWBGYLHKqumhGAr0vZOngzbrpByPgGod+zyVGrdNr6aSPkqHBaLydt +WsenSVnh/QAtdp5kzhvFKWaHA05Vr9f9mDOj2jhAsMAZ8iY7juFFIyf2D5DedGBR +hfZBXGhXVA9hroqdqvA0HUC1HKrfob149wf4aFp6omx0IcqO7sL+G5056fUgILok +RV/hP2ekhnWUwFsCAwEAAaNiMGAwHgYDVR0RBBcwFYcEfwAAAYINbWFuYWdlci5s +b2NhbDAdBgNVHQ4EFgQUruGsDBvvX7djX/rCBf7nITFOMJEwHwYDVR0jBBgwFoAU +xWjuElfegfyM6ieisouzrNr9LukwDQYJKoZIhvcNAQELBQADggEBAHW56dRTsEMW +lpgMm8n0sV8FwBtNQx2a8fo6eaw9VD0Oz/nY5sHyxZ3GU+ROyab6hUCRtg5JWYsW +pUohOOFpKwTcSrEE2V8ZhI3O/Fs3A6XR1+vjdu/YQEtoEHEC4lvergqsNaxzQANV +hpcrc8NN/zVQQdOeWtfyc/CiMQjXOWsPBO8+nsw8G9rwKsu9N6jokNNOqWIzNNre +Lv/o6+V/jRR9rAw0cCjyMia/f/mlx0o//sq5nX3i8VO8bZf5ToqsD7NJ31Vye5Db +pnwz7MR7Uu0LxwGPESF8SF4aT1nJN6G9dRp9NeECfBIfoCqU1SZsoUT9UyXd+7O9 +k1NOaJZxGGc= -----END CERTIFICATE----- diff --git a/certs/manager/csr.pem b/certs/manager/csr.pem index 5a8ed53..bcda800 100644 --- a/certs/manager/csr.pem +++ b/certs/manager/csr.pem @@ -1,16 +1,16 @@ -----BEGIN CERTIFICATE REQUEST----- MIICmTCCAYECAQAwIzEQMA4GA1UEAwwHTWFuYWdlcjEPMA0GA1UECgwGcjR1bGNs 
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvOkEyMYo7sI05IEmv4li -NkXm00ZTYZaIbCDxhOnGK7/B32EzuVquhmEhca2IFB5yoLkkXJEEaOLmzsWNMjSY -4aR623BfdhkYuuiQgm7i0buoFiZ5Jy28+3JtwyB1NBJew6mBwo8wRZ8RSsvkeXhF -aI84znujv3BuHXkD4AyA6MKnXmUldkjoPELcXn82Hlf1JDoXPke4q8btxGWXaPl/ -x6rZm6nabr+9pG3cCCpAq5agkkFg7g0jIorK5aXISc2ayto1JfAZCn/4ZYmBScRl -Syj2E9lYIg580VBbRa9sN3vH0GgsvWxrWPeDWH2blQk7UnqaA9SW3CLbePE3IzhG -5QIDAQABoDEwLwYJKoZIhvcNAQkOMSIwIDAeBgNVHREEFzAVhwR/AAABgg1tYW5h -Z2VyLmxvY2FsMA0GCSqGSIb3DQEBCwUAA4IBAQADKhPXSckLeTPlb5/FqxTUno2+ -FzR0hwcIkkgwdmi8DCTOua8Lm+7W0xY7LaFXbxsXgAMEVcqS/qwm7GsRv4UegQQ8 -J/SgRzvKhsBLs9okKd1dZuzwZJPvSJOzSNaJdQNws+yt1mHKHw3A/Dm0ripaEpbM -+7uXs0w4bMpU9Vx4VLQSQRDLSmAFbS1xXsRqaLyIvTkbZ4XAntj+9s8jV2bS8Z8r -/Fpo5HeScfBtq2Wt8klh1K45Iy6fxnyeeEUBAP/Odk7WCnWt2k3HMtVMhWy6IJrA -ovwMvEMM2AlmNBXGZfu4AIk1V6KkYXAEMJ3WDAVPvoNWmpwola7NJc2ndSr3 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw3Iu2Q//9WPKC29wYOye +q8YY5kgUmAnsGEvkVTNpGKBkwetnZE7Ty//mc7dwWSwclUh8v3VS6ALU/JfknBYF +HclNFmwCcdDwKkP4NMWGMDu7bd+jIoddtCG5I26sOGr6c9gEntHV70fhoIRZdlYE +Zgscqq6aEYCvS9k6eDNuukHI+Aah37PJUat02vppI+SocFovJ21ax6dJWeH9AC12 +nmTOG8UpZocDTlWv1/2YM6PaOECwwBnyJjuO4UUjJ/YPkN50YFGF9kFcaFdUD2Gu +ip2q8DQdQLUcqt+hvXj3B/hoWnqibHQhyo7uwv4bnTnp9SAguiRFX+E/Z6SGdZTA +WwIDAQABoDEwLwYJKoZIhvcNAQkOMSIwIDAeBgNVHREEFzAVhwR/AAABgg1tYW5h +Z2VyLmxvY2FsMA0GCSqGSIb3DQEBCwUAA4IBAQATi6aPBo7VJy68YIStuJ7D/oSL +USsyXr/AkUshVBwlhyfOvz6J3IGf2UskchEPQCDXZ907sRQxnL9/O4TtocaErXaY +Z7C9Ho7JriBIZv7RPW9Uv72nIZN2XOYS36D6A8wk0/x7bke4xdgJQKqrngdMRjfE +9rv7WYqzwrUYGSFN4u3pMhVmXlvNUkDPlnIGZmjwoEP3PSi8sI8EzgLntCy+8pJM +EbX0xPy90/fvvswipoxW/TWlUlMLd+1IbGw5XBR6FmI/iWOKHGhZS6y5nGPmxeas +YgysAEpNps/jUzui7nhqeSEfEzf0wZCS+eY00rruYawpWBGzDs3sKo8nZioA -----END CERTIFICATE REQUEST----- diff --git a/certs/manager/key.pem b/certs/manager/key.pem index 3f2dbc9..5002d7a 100644 --- a/certs/manager/key.pem +++ b/certs/manager/key.pem @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC86QTIxijuwjTk 
-gSa/iWI2RebTRlNhlohsIPGE6cYrv8HfYTO5Wq6GYSFxrYgUHnKguSRckQRo4ubO -xY0yNJjhpHrbcF92GRi66JCCbuLRu6gWJnknLbz7cm3DIHU0El7DqYHCjzBFnxFK -y+R5eEVojzjOe6O/cG4deQPgDIDowqdeZSV2SOg8QtxefzYeV/UkOhc+R7irxu3E -ZZdo+X/Hqtmbqdpuv72kbdwIKkCrlqCSQWDuDSMiisrlpchJzZrK2jUl8BkKf/hl -iYFJxGVLKPYT2VgiDnzRUFtFr2w3e8fQaCy9bGtY94NYfZuVCTtSepoD1JbcItt4 -8TcjOEblAgMBAAECggEAKrXAKChtHrX3wWXVvd4wvzaEnmW6+khPZar5D+TOshtz -mK5gRFrVNHqHVi02o93SarYRG4CJ77DFICCX8K7llbZbNHuuiYPZDIECEwtA6swz -j3Z5U7tKi8ruN/yBoxk4JaKQPM2ky0jQXwnECRaBEse2vzBC8EhaDP3MO354MqdT -XRqF0u7uipxTaulnQQvQ3ZFpjlTG/Syrv5Ks4P3NbLVtvtbGBbtHd4hzM472op/G -FklenEKRPedRwMlUm4J+ABClAo6kNHZ/vCPBQDsyOMpb7OTyLaywYFcKFpYYUba7 -pFH2DL7XBTeAOV0G57+j4HRatFBiq1crhle1GtGWbQKBgQD7ZX0skJFfhtFR6w1C -N83IfZ956IFpSD805ivjxis+m009jxVcGVLQEzhUWGyb+WyLXZ0fJ4ifc8Sp2gh2 -u3qZg58BkGEiaCYC73bM8JnleuWxi9ACLcLRQljsw/XzCMVxAYXq2SYTCMMKFpdh -fFiaHjaDnc+CMvqBlO3muZ0/gwKBgQDAXppqPy8NHSC3Zxyz8u0s/V3Uk1QVVl7h -LBH9hSVTZGYjBBZ9zoAr8Stki0PnL8yHyeTF0O7QWhZjhvTxaOHdCa2GbRVZ/rjG -KWv809QEjnFi2VQ/4VgyB0MiUdQHyzpQviqFx6XgRMIftfKXy8LmAFfeD/5t3nYa -HFnHr9xrdwKBgGx9+Q5ReZtrOEyNDxTDtnhO1pMq5yaDelue9dP/wsvrA+OMK2Cq -wRVxJf8ohf6uHszqYpN+YTTHJllS8hIjiJ5Vsjpfj7vkjHr50yBQuWnSpuv/dY5r -J0ddxbiwPSVcZLEHQj7+5bKTNnDVHRGCM06XuVkFsvbyfy+LETxgYF93AoGAT2S8 -Bi2dlaP35LnBtuMD0BWhrCJCCaxj7DrsEd3p0ckV/k2pmrKnY3tdlVmE5N1tZH2G -1b2tUoBbzSfd3+SRk1BzNY+/yCzAxchCsU4bquW/FjTr+JFgfQVSR8/N2omdv8U8 -d4o2g6DdHYlSXiPShGqP2S5wq5es1ons4+VI63ECgYEAuP2rBbC0sRaCud6+HAY7 -weyBlhx01ZNA7LSNWXVW+dj+vgUx0zi1B29ADkg24q+/wNtFQ6dkKLPuu2mugNkb -jfhsZ4igP8RWBmZ1hS++GwPhNsFL7Ua1CzJfAbd4Ra9rTidc13iKCGBu6TjQE/Lk -oU1YINWaJzo+s1V4p3XaW4o= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDDci7ZD//1Y8oL +b3Bg7J6rxhjmSBSYCewYS+RVM2kYoGTB62dkTtPL/+Zzt3BZLByVSHy/dVLoAtT8 +l+ScFgUdyU0WbAJx0PAqQ/g0xYYwO7tt36Mih120Ibkjbqw4avpz2ASe0dXvR+Gg +hFl2VgRmCxyqrpoRgK9L2Tp4M266Qcj4BqHfs8lRq3Ta+mkj5KhwWi8nbVrHp0lZ +4f0ALXaeZM4bxSlmhwNOVa/X/Zgzo9o4QLDAGfImO47hRSMn9g+Q3nRgUYX2QVxo 
+V1QPYa6KnarwNB1AtRyq36G9ePcH+GhaeqJsdCHKju7C/hudOen1ICC6JEVf4T9n +pIZ1lMBbAgMBAAECggEAHU1Fo8pnzAD5fixnc0iWY1dwTToaSE8Yg+4A3hs2kvgd +Ewu5dQryav/DK68hW2UnJeR3u3aaBunBx9r37cTJYJVZBtcLqMez8go9sJ+6hgm2 +iHsCaL9Vt5dku+CwVkcHpI2K58q64cRrmdat2PV9dFUPJ7HErIIYBe2b/ABuBeNq +7Ru6GooiD8Mmw4K7At6ylK5o5fk00I54T5jiOzmOjI+t6Dd0SWQBd2YYR37Qap0M +Q+eJd6xWBq58HrNV7rWrogiX8CCPBRabQZU4QqcHZ6QO7SxUunapqMpBUEIkKnjt +sOnVf2Bvd6G55hcFpmMIF7HOctyAwCc4XvFG1vEb2QKBgQDgDEcAA8Wz5jBjdCk9 +4IgNafLoOYJLNeomP8FNwVpSsl096ah3L4iU4OjWaFn+6e2vGno0MMTfdaV/K+bQ +16OgSa6/k/nJ8/fAat0WMKHRV6zWi6EuiewgJlH6S6gsLLuWg66MVl28dingP/G0 +MZha2TfGloNY7/Y+OE7qNa08gwKBgQDfUa7zppcITdaIO5EFmTgrXvakBpOCefuK +nY4oW44398zgQFrFcntn/pL2chKetY2AEZmVryMbpvIXcR6TSvpmbV0RcVwll3J3 +kubNb0EKokXyzj6zSefRog/9YrEp/fP0HjmzVWpBvL0U38DC1ec+I2al94TzdHBx +YpEp991VSQKBgBRsShSQQ3dQplyKfC/txbkjTUcp5tygP4XZDe/ejjM3mxRHtlmW +9aZMkizpOjrobAgoK7eSMoAEI1/iSpoC7iFwVgOUo5JrMMxTcW/SYw3IJjMwazLo +06NimtpsBxpvwXHI8yJLxSOmyc5tRcUdODwfzNvHYJW82f919ouW9QGtAoGAd6we +RROQ/DI6NOc23/kr0JIjOeMMSW91DONBuv7VR8jTUZkrC4Q9/Yy+1LJsqABCw86q +rzwbZEYilc1YxTX42crhhOdqqfpurQVcCdsEz94F/gfoMcv5Hq3qQ68PoaElijxo +T2IGfxPVf8ooUkCumu39pBLwfEZ5u0lG4WaihYECgYEAih2HvC3skn+tft/Jftwa +R7wE8VyrZB22zQ/gyvETJoFuAwAEOm9c4cMlgMK5uGoFZp7rYIT09vs+m6gc2+Pw +1fxzsGIKyp8icoe6Bg1XoE3oksI/QKt3hyzwLVAogPAwK3rQq/ypEvJHBVJ376ex +b9+ZBGpyyObClK2ERjU1xWY= -----END PRIVATE KEY----- diff --git a/cloud.conf b/cloud.conf new file mode 100644 index 0000000..eabddc4 --- /dev/null +++ b/cloud.conf @@ -0,0 +1,11 @@ +{ + "provider": "digitalocean", + "apiKey": "", + "snapshotName": "ntask-worker", + "servers": 4, + "region": "", + "size": "", + "sshKeys": "a3:32:a3:f3:4a:d4:dd:33:c2:87:98:33:aa:a1:a1:dd", + "sshPort": 22, + "recreate": false +} diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml new file mode 100644 index 0000000..b49914a --- /dev/null +++ b/docker-compose-dev.yml @@ -0,0 +1,56 @@ +version: '3.4' + +services: + + db: + image: mysql:${MYSQL_VERSION:-8.0} + container_name: nTask_db 
+ restart: unless-stopped + env_file: + - .env + ports: + - "${DB_PORT:-3306}:3306" + volumes: + - ./db:/var/lib/mysql + + manager: + image: ${MANAGER_IMAGE:-ntask_manager:latest} + build: + context: . + dockerfile: ./manager/Dockerfile + container_name: nTask_manager + restart: unless-stopped + env_file: + - .env + ports: + - "${MANAGER_HTTP_PORT:-8080}:8080" + - "${MANAGER_HTTPS_PORT:-8443}:8443" + depends_on: + - db + command: manager --swagger --verbose --debug + volumes: + - ./manager.conf:/config/manager.conf + - ./output/:/config/output/ + - ./certs/:/config/certs/ + + worker: + image: ${WORKER_IMAGE:-ntask_worker:latest} + build: + context: . + dockerfile: ./worker/Dockerfile + restart: unless-stopped + env_file: + - .env + depends_on: + - manager + command: worker --verbose --debug + volumes: + - ./worker.conf:/config/worker.conf + - ./certs/:/config/certs/ + # If you’re on Swarm, this will give you N replicas out of the box: + deploy: + replicas: ${WORKER_REPLICAS_DEV:-2} + restart_policy: + condition: any + + \ No newline at end of file diff --git a/docker-compose-test.yml b/docker-compose-test.yml index 97e3a26..7c5646f 100644 --- a/docker-compose-test.yml +++ b/docker-compose-test.yml @@ -1,331 +1,70 @@ version: '3.4' + services: + db: + image: mysql:${MYSQL_VERSION:-8.0} + container_name: nTask_db + restart: unless-stopped + env_file: + - .env + ports: + - "${DB_PORT:-3306}:3306" + volumes: + - ./db:/var/lib/mysql + networks: + - manager_db + manager: - build: + image: ${MANAGER_IMAGE:-ntask_manager:latest} + build: context: . 
dockerfile: ./manager/Dockerfile - restart: unless-stopped container_name: nTask_manager + restart: unless-stopped + env_file: + - .env ports: - - 8080:8080 + - "${MANAGER_HTTP_PORT:-8080}:8080" + - "${MANAGER_HTTPS_PORT:-8443}:8443" depends_on: - db - env_file: .env command: manager --swagger --verbose --debug volumes: - ./manager.conf:/config/manager.conf - ./output/:/config/output/ - ./certs/:/config/certs/ + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: any + networks: + - manager_worker + - manager_db worker: - build: + image: ${WORKER_IMAGE:-ntask_worker:latest} + build: context: . dockerfile: ./worker/Dockerfile restart: unless-stopped - container_name: nTask-worker - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker2: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker2 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker3: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker3 + env_file: + - .env depends_on: - manager - env_file: .env command: worker --verbose --debug volumes: - ./worker.conf:/config/worker.conf - ./certs/:/config/certs/ + deploy: + replicas: ${WORKER_REPLICAS_TEST:-10} + restart_policy: + condition: any + networks: + - manager_worker - worker4: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker4 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker5: -# build: -# context: . 
-# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker5 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker6: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker6 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker7: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker7 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker8: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker8 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker9: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker9 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker10: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker10 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker11: -# build: -# context: . 
-# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker11 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker12: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker12 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker13: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker13 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker14: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker14 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker15: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker15 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker16: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker16 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker17: -# build: -# context: . 
-# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker17 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker18: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker18 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker19: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker19 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - worker20: -# build: -# context: . -# dockerfile: ./worker/Dockerfile - image: ntask-worker - restart: unless-stopped - container_name: nTask-worker20 - depends_on: - - manager - env_file: .env - command: worker --verbose --debug - volumes: - - ./worker.conf:/config/worker.conf - - ./certs/:/config/certs/ - - db: - image: mysql - command: --default-authentication-plugin=caching_sha2_password - restart: unless-stopped -# ports: -# - 3306:3306 - environment: - MYSQL_ROOT_PASSWORD: your_password_root - MYSQL_USER: your_username - MYSQL_PASSWORD: your_password - MYSQL_DATABASE: manager - volumes: - - ./db:/var/lib/mysql +networks: + manager_worker: + driver: bridge + manager_db: + driver: bridge diff --git a/docker-compose.yml b/docker-compose.yml index 8aebbb9..8355c5b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,34 +1,54 @@ version: '3.4' + services: + db: + image: mysql:${MYSQL_VERSION:-8.0} + container_name: nTask_db + restart: unless-stopped + env_file: + - .env + ports: + - "${DB_PORT:-3306}:3306" + volumes: + - ./db:/var/lib/mysql manager: + image: 
${MANAGER_IMAGE:-ntask_manager:latest} build: context: . dockerfile: ./manager/Dockerfile - restart: unless-stopped container_name: nTask_manager + restart: unless-stopped + env_file: + - .env ports: - - 8080:8080 + - "${MANAGER_HTTP_PORT:-8080}:8080" + - "${MANAGER_HTTPS_PORT:-8443}:8443" depends_on: - db - env_file: .env - command: manager --swagger --verbose --debug #--configSSHFile ./ssh.conf + command: manager --swagger volumes: - ./manager.conf:/config/manager.conf - ./output/:/config/output/ - ./certs/:/config/certs/ -# - ./ssh.conf:/config/ssh.conf -# - ./ssh_key:/config/ssh_key - db: - image: mysql - command: --default-authentication-plugin=caching_sha2_password + worker: + image: ${WORKER_IMAGE:-ntask_worker:latest} + build: + context: . + dockerfile: ./worker/Dockerfile restart: unless-stopped -# ports: -# - 3306:3306 - environment: - MYSQL_ROOT_PASSWORD: your_password_root - MYSQL_USER: your_username - MYSQL_PASSWORD: your_password - MYSQL_DATABASE: manager + env_file: + - .env + depends_on: + - manager + command: worker volumes: - - ./db:/var/lib/mysql + - ./worker.conf:/config/worker.conf + - ./certs/:/config/certs/ + # If you’re on Swarm, this will give you N replicas out of the box: + deploy: + replicas: ${WORKER_REPLICAS:-2} + restart_policy: + condition: any + + diff --git a/docs/docs.go b/docs/docs.go index 7cec7c2..89ac4d9 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -91,6 +91,12 @@ const docTemplate = `{ "name": "command", "in": "query" }, + { + "type": "string", + "description": "Task files", + "name": "file", + "in": "query" + }, { "type": "string", "description": "Task name", @@ -546,17 +552,11 @@ const docTemplate = `{ "args": { "type": "string" }, - "fileContent": { - "type": "string" - }, "module": { "type": "string" }, "output": { "type": "string" - }, - "remoteFilePath": { - "type": "string" } } }, @@ -566,14 +566,8 @@ const docTemplate = `{ "args": { "type": "string" }, - "fileContent": { - "type": "string" - }, "module": { "type": 
"string" - }, - "remoteFilePath": { - "type": "string" } } }, @@ -585,6 +579,17 @@ const docTemplate = `{ } } }, + "globalstructs.File": { + "type": "object", + "properties": { + "fileContentB64": { + "type": "string" + }, + "remoteFilePath": { + "type": "string" + } + } + }, "globalstructs.Task": { "type": "object", "properties": { @@ -594,7 +599,7 @@ const docTemplate = `{ "callbackURL": { "type": "string" }, - "command": { + "commands": { "type": "array", "items": { "$ref": "#/definitions/globalstructs.Command" @@ -606,12 +611,21 @@ const docTemplate = `{ "executedAt": { "type": "string" }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/globalstructs.File" + } + }, "id": { "type": "string" }, "name": { "type": "string" }, + "notes": { + "type": "string" + }, "priority": { "type": "integer" }, @@ -619,6 +633,10 @@ const docTemplate = `{ "description": "pending, running, done, failed, deleted", "type": "string" }, + "timeout": { + "description": "timeout in seconds", + "type": "integer" + }, "updatedAt": { "type": "string" }, @@ -633,29 +651,45 @@ const docTemplate = `{ "globalstructs.TaskSwagger": { "type": "object", "properties": { - "command": { + "commands": { "type": "array", "items": { "$ref": "#/definitions/globalstructs.CommandSwagger" } }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/globalstructs.File" + } + }, "name": { "type": "string" }, + "notes": { + "type": "string" + }, "priority": { "type": "integer" + }, + "timeout": { + "description": "timeout in seconds", + "type": "integer" } } }, "globalstructs.Worker": { "type": "object", "properties": { - "IddleThreads": { + "defaultThreads": { "type": "integer" }, "downCount": { "type": "integer" }, + "iddleThreads": { + "type": "integer" + }, "name": { "description": "Workers name (unique)", "type": "string" @@ -683,7 +717,7 @@ const docTemplate = `{ // SwaggerInfo holds exported Swagger Info so clients can modify it var SwaggerInfo = &swag.Spec{ - Version: 
"1.0", + Version: "v0.1", Host: "", BasePath: "/", Schemes: []string{"https", "http"}, diff --git a/docs/swagger.json b/docs/swagger.json index ffc7cee..bd1d361 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -16,7 +16,7 @@ "name": "GPL-3.0", "url": "https://github.com/r4ulcl/nTask/blob/main/LICENSE" }, - "version": "1.0" + "version": "v0.1" }, "basePath": "/", "paths": { @@ -88,6 +88,12 @@ "name": "command", "in": "query" }, + { + "type": "string", + "description": "Task files", + "name": "file", + "in": "query" + }, { "type": "string", "description": "Task name", @@ -543,17 +549,11 @@ "args": { "type": "string" }, - "fileContent": { - "type": "string" - }, "module": { "type": "string" }, "output": { "type": "string" - }, - "remoteFilePath": { - "type": "string" } } }, @@ -563,14 +563,8 @@ "args": { "type": "string" }, - "fileContent": { - "type": "string" - }, "module": { "type": "string" - }, - "remoteFilePath": { - "type": "string" } } }, @@ -582,6 +576,17 @@ } } }, + "globalstructs.File": { + "type": "object", + "properties": { + "fileContentB64": { + "type": "string" + }, + "remoteFilePath": { + "type": "string" + } + } + }, "globalstructs.Task": { "type": "object", "properties": { @@ -591,7 +596,7 @@ "callbackURL": { "type": "string" }, - "command": { + "commands": { "type": "array", "items": { "$ref": "#/definitions/globalstructs.Command" @@ -603,12 +608,21 @@ "executedAt": { "type": "string" }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/globalstructs.File" + } + }, "id": { "type": "string" }, "name": { "type": "string" }, + "notes": { + "type": "string" + }, "priority": { "type": "integer" }, @@ -616,6 +630,10 @@ "description": "pending, running, done, failed, deleted", "type": "string" }, + "timeout": { + "description": "timeout in seconds", + "type": "integer" + }, "updatedAt": { "type": "string" }, @@ -630,29 +648,45 @@ "globalstructs.TaskSwagger": { "type": "object", "properties": { - "command": { + "commands": { 
"type": "array", "items": { "$ref": "#/definitions/globalstructs.CommandSwagger" } }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/globalstructs.File" + } + }, "name": { "type": "string" }, + "notes": { + "type": "string" + }, "priority": { "type": "integer" + }, + "timeout": { + "description": "timeout in seconds", + "type": "integer" } } }, "globalstructs.Worker": { "type": "object", "properties": { - "IddleThreads": { + "defaultThreads": { "type": "integer" }, "downCount": { "type": "integer" }, + "iddleThreads": { + "type": "integer" + }, "name": { "description": "Workers name (unique)", "type": "string" diff --git a/docs/swagger.yaml b/docs/swagger.yaml index c72ab29..a16b721 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -4,38 +4,37 @@ definitions: properties: args: type: string - fileContent: - type: string module: type: string output: type: string - remoteFilePath: - type: string type: object globalstructs.CommandSwagger: properties: args: type: string - fileContent: - type: string module: type: string - remoteFilePath: - type: string type: object globalstructs.Error: properties: error: type: string type: object + globalstructs.File: + properties: + fileContentB64: + type: string + remoteFilePath: + type: string + type: object globalstructs.Task: properties: callbackToken: type: string callbackURL: type: string - command: + commands: items: $ref: '#/definitions/globalstructs.Command' type: array @@ -43,15 +42,24 @@ definitions: type: string executedAt: type: string + files: + items: + $ref: '#/definitions/globalstructs.File' + type: array id: type: string name: type: string + notes: + type: string priority: type: integer status: description: pending, running, done, failed, deleted type: string + timeout: + description: timeout in seconds + type: integer updatedAt: type: string username: @@ -61,21 +69,32 @@ definitions: type: object globalstructs.TaskSwagger: properties: - command: + commands: items: $ref: 
'#/definitions/globalstructs.CommandSwagger' type: array + files: + items: + $ref: '#/definitions/globalstructs.File' + type: array name: type: string + notes: + type: string priority: type: integer + timeout: + description: timeout in seconds + type: integer type: object globalstructs.Worker: properties: - IddleThreads: + defaultThreads: type: integer downCount: type: integer + iddleThreads: + type: integer name: description: Workers name (unique) type: string @@ -92,7 +111,7 @@ info: name: GPL-3.0 url: https://github.com/r4ulcl/nTask/blob/main/LICENSE title: nTask API - version: "1.0" + version: v0.1 paths: /status: get: @@ -131,6 +150,10 @@ paths: in: query name: command type: string + - description: Task files + in: query + name: file + type: string - description: Task name in: query name: name diff --git a/examples/scriptExample2.sh b/examples/scriptExample2.sh new file mode 100644 index 0000000..73ab5b3 --- /dev/null +++ b/examples/scriptExample2.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# Function to send a POST request and get the task ID +function send_post_request() { + local url="$1" + local oauthToken="$2" + local data="$3" + + # Send POST request and capture the task ID + task_id=$(curl -s -k -X POST -H "Authorization: $oauthToken" -H "Content-Type: application/json" -d "$data" "$url" | jq -r '.id') + echo "$task_id" +} + +# Function to check the status of a task using a GET request +function get_task() { + local url="$1" + local oauthToken="$2" + + # Send GET request to check task status + task=$(curl -s -k -H "Authorization: $oauthToken" "$url") + echo "$task" +} + +function wait_task(){ + local url="$1" + local oauthToken="$2" + local task_id="$3" + # Wait for task done + while true; do + task=$(get_task "$url/task/$task_id" "$oauthToken" ) + status=$(echo $task | jq -r '.status') + # Check if the status is not equal to "working" + if [ "$status" == "done" ]; then + #echo "Task completed successfully. 
Status: $status" + + echo $task + break # Exit the loop + else + #echo "Task still in progress. Status: $status" + sleep 1 # Adjust the sleep duration as needed + fi + done +} + +function wait_tasks(){ + local url="$1" + local oauthToken="$2" + shift 2 + local task_ids=("$@") + + output_array=() + pids=() + + echo -n '{"result":[' + + # Loop ids + array_length=${#task_ids[@]} + for ((i=0; i" +1234 +test diff --git a/generateCert.sh b/generateCert.sh index 757114c..abd1a9a 100755 --- a/generateCert.sh +++ b/generateCert.sh @@ -15,7 +15,7 @@ MANAGER_IP="127.0.0.1" MANAGER_HOSTNAME="manager.local" # Set certificate expiration time in days -CERT_EXPIRATION_DAYS=365 +CERT_EXPIRATION_DAYS=3650 # Create directories to store the CA and certificate files mkdir -p certs/${MANAGER_FOLDER} diff --git a/globalstructs/struct.go b/globalstructs/struct.go index 7384310..db094ac 100644 --- a/globalstructs/struct.go +++ b/globalstructs/struct.go @@ -1,6 +1,10 @@ package globalstructs -import "github.com/gorilla/websocket" +import ( + "time" + + "github.com/gorilla/websocket" +) // package for structs used in manager and workers // in case I want to separate the project one day @@ -8,71 +12,92 @@ import "github.com/gorilla/websocket" // Task Struct to store all Task information. 
type Task struct { ID string `json:"id"` - Commands []Command `json:"command"` + Notes string `json:"notes"` + Commands []Command `json:"commands"` + Files []File `json:"files"` Name string `json:"name"` CreatedAt string `json:"createdAt"` UpdatedAt string `json:"updatedAt"` ExecutedAt string `json:"executedAt"` Status string `json:"status"` // pending, running, done, failed, deleted + Duration float64 `json:"duration"` WorkerName string `json:"workerName"` Username string `json:"username"` Priority int `json:"priority"` + Timeout int `json:"timeout"` // timeout in seconds CallbackURL string `json:"callbackURL"` CallbackToken string `json:"callbackToken"` } // Command struct for Commands in a task type Command struct { - Module string `json:"module"` - Args string `json:"args"` - FileContent string `json:"fileContent"` + Module string `json:"module"` + Args string `json:"args"` + Output string `json:"output"` +} + +// File Files struct to encapsulate FileContent and RemoteFilePath +type File struct { + FileContentB64 string `json:"fileContentB64"` RemoteFilePath string `json:"remoteFilePath"` - Output string `json:"output"` } -// Task Struct for swagger docs, for the POST +// TaskSwagger Task Struct for swagger docs, for the POST type TaskSwagger struct { - Commands []CommandSwagger `json:"command"` + Commands []CommandSwagger `json:"commands"` + Files []File `json:"files"` Name string `json:"name"` + Notes string `json:"notes"` Priority int `json:"priority"` + Timeout int `json:"timeout"` // timeout in seconds } -// Command struct for swagger documentation +// CommandSwagger Command struct for swagger documentation type CommandSwagger struct { - Module string `json:"module"` - Args string `json:"args"` - FileContent string `json:"fileContent"` - RemoteFilePath string `json:"remoteFilePath"` + Module string `json:"module"` + Args string `json:"args"` } // Worker struct to store all worker information. 
type Worker struct { // Workers name (unique) - Name string `json:"name"` - IddleThreads int `json:"IddleThreads"` - UP bool `json:"up"` - DownCount int `json:"downCount"` + Name string `json:"name"` + DefaultThreads int `json:"defaultThreads"` + IddleThreads int `json:"iddleThreads"` + UP bool `json:"up"` + DownCount int `json:"downCount"` + UpdatedAt string `json:"updatedAt"` } // WorkerStatus struct to process the worker status response. type WorkerStatus struct { Name string `json:"name"` - IddleThreads int `json:"IddleThreads"` + IddleThreads int `json:"iddleThreads"` WorkingIDs map[string]int `json:"workingIds"` } +// Error struct to JSON error type Error struct { Error string `json:"error"` } // websockets +// Upgrader for websockets var Upgrader = websocket.Upgrader{ - ReadBufferSize: 4096, // 4 kilobytes - WriteBufferSize: 4096, // 4 kilobytes + ReadBufferSize: 8192, // 8 kilobytes + WriteBufferSize: 8192, // 8 kilobytes } +// WebsocketMessage Struct for websocket messages type WebsocketMessage struct { Type string `json:"type"` JSON string `json:"json"` } + +const ( + WriteWait = 10 * time.Second + PongWait = 60 * time.Second + PingPeriod = (PongWait * 9) / 10 // 54 s + MaxMessageSize = 1 << 20 // 1 MiB +) diff --git a/go.mod b/go.mod index 0564157..1d2d860 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,33 @@ module github.com/r4ulcl/nTask -go 1.21.5 +go 1.22.0 + +toolchain go1.23.3 require ( - github.com/go-sql-driver/mysql v1.7.1 + github.com/go-sql-driver/mysql v1.8.1 github.com/gorilla/mux v1.8.1 - github.com/gorilla/websocket v1.5.1 - github.com/spf13/cobra v1.8.0 + github.com/gorilla/websocket v1.5.3 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/swaggo/http-swagger v1.3.4 - github.com/swaggo/swag v1.16.3 - golang.org/x/crypto v0.20.0 + github.com/swaggo/swag v1.16.4 + golang.org/x/crypto v0.30.0 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/KyleBanks/depth v1.2.1 // indirect - 
github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/spec v0.20.6 // indirect - github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/tools v0.7.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/swaggo/files v1.0.1 // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/tools v0.28.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f92cb8f..1101973 100644 --- a/go.sum +++ b/go.sum @@ -1,85 +1,94 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/spec v0.20.6 h1:ich1RQ3WDbfoeTqTAb+5EIxNmpKVJZWBNah9RAT0jIQ= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= 
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe h1:K8pHPVoTgxFJt1lXuIzzOX7zZhZFldJQK/CgKx9BFIc= -github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= 
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= +github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg= github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64a5ww= github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ= -github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= -github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= -golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= -golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go index 2566860..3e61fa7 100644 --- a/main.go +++ b/main.go @@ -28,19 +28,20 @@ import ( // @name Authorization // @description ApiKeyAuth to login -// Config holds configuration parameters. +// Arguments Config holds configuration parameters. 
type Arguments struct { - ConfigFile string - ConfigSSHFile string - Swagger bool - Verbose bool - Debug bool - VerifyAltName bool + ConfigFile string + ConfigSSHFile string + ConfigCloudFile string + Swagger bool + Verbose bool + Debug bool + VerifyAltName bool } func main() { var arguments Arguments - version := "v0.1" + version := "v0.2" var rootCmd = &cobra.Command{ Use: "nTask", Short: "Your program description", @@ -75,6 +76,8 @@ func main() { "configFile", "c", "", "Path to the config file (default: manager.conf)") managerCmd.Flags().StringVarP(&arguments.ConfigSSHFile, "configSSHFile", "f", "", "Path to the config SSH file (default: empty)") + managerCmd.Flags().StringVarP(&arguments.ConfigCloudFile, + "configCloudFile", "C", "", "Path to the config Cloud file (default: empty)") // Add worker subcommand var workerCmd = &cobra.Command{ @@ -108,7 +111,7 @@ func managerStart(arguments *Arguments) { arguments.ConfigFile = "manager.conf" } manager.StartManager(arguments.Swagger, arguments.ConfigFile, - arguments.ConfigSSHFile, arguments.VerifyAltName, arguments.Verbose, arguments.Debug) + arguments.ConfigSSHFile, arguments.ConfigCloudFile, arguments.VerifyAltName, arguments.Verbose, arguments.Debug) } func workerStart(arguments *Arguments) { diff --git a/manager.conf b/manager.conf index 397db60..038ad67 100644 --- a/manager.conf +++ b/manager.conf @@ -7,14 +7,19 @@ "workers": { "workers" : "IeH0vpYFz2Yol6RdLvYZz62TFMv5FF" }, - "statusCheckSeconds": 10, - "StatusCheckDown": 360, - "port": "8080", + "statusCheckSeconds": 1, + "statusCheckDown": 36, + "httpPort": 8080, + "httpsPort": 8443, + "apiReadTimeout": 60, + "apiWriteTimeout": 60, + "apiIdleTimeout": 120, "dbUsername": "your_username", "dbPassword": "your_password", "dbHost": "db", "dbPort": "3306", "dbDatabase": "manager", "diskPath": "", - "certFolder": "./certs/manager/" + "certFolder": "./certs/manager/", + "maxTaskHistory": 1000 } diff --git a/manager/Dockerfile b/manager/Dockerfile index 
4188657..fe05b5d 100644 --- a/manager/Dockerfile +++ b/manager/Dockerfile @@ -1,41 +1,37 @@ +# STEP 1: Compile a static Go binary +FROM golang:1.23-alpine AS builder -# STEP 1 build executable binary -FROM golang:alpine as builder -# copy files for compile -COPY ./certs $GOPATH/src/github.com/r4ulcl/nTask/certs -COPY ./docs $GOPATH/src/github.com/r4ulcl/nTask/docs -COPY ./globalstructs $GOPATH/src/github.com/r4ulcl/nTask/globalstructs -COPY ./go.mod $GOPATH/src/github.com/r4ulcl/nTask/go.mod -COPY ./go.sum $GOPATH/src/github.com/r4ulcl/nTask/go.sum -COPY ./main.go $GOPATH/src/github.com/r4ulcl/nTask/main.go -COPY ./manager $GOPATH/src/github.com/r4ulcl/nTask/manager -COPY ./worker $GOPATH/src/github.com/r4ulcl/nTask/worker - -WORKDIR $GOPATH/src/github.com/r4ulcl/nTask -#get dependancies -#RUN apk -U add alpine-sdk -#RUN go get -d -v -#build the binary -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -installsuffix cgo -ldflags '-w -s' -o /go/bin/nTask - -#create config folder -RUN mkdir /config - -# STEP 2 build a small image -# start from scratch for manager +# Install ca-certificates only +RUN apk add --no-cache ca-certificates + +WORKDIR /go/src/github.com/r4ulcl/nTask + +# Cache modules +COPY go.mod go.sum ./ +RUN go mod download + +# Copy source (dockerignore limits context) +COPY . . 
+ +# Build static binary with optimizations +RUN CGO_ENABLED=0 \ + GOOS=linux GOARCH=amd64 \ + go build -trimpath -ldflags="-s -w" -o /nTask ./main.go + +# STEP 2: Minimal runtime image FROM scratch -#GOPATH doesn-t exists in scratch -ENV GOPATH='/go' -# Copy our static executable -COPY --from=builder /$GOPATH/bin/nTask /$GOPATH/bin/nTask -#Copy SQL file -COPY --from=builder /config/ /config/ -# Copy swagger -COPY --from=builder $GOPATH/src/github.com/r4ulcl/nTask/docs/ /config/docs/ +# Add CA certificates for TLS +COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# Copy binary +COPY --from=builder /nTask /nTask +# Copy configuration and assets +COPY --from=builder /go/src/github.com/r4ulcl/nTask/worker/modules /config/modules +COPY --from=builder /go/src/github.com/r4ulcl/nTask/docs /config/docs -# Set config folder -WORKDIR /config +# Set working directory +WORKDIR /config -ENTRYPOINT ["/go/bin/nTask"] \ No newline at end of file +ENTRYPOINT ["/nTask"] \ No newline at end of file diff --git a/manager/api/API.go b/manager/api/API.go index 158c83e..a3fa778 100644 --- a/manager/api/API.go +++ b/manager/api/API.go @@ -1,15 +1,17 @@ +// Package api to all the nTask manager package api import ( "database/sql" "encoding/json" - "fmt" "log" "net/http" + "github.com/gorilla/mux" "github.com/r4ulcl/nTask/manager/utils" ) +// HandleStatus Get status summary from Manager // @description Get status summary from Manager // @summary Get status summary from Manager // @Tags status @@ -20,8 +22,8 @@ import ( // @failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /status [get] -func HandleStatus(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { - _, ok := r.Context().Value("username").(string) +func HandleStatus(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + _, ok := r.Context().Value(utils.UsernameKey).(string) if !ok { // if not 
username is a worker http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) @@ -29,15 +31,15 @@ func HandleStatus(w http.ResponseWriter, r *http.Request, config *utils.ManagerC } // get all data - task, err1 := utils.GetStatusTask(db, verbose, debug) - worker, err2 := utils.GetStatusWorker(db, verbose, debug) + tasks, err1 := utils.GetStatusTask(db, verbose, debug) + workers, err2 := utils.GetStatusWorker(db, verbose, debug) if err1 != nil || err2 != nil { http.Error(w, "{ \"error\" : \"Invalid callback body Marshal:"+err1.Error()+err2.Error()+"\"}", http.StatusBadRequest) return } status := utils.Status{ - Task: task, - Worker: worker, + Task: tasks, + Worker: workers, } var jsonData []byte @@ -54,5 +56,62 @@ func HandleStatus(w http.ResponseWriter, r *http.Request, config *utils.ManagerC w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(status) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid status encode body:"+err.Error()+"\"}", http.StatusBadRequest) + } +} + +// Generic handler function for fetching and encoding data +func handleEntityStatus[T any](w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool, fetchDataFunc func(*sql.DB, string, bool, bool) (T, error), entityName string) { + _, ok := r.Context().Value(utils.UsernameKey).(string) + if !ok { + http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) + return + } + + vars := mux.Vars(r) + idOrName := vars[entityName] + + // Fetch the entity (task or worker) + entity, err := fetchDataFunc(db, idOrName, verbose, debug) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid "+entityName+" body: "+err.Error()+"\"}", http.StatusBadRequest) + return + } + + // Marshal the entity data into JSON + jsonData, err := json.Marshal(entity) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid Marshal 
body: "+err.Error()+"\"}", http.StatusBadRequest) + return + } + + if debug { + // Print the JSON data + log.Printf("API %s: %s", entityName, string(jsonData)) + } + + // Set the content type and write the response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(entity) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid "+entityName+" encode body: "+err.Error()+"\"}", http.StatusBadRequest) + } +} + +func getUsername(r *http.Request, verbose, debug bool) (bool, string) { + username, ok := r.Context().Value(utils.UsernameKey).(string) + if debug { + log.Println("getUsername", username) + } + if !ok && (debug || verbose) { + log.Println("API { \"error\" : \"Unauthorized\" }") + } + + return ok, username } diff --git a/manager/api/APItask.go b/manager/api/APItask.go index ae81c04..51d0906 100644 --- a/manager/api/APItask.go +++ b/manager/api/APItask.go @@ -5,7 +5,6 @@ import ( "database/sql" "encoding/hex" "encoding/json" - "fmt" "log" "net/http" "sync" @@ -16,6 +15,7 @@ import ( "github.com/r4ulcl/nTask/manager/utils" ) +// HandleTaskGet Get status of tasks // @description Get status of tasks // @summary Get all tasks // @Tags task @@ -23,6 +23,7 @@ import ( // @produce application/json // @param ID query string false "Task ID" // @param command query string false "Task command" +// @param file query string false "Task files" // @param name query string false "Task name" // @param createdAt query string false "Task createdAt" // @param updatedAt query string false "Task updatedAt" @@ -31,6 +32,7 @@ import ( // @param workerName query string false "Task workerName" // @param username query string false "Task username" // @param priority query string false "Task priority" +// @param timeout query string false "Task timeout" // @param callbackURL query string false "Task callbackURL" // @param callbackToken query string false "Task callbackToken" // 
@param limit query int false "limit output DB" @@ -40,10 +42,9 @@ import ( // @Failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /task [get] -func HandleTaskGet(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { - _, ok := r.Context().Value("username").(string) +func HandleTaskGet(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + ok, _ := getUsername(r, verbose, debug) if !ok { - // if not username is a worker http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) return } @@ -74,9 +75,14 @@ func HandleTaskGet(w http.ResponseWriter, r *http.Request, config *utils.Manager w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(tasks) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid tasks encode body:"+err.Error()+"\"}", http.StatusBadRequest) + } } +// HandleTaskPost Add a new tasks // @description Add a new tasks // @summary Add a new tasks // @Tags task @@ -88,12 +94,9 @@ func HandleTaskGet(w http.ResponseWriter, r *http.Request, config *utils.Manager // @Failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /task [post] -func HandleTaskPost(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) { - username, okUser := r.Context().Value("username").(string) - if !okUser { - if debug { - log.Println("API { \"error\" : \"Unauthorized\" }") - } +func HandleTaskPost(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + ok, username := getUsername(r, verbose, debug) + if !ok { http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) return } @@ -131,7 +134,7 @@ func HandleTaskPost(w http.ResponseWriter, r *http.Request, config *utils.Manage } } - err = database.AddTask(db, 
request, verbose, debug, wg) + err = database.AddTask(db, request, verbose, debug) if err != nil { message := "{ \"error\" : \"Invalid task info: " + err.Error() + "\" }" http.Error(w, message, http.StatusBadRequest) @@ -149,18 +152,17 @@ func HandleTaskPost(w http.ResponseWriter, r *http.Request, config *utils.Manage return } - jsonData, err := json.Marshal(task) - if err != nil { - http.Error(w, "{ \"error\" : \"Invalid callback body: "+err.Error()+"\"}", http.StatusBadRequest) - return - } - // Handle the result as needed w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(task) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid tasks encode body:"+err.Error()+"\"}", http.StatusBadRequest) + } } +// HandleTaskDelete Delete a tasks // @description Delete a tasks // @summary Delete a tasks // @Tags task @@ -172,8 +174,8 @@ func HandleTaskPost(w http.ResponseWriter, r *http.Request, config *utils.Manage // @Failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /task/{ID} [delete] -func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { - _, ok := r.Context().Value("username").(string) +func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex) { + _, ok := r.Context().Value(utils.UsernameKey).(string) if !ok { http.Error(w, "{ \"error\" : \"Username not found\" }", http.StatusUnauthorized) return @@ -193,7 +195,7 @@ func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.Mana // Has a worker set, check if its running if task.Status == "running" { // If its runing send stop signal to worker - err = utils.SendDeleteTask(db, config, &worker, &task, verbose, debug, wg, writeLock) + 
err = utils.SendDeleteTask(db, config, &worker, &task, verbose, debug, writeLock) if err != nil { http.Error(w, "{ \"error\" : \""+err.Error()+"\" }", http.StatusBadRequest) return @@ -208,7 +210,7 @@ func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.Mana return }*/ // Set task as running - err = database.SetTaskStatus(db, id, "deleted", verbose, debug, wg) + err = database.SetTaskStatus(db, id, "deleted", verbose, debug) if err != nil { log.Println("Utils Error SetTaskStatus in request:", err) } @@ -216,17 +218,16 @@ func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.Mana // Return task with deleted status task.Status = "deleted" - jsonData, err := json.Marshal(task) - if err != nil { - http.Error(w, "{ \"error\" : \"Invalid callback body: "+err.Error()+"\"}", http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(task) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid tasks encode body:"+err.Error()+"\"}", http.StatusBadRequest) + } } +// HandleTaskStatus Get status of a task // @description Get status of a task // @summary Get status of a task // @Tags task @@ -238,39 +239,8 @@ func HandleTaskDelete(w http.ResponseWriter, r *http.Request, config *utils.Mana // @Failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /task/{ID} [get] -func HandleTaskStatus(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { - _, ok := r.Context().Value("username").(string) - if !ok { - http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) - return - } - - vars := mux.Vars(r) - id := vars["ID"] - - // Access worker to update info if status running - // get task from ID - task, err := database.GetTask(db, id, verbose, debug) - if err != nil { - http.Error(w, 
"{ \"error\" : \"Invalid GetTask body: "+err.Error()+"\"}", http.StatusBadRequest) - - return - } - - jsonData, err := json.Marshal(task) - if err != nil { - http.Error(w, "{ \"error\" : \"Invalid Marshal body: "+err.Error()+"\"}", http.StatusBadRequest) - return - } - - if debug { - // Print the JSON data - log.Println("API get task: ", string(jsonData)) - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) +func HandleTaskStatus(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + handleEntityStatus(w, r, db, verbose, debug, database.GetTask, "ID") } // generateRandomID generates a random ID of the specified length @@ -288,5 +258,9 @@ func generateRandomID(length int, verbose, debug bool) (string, error) { // Convert random bytes to hex string randomID := hex.EncodeToString(randomBytes) + if verbose || debug { + log.Println("generateRandomID executed", randomID) + } + return randomID, nil } diff --git a/manager/api/APIworkers.go b/manager/api/APIworkers.go index 4b19496..7880c3b 100644 --- a/manager/api/APIworkers.go +++ b/manager/api/APIworkers.go @@ -8,8 +8,8 @@ import ( "net" "net/http" "sync" + "time" - "github.com/go-sql-driver/mysql" "github.com/gorilla/mux" globalstructs "github.com/r4ulcl/nTask/globalstructs" "github.com/r4ulcl/nTask/manager/database" @@ -17,7 +17,7 @@ import ( "github.com/r4ulcl/nTask/manager/websockets" ) -// HandleWorker Get handles the request to get workers +// HandleWorkerGet Get handles the request to get workers // @description Handle worker request // @summary Get workers // @Tags worker @@ -28,8 +28,8 @@ import ( // @failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /worker [get] -func HandleWorkerGet(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { - username, ok := r.Context().Value("username").(string) +func HandleWorkerGet(w http.ResponseWriter, r 
*http.Request, db *sql.DB, verbose, debug bool) { + username, ok := r.Context().Value(utils.UsernameKey).(string) if !ok { log.Println("API username", username) http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) @@ -58,7 +58,11 @@ func HandleWorkerGet(w http.ResponseWriter, r *http.Request, config *utils.Manag w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) + // Use json.NewEncoder for safe encoding + err = json.NewEncoder(w).Encode(workers) + if err != nil { + http.Error(w, "{ \"error\" : \"Invalid workers encode body:"+err.Error()+"\"}", http.StatusBadRequest) + } } // HandleWorkerPost handles the request to add a worker @@ -73,9 +77,9 @@ func HandleWorkerGet(w http.ResponseWriter, r *http.Request, config *utils.Manag // @failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /worker [post] -func HandleWorkerPost(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) { - _, okUser := r.Context().Value("username").(string) - _, okWorker := r.Context().Value("worker").(string) +func HandleWorkerPost(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + _, okUser := r.Context().Value(utils.UsernameKey).(string) + _, okWorker := r.Context().Value(utils.WorkerKey).(string) if !okUser && !okWorker { http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) return @@ -88,7 +92,7 @@ func HandleWorkerPost(w http.ResponseWriter, r *http.Request, config *utils.Mana http.Error(w, "{ \"error\" : \"Invalid Decode body: "+err.Error()+"\"}", http.StatusBadRequest) } - err = addWorker(worker, db, verbose, debug, wg) + err = addWorker(worker, db, verbose, debug) if err != nil { http.Error(w, "{ \"error\" : \"Invalid Decode body: "+err.Error()+"\"}", http.StatusBadRequest) } @@ -98,44 +102,26 @@ func HandleWorkerPost(w http.ResponseWriter, r *http.Request, config 
*utils.Mana w.WriteHeader(http.StatusOK) } -func addWorker(worker globalstructs.Worker, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) error { +func addWorker(worker globalstructs.Worker, db *sql.DB, verbose, debug bool) error { if debug { log.Println("API worker.Name", worker.Name) } - err := database.AddWorker(db, &worker, verbose, debug, wg) + err := database.AddWorker(db, &worker, verbose, debug) if err != nil { - if mysqlErr, ok := err.(*mysql.MySQLError); ok { - if mysqlErr.Number == 1062 { // MySQL error number for duplicate entry - // Set as 'pending' all workers tasks to REDO - err = database.SetTasksWorkerPending(db, worker.Name, verbose, debug, wg) - if err != nil { - return err - } - - // set worker up - err = database.SetWorkerUPto(true, db, &worker, verbose, debug, wg) - if err != nil { - return err - } - - // reset down count - err = database.SetWorkerDownCount(0, db, &worker, verbose, debug, wg) - if err != nil { - return err - } - } + err = utils.HandleAddWorkerError(err, db, &worker, verbose, debug) + if err != nil { return err } - } return nil } -func HandleWorkerPostWebsocket(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { - _, okWorker := r.Context().Value("worker").(string) +// HandleWorkerPostWebsocket HandleWorkerPostWebsocket +func HandleWorkerPostWebsocket(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex) { + _, okWorker := r.Context().Value(utils.WorkerKey).(string) if !okWorker { if verbose { log.Println("API HandleCallback: { \"error\" : \"Unauthorized\" }") @@ -152,8 +138,13 @@ func HandleWorkerPostWebsocket(w http.ResponseWriter, r *http.Request, config *u return } + if tcpConn, ok := conn.UnderlyingConn().(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + //go - websockets.GetWorkerMessage(conn, config, 
db, verbose, debug, wg, writeLock) + websockets.GetWorkerMessage(conn, config, db, verbose, debug) } @@ -169,9 +160,9 @@ func HandleWorkerPostWebsocket(w http.ResponseWriter, r *http.Request, config *u // @failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /worker/{NAME} [delete] -func HandleWorkerDeleteName(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) { - _, okUser := r.Context().Value("username").(string) - _, okWorker := r.Context().Value("worker").(string) +func HandleWorkerDeleteName(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + _, okUser := r.Context().Value(utils.UsernameKey).(string) + _, okWorker := r.Context().Value(utils.WorkerKey).(string) if !okUser && !okWorker { http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) return @@ -180,9 +171,7 @@ func HandleWorkerDeleteName(w http.ResponseWriter, r *http.Request, config *util vars := mux.Vars(r) name := vars["NAME"] - // TODO - - err := database.RmWorkerName(db, name, verbose, debug, wg) + err := database.RmWorkerName(db, name, verbose, debug) if err != nil { http.Error(w, "{ \"error\" : \"RmWorkerName: "+err.Error()+"\"}", http.StatusBadRequest) @@ -206,44 +195,15 @@ func HandleWorkerDeleteName(w http.ResponseWriter, r *http.Request, config *util // @failure 403 {object} globalstructs.Error // @security ApiKeyAuth // @router /worker/{NAME} [get] -func HandleWorkerStatus(w http.ResponseWriter, r *http.Request, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { - _, ok := r.Context().Value("username").(string) - if !ok { - http.Error(w, "{ \"error\" : \"Unauthorized\" }", http.StatusUnauthorized) - return - } - - vars := mux.Vars(r) - name := vars["NAME"] - - worker, err := database.GetWorker(db, name, verbose, debug) - if err != nil { - http.Error(w, "{ \"error\" : \"Invalid GetWorker body: "+err.Error()+"\"}", http.StatusBadRequest) - - 
return - } - - jsonData, err := json.Marshal(worker) - if err != nil { - http.Error(w, "{ \"error\" : \"Invalid Marshal body: "+err.Error()+"\"}", http.StatusBadRequest) - - return - } - - if debug { - // Print the JSON data - log.Println("API HandleWorkerStatus", string(jsonData)) - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, string(jsonData)) +func HandleWorkerStatus(w http.ResponseWriter, r *http.Request, db *sql.DB, verbose, debug bool) { + handleEntityStatus(w, r, db, verbose, debug, database.GetWorker, "NAME") } // Other functions -// ReadUserIP reads the user's IP address from the request -func ReadUserIP(r *http.Request, verbose, debug bool) string { +/* +// readUserIP reads the user's IP address from the request +func readUserIP(r *http.Request, verbose, debug bool) string { IPAddress := r.Header.Get("X-Real-Ip") if IPAddress == "" { IPAddress = r.Header.Get("X-Forwarded-For") @@ -261,3 +221,4 @@ func ReadUserIP(r *http.Request, verbose, debug bool) string { // If there's an error (e.g., no port found), return the original address return IPAddress } +*/ diff --git a/manager/cloud/digitalocean.go b/manager/cloud/digitalocean.go new file mode 100644 index 0000000..395eadf --- /dev/null +++ b/manager/cloud/digitalocean.go @@ -0,0 +1,451 @@ +// Package cloud to all the nTask manager cloud management +package cloud + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/r4ulcl/nTask/manager/utils" +) + +const digitalOceanBaseURL = "https://api.digitalocean.com/v2" + +// DigitalOceanClient represents the DigitalOcean API client +type DigitalOceanClient struct { + Token string +} + +// Droplet represents a DigitalOcean droplet +type Droplet struct { + ID int `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + Networks struct { + V4 []struct { + IPAddress string `json:"ip_address"` + 
} `json:"v4"` + } `json:"networks"` + // Add other fields as needed +} + +// ProcessDigitalOcean Process Digital Ocean config +func ProcessDigitalOcean(configCloud *utils.ManagerCloudConfig, configSSH *utils.ManagerSSHConfig, verbose, debug bool) { + doClient := &DigitalOceanClient{Token: configCloud.APIKey} + + // Step 1: Check if snapshot exists + snapshot, err := getSnapshotByName(doClient, configCloud.SnapshotName) + if err != nil { + log.Fatal("Error GetSnapshotByName:", err) + } + + // Step 2: Recreate droplets if needed + if configCloud.Recreate { + deleteDropletsByPrefix(doClient, configCloud.SnapshotName, debug) + } + + // Step 3: List current droplets and create new ones if necessary + droplets, err := listDroplets(doClient, configCloud.SnapshotName, debug) + if err != nil { + fmt.Println("Error:", err) + return + } + + // Step 4: Create missing droplets from snapshot if needed + createMissingDroplets(doClient, configCloud, snapshot, droplets, debug, verbose) + + // Step 5: Get all IPs and add to SSH config + updateSSHConfigWithIPs(doClient, configCloud.SnapshotName, configCloud.SSHPort, configSSH) +} + +// Helper function: Get snapshot by name +func getSnapshotByName(doClient *DigitalOceanClient, snapshotName string) (*Snapshot, error) { + return doClient.GetSnapshotByName(context.Background(), snapshotName) +} + +// Helper function: Delete droplets by prefix +func deleteDropletsByPrefix(doClient *DigitalOceanClient, snapshotName string, debug bool) { + if debug { + log.Println("Delete all droplets with prefix:", snapshotName) + } + err := doClient.DeleteDropletsByPrefix(context.Background(), snapshotName) + if err != nil { + log.Fatal("Error DeleteDropletsByPrefix:", err) + } +} + +// Helper function: List droplets by prefix +func listDroplets(doClient *DigitalOceanClient, snapshotName string, debug bool) ([]Droplet, error) { + if debug { + log.Println("List droplets by prefix:", snapshotName) + } + return 
doClient.ListDropletsByPrefix(context.Background(), snapshotName) +} + +// Helper function: Create missing droplets +func createMissingDroplets(doClient *DigitalOceanClient, configCloud *utils.ManagerCloudConfig, snapshot *Snapshot, droplets []Droplet, debug, verbose bool) { + numDroplets := len(droplets) + if numDroplets < configCloud.Servers { + missingDroplets := configCloud.Servers - numDroplets + if debug { + log.Println("Creating multiple droplets from snapshot") + } + ids, err := doClient.CreateXDropletsFromSnapshot(context.Background(), configCloud.SnapshotName, snapshot.ID, configCloud.Region, configCloud.Size, configCloud.SSHKeys, missingDroplets, numDroplets) + if err != nil { + log.Println("Error CreateXDropletsFromSnapshot:", err) + } + + // Wait until all droplets have an IP + waitForDropletCreation(doClient, ids, verbose, debug) + } +} + +// Helper function: Wait for droplet creation and log IPs +func waitForDropletCreation(doClient *DigitalOceanClient, ids []int, verbose, debug bool) { + for _, id := range ids { + if debug || verbose { + log.Println("Waiting for droplet:", id) + } + ip, err := doClient.WaitForDropletCreation(context.Background(), id, verbose, debug) + if err != nil { + log.Println("Error WaitForDropletCreation:", err) + } + if debug { + log.Println("Droplet with ID:", id, " IP:", ip) + } + } +} + +// Helper function: Get all IPs and update SSH config +func updateSSHConfigWithIPs(doClient *DigitalOceanClient, snapshotName string, sshPort int, configSSH *utils.ManagerSSHConfig) { + ips, err := doClient.GetDropletIPsByPrefix(context.Background(), snapshotName) + if err != nil { + fmt.Println("Error:", err) + } + + for _, ip := range ips { + log.Println(ip) + configSSH.IPPort[ip] = fmt.Sprint(sshPort) + } +} + +// ListDropletByID retrieves a droplet by ID +func (c *DigitalOceanClient) ListDropletByID(ctx context.Context, id int) (*Droplet, error) { + droplets, err := c.ListDroplets(ctx) + if err != nil { + return nil, err + } + + for _, 
droplet := range droplets { + if droplet.ID == id { + return &droplet, nil + } + } + + return nil, fmt.Errorf("Droplet with ID %d not found", id) +} + +// ListSnapshots retrieves all snapshots associated with the account +func (c *DigitalOceanClient) ListSnapshots(ctx context.Context) ([]Snapshot, error) { + var snapshots struct { + Snapshots []Snapshot `json:"snapshots"` + } + + err := c.fetchResources(ctx, "/snapshots", &snapshots) + if err != nil { + return nil, err + } + + return snapshots.Snapshots, nil +} + +// ListDroplets retrieves all droplets associated with the account +func (c *DigitalOceanClient) ListDroplets(ctx context.Context) ([]Droplet, error) { + var droplets struct { + Droplets []Droplet `json:"droplets"` + } + + err := c.fetchResources(ctx, "/droplets", &droplets) + if err != nil { + return nil, err + } + + return droplets.Droplets, nil +} + +// ListDropletsByPrefix retrieves droplets filtered by name prefix +func (c *DigitalOceanClient) ListDropletsByPrefix(ctx context.Context, prefix string) ([]Droplet, error) { + droplets, err := c.ListDroplets(ctx) + if err != nil { + return nil, err + } + + var filtered []Droplet + for _, droplet := range droplets { + if len(droplet.Name) >= len(prefix) && droplet.Name[:len(prefix)] == prefix { + filtered = append(filtered, droplet) + } + } + + return filtered, nil +} + +// GetDropletIPsByPrefix retrieves IP addresses of droplets filtered by name prefix +func (c *DigitalOceanClient) GetDropletIPsByPrefix(ctx context.Context, prefix string) ([]string, error) { + droplets, err := c.ListDropletsByPrefix(ctx, prefix) + if err != nil { + return nil, err + } + + var ips []string + for _, droplet := range droplets { + for _, v4 := range droplet.Networks.V4 { + if isPublicIP(v4.IPAddress) { + ips = append(ips, v4.IPAddress) + } + } + } + + return ips, nil +} + +// isPublicIP checks if an IP address is public +func isPublicIP(ipAddress string) bool { + ip := net.ParseIP(ipAddress) + if ip == nil { + return false + 
} + + // Private IP ranges as per RFC 1918 + privateRanges := []string{ + "10.", // 10.0.0.0 - 10.255.255.255 + "172.16.", // 172.16.0.0 - 172.31.255.255 + "192.168.", // 192.168.0.0 - 192.168.255.255 + "100.64.", // 100.64.0.0 - 100.127.255.255 (Shared Address Space RFC 6598) + "169.254.", // 169.254.0.0 - 169.254.255.255 (Link-local address) + } + + for _, pr := range privateRanges { + if strings.HasPrefix(ipAddress, pr) { + return false + } + } + + return !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() +} + +// Snapshot represents a DigitalOcean snapshot +type Snapshot struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// GetSnapshotByName retrieves a snapshot by its exact name +func (c *DigitalOceanClient) GetSnapshotByName(ctx context.Context, snapshotName string) (*Snapshot, error) { + snapshots, err := c.ListSnapshots(ctx) + if err != nil { + return nil, err + } + + for _, snapshot := range snapshots { + if snapshot.Name == snapshotName { + return &snapshot, nil + } + } + + return nil, errors.New("snapshot not found") +} + +// GetSnapshotsByPrefix retrieves snapshots filtered by name prefix +func (c *DigitalOceanClient) GetSnapshotsByPrefix(ctx context.Context, prefix string) ([]Snapshot, error) { + snapshots, err := c.ListSnapshots(ctx) + if err != nil { + return nil, err + } + + var filtered []Snapshot + for _, snapshot := range snapshots { + if len(snapshot.Name) >= len(prefix) && snapshot.Name[:len(prefix)] == prefix { + filtered = append(filtered, snapshot) + } + } + + return filtered, nil +} + +// RequestPayload Request Payload for Digital Ocean +type RequestPayload struct { + Names []string `json:"names"` + Region string `json:"region"` + Size string `json:"size"` + Image string `json:"image"` + SSHKeys []interface{} `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + Monitoring bool `json:"monitoring"` + Tags []string `json:"tags"` +} + +// DropletResponse Droplet Response +type 
DropletResponse struct { + ID int `json:"id"` +} + +// DigitalOceanResponse Digital Ocean Response +type DigitalOceanResponse struct { + Droplets []DropletResponse `json:"droplets"` +} + +// sendRequest is a reusable helper function to handle HTTP requests. +// It supports different HTTP methods and payloads, and parses the response. +func (c *DigitalOceanClient) sendRequest(ctx context.Context, method, endpoint string, payload interface{}, result interface{}) error { + var body io.Reader + if payload != nil { + payloadBytes, err := json.Marshal(payload) + if err != nil { + return err + } + body = bytes.NewBuffer(payloadBytes) + } + + req, err := http.NewRequest(method, digitalOceanBaseURL+endpoint, body) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+c.Token) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + responseBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status: %s, response: %s", resp.Status, string(responseBody)) + } + + if result != nil { + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return err + } + } + + return nil +} + +// CreateXDropletsFromSnapshot creates multiple droplets from a snapshot. 
+func (c *DigitalOceanClient) CreateXDropletsFromSnapshot(ctx context.Context, name, snapshotID, region, size, sshKey string, count, startNumber int) ([]int, error) { + // Prepare the names for the droplets + names := make([]string, count) + for i := 0; i < count; i++ { + names[i] = name + "-" + strconv.Itoa(i+1+startNumber) + } + + // Create the payload + payload := RequestPayload{ + Names: names, + Region: region, + Size: size, + Image: snapshotID, + SSHKeys: []interface{}{sshKey}, + Backups: false, + IPv6: false, + Monitoring: false, + Tags: []string{"nTask", "worker"}, + } + + // Response structure for droplet creation + var dropletsResponse DigitalOceanResponse + + // Send POST request to create droplets + if err := c.sendRequest(ctx, "POST", "/droplets", payload, &dropletsResponse); err != nil { + return nil, err + } + + // Collect and return the droplet IDs + ids := make([]int, len(dropletsResponse.Droplets)) + for i, droplet := range dropletsResponse.Droplets { + ids[i] = droplet.ID + } + + return ids, nil +} + +// DeleteDroplet deletes a droplet by its ID. +func (c *DigitalOceanClient) DeleteDroplet(ctx context.Context, dropletID int) error { + // Send DELETE request to delete the droplet + return c.sendRequest(ctx, "DELETE", fmt.Sprintf("/droplets/%d", dropletID), nil, nil) +} + +// fetchResources fetches resources using a GET request and parses the response. +func (c *DigitalOceanClient) fetchResources(ctx context.Context, endpoint string, result interface{}) error { + // Reuse sendRequest for GET requests + return c.sendRequest(ctx, "GET", endpoint, nil, result) +} + +//----------------- + +// WaitForDropletCreation waits until a droplet with the given ID is created +// and returns its public IP address. 
+func (c *DigitalOceanClient) WaitForDropletCreation(ctx context.Context, dropletID int, verbose, debug bool) (string, error) { + for { + droplet, err := c.ListDropletByID(ctx, dropletID) + if err != nil { + if debug { + log.Println("Droplet with ID ", dropletID, ":", err) + } + } + + if debug { + log.Println("WaitForDropletCreation:", droplet) + } + + if droplet != nil && droplet.Status == "active" { + for _, network := range droplet.Networks.V4 { + if network.IPAddress != "" { + return network.IPAddress, nil + } + } + return "", errors.New("unable to find public IP address") + } + + select { + case <-time.After(10 * time.Second): + continue + case <-ctx.Done(): + return "", ctx.Err() + } + } +} + +// DeleteDropletsByPrefix deletes all Droplets with the specified prefix +func (c *DigitalOceanClient) DeleteDropletsByPrefix(ctx context.Context, prefix string) error { + droplets, err := c.ListDropletsByPrefix(ctx, prefix) + if err != nil { + return err + } + + for _, droplet := range droplets { + err := c.DeleteDroplet(ctx, droplet.ID) + if err != nil { + return fmt.Errorf("failed to delete droplet %d: %v", droplet.ID, err) + } + } + + // Introduce a short delay to wait for the deletion to complete + time.Sleep(10 * time.Second) + + return nil +} diff --git a/manager/database/DB.go b/manager/database/DB.go index 8411801..0945c6e 100644 --- a/manager/database/DB.go +++ b/manager/database/DB.go @@ -1,36 +1,72 @@ -// manager.go // Package database provides functions for managing database connections and executing SQL statements. 
package database import ( + "context" "database/sql" + "encoding/json" "fmt" "log" + "math/rand" "strings" + "time" + + "github.com/go-sql-driver/mysql" + globalstructs "github.com/r4ulcl/nTask/globalstructs" +) + +const ( + maxRetries = 3 + initialBackOff = 50 * time.Millisecond + defaultSelectLimit = 1000 + defaultHistoryLimit = 5000 + maxConcurrentGeneralDBOps = 1 + maxConcurrentInsertDBOps = 10 + dbConnMaxLifetime = 30 * time.Minute + dbMaxOpenConns = 50 + dbMaxIdleConns = 25 +) + +// semaphore to throttle concurrent calls to execWithRetry +var ( + insertSemaphore = make(chan struct{}, maxConcurrentInsertDBOps) + generalSemaphore = make(chan struct{}, maxConcurrentGeneralDBOps) ) +func init() { + // Seed the RNG for backoff jitter + rand.Seed(time.Now().UnixNano()) +} + var sqlInit = ` CREATE TABLE IF NOT EXISTS worker ( name VARCHAR(255) PRIMARY KEY, + DefaultThreads INT, IddleThreads INT, up BOOLEAN, - downCount INT + downCount INT, + updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP ); CREATE TABLE IF NOT EXISTS task ( ID VARCHAR(255) PRIMARY KEY, - command LONGTEXT, + notes LONGTEXT, + commands LONGTEXT, + files LONGTEXT, name TEXT, createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP, updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, executedAt TIMESTAMP NOT NULL DEFAULT '1970-01-01 00:00:01', - status VARCHAR(255), + status VARCHAR(255), + duration INT DEFAULT 0, workerName VARCHAR(255), username VARCHAR(255), priority INT DEFAULT 0, + timeout INT DEFAULT 0, callbackURL TEXT, - callbackToken TEXT + callbackToken TEXT, + INDEX idx_status (status) ); ` @@ -38,56 +74,108 @@ CREATE TABLE IF NOT EXISTS task ( // It takes the username, password, host, port, and database name as input. // It returns a pointer to the sql.DB object and an error if the connection fails. func ConnectDB(username, password, host, port, database string, verbose, debug bool) (*sql.DB, error) { - // Create a connection string. 
- dataSourceName := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", username, password, host, port, database) - + dsn := fmt.Sprintf( + "%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&clientFoundRows=true", + username, + password, + host, + port, + database, + ) if debug { - log.Println("DB ConnectDB - dataSourceName", dataSourceName) + log.Println("DB ConnectDB - DSN:", dsn) } - // Open a new connection to the MySQL database. - db, err := sql.Open("mysql", dataSourceName) + + db, err := sql.Open("mysql", dsn) if err != nil { return nil, err } + // pool sizing + db.SetMaxOpenConns(dbMaxOpenConns) + db.SetMaxIdleConns(dbMaxIdleConns) + db.SetConnMaxLifetime(dbConnMaxLifetime) - // Check if the connection is successful. - err = db.Ping() - if err != nil { + if err := db.Ping(); err != nil { return nil, err } - // Initialize the database structure from SQL file. - err = initFromVar(db, verbose, debug) - if err != nil { - log.Fatal(err) + if err := initFromVar(db, verbose, debug); err != nil { + return nil, err } - return db, nil } -// initFromFile initializes the database structure by executing SQL statements from a file. -// It takes a pointer to the sql.DB object and the file path as input. -// It returns an error if the initialization fails. func initFromVar(db *sql.DB, verbose, debug bool) error { - // Split the content of the SQL file into individual statements - sqlStatements := strings.Split(string(sqlInit), ";") + stmts := strings.Split(sqlInit, ";") + if verbose || debug { + log.Println("initFromVar: applying schema") + } + for _, s := range stmts { + s = strings.TrimSpace(s) + if s == "" { + continue + } + if _, err := db.Exec(s); err != nil { + return fmt.Errorf("initFromVar executing %q: %w", s, err) + } + } + return nil +} + +// execWithRetry wraps db.Exec to retry on MySQL deadlock (Error 1213). 
+func execWithRetry(db *sql.DB, isInsert bool, query string, args ...interface{}) (sql.Result, error) { + + // pick the semaphore based on the isInsert flag + dbSemaphore := generalSemaphore + if isInsert { + dbSemaphore = insertSemaphore + } - // Execute each SQL statement - for _, statement := range sqlStatements { - // Trim leading and trailing whitespaces - sqlStatement := strings.TrimSpace(statement) + dbSemaphore <- struct{}{} + defer func() { <-dbSemaphore }() - // Skip empty statements - if sqlStatement == "" { + var err error + backOff := initialBackOff + for i := 0; i < maxRetries; i++ { + var res sql.Result + // Attach a short context so the query does not block forever if the connection freezes. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + res, err = db.ExecContext(ctx, query, args...) + cancel() + if err == nil { + return res, nil + } + if merr, ok := err.(*mysql.MySQLError); ok && merr.Number == 1213 { // deadlock found + time.Sleep(backOff) + backOff *= 2 continue } + return nil, err + } + return nil, fmt.Errorf("deadlock after %d retries for query %q: %w", maxRetries, query, err) +} - // Execute the SQL statement - _, err := db.Exec(sqlStatement) - if err != nil { - return err - } +// serializeToJSON marshals a slice into a JSON string. +func serializeToJSON(v any) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err } + return string(b), nil +} - return nil +// prepareTaskQuery prepare task insertion or update in the database. 
+func prepareTaskQuery(task globalstructs.Task, verbose, debug bool) (commandJSON, filesJSON string, err error) { + commandJSON, err = serializeToJSON(task.Commands) + if err != nil { + return "", "", err + } + filesJSON, err = serializeToJSON(task.Files) + if err != nil { + return "", "", err + } + if verbose || debug { + log.Printf("prepareTaskQuery: filesJSON=%s commandJSON=%s", filesJSON, commandJSON) + } + return } diff --git a/manager/database/DBtask.go b/manager/database/DBtask.go index fa52130..137bf55 100644 --- a/manager/database/DBtask.go +++ b/manager/database/DBtask.go @@ -6,566 +6,352 @@ import ( "fmt" "log" "net/http" + "net/url" "strconv" "strings" - "sync" globalstructs "github.com/r4ulcl/nTask/globalstructs" ) -// AddTask adds a task to the database -func AddTask(db *sql.DB, task globalstructs.Task, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Convert []command to string and insert - structJSON, err := json.Marshal(task.Commands) - if err != nil { - return err - } - commandJSON := string(structJSON) +// AddTask adds a task to the database. 
+func AddTask(db *sql.DB, task globalstructs.Task, verbose, debug bool) error { + const q = `INSERT INTO task + (ID, notes, commands, files, name, status, duration, WorkerName, username, priority, timeout, callbackURL, callbackToken) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` - // Insert the JSON data into the MySQL table - _, err = db.Exec("INSERT INTO task (ID, command, name, status, WorkerName, username, priority, callbackURL, callbackToken) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", - task.ID, commandJSON, task.Name, task.Status, task.WorkerName, task.Username, task.Priority, task.CallbackURL, task.CallbackToken) + cmdJSON, fileJSON, err := prepareTaskQuery(task, verbose, debug) if err != nil { - if debug { - log.Println("DB Error DBTask AddTask: ", err) - } return err } + if _, err = execWithRetry(db, true, q, + task.ID, task.Notes, cmdJSON, fileJSON, task.Name, task.Status, + task.Duration, task.WorkerName, task.Username, task.Priority, + task.Timeout, task.CallbackURL, task.CallbackToken); err != nil { + return fmt.Errorf("AddTask: %w", err) + } return nil } // UpdateTask updates all fields of a task in the database. 
-func UpdateTask(db *sql.DB, task globalstructs.Task, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) +func UpdateTask(db *sql.DB, task globalstructs.Task, verbose, debug bool) error { + if debug { + log.Println("UpdateTask1:", task) + } + + const q = `UPDATE task SET + notes=?, commands=?, files=?, name=?, status=?, duration=?, + WorkerName=?, priority=?, timeout=?, callbackURL=?, callbackToken=?, updatedAt = NOW() + WHERE ID=?` - // Convert []command to string and insert - structJSON, err := json.Marshal(task.Commands) + cmdJSON, fileJSON, err := prepareTaskQuery(task, verbose, debug) if err != nil { return err } - commandJSON := string(structJSON) - // Update all fields in the MySQL table - _, err = db.Exec("UPDATE task SET command=?, name=?, status=?, WorkerName=?, priority=?, callbackURL=?, callbackToken=? WHERE ID=?", - commandJSON, task.Name, task.Status, task.WorkerName, task.Priority, task.CallbackURL, task.CallbackToken, task.ID) + res, err := execWithRetry(db, false, q, + task.Notes, cmdJSON, fileJSON, task.Name, task.Status, task.Duration, + task.WorkerName, task.Priority, task.Timeout, task.CallbackURL, + task.CallbackToken, task.ID, + ) if err != nil { - if debug { - log.Println("DB Error DBTask UpdateTask: ", err) - } - return err + return fmt.Errorf("UpdateTask error: %w", err) + } + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("UpdateTask: no task with ID %s found (possible race)", task.ID) } + if debug { + log.Println("UpdateTask2: ", task) + } return nil } // RmTask deletes a task from the database. -func RmTask(db *sql.DB, id string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Worker exists, proceed with deletion - sqlStatement := "DELETE FROM task WHERE ID LIKE ?" 
- if debug {
- log.Println("DB Delete ID: ", id)
- }
- result, err := db.Exec(sqlStatement, id)
+func RmTask(db *sql.DB, id string, verbose, debug bool) error {
+ const q = `DELETE FROM task WHERE ID = ?`
+ res, err := execWithRetry(db, false, q, id)
 if err != nil {
 return err
 }
-
- a, _ := result.RowsAffected()
-
- if a < 1 {
- return fmt.Errorf("task not found")
+ if n, _ := res.RowsAffected(); n == 0 {
+ return fmt.Errorf("RmTask: task %s not found", id)
 }
-
 return nil
 }
 
-// GetTasks gets tasks with URL params as filter.
-func GetTasks(r *http.Request, db *sql.DB, verbose, debug bool) ([]globalstructs.Task, error) {
- queryParams := r.URL.Query()
-
- sql := "SELECT ID, command, name, createdAt, updatedAt, executedAt, status, workerName, username, priority, callbackURL, callbackToken FROM task WHERE 1=1 "
-
- // Add filters for each parameter if provided
- if ID := queryParams.Get("ID"); ID != "" {
- sql += fmt.Sprintf(" AND ID LIKE '%s'", ID)
- }
-
- if command := queryParams.Get("command"); command != "" {
- sql += fmt.Sprintf(" AND command LIKE '%s'", command)
- }
-
- if name := queryParams.Get("name"); name != "" {
- sql += fmt.Sprintf(" AND name LIKE '%s'", name)
- }
-
- if createdAt := queryParams.Get("createdAt"); createdAt != "" {
- sql += fmt.Sprintf(" AND createdAt LIKE '%s'", createdAt)
- }
-
- if updatedAt := queryParams.Get("executedAt"); updatedAt != "" {
- sql += fmt.Sprintf(" AND executedAt LIKE '%s'", updatedAt)
- }
-
- if updatedAt := queryParams.Get("updatedAt"); updatedAt != "" {
- sql += fmt.Sprintf(" AND updatedAt LIKE '%s'", updatedAt)
- }
-
- if executedAt := queryParams.Get("executedAt"); executedAt != "" {
- sql += fmt.Sprintf(" AND executedAt LIKE '%s'", executedAt)
- }
-
- if status := queryParams.Get("status"); status != "" {
- sql += fmt.Sprintf(" AND status = '%s'", status)
+// buildFiltersWithParams constructs SQL filters using query parameters safely. 
+func buildFiltersWithParams(query url.Values) (string, []interface{}) { + var ( + filters []string + args []interface{} + ) + add := func(key, cond string) { + if v := query.Get(key); v != "" { + filters = append(filters, cond) + args = append(args, v) + } } + add("ID", "ID LIKE ?") + add("notes", "notes LIKE ?") + add("commands", "commands LIKE ?") + add("files", "files LIKE ?") + add("name", "name LIKE ?") + add("createdAt", "createdAt LIKE ?") + add("updatedAt", "updatedAt LIKE ?") + add("executedAt", "executedAt LIKE ?") + add("status", "status = ?") + add("duration", "duration = ?") + add("workerName", "workerName LIKE ?") + add("username", "username LIKE ?") + add("priority", "priority = ?") + add("timeout", "timeout = ?") + add("callbackURL", "callbackURL = ?") + add("callbackToken", "callbackToken = ?") + return strings.Join(filters, " AND "), args +} - if workerName := queryParams.Get("workerName"); workerName != "" { - sql += fmt.Sprintf(" AND workerName LIKE '%s'", workerName) +func buildOrderByAndLimit(page, limit int) (string, int, int) { + if page < 1 { + page = 1 } - - if username := queryParams.Get("username"); username != "" { - sql += fmt.Sprintf(" AND username LIKE '%s'", username) + if limit < 1 { + limit = defaultSelectLimit } + offset := (page - 1) * limit + return " ORDER BY priority DESC, createdAt ASC", limit, offset +} - if priority := queryParams.Get("priority"); priority != "" { - sql += fmt.Sprintf(" AND priority = '%s'", priority) - } +// GetTasks retrieves tasks from the database using URL parameters as filters. 
+func GetTasks(r *http.Request, db *sql.DB, verbose, debug bool) ([]globalstructs.Task, error) { + queryParams := r.URL.Query() + filters, args := buildFiltersWithParams(queryParams) + orderBy, limit, offset := buildOrderByAndLimit(getInt(queryParams, "page", 1), getInt(queryParams, "limit", defaultSelectLimit)) - if callbackURL := queryParams.Get("callbackURL"); callbackURL != "" { - sql += fmt.Sprintf(" AND callbackURL = '%s'", callbackURL) + sqlStr := "SELECT ID, notes, commands, files, name, createdAt, updatedAt, executedAt, status, duration, WorkerName, username, priority, timeout, callbackURL, callbackToken FROM task WHERE 1=1" + if filters != "" { + sqlStr += " AND " + filters } + sqlStr += orderBy + " LIMIT ? OFFSET ?" + args = append(args, limit, offset) - if callbackToken := queryParams.Get("callbackToken"); callbackToken != "" { - sql += fmt.Sprintf(" AND callbackToken = '%s'", callbackToken) + if debug { + log.Println("GetTasks SQL:", sqlStr) + log.Println("Args:", args) } + return getTasksSQL(sqlStr, args, db, verbose, debug) +} - sql += " ORDER BY priority DESC, createdAt ASC " - - // set limit and page - page := 1 // Default page number - if pageStr := queryParams.Get("page"); pageStr != "" { - page, _ = strconv.Atoi(pageStr) - if page < 1 { - page = 1 +func getInt(v url.Values, key string, d int) int { + if s := v.Get(key); s != "" { + if n, err := strconv.Atoi(s); err == nil && n > 0 { + return n } } + return d +} - limit := 1000 // Default limit - - if limitStr := queryParams.Get("limit"); limitStr != "" { - limit, _ = strconv.Atoi(limitStr) +func getPage(queryParams url.Values) int { + pageStr := queryParams.Get("page") + page, err := strconv.Atoi(pageStr) + if err != nil || page < 1 { + return 1 } + return page +} - offset := (page - 1) * limit - - sql += fmt.Sprintf(" LIMIT %d OFFSET %d;", limit, offset) - - sql += ";" - - if debug { - log.Println("GetTasks sql", sql) +func getLimit(queryParams url.Values) int { + limitStr := 
queryParams.Get("limit") + limit, err := strconv.Atoi(limitStr) + if err != nil || limit < 1 { + return 1000 } - - return GetTasksSQL(sql, db, verbose, debug) + return limit } -// GetTasksPending gets only tasks with status pending +// GetTasksPending Get Tasks with status = Pending func GetTasksPending(limit int, db *sql.DB, verbose, debug bool) ([]globalstructs.Task, error) { - sql := "SELECT ID, command, name, createdAt, updatedAt, executedAt, status, WorkerName, username, " + - "priority, callbackURL, callbackToken FROM task WHERE status = 'pending' ORDER BY priority DESC, createdAt ASC limit %d" - formattedSQL := fmt.Sprintf(sql, limit) - return GetTasksSQL(formattedSQL, db, verbose, debug) + if limit <= 0 { + limit = 1 + } + const q = `SELECT ID, notes, commands, files, name, createdAt, updatedAt, executedAt, status, duration, WorkerName, username, priority, timeout, callbackURL, callbackToken + FROM task WHERE status = 'pending' ORDER BY priority DESC, createdAt ASC LIMIT ?` + return getTasksSQL(q, []interface{}{limit}, db, verbose, debug) } -// GetTasksSQL gets tasks by passing the SQL query in sql param -func GetTasksSQL(sql string, db *sql.DB, verbose, debug bool) ([]globalstructs.Task, error) { - var tasks []globalstructs.Task - - // Query all tasks from the task table - rows, err := db.Query(sql) +// getTasksSQL executes a parameterized SQL query to fetch tasks. +func getTasksSQL(sqlQuery string, args []interface{}, db *sql.DB, verbose, debug bool) ([]globalstructs.Task, error) { + rows, err := db.Query(sqlQuery, args...) 
if err != nil { if debug { - log.Println("DB Error DBTask GetTasksSQL: ", sql, err) + log.Println("getTasksSQL query error:", err) } - return tasks, err + return nil, err } defer rows.Close() - // Iterate over the rows + var tasks []globalstructs.Task for rows.Next() { - // Declare variables to store JSON data - var ID string - var commandAux string - var name string - var createdAt string - var updatedAt string - var executedAt string - var status string - var workerName string - var username string - var priority int - var callbackURL string - var callbackToken string - - // Scan the values from the row into variables - err := rows.Scan(&ID, &commandAux, &name, &createdAt, &updatedAt, &executedAt, &status, &workerName, &username, &priority, &callbackURL, &callbackToken) - if err != nil { - if debug { - log.Println("DB Error DBTask GetTasksSQL: ", err) - } - return tasks, err + var ( + t globalstructs.Task + commandsStr string + filesStr string + ) + if err = rows.Scan(&t.ID, &t.Notes, &commandsStr, &filesStr, &t.Name, + &t.CreatedAt, &t.UpdatedAt, &t.ExecutedAt, &t.Status, &t.Duration, + &t.WorkerName, &t.Username, &t.Priority, &t.Timeout, &t.CallbackURL, &t.CallbackToken); err != nil { + return nil, err } - - // Data into a Task struct - var task globalstructs.Task - task.ID = ID - - // String to []struct - var command []globalstructs.Command - err = json.NewDecoder(strings.NewReader(commandAux)).Decode(&command) - if err != nil { - return tasks, err + if err = json.Unmarshal([]byte(commandsStr), &t.Commands); err != nil { + return nil, fmt.Errorf("parse commands: %w", err) } - task.Commands = command - task.Name = name - task.CreatedAt = createdAt - task.UpdatedAt = updatedAt - task.ExecutedAt = executedAt - task.Status = status - task.WorkerName = workerName - task.Username = username - task.Priority = priority - task.CallbackURL = callbackURL - task.CallbackToken = callbackToken - - // Append the task to the slice - tasks = append(tasks, task) - } - - // 
Check for errors from iterating over rows - if err := rows.Err(); err != nil { - if debug { - log.Println("DB Error DBTask GetTasksSQL: ", err) + if err = json.Unmarshal([]byte(filesStr), &t.Files); err != nil { + return nil, fmt.Errorf("parse files: %w", err) } - return tasks, err + tasks = append(tasks, t) + } + if err = rows.Err(); err != nil { + return nil, err } - return tasks, nil } // GetTask gets task filtered by id func GetTask(db *sql.DB, id string, verbose, debug bool) (globalstructs.Task, error) { - var task globalstructs.Task - // Retrieve the JSON data from the MySQL table - var commandAux string - var name string - var createdAt string - var updatedAt string - var executedAt string - var status string - var workerName string - var username string - var priority int - var callbackURL string - var callbackToken string - - err := db.QueryRow("SELECT ID, createdAt, updatedAt, executedAt, command, name, status, WorkerName, username, priority, callbackURL, callbackToken FROM task WHERE ID = ?", - id).Scan(&id, &createdAt, &updatedAt, &executedAt, &commandAux, &name, &status, &workerName, &username, &priority, &callbackURL, &callbackToken) + const q = `SELECT ID, notes, createdAt, updatedAt, executedAt, commands, files, name, status, duration, WorkerName, + username, priority, timeout, callbackURL, callbackToken + FROM task WHERE ID = ?` + var ( + t globalstructs.Task + commandsStr string + filesStr string + ) + err := db.QueryRow(q, id).Scan(&t.ID, &t.Notes, &t.CreatedAt, &t.UpdatedAt, &t.ExecutedAt, &commandsStr, &filesStr, + &t.Name, &t.Status, &t.Duration, &t.WorkerName, &t.Username, &t.Priority, &t.Timeout, &t.CallbackURL, &t.CallbackToken) if err != nil { - if debug { - log.Println("DB Error DBTask GetTask: ", err) - } - return task, err + return t, err } - - // Data back to a struct - task.ID = id - // String to []struct - var command []globalstructs.Command - err = json.NewDecoder(strings.NewReader(commandAux)).Decode(&command) - if err != nil { - 
return task, err - } - task.Commands = command - task.Name = name - task.CreatedAt = createdAt - task.UpdatedAt = updatedAt - task.ExecutedAt = executedAt - task.Status = status - task.WorkerName = workerName - task.Username = username - task.Priority = priority - task.CallbackURL = callbackURL - task.CallbackToken = callbackToken - - return task, nil -} - -// GetTaskExecutedAt -func GetTaskExecutedAt(db *sql.DB, id string, verbose, debug bool) (string, error) { - // Retrieve the workerName from the task table - var executedAt string - err := db.QueryRow("SELECT executedAt FROM task WHERE ID = ?", - id).Scan(&executedAt) - if err != nil { - if debug { - log.Println("DB Error DBTask GetTaskExecutedAt: ", err) - } - return executedAt, err + if err = json.Unmarshal([]byte(commandsStr), &t.Commands); err != nil { + return t, fmt.Errorf("parse commands: %w", err) } - - return executedAt, nil -} - -// GetTaskWorker gets task workerName from an ID -// This is the worker executing the task -func GetTaskWorker(db *sql.DB, id string, verbose, debug bool) (string, error) { - // Retrieve the workerName from the task table - var workerName string - err := db.QueryRow("SELECT WorkerName FROM task WHERE ID = ?", - id).Scan(&workerName) - if err != nil { - if debug { - log.Println("DB Error DBTask GetTaskWorker: ", err) - } - return workerName, err + if err = json.Unmarshal([]byte(filesStr), &t.Files); err != nil { + return t, fmt.Errorf("parse files: %w", err) } - - return workerName, nil + return t, nil } -// SetTasksWorkerFailed set to failed all task running worker workerName -func SetTasksWorkerFailed(db *sql.DB, workerName string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE task SET status = 'failed' WHERE workerName = ? 
AND status = 'running' ", workerName) +// Generic helper function to execute a database update +func executeDBUpdate(db *sql.DB, query string, args []interface{}, verbose, debug bool, taskName string) error { + _, err := execWithRetry(db, false, query, args...) + if err != nil { - if debug { - log.Println("DB Error DBTask SetTasksWorkerFailed: ", err) + if debug || verbose { + log.Printf("DB Error %s: %v", taskName, err) } return err } return nil } -// SetTasksWorkerInvalid set to invalid all task running worker workerName -func SetTasksWorkerInvalid(db *sql.DB, workerName string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE task SET status = 'invalid' WHERE workerName = ? AND status = 'running' ", workerName) - if err != nil { - if debug { - log.Println("DB Error DBTask SetTasksWorkerInvalid: ", err) - } - return err - } - return nil +// SetTasksWorkerPending Function to set tasks worker status to 'pending' +func SetTasksWorkerPending(db *sql.DB, workerName string, verbose, debug bool) error { + query := "UPDATE task SET status = 'pending', updatedAt = NOW() WHERE workerName = ? AND status = 'running'" + args := []interface{}{workerName} + return executeDBUpdate(db, query, args, verbose, debug, "DBTask: SetTasksWorkerPending") } -// SetTasksWorkerPending set all task of worker to pending because failed -func SetTasksWorkerPending(db *sql.DB, workerName string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE task SET status = 'pending' WHERE workerName = ? 
AND status = 'running' ", workerName) +// SetTaskExecutedAtNow Function to set task's executedAt timestamp to now() +func SetTaskExecutedAtNow(db *sql.DB, id string, verbose, debug bool) error { + res, err := execWithRetry(db, false, "UPDATE task SET executedAt = NOW(), updatedAt = NOW() WHERE ID = ?", id) if err != nil { - if debug { - log.Println("DB Error DBTask: ", err) - } return err } + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("SetTaskExecutedAtNow: task %s not found", id) + } return nil } -// SetTaskWorkerName saves the worker name of the task in the database -func SetTaskWorkerName(db *sql.DB, id, workerName string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the workerName column of the task table for the given ID - _, err := db.Exec("UPDATE task SET workerName = ? WHERE ID = ?", workerName, id) - if err != nil { - if debug { - log.Println("DB Error DBTask SetTaskWorkerName: ", err) - } - return err - } - return nil +func clearWorkerName(db *sql.DB, workerName string, verbose, debug bool) error { + _, err := execWithRetry(db, false, "UPDATE task SET WorkerName = '', updatedAt = NOW() WHERE WorkerName = ?", workerName) + return err } -// SetTasksWorkerEmpty remove the worker name of the task in the database -func SetTasksWorkerEmpty(db *sql.DB, workerName string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the workerName column of the task table for the given ID - _, err := db.Exec("UPDATE task SET workerName = '' WHERE workerName = ?", workerName) +func SetTaskStatus(db *sql.DB, id, status string, verbose, debug bool) error { + res, err := execWithRetry(db, false, "UPDATE task SET status = ?, updatedAt = NOW() WHERE ID = ?", status, id) if err != nil { - if debug { - log.Println("DB Error DBTask 
SetTaskWorkerName: ", err) - } return err } + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("SetTaskStatus: task %s not found", id) + } + if debug { + log.Println("SetTaskStatus", id, status) + } return nil } -// SetTaskStatus saves the status of the task in the database -func SetTaskStatus(db *sql.DB, id, status string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the status column of the task table for the given ID - _, err := db.Exec("UPDATE task SET status = ? WHERE ID = ?", status, id) - if err != nil { - if debug { - log.Println("DB Error DBTask SetTaskStatus: ", err) - } - return err +// ---------------- Statistics & housekeeping ------------------------------ + +func GetCountByStatus(status string, db *sql.DB, verbose, debug bool) (int, error) { + const q = `SELECT COUNT(*) FROM task WHERE status = ?` + var c int + if err := db.QueryRow(q, status).Scan(&c); err != nil { + return 0, err } - return nil + return c, nil } -// SetTaskStatusIfPending saves the status of the task in the database if current is pending -func SetTaskStatusIfPending(db *sql.DB, id, status string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the status column of the task table for the given ID - _, err := db.Exec("UPDATE task SET status = ? WHERE ID = ? and status = 'pending'", status, id) - if err != nil { - if debug { - log.Println("DB Error DBTask SetTaskStatusIfPending: ", err) - } +// DeleteMaxEntriesHistory keeps only the newest rows whose status = 'done'. 
+func DeleteMaxEntriesHistory(db *sql.DB, maxEntries int, table string, verbose, debug bool) error { + if maxEntries <= 0 { + maxEntries = defaultHistoryLimit + } + var total int + if err := db.QueryRow("SELECT COUNT(*) FROM " + table + " WHERE status = 'done'").Scan(&total); err != nil { return err } - return nil -} - -// SetTaskStatusIfPending saves the status of the task in the database if current is pending -func SetTasksStatusIfRunning(db *sql.DB, status string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the status column of the task table for the given ID - _, err := db.Exec("UPDATE task SET status = ? WHERE status = 'running'", status) - if err != nil { - if debug { - log.Println("DB Error DBTask SetTasksStatusIfRunning: ", err) - } + if total <= maxEntries { + return nil // nothing to do + } + del := total - maxEntries + cte := fmt.Sprintf(`WITH old AS ( + SELECT ID FROM %s WHERE status = 'done' ORDER BY createdAt ASC LIMIT ?) 
+ DELETE FROM %s WHERE ID IN (SELECT ID FROM old)`, table, table) + if _, err := execWithRetry(db, false, cte, del); err != nil { return err } + if verbose || debug { + log.Printf("DeleteMaxEntriesHistory: trimmed %d → %d rows in %s", total, maxEntries, table) + } return nil } -// SetTaskExecutedAt saves current time as executedAt -func SetTaskExecutedAtNow(db *sql.DB, id string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the status column of the task table for the given ID - _, err := db.Exec("UPDATE task SET executedAt = now() WHERE ID = ?", id) +// setTasksWorkerEmpty remove the worker name of the task in the database +func setTasksWorkerEmpty(db *sql.DB, workerName string, verbose, debug bool) error { + + // Update the workerName column of the task table for the given ID + query := "UPDATE task SET workerName = '', updatedAt = NOW() WHERE workerName = ?" + _, err := execWithRetry(db, false, query, workerName) if err != nil { - if debug { - log.Println("DB Error DBTask SetTaskExecutedAtNow: ", err) + if debug || verbose { + log.Println("DB Error DBTask SetTaskWorkerName: ", err) } return err } return nil } -// SetTaskExecutedAt saves current time as executedAt -func SetTaskExecutedAt(executedAt string, db *sql.DB, id string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) +// SetTasksStatusIfStatus saves the status of the task in the database if current status is currentStatus +func SetTasksStatusIfStatus(currentStatus string, db *sql.DB, newStatus string, verbose, debug bool) error { + // Update the status column of the task table for the given ID - _, err := db.Exec("UPDATE task SET executedAt = ? WHERE ID = ?", executedAt, id) + query := "UPDATE task SET status = ?, updatedAt = NOW() WHERE status = ?" 
+ _, err := execWithRetry(db, false, query, newStatus, currentStatus) if err != nil { - if debug { - log.Println("DB Error DBTask SetTaskExecutedAt: ", err) + if debug || verbose { + log.Println("DB Error DBTask SetTasksStatusIfRunning: ", err) } return err } return nil } - -// Count - -func GetPendingCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM task where status = 'pending'" - - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -func GetRunningCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM task where status = 'running'" - - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -func GetDoneCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM task where status = 'done'" - - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -func GetFailedCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM task where status = 'failed'" - - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -func GetDeletedCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM task where status = 'deleted'" - - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} diff --git a/manager/database/DBworkers.go b/manager/database/DBworkers.go index 83a9247..22f5498 100644 --- a/manager/database/DBworkers.go +++ b/manager/database/DBworkers.go @@ 
-4,382 +4,231 @@ import ( "database/sql" "fmt" "log" - "strconv" - "sync" globalstructs "github.com/r4ulcl/nTask/globalstructs" ) -// AddWorker adds a worker to the database. -func AddWorker(db *sql.DB, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Insert the JSON data into the MySQL table - _, err := db.Exec("INSERT INTO worker (name, IddleThreads, up, downCount)"+ - " VALUES (?, ?, ?, ?)", - worker.Name, worker.IddleThreads, worker.UP, worker.DownCount) - if err != nil { - return err +// ------------------------------------------------------------------------- +// Inserts / deletes +// ------------------------------------------------------------------------- + +// AddWorker inserts a new worker row. +func AddWorker(db *sql.DB, w *globalstructs.Worker, verbose, debug bool) error { + const q = `INSERT INTO worker (name, defaultThreads, iddleThreads, up, downCount, updatedAt) + VALUES (?, ?, ?, ?, ?, NOW())` + if _, err := execWithRetry(db, true, q, w.Name, w.DefaultThreads, w.IddleThreads, w.UP, w.DownCount); err != nil { + return fmt.Errorf("AddWorker: %w", err) } return nil } -// RmWorkerName deletes a worker by its name. -func RmWorkerName(db *sql.DB, name string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Worker exists, proceed with deletion - sqlStatement := "DELETE FROM worker WHERE name = ?" - log.Println("DB Delete worker Name: ", name) - result, err := db.Exec(sqlStatement, name) +// RmWorkerName deletes a worker by name and detaches its running tasks. 
+func RmWorkerName(db *sql.DB, name string, verbose, debug bool) error { + const del = `DELETE FROM worker WHERE name = ?` + res, err := execWithRetry(db, false, del, name) if err != nil { - return err + return fmt.Errorf("RmWorkerName: %w", err) } - - a, _ := result.RowsAffected() - - if a < 1 { - return fmt.Errorf("{\"error\": \"worker not found\"}") + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("RmWorkerName: worker %s not found", name) } - - // Set workers task to any worker - err = SetTasksWorkerEmpty(db, name, verbose, debug, wg) - if err != nil { + // orphan tasks → pending + if err := SetTasksWorkerPending(db, name, verbose, debug); err != nil { + return err + } + if err := clearWorkerName(db, name, verbose, debug); err != nil { return err } - return nil } -// GetWorkers retrieves all workers from the database. +// ------------------------------------------------------------------------- +// Select helpers +// ------------------------------------------------------------------------- + +const workerSelectCols = `name, defaultThreads, iddleThreads, up, downCount, updatedAt` + +// GetWorkers returns every row in the worker table. func GetWorkers(db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { - // Slice to store all workers - var workers []globalstructs.Worker + q := "SELECT " + workerSelectCols + " FROM worker" + return getWorkerSQL(q, db, verbose, debug) +} - // Query all workers from the worker table - rows, err := db.Query("SELECT name, IddleThreads, up, downCount FROM worker") +// GetWorker fetches a single worker by name. +func GetWorker(db *sql.DB, name string, verbose, debug bool) (globalstructs.Worker, error) { + q := "SELECT " + workerSelectCols + " FROM worker WHERE name = ?" 
+ rows, err := getWorkerSQL(q, db, verbose, debug, name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err + return globalstructs.Worker{}, err } - defer rows.Close() - - // Iterate over the rows - for rows.Next() { - // Declare variables to store JSON data - var name string - var IddleThreads int - var up bool - var downCount int - - // Scan the values from the row into variables - err := rows.Scan(&name, &IddleThreads, &up, &downCount) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err - } - - // Data into a Worker struct - var worker globalstructs.Worker - worker.Name = name - worker.IddleThreads = IddleThreads - worker.UP = up - worker.DownCount = downCount - - // Append the worker to the slice - workers = append(workers, worker) + if len(rows) == 0 { + return globalstructs.Worker{}, sql.ErrNoRows } + return rows[0], nil +} - // Check for errors from iterating over rows - if err := rows.Err(); err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err - } +// GetWorkerIddle returns workers that are up and have spare threads. +func GetWorkerIddle(db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { + q := "SELECT " + workerSelectCols + " FROM worker WHERE up = TRUE AND iddleThreads > 0 ORDER BY RAND()" + return getWorkerSQL(q, db, verbose, debug) +} - return workers, nil +// GetWorkerUP returns all workers with up = true. +func GetWorkerUP(db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { + q := "SELECT " + workerSelectCols + " FROM worker WHERE up = TRUE" + return getWorkerSQL(q, db, verbose, debug) } -// GetWorker retrieves a worker from the database by its name. 
-func GetWorker(db *sql.DB, name string, verbose, debug bool) (globalstructs.Worker, error) { - var worker globalstructs.Worker - // Retrieve the JSON data from the MySQL table - var name2 string - var IddleThreads int - var up bool - var downCount int +// ------------------------------------------------------------------------- +// Updates +// ------------------------------------------------------------------------- - err := db.QueryRow("SELECT name, IddleThreads, up, downCount FROM worker WHERE name = ?", - name).Scan( - &name2, &IddleThreads, &up, &downCount) +// UpdateWorker replaces every mutable column of the given worker. +func UpdateWorker(db *sql.DB, w *globalstructs.Worker, verbose, debug bool) error { + const q = `UPDATE worker SET defaultThreads = ?, iddleThreads = ?, up = ?, downCount = ?, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, w.DefaultThreads, w.IddleThreads, w.UP, w.DownCount, w.Name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return worker, err + return fmt.Errorf("UpdateWorker: %w", err) } - - // Data into the struct - worker.Name = name - worker.IddleThreads = IddleThreads - worker.UP = up - worker.DownCount = downCount - - return worker, nil + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("UpdateWorker: worker %s not found", w.Name) + } + return nil } -// UpdateWorker updates the information of a worker in the database. -func UpdateWorker(db *sql.DB, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - // Update the JSON data in the MySQL table based on the worker's name - _, err := db.Exec("UPDATE worker SET"+ - " IddleThreads = ?, up = ?, downCount = ? WHERE name = ?", - worker.IddleThreads, worker.UP, worker.DownCount, worker.Name) +// SetWorkerUPto toggles the up column. 
+func SetWorkerUPto(db *sql.DB, name string, up bool, verbose, debug bool) error { + const q = `UPDATE worker SET up = ?, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, up, name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err + return fmt.Errorf("SetWorkerUPto: %w", err) } - return nil -} -// SetWorkerUPto sets the status of a worker to the specified value. -func SetWorkerUPto(up bool, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE worker SET up = ? WHERE name = ?", - up, worker.Name) + // RowsAffected==0 could mean “no matching row” OR “already in desired state” + n, err := res.RowsAffected() if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) + return fmt.Errorf("SetWorkerUPto (RowsAffected): %w", err) + } + if n == 0 { + // Check existence explicitly + var dummy int + err := db.QueryRow("SELECT 1 FROM worker WHERE name = ?", name).Scan(&dummy) + if err == sql.ErrNoRows { + return fmt.Errorf("SetWorkerUPto: worker %s not found", name) + } else if err != nil { + return fmt.Errorf("SetWorkerUPto (existence check): %w", err) } - return err + // row exists but was already up/down as requested → treat as success } if debug { - log.Println("DB Worker set to:", up, worker.Name) + log.Printf("SetWorkerUPto: worker %s up set to %t", name, up) } - return nil } -// SetWorkerworkingToString sets the status of a worker to the specified working value using the worker's name. -func SetIddleThreadsTo(IddleThreads int, db *sql.DB, worker string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - if debug { - log.Println("DB Set IddleThreads to", IddleThreads) - } - _, err := db.Exec("UPDATE worker SET IddleThreads = ? 
WHERE name = ?", - IddleThreads, worker) +// SetIddleThreadsTo sets the iddleThreads value. +func SetIddleThreadsTo(db *sql.DB, name string, idle int, verbose, debug bool) error { + const q = `UPDATE worker SET iddleThreads = ?, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, idle, name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err - } - - return nil -} - -// SetWorkerworkingToString sets the status of a worker to the specified working value using the worker's name. -func AddWorkerIddleThreads1(db *sql.DB, worker string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - if debug { - log.Println("DB AddWorkerIddleThreads1 worker name:", worker) + return fmt.Errorf("SetIddleThreadsTo: %w", err) } - _, err := db.Exec("UPDATE worker SET IddleThreads = IddleThreads + 1 WHERE name = ?;", - worker) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("SetIddleThreadsTo: worker %s not found", name) } return nil } -// SubtractWorkerIddleThreads1 -func SubtractWorkerIddleThreads1(db *sql.DB, worker string, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - if debug { - log.Println("DB SubtractWorkerIddleThreads1") - } - - _, err := db.Exec("UPDATE worker SET IddleThreads = CASE WHEN IddleThreads > 0 THEN IddleThreads - 1 "+ - "ELSE 0 END WHERE name = ?", worker) +// SubtractWorkerIddleThreads1 decrements iddleThreads by 1 if > 0. 
+func SubtractWorkerIddleThreads1(db *sql.DB, name string, verbose, debug bool) error { + const q = `UPDATE worker SET iddleThreads = CASE WHEN iddleThreads > 0 THEN iddleThreads - 1 ELSE 0 END, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err + return fmt.Errorf("SubtractWorkerIddleThreads1: %w", err) + } + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("SubtractWorkerIddleThreads1: worker %s not found", name) } return nil } -// GetWorkerIddle retrieves all workers that are iddle. -func GetWorkerIddle(db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { - sql := "SELECT name, IddleThreads, up, downCount FROM worker WHERE up = true AND IddleThreads > 0 ORDER BY RAND();" - return GetWorkerSQL(sql, db, verbose, debug) -} - -// GetWorkerUP retrieves all workers that are up. -func GetWorkerUP(db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { - sql := "SELECT name, IddleThreads, up, downCount FROM worker WHERE up = true;" - return GetWorkerSQL(sql, db, verbose, debug) -} - -// GetWorkerSQL retrieves workers information based on a SQL statement. 
-func GetWorkerSQL(sql string, db *sql.DB, verbose, debug bool) ([]globalstructs.Worker, error) { - // Slice to store all workers - var workers []globalstructs.Worker - - // Query all workers from the worker table - rows, err := db.Query(sql) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err - } - defer rows.Close() - - // Iterate over the rows - for rows.Next() { - // Declare variables to store JSON data - var name string - var IddleThreads int - var up bool - var downCount int - - // Scan the values from the row into variables - err := rows.Scan(&name, &IddleThreads, &up, &downCount) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err - } - - // Data into a Worker struct - var worker globalstructs.Worker - worker.Name = name +// Down‑count helpers ------------------------------------------------------- - worker.IddleThreads = IddleThreads - worker.UP = up - worker.DownCount = downCount - - // Append the worker to the slice - workers = append(workers, worker) - } - - // Check for errors from iterating over rows - if err := rows.Err(); err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return workers, err +func GetWorkerDownCount(db *sql.DB, name string, verbose, debug bool) (int, error) { + var dc int + if err := db.QueryRow("SELECT downCount FROM worker WHERE name = ?", name).Scan(&dc); err != nil { + return 0, err } - - return workers, nil + return dc, nil } -// GetWorkerCount get workers downCount by name (used to downCount until 3 to set down) -func GetWorkerDownCount(db *sql.DB, worker *globalstructs.Worker, verbose, debug bool) (int, error) { - var countS string - err := db.QueryRow("SELECT downCount FROM worker WHERE name = ?", - worker.Name).Scan(&countS) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return -1, err - } - downCount, err := strconv.Atoi(countS) +func SetWorkerDownCount(db *sql.DB, name 
string, count int, verbose, debug bool) error { + const q = `UPDATE worker SET downCount = ?, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, count, name) if err != nil { - return -1, err + return fmt.Errorf("SetWorkerDownCount: %w", err) } - - if debug { - log.Println("DB count worker:", worker.Name, "downCount:", downCount) + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("SetWorkerDownCount: worker %s not found", name) } - return downCount, nil + return nil } -// SetWorkerCount set worker downCount to downCount int -func SetWorkerDownCount(count int, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE worker SET downCount = ? WHERE name = ?", - count, worker.Name) +func AddWorkerDownCount(db *sql.DB, name string, verbose, debug bool) error { + const q = `UPDATE worker SET downCount = downCount + 1, updatedAt = NOW() WHERE name = ?` + res, err := execWithRetry(db, false, q, name) if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err + return fmt.Errorf("AddWorkerDownCount: %w", err) + } + if n, _ := res.RowsAffected(); n == 0 { + return fmt.Errorf("AddWorkerDownCount: worker %s not found", name) } - return nil } -// AddWorkerCount add 1 to worker downCount -func AddWorkerDownCount(db *sql.DB, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { - // Add to the WaitGroup when the goroutine starts and done when exits - defer wg.Done() - wg.Add(1) - _, err := db.Exec("UPDATE worker SET downCount = downCount + 1 WHERE name = ?", - worker.Name) - if err != nil { - if debug { - log.Println("DB Error DBworkers: ", err) - } - return err - } +// ------------------------------------------------------------------------- +// Simple aggregates +// 
------------------------------------------------------------------------- - return nil +func GetUpCount(db *sql.DB, verbose, debug bool) (int, error) { + return getBoolCount(db, true) } -func GetUpCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM worker where up = true" +func GetDownCount(db *sql.DB, verbose, debug bool) (int, error) { + return getBoolCount(db, false) +} - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) - if err != nil { +func getBoolCount(db *sql.DB, up bool) (int, error) { + query := "SELECT COUNT(*) FROM worker WHERE up = ?" + var c int + if err := db.QueryRow(query, up).Scan(&c); err != nil { return 0, err } - - return count, nil + return c, nil } -func GetDownCount(db *sql.DB, verbose, debug bool) (int, error) { - // Prepare the SQL query - query := "SELECT COUNT(*) FROM worker where up = false" +// ------------------------------------------------------------------------- +// Row‑mapper +// ------------------------------------------------------------------------- - // Execute the query - var count int - err := db.QueryRow(query).Scan(&count) +func getWorkerSQL(sqlStr string, db *sql.DB, verbose, debug bool, args ...interface{}) ([]globalstructs.Worker, error) { + rows, err := db.Query(sqlStr, args...) 
if err != nil { - return 0, err + if debug { + log.Println("getWorkerSQL query error:", err) + } + return nil, err } + defer rows.Close() - return count, nil + var workers []globalstructs.Worker + for rows.Next() { + var w globalstructs.Worker + if err = rows.Scan(&w.Name, &w.DefaultThreads, &w.IddleThreads, &w.UP, &w.DownCount, &w.UpdatedAt); err != nil { + return nil, err + } + workers = append(workers, w) + } + return workers, rows.Err() } diff --git a/manager/manager.go b/manager/manager.go index 2fdc5cc..8a8e24d 100644 --- a/manager/manager.go +++ b/manager/manager.go @@ -1,4 +1,4 @@ -// manager.go +// Package manager with manager main data package manager import ( @@ -17,16 +17,17 @@ import ( "github.com/gorilla/mux" "github.com/gorilla/websocket" "github.com/r4ulcl/nTask/manager/api" + "github.com/r4ulcl/nTask/manager/cloud" "github.com/r4ulcl/nTask/manager/database" sshtunnel "github.com/r4ulcl/nTask/manager/sshTunnel" "github.com/r4ulcl/nTask/manager/utils" httpSwagger "github.com/swaggo/http-swagger" ) -func loadManagerConfig(filename string, verbose, debug bool) (*utils.ManagerConfig, error) { - var config utils.ManagerConfig - if debug { - log.Println("Manager Loading manager config from file", filename) +// Helper function to load and parse JSON config files +func loadConfigFile[T any](filename string, verbose, debug bool, configType string) (*T, error) { + if debug || verbose { + log.Printf("Manager Loading %s config from file: %s", configType, filename) } // Validate filename @@ -36,7 +37,7 @@ func loadManagerConfig(filename string, verbose, debug bool) (*utils.ManagerConf // Check if file exists if _, err := os.Stat(filename); os.IsNotExist(err) { - return nil, fmt.Errorf("config file does not exist") + return nil, fmt.Errorf("config file does not exist: %s", filename) } content, err := os.ReadFile(filename) @@ -44,89 +45,77 @@ func loadManagerConfig(filename string, verbose, debug bool) (*utils.ManagerConf return nil, err } - // Use specific error 
message for json.Unmarshal failure + // Unmarshal the content into the generic type T + var config T err = json.Unmarshal(content, &config) if err != nil { - return nil, fmt.Errorf("error unmarshaling JSON: %w", err) + return nil, fmt.Errorf("error unmarshaling JSON for %s: %w", configType, err) } - // init WebSockets map - config.WebSockets = make(map[string]*websocket.Conn) - - // Return nil instead of &config when error occurs return &config, nil } -func loadManagerSSHConfig(filename string, verbose, debug bool) (*utils.ManagerSSHConfig, error) { - var configSSH utils.ManagerSSHConfig - if debug { - log.Println("Manager Loading manager config from file", filename) - } - - // Validate filename - if filename == "" { - return nil, errors.New("filename cannot be empty") - } - - // Check if file exists - if _, err := os.Stat(filename); os.IsNotExist(err) { - return nil, fmt.Errorf("config file does not exist") - } - - content, err := os.ReadFile(filename) +// Specific function to load nTask config +func loadManagerConfig(filename string, verbose, debug bool) (*utils.ManagerConfig, error) { + configFile, err := loadConfigFile[utils.ManagerConfig](filename, verbose, debug, "nTask") if err != nil { return nil, err } + // init WebSockets map + configFile.WebSockets = make(map[string]*websocket.Conn) + return configFile, nil +} - // Use specific error message for json.Unmarshal failure - err = json.Unmarshal(content, &configSSH) - if err != nil { - return nil, fmt.Errorf("error unmarshaling JSON: %w", err) - } +// Specific function to load SSH config +func loadManagerSSHConfig(filename string, verbose, debug bool) (*utils.ManagerSSHConfig, error) { + return loadConfigFile[utils.ManagerSSHConfig](filename, verbose, debug, "SSH") +} - return &configSSH, nil +// Specific function to load Cloud config +func loadManagerCloudConfig(filename string, verbose, debug bool) (*utils.ManagerCloudConfig, error) { + return loadConfigFile[utils.ManagerCloudConfig](filename, verbose, 
debug, "Cloud") } -func addHandleWorker(workers *mux.Router, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { +func addHandleWorker(workers *mux.Router, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex) { // worker workers.HandleFunc("", func(w http.ResponseWriter, r *http.Request) { - api.HandleWorkerGet(w, r, config, db, verbose, debug) + api.HandleWorkerGet(w, r, db, verbose, debug) }).Methods("GET") // get workers workers.HandleFunc("", func(w http.ResponseWriter, r *http.Request) { - api.HandleWorkerPost(w, r, config, db, verbose, debug, wg) + api.HandleWorkerPost(w, r, db, verbose, debug) }).Methods("POST") // add worker workers.HandleFunc("/websocket", func(w http.ResponseWriter, r *http.Request) { - api.HandleWorkerPostWebsocket(w, r, config, db, verbose, debug, wg, writeLock) + api.HandleWorkerPostWebsocket(w, r, config, db, verbose, debug, writeLock) }) workers.HandleFunc("/{NAME}", func(w http.ResponseWriter, r *http.Request) { - api.HandleWorkerDeleteName(w, r, config, db, verbose, debug, wg) + api.HandleWorkerDeleteName(w, r, db, verbose, debug) }).Methods("DELETE") // delete worker workers.HandleFunc("/{NAME}", func(w http.ResponseWriter, r *http.Request) { - api.HandleWorkerStatus(w, r, config, db, verbose, debug) + api.HandleWorkerStatus(w, r, db, verbose, debug) }).Methods("GET") // check status 1 worker } -func addHandleTask(task *mux.Router, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { +func addHandleTask(task *mux.Router, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex) { // task task.HandleFunc("", func(w http.ResponseWriter, r *http.Request) { - api.HandleTaskGet(w, r, config, db, verbose, debug) + api.HandleTaskGet(w, r, db, verbose, debug) }).Methods("GET") // check tasks task.HandleFunc("", func(w http.ResponseWriter, r *http.Request) { - 
api.HandleTaskPost(w, r, config, db, verbose, debug, wg) + api.HandleTaskPost(w, r, db, verbose, debug) }).Methods("POST") // Add task task.HandleFunc("/{ID}", func(w http.ResponseWriter, r *http.Request) { - api.HandleTaskDelete(w, r, config, db, verbose, debug, wg, writeLock) + api.HandleTaskDelete(w, r, config, db, verbose, debug, writeLock) }).Methods("DELETE") // Delete task task.HandleFunc("/{ID}", func(w http.ResponseWriter, r *http.Request) { - api.HandleTaskStatus(w, r, config, db, verbose, debug) + api.HandleTaskStatus(w, r, db, verbose, debug) }).Methods("GET") // get status task } @@ -143,153 +132,274 @@ func startSwaggerWeb(router *mux.Router, verbose, debug bool) { http.ServeFile(w, r, "docs/swagger.json") }).Methods("GET") - if verbose { + if verbose || debug { log.Println("Manager Configure swagger docs in /swagger/") } } -func StartManager(swagger bool, configFile, configSSHFile string, verifyAltName, verbose, debug bool) { +// StartManager main function to start manager +// StartManager initializes and starts the manager application +func StartManager(swagger bool, configFile, configSSHFile, configCloudFile string, verifyAltName, verbose, debug bool) { log.Println("Manager Running as manager...") - // if config file empty set default + var writeLock sync.Mutex + + // Load configurations + config, err := loadManagerConfigurations(configFile, verbose, debug) + if err != nil { + log.Println("Error loadManagerConfigurations") + return + } + configSSH, err := loadSSHConfiguration(configSSHFile, verbose, debug) + if err != nil { + log.Println("Error loadSSHConfiguration") + } + configCloud, err := loadCloudConfiguration(configCloudFile, verbose, debug) + if err != nil { + log.Println("Error loadCloudConfiguration") + } + + // Connect to database + db := connectToDatabase(config, debug) + defer db.Close() + + // Handle initial task status updates + setInitialTaskStatus(db, verbose, debug) + + // Initialize HTTP client + if config != nil { + 
initializeHTTPClient(config, verifyAltName, verbose, debug) + startBackgroundTask(db, config, &writeLock, verbose, debug) + setupAndStartServers(swagger, config, db, &writeLock, verbose, debug) + } + + // Start SSH background task + if configSSH != nil { + startSSHBackgroundTask(configSSH, config, verbose, debug) + } + + if configCloud != nil { + processCloudConfiguration(configCloud, configSSH, verbose, debug) + } +} + +func loadManagerConfigurations(configFile string, verbose, debug bool) (*utils.ManagerConfig, error) { if configFile == "" { configFile = "manager.conf" } config, err := loadManagerConfig(configFile, verbose, debug) if err != nil { - log.Fatal("Error loading config file: ", err) + return nil, fmt.Errorf("Error loading config file") } - var configSSH *utils.ManagerSSHConfig - if configSSHFile != "" { - configSSH, err = loadManagerSSHConfig(configSSHFile, verbose, debug) - if err != nil { - log.Fatal("Error loading config SSH file: ", err) - } + // Load default values + if config.APIIdleTimeout <= 0 { + config.APIIdleTimeout = 60 + } + if config.APIReadTimeout <= 0 { + config.APIIdleTimeout = 60 + } + if config.APIWriteTimeout <= 0 { + config.APIIdleTimeout = 60 + } + if config.APIIdleTimeout <= 0 { + config.APIIdleTimeout = 60 + } + if config.HTTPPort <= 0 { + config.APIIdleTimeout = 8080 + } + if config.HTTPSPort <= 0 { + config.APIIdleTimeout = 8443 + } + if config.StatusCheckSeconds <= 0 { + config.StatusCheckSeconds = 10 + } + if config.StatusCheckDown <= 0 { + config.StatusCheckDown = 360 } - // create waitGroups for DB - var wg sync.WaitGroup - var writeLock sync.Mutex + if config.MaxTaskHistory < 0 { + config.MaxTaskHistory = 0 + } + + return config, nil +} + +func loadSSHConfiguration(configSSHFile string, verbose, debug bool) (*utils.ManagerSSHConfig, error) { + if configSSHFile == "" { + return nil, fmt.Errorf("no config SSH file configured") + } + + configSSH, err := loadManagerSSHConfig(configSSHFile, verbose, debug) + if err != nil { + 
return nil, fmt.Errorf("Error loading config SSH file") + } + return configSSH, nil +} + +func loadCloudConfiguration(configCloudFile string, verbose, debug bool) (*utils.ManagerCloudConfig, error) { + if configCloudFile == "" { + return nil, fmt.Errorf("no config Cloud file configured") + } - // Start DB + configCloud, err := loadManagerCloudConfig(configCloudFile, verbose, debug) + if err != nil { + return nil, fmt.Errorf("Error loading config Cloud file") + + } + + return configCloud, nil +} + +func processCloudConfiguration(configCloud *utils.ManagerCloudConfig, configSSH *utils.ManagerSSHConfig, verbose, debug bool) error { + switch configCloud.Provider { + case "digitalocean": + go cloud.ProcessDigitalOcean(configCloud, configSSH, verbose, debug) + default: + log.Fatal("Error: Unsupported cloud provider") + } + return nil +} + +func connectToDatabase(config *utils.ManagerConfig, debug bool) *sql.DB { var db *sql.DB + var err error + for { if debug { log.Println("Manager Trying to connect to DB") } - db, err = database.ConnectDB(config.DBUsername, config.DBPassword, config.DBHost, config.DBPort, config.DBDatabase, verbose, debug) + db, err = database.ConnectDB(config.DBUsername, config.DBPassword, config.DBHost, config.DBPort, config.DBDatabase, false, debug) if err != nil { - log.Println("Error manager ConnectDB: ", err) - if db != nil { - defer db.Close() - } - time.Sleep(time.Second * 5) + log.Printf("Error connecting to DB: %v", err) + time.Sleep(5 * time.Second) } else { break } } + return db +} - // if running set to failed +func setInitialTaskStatus(db *sql.DB, verbose, debug bool) { if debug { - log.Println("Manager Set task running to failed") + log.Println("Manager Setting tasks with running status to failed") } - err = database.SetTasksStatusIfRunning(db, "failed", verbose, debug, &wg) - if err != nil { - log.Println("Error SetTasksStatusIfRunning:", err) - return + // if the manager app restarst, set al running to pending to launch again + if err 
:= database.SetTasksStatusIfStatus("running", db, "pending", verbose, debug); err != nil { + log.Printf("Error setting task statuses: %v", err) } - // Create an HTTP client with the custom TLS configuration +} + +func initializeHTTPClient(config *utils.ManagerConfig, verifyAltName, verbose, debug bool) { + var err error if config.CertFolder != "" { - clientHTTP, err := utils.CreateTLSClientWithCACert(config.CertFolder+"/ca-cert.pem", verifyAltName, verbose, debug) + config.ClientHTTP, err = utils.CreateTLSClientWithCACert(config.CertFolder+"/ca-cert.pem", verifyAltName, verbose, debug) if err != nil { - log.Println("Error creating HTTP client:", err) + log.Printf("Error creating HTTP client: %v", err) return } - config.ClientHTTP = clientHTTP - } else { config.ClientHTTP = &http.Client{} } config.ClientHTTP.Timeout = 5 * time.Second +} - // verify status workers infinite - go utils.VerifyWorkersLoop(db, config, verbose, debug, &wg, &writeLock) - - // manage task, routine to send task to iddle workers - go utils.ManageTasks(config, db, verbose, debug, &wg, &writeLock) - - if configSSHFile != "" { - go sshtunnel.StartSSH(configSSH, config.Port, verbose, debug) - } +func startSSHBackgroundTask(configSSH *utils.ManagerSSHConfig, config *utils.ManagerConfig, verbose, debug bool) { + go sshtunnel.StartSSH(configSSH, config.HTTPPort, config.HTTPSPort, verbose, debug) +} +func startBackgroundTask(db *sql.DB, config *utils.ManagerConfig, writeLock *sync.Mutex, verbose, debug bool) { + go utils.VerifyWorkersLoop(db, config, verbose, debug, writeLock) + go utils.ManageTasks(config, db, verbose, debug, writeLock) + go utils.DeleteMaxTaskHistoryLoop(db, config, verbose, debug) +} +func setupAndStartServers(swagger bool, config *utils.ManagerConfig, db *sql.DB, writeLock *sync.Mutex, verbose, debug bool) { router := mux.NewRouter() - - amw := authenticationMiddleware{tokenUsers: make(map[string]string), tokenWorkers: make(map[string]string)} + amw := authenticationMiddleware{ + 
tokenUsers: make(map[string]string), + tokenWorkers: make(map[string]string), + } amw.Populate(config) if swagger { - // Start swagger endpoint startSwaggerWeb(router, verbose, debug) } - // r.HandleFunc("/send/{recipient}", handleSendMessage).Methods("POST") + // Set up routes + setupRoutes(router, config, db, verbose, debug, writeLock, amw) + + // Start servers + startServers(router, config, verbose, debug) +} - // Status +func setupRoutes(router *mux.Router, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex, amw authenticationMiddleware) { status := router.PathPrefix("/status").Subrouter() status.Use(amw.Middleware) status.HandleFunc("", func(w http.ResponseWriter, r *http.Request) { - api.HandleStatus(w, r, config, db, verbose, debug) - }).Methods("GET") // get callback info from task + api.HandleStatus(w, r, db, verbose, debug) + }).Methods("GET") - // Worker workers := router.PathPrefix("/worker").Subrouter() workers.Use(amw.Middleware) - addHandleWorker(workers, config, db, verbose, debug, &wg, &writeLock) + addHandleWorker(workers, config, db, verbose, debug, writeLock) - // Task task := router.PathPrefix("/task").Subrouter() task.Use(amw.Middleware) - addHandleTask(task, config, db, verbose, debug, &wg, &writeLock) + addHandleTask(task, config, db, verbose, debug, writeLock) - // Middleware to modify server response headers router.Use(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Modify the server response headers here w.Header().Set("Server", "Apache") - - // Call the next handler next.ServeHTTP(w, r) }) }) - //router.Use(amw.Middleware) - - http.Handle("/", router) +} - // Set string for the port - addr := fmt.Sprintf(":%s", config.Port) - if verbose { - log.Println("Manager Port", config.Port) - } +func startServers(router *mux.Router, config *utils.ManagerConfig, verbose, debug bool) { + var wgServer sync.WaitGroup - // if there is cert is HTTPS - if 
config.CertFolder != "" { - log.Fatal(http.ListenAndServeTLS(addr, config.CertFolder+"/cert.pem", config.CertFolder+"/key.pem", router)) - } - - err = http.ListenAndServe(addr, nil) - if err != nil { - log.Println("Manager Error manager CertFolder: ", err) + if config.CertFolder != "" && config.HTTPSPort > 0 { + httpsAddr := fmt.Sprintf(":%d", config.HTTPSPort) + if verbose || debug { + log.Printf("Starting HTTPS server on port %d", config.HTTPSPort) + } + httpsServer := &http.Server{ + Addr: httpsAddr, + Handler: router, + ReadTimeout: time.Duration(config.APIReadTimeout) * time.Second, + WriteTimeout: time.Duration(config.APIWriteTimeout) * time.Second, + IdleTimeout: time.Duration(config.APIIdleTimeout) * time.Second, + } + go func() { + if err := httpsServer.ListenAndServeTLS(config.CertFolder+"/cert.pem", config.CertFolder+"/key.pem"); err != nil { + log.Fatalf("Error starting HTTPS server: %v", err) + } + }() + wgServer.Add(1) } - /* - err = http.ListenAndServe(":"+config.Port, allowCORS(http.DefaultServeMux)) - if err != nil { - log.Println("Manager Error manager: ",err) + if config.HTTPPort > 0 { + httpAddr := fmt.Sprintf(":%d", config.HTTPPort) + if verbose || debug { + log.Printf("Starting HTTP server on port %d", config.HTTPPort) } - */ + httpServer := &http.Server{ + Addr: httpAddr, + Handler: router, + ReadTimeout: time.Duration(config.APIReadTimeout) * time.Second, + WriteTimeout: time.Duration(config.APIWriteTimeout) * time.Second, + IdleTimeout: time.Duration(config.APIIdleTimeout) * time.Second, + } + go func() { + if err := httpServer.ListenAndServe(); err != nil { + log.Fatalf("Error starting HTTP server: %v", err) + } + }() + wgServer.Add(1) + } + wgServer.Wait() } // Define our struct @@ -319,14 +429,14 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler if foundUser { // We found the token in our map // Add the username to the request context - ctx := context.WithValue(r.Context(), "username", user) + ctx := 
context.WithValue(r.Context(), utils.UsernameKey, user) // Pass down the request with the updated context to the next middleware (or final handler) next.ServeHTTP(w, r.WithContext(ctx)) } else if foundWorker { // We found the token in our map // Add the username to the request context - ctx := context.WithValue(r.Context(), "worker", worker) + ctx := context.WithValue(r.Context(), utils.WorkerKey, worker) // Pass down the request with the updated context to the next middleware (or final handler) next.ServeHTTP(w, r.WithContext(ctx)) diff --git a/manager/sshTunnel/sshTunnel.go b/manager/sshTunnel/sshTunnel.go index 40d3756..ad87924 100644 --- a/manager/sshTunnel/sshTunnel.go +++ b/manager/sshTunnel/sshTunnel.go @@ -7,6 +7,8 @@ import ( "log" "net" "os" + "strconv" + "time" "github.com/r4ulcl/nTask/manager/utils" "golang.org/x/crypto/ssh" @@ -18,8 +20,14 @@ func forwardData(src, dest net.Conn) { log.Printf("Error forwarding data: %v", err) } - src.Close() - dest.Close() + err = src.Close() + if err != nil { + log.Printf("Error closing src: %v", err) + } + err = dest.Close() + if err != nil { + log.Printf("Error closing dest: %v", err) + } } func publicKeyFile(file string) (ssh.AuthMethod, error) { @@ -35,86 +43,123 @@ func publicKeyFile(file string) (ssh.AuthMethod, error) { return ssh.PublicKeys(key), nil } -func StartSSH(config *utils.ManagerSSHConfig, portAPI string, verbose, debug bool) { - log.Println("SSH StartSSH") - - for ip, port := range config.IPPort { - go func(ip, port string) { - log.Println("SSH connecction", ip, port) +// Maintain a map of active SSH connections +var activeConnections = make(map[string]*ssh.Client) - if !checkFileExists(config.PrivateKeyPath) { - log.Fatal("File ", config.PrivateKeyPath, " not found") - } - - auth, err := publicKeyFile(config.PrivateKeyPath) - if err != nil { - log.Fatal("Error loading file ", config.PrivateKeyPath, err) - } - - // SSH connection configuration - sshConfig := &ssh.ClientConfig{ - User: 
config.SSHUsername, - Auth: []ssh.AuthMethod{ - auth, - }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - - // If a password is provided, add it as an additional authentication method - if config.PrivateKeyPassword != "" { - sshConfig.Auth = append(sshConfig.Auth, ssh.Password(config.PrivateKeyPassword)) +// StartSSH main function to startSSH +func StartSSH(config *utils.ManagerSSHConfig, httpPort, httpsPort int, verbose, debug bool) { + log.Println("SSH StartSSH") + for { + for ip, port := range config.IPPort { + // Create a key for the activeConnections map + connectionKey := fmt.Sprintf("%s:%s", ip, port) + + // Check if a connection to the host and port already exists + if _, ok := activeConnections[connectionKey]; ok { + if verbose { + log.Printf("SSH connection to %s already exists", connectionKey) + } + continue } - // Connect to the SSH server - sshClient, err := ssh.Dial("tcp", ip+":"+port, sshConfig) - if err != nil { - log.Fatalf("Failed to dial: %s", err) - } + go func(ip, port string) { + log.Println("SSH connection", ip, port) - // Remote port to forward - remoteAddr := "127.0.0.1:" + portAPI - // Local address to forward to - localAddr := "127.0.0.1:" + portAPI + if !checkFileExists(config.PrivateKeyPath) { + log.Fatal("File ", config.PrivateKeyPath, " not found") + } - if debug { - log.Println("SSH remoteAddr", remoteAddr) - } + auth, err := publicKeyFile(config.PrivateKeyPath) + if err != nil { + log.Fatal("Error loading file ", config.PrivateKeyPath, err) + } - // Request remote port forwarding - remoteListener, err := sshClient.Listen("tcp", remoteAddr) - if err != nil { - log.Fatalf("Failed to request remote port forwarding: %v", err) - } - defer remoteListener.Close() + // SSH connection configuration + sshConfig := &ssh.ClientConfig{ + User: config.SSHUsername, + Auth: []ssh.AuthMethod{ + auth, + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } - fmt.Printf("Remote port forwarding %s to %s via SSH...\n", remoteAddr, localAddr) + 
// If a password is provided, add it as an additional authentication method + if config.PrivateKeyPassword != "" { + sshConfig.Auth = append(sshConfig.Auth, ssh.Password(config.PrivateKeyPassword)) + } - for { - // Wait for a connection on the remote port - remoteConn, err := remoteListener.Accept() + // Connect to the SSH server + sshClient, err := ssh.Dial("tcp", ip+":"+port, sshConfig) if err != nil { - log.Fatalf("Failed to accept connection on remote port: %v", err) + log.Printf("Failed to dial: %s", err) + return } - // Connect to the local server - localConn, err := net.Dial("tcp", localAddr) - if err != nil { - log.Printf("Failed to connect to local server: %v", err) - remoteConn.Close() - continue + // Add the connection to the activeConnections map + activeConnections[connectionKey] = sshClient + + // Port forwarding for HTTP and HTTPS + forwardPort := func(localPort, remotePort int) { + remoteAddr := "127.0.0.1:" + strconv.Itoa(remotePort) + localAddr := "127.0.0.1:" + strconv.Itoa(localPort) + + if debug { + log.Printf("SSH forwarding remoteAddr: %s to localAddr: %s", remoteAddr, localAddr) + } + + // Request remote port forwarding + remoteListener, err := sshClient.Listen("tcp", remoteAddr) + if err != nil { + log.Printf("Failed to request remote port forwarding: %v", err) + // Remove the connection from the activeConnections map on failure + delete(activeConnections, connectionKey) + return + } + defer remoteListener.Close() + + fmt.Printf("Remote port forwarding %s to %s via SSH...\n", remoteAddr, localAddr) + + for { + // Wait for a connection on the remote port + remoteConn, err := remoteListener.Accept() + if err != nil { + log.Printf("Failed to accept connection on remote port: %v", err) + // Remove the connection from the activeConnections map on failure + delete(activeConnections, connectionKey) + return + } + + // Connect to the local server + localConn, err := net.Dial("tcp", localAddr) + if err != nil { + log.Printf("Failed to connect to 
local server: %v", err) + err := remoteConn.Close() + if err != nil { + log.Printf("Failed closing remote port forwarding: %v", err) + } + continue + } + + // Start forwarding data between local and remote connections + go forwardData(remoteConn, localConn) + go forwardData(localConn, remoteConn) + } } - // Start forwarding data between local and remote connections - go forwardData(remoteConn, localConn) - go forwardData(localConn, remoteConn) - } - }(ip, port) + // Start forwarding for HTTP + go forwardPort(httpPort, httpPort) + + // Start forwarding for HTTPS + go forwardPort(httpsPort, httpsPort) + }(ip, port) + } + time.Sleep(time.Second * 60) } } // ssh-keygen -t rsa -b 2048 func checkFileExists(filePath string) bool { - _, error := os.Stat(filePath) + _, err := os.Stat(filePath) //return !os.IsNotExist(err) - return !errors.Is(error, os.ErrNotExist) + return !errors.Is(err, os.ErrNotExist) } diff --git a/manager/utils/disk.go b/manager/utils/disk.go index 6fb170f..341ec77 100644 --- a/manager/utils/disk.go +++ b/manager/utils/disk.go @@ -8,6 +8,7 @@ import ( "github.com/r4ulcl/nTask/globalstructs" ) +// SaveTaskToDisk Save Task To Disk func SaveTaskToDisk(task globalstructs.Task, path string, verbose, debug bool) error { // Convert the struct to JSON format jsonData, err := json.MarshalIndent(task, "", " ") @@ -26,7 +27,7 @@ func SaveTaskToDisk(task globalstructs.Task, path string, verbose, debug bool) e filePath := path // Open the file for writing - file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600) if err != nil { if verbose { log.Println("Error creating file:", err) diff --git a/manager/utils/handleErrors.go b/manager/utils/handleErrors.go new file mode 100644 index 0000000..9fd235a --- /dev/null +++ b/manager/utils/handleErrors.go @@ -0,0 +1,40 @@ +package utils + +import ( + "database/sql" + + "github.com/r4ulcl/nTask/globalstructs" + 
"github.com/r4ulcl/nTask/manager/database" + + "github.com/go-sql-driver/mysql" +) + +// HandleAddWorkerError func to handle error adding workers +func HandleAddWorkerError(err error, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool) error { + if mysqlErr, ok := err.(*mysql.MySQLError); ok { + // Handle the MySQL duplicate entry error + if mysqlErr.Number == 1062 { // MySQL error number for duplicate entry + // Set all worker tasks to 'pending' with REDO status + err = database.SetTasksWorkerPending(db, worker.Name, verbose, debug) + if err != nil { + return err + } + + // Update worker record + err = database.UpdateWorker(db, worker, verbose, debug) + if err != nil { + return err + } + + // Reset the worker's down count + err = database.SetWorkerDownCount(db, worker.Name, 0, verbose, debug) + if err != nil { + return err + } + } + // Return the original error if it's not the duplicate entry error + return err + } + // Return nil if no error + return nil +} diff --git a/manager/utils/manageTasks.go b/manager/utils/manageTasks.go index 2d2ed0f..73602c3 100644 --- a/manager/utils/manageTasks.go +++ b/manager/utils/manageTasks.go @@ -9,11 +9,13 @@ import ( "github.com/r4ulcl/nTask/manager/database" ) -func ManageTasks(config *ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { +// ManageTasks infinite loop to manage task +func ManageTasks(config *ManagerConfig, db *sql.DB, verbose, debug bool, writeLock *sync.Mutex) { // infinite loop eecuted with go routine for { // Get all tasks in order and if priority - tasks, err := database.GetTasksPending(100, db, verbose, debug) + workersThreads := getWorkersThreads(db, verbose, debug) + tasks, err := database.GetTasksPending(workersThreads, db, verbose, debug) if err != nil { log.Println(err.Error()) } @@ -35,9 +37,9 @@ func ManageTasks(config *ManagerConfig, db *sql.DB, verbose, debug bool, wg *syn for _, worker := range workers { // if WorkerName not send or set this 
worker, just sendAddTask if task.WorkerName == "" || task.WorkerName == worker.Name { - err = SendAddTask(db, config, &worker, &task, verbose, debug, wg, writeLock) + err = sendAddTask(db, config, &worker, &task, verbose, debug, writeLock) if err != nil { - log.Println("Utils Error SendAddTask", err.Error()) + log.Println("Utils Error sendAddTask", err.Error()) //time.Sleep(time.Second * 1) break } @@ -56,6 +58,8 @@ func ManageTasks(config *ManagerConfig, db *sql.DB, verbose, debug bool, wg *syn } else { if len(tasks) == 0 { time.Sleep(time.Second * 1) + } else if len(workers) == 0 { + time.Sleep(time.Millisecond * 500) } } } diff --git a/manager/utils/stats.go b/manager/utils/stats.go index 23c4794..36819f5 100644 --- a/manager/utils/stats.go +++ b/manager/utils/stats.go @@ -6,6 +6,7 @@ import ( "github.com/r4ulcl/nTask/manager/database" ) +// GetStatusTask function to get task status, pending, running, etc func GetStatusTask(db *sql.DB, verbose, debug bool) (StatusTask, error) { task := StatusTask{ Pending: 0, @@ -15,31 +16,31 @@ func GetStatusTask(db *sql.DB, verbose, debug bool) (StatusTask, error) { Deleted: 0, } - pending, err := database.GetPendingCount(db, verbose, debug) + pending, err := database.GetCountByStatus("pending", db, verbose, debug) if err != nil { return task, err } task.Pending = pending - running, err := database.GetRunningCount(db, verbose, debug) + running, err := database.GetCountByStatus("running", db, verbose, debug) if err != nil { return task, err } task.Running = running - done, err := database.GetDoneCount(db, verbose, debug) + done, err := database.GetCountByStatus("done", db, verbose, debug) if err != nil { return task, err } task.Done = done - failed, err := database.GetFailedCount(db, verbose, debug) + failed, err := database.GetCountByStatus("failed", db, verbose, debug) if err != nil { return task, err } task.Failed = failed - deleted, err := database.GetDeletedCount(db, verbose, debug) + deleted, err := 
database.GetCountByStatus("deleted", db, verbose, debug) if err != nil { return task, err } @@ -48,6 +49,7 @@ func GetStatusTask(db *sql.DB, verbose, debug bool) (StatusTask, error) { return task, nil } +// GetStatusWorker func to get status up, down of workers func GetStatusWorker(db *sql.DB, verbose, debug bool) (StatusWorker, error) { worker := StatusWorker{ Up: 0, diff --git a/manager/utils/structs.go b/manager/utils/structs.go index 8f2f1a1..60115a0 100644 --- a/manager/utils/structs.go +++ b/manager/utils/structs.go @@ -6,10 +6,15 @@ import ( "github.com/gorilla/websocket" ) +// ManagerConfig manager config file struct type ManagerConfig struct { Users map[string]string `json:"users"` Workers map[string]string `json:"workers"` - Port string `json:"port"` + HTTPPort int `json:"httpPort"` + HTTPSPort int `json:"httpsPort"` + APIReadTimeout int `json:"apiReadTimeout"` + APIWriteTimeout int `json:"apiWriteTimeout"` + APIIdleTimeout int `json:"apiIdleTimeout"` DBUsername string `json:"dbUsername"` DBPassword string `json:"dbPassword"` DBHost string `json:"dbHost"` @@ -21,8 +26,10 @@ type ManagerConfig struct { CertFolder string `json:"certFolder"` ClientHTTP *http.Client `json:"clientHTTP"` WebSockets map[string]*websocket.Conn `json:"webSockets"` + MaxTaskHistory int `json:"maxTaskHistory"` } +// ManagerSSHConfig manager SSH config struct type ManagerSSHConfig struct { IPPort map[string]string `json:"ipPort"` SSHUsername string `json:"sshUsername"` @@ -30,11 +37,27 @@ type ManagerSSHConfig struct { PrivateKeyPassword string `json:"privateKeyPassword"` } +// ManagerCloudConfig manager cloud config struct +// https://slugs.do-api.dev/ +type ManagerCloudConfig struct { + Provider string `json:"provider"` + APIKey string `json:"apiKey"` + SnapshotName string `json:"snapshotName"` + Servers int `json:"servers"` + Region string `json:"region"` + Size string `json:"size"` + SSHKeys string `json:"sshKeys"` + SSHPort int `json:"sshPort"` + Recreate bool `json:"recreate"` 
+} + +// Status General status struct type Status struct { Task StatusTask `json:"task"` Worker StatusWorker `json:"worker"` } +// StatusTask task status struct type StatusTask struct { Pending int `json:"pending"` Running int `json:"running"` @@ -43,7 +66,17 @@ type StatusTask struct { Deleted int `json:"deleted"` } +// StatusWorker worker status struct type StatusWorker struct { Up int `json:"up"` Down int `json:"down"` } + +// API username middleware +type contextKey string + +// UsernameKey key to get username in API +const UsernameKey contextKey = "username" + +// WorkerKey key to get worker in API +const WorkerKey contextKey = "worker" diff --git a/manager/utils/workerRequest.go b/manager/utils/workerRequest.go index 8409ae9..2dc43d7 100644 --- a/manager/utils/workerRequest.go +++ b/manager/utils/workerRequest.go @@ -9,6 +9,7 @@ import ( "log" "net/http" "os" + "strings" "sync" "time" @@ -17,20 +18,16 @@ import ( "github.com/r4ulcl/nTask/manager/database" ) +// SendMessage Function to send message in a websocket func SendMessage(conn *websocket.Conn, message []byte, verbose, debug bool, writeLock *sync.Mutex) error { writeLock.Lock() defer writeLock.Unlock() if debug { log.Println("Utils SendMessage", string(message)) } - // check if websocket alive - err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second)) - if err != nil { - log.Println("Utils Error in websocket", string(message)) - return err - } - - err = conn.WriteMessage(websocket.TextMessage, message) + writeTimeout := 10 * time.Second + conn.SetWriteDeadline(time.Now().Add(writeTimeout)) + err := conn.WriteMessage(websocket.TextMessage, message) if err != nil { return err } @@ -41,24 +38,62 @@ func SendMessage(conn *websocket.Conn, message []byte, verbose, debug bool, writ } // VerifyWorkersLoop checks and sets if the workers are UP infinitely. 
-func VerifyWorkersLoop(db *sql.DB, config *ManagerConfig, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { - for { - go verifyWorkers(db, config, verbose, debug, wg, writeLock) - time.Sleep(time.Duration(config.StatusCheckSeconds) * time.Second) +func VerifyWorkersLoop(db *sql.DB, config *ManagerConfig, verbose, debug bool, writeLock *sync.Mutex) { + ticker := time.NewTicker(time.Duration(config.StatusCheckSeconds) * time.Second) + defer ticker.Stop() + for range ticker.C { + verifyWorkers(db, config, verbose, debug, writeLock) + } +} + +// DeleteMaxTaskHistoryLoop Loop and Delete Database Entries if num tasks > config.MaxTaskHistory +func DeleteMaxTaskHistoryLoop(db *sql.DB, config *ManagerConfig, verbose, debug bool) { + maxEntries := config.MaxTaskHistory + tableName := "task" + if maxEntries > 0 { + for { + err := database.DeleteMaxEntriesHistory(db, maxEntries, tableName, verbose, debug) + if err != nil && (verbose || debug) { + log.Println("Error DeleteMaxEntriesHistory:", err) + } + time.Sleep(1 * time.Hour) + } } } +// getWorkersThreads get DefaultThreads of all workers +func getWorkersThreads(db *sql.DB, verbose, debug bool) int { + + workersThreads := 0 + // Get all workers from the database + workers, err := database.GetWorkerUP(db, verbose, debug) + if err != nil { + log.Print("GetWorker", err) + } + + // Verify each worker + for _, worker := range workers { + workersThreads += worker.DefaultThreads + } + + if debug { + log.Println("getWorkersThreads workersThreads", workersThreads) + } + + return workersThreads +} + // verifyWorkers checks and sets if the workers are UP. 
-func verifyWorkers(db *sql.DB, config *ManagerConfig, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { - // Get all UP workers from the database +func verifyWorkers(db *sql.DB, config *ManagerConfig, verbose, debug bool, writeLock *sync.Mutex) { + // Get all workers from the database workers, err := database.GetWorkers(db, verbose, debug) if err != nil { - log.Print("GetWorkerUP", err) + log.Print("GetWorker", err) } // Verify each worker for _, worker := range workers { - err := verifyWorker(db, config, &worker, verbose, debug, wg, writeLock) + err := verifyWorker(db, config, &worker, verbose, debug, writeLock) if err != nil { log.Print("verifyWorker ", err) } @@ -66,49 +101,14 @@ func verifyWorkers(db *sql.DB, config *ManagerConfig, verbose, debug bool, wg *s } // verifyWorker checks and sets if the worker is UP. -func verifyWorker(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) error { +func verifyWorker(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, verbose, debug bool, writeLock *sync.Mutex) error { if debug { log.Println("Utils verifyWorker", worker.Name) } + conn := config.WebSockets[worker.Name] if conn == nil { - if debug { - log.Println("Utils Error: The worker doesnt have a websocket", worker.Name) - } - - delete(config.WebSockets, worker.Name) - - err := database.SetWorkerUPto(false, db, worker, verbose, debug, wg) - if err != nil { - return err - } - - downCount, err := database.GetWorkerDownCount(db, worker, verbose, debug) - if err != nil { - return err - } - - if downCount >= config.StatusCheckDown { - if debug { - log.Println("Utils downCount", downCount, " >= config.StatusCheckDown", config.StatusCheckDown) - } - err = database.RmWorkerName(db, worker.Name, verbose, debug, wg) - if err != nil { - return err - } - } else { - err = database.AddWorkerDownCount(db, worker, verbose, debug, wg) - if err != nil { - return err - } - } - - 
// Set as 'pending' all workers tasks to REDO - err = database.SetTasksWorkerPending(db, worker.Name, verbose, debug, wg) - if err != nil { - return err - } - return nil + return handleMissingWebSocket(worker, db, config, verbose, debug) } msg := globalstructs.WebsocketMessage{ @@ -123,39 +123,68 @@ func verifyWorker(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worke } return err } + return SendMessage(conn, jsonData, verbose, debug, writeLock) +} - err = SendMessage(conn, jsonData, verbose, debug, writeLock) - if err != nil { - if debug { - log.Println("Utils Can't send message, error:", err) - } - err = WorkerDisconnected(db, config, worker, verbose, debug, wg) - if err != nil { +// handleMissingWebSocket marks a worker down (or removes it) when its WS is gone. +// It tolerates “not found” from SetWorkerUPto in case the row was already deleted. +func handleMissingWebSocket( + worker *globalstructs.Worker, + db *sql.DB, + config *ManagerConfig, + verbose, debug bool, +) error { + if debug { + log.Println("Utils Error: no websocket for worker", worker.Name) + } + + // Remove from in-memory map + delete(config.WebSockets, worker.Name) + + // Mark as down + if err := database.SetWorkerUPto(db, worker.Name, false, verbose, debug); err != nil { + // ignore “not found” since it may have been removed already + if !strings.Contains(err.Error(), "not found") { return err } - return err + if debug { + log.Printf("Utils handleMissingWebSocket: %v", err) + } } - err = database.SetWorkerDownCount(0, db, worker, verbose, debug, wg) + // Increment down-count + downCount, err := database.GetWorkerDownCount(db, worker.Name, verbose, debug) if err != nil { return err } + if err := database.AddWorkerDownCount(db, worker.Name, verbose, debug); err != nil { + return err + } - return nil + // If exceeded retries, orphan tasks and delete the worker + if downCount+1 >= config.StatusCheckDown { + if err := database.SetTasksWorkerPending(db, worker.Name, verbose, debug); err != 
nil { + return err + } + if err := database.RmWorkerName(db, worker.Name, verbose, debug); err != nil { + return err + } + } + return nil } -// SendAddTask sends a request to a worker to add a task. -func SendAddTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, task *globalstructs.Task, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) error { +// sendAddTask sends a request to a worker to add a task. +func sendAddTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, task *globalstructs.Task, verbose, debug bool, writeLock *sync.Mutex) error { if debug { - log.Println("Utils SendAddTask") + log.Println("Utils sendAddTask") } - //Sustract 1 Iddle Thread in worker - err := database.SubtractWorkerIddleThreads1(db, worker.Name, verbose, debug, wg) + + // Subtract Iddle thread in DB, in the next status to worker it will update to real data + err := database.SubtractWorkerIddleThreads1(db, worker.Name, verbose, debug) if err != nil { return err } - // add 1 on callback conn := config.WebSockets[worker.Name] if conn == nil { @@ -187,31 +216,24 @@ func SendAddTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker if debug { log.Println("Utils Can't send message, error:", err) } - err = WorkerDisconnected(db, config, worker, verbose, debug, wg) - if err != nil { - return err - } return err } + task.Status = "running" + task.WorkerName = worker.Name + // Set task as running - err = database.SetTaskStatus(db, task.ID, "running", verbose, debug, wg) + err = database.UpdateTask(db, *task, verbose, debug) if err != nil { - log.Println("Utils Error SetTaskStatus in request:", err) + return fmt.Errorf("Utils Error SetTaskStatus in request: %s", err) } // Set task as executed - err = database.SetTaskExecutedAtNow(db, task.ID, verbose, debug, wg) + err = database.SetTaskExecutedAtNow(db, task.ID, verbose, debug) if err != nil { return fmt.Errorf("Error SetTaskExecutedAt in request: %s", err) } - // Set workerName in DB 
and in object - err = database.SetTaskWorkerName(db, task.ID, worker.Name, verbose, debug, wg) - if err != nil { - return fmt.Errorf("Error SetWorkerNameTask in request: %s", err) - } - if verbose { log.Println("Utils Task send successfully") } @@ -220,7 +242,7 @@ func SendAddTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker } // SendDeleteTask sends a request to a worker to stop and delete a task. -func SendDeleteTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, task *globalstructs.Task, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) error { +func SendDeleteTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, task *globalstructs.Task, verbose, debug bool, writeLock *sync.Mutex) error { conn := config.WebSockets[worker.Name] if conn == nil { return fmt.Errorf("Error, websocket not found") @@ -248,19 +270,11 @@ func SendDeleteTask(db *sql.DB, config *ManagerConfig, worker *globalstructs.Wor if debug { log.Println("Utils Can't send message, error:", err) } - err = WorkerDisconnected(db, config, worker, verbose, debug, wg) - if err != nil { - return err - } return err } // Set the task and worker as not working - err = database.SetTaskStatus(db, task.ID, "deleted", verbose, debug, wg) - if err != nil { - return err - } - err = database.SubtractWorkerIddleThreads1(db, worker.Name, verbose, debug, wg) + err = database.SetTaskStatus(db, task.ID, "deleted", verbose, debug) if err != nil { return err } @@ -295,6 +309,7 @@ func CreateTLSClientWithCACert(caCertPath string, verifyAltName, verbose, debug tlsConfig = &tls.Config{ InsecureSkipVerify: true, // Enable server verification RootCAs: certPool, + MinVersion: tls.VersionTLS12, // Minimum version set to TLS 1.2 VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { if len(rawCerts) == 0 { return fmt.Errorf("no certificates provided by the server") @@ -338,23 +353,38 @@ func CreateTLSClientWithCACert(caCertPath 
string, verifyAltName, verbose, debug return client, nil } -func WorkerDisconnected(db *sql.DB, config *ManagerConfig, worker *globalstructs.Worker, verbose, debug bool, wg *sync.WaitGroup) error { +// WorkerDisconnected is called when a live connection error occurs. +// It closes the socket, marks the worker down, and re-queues its tasks. +// It tolerates “not found” from SetWorkerUPto. +func WorkerDisconnected( + db *sql.DB, + config *ManagerConfig, + worker *globalstructs.Worker, + verbose, debug bool, +) error { if debug { - log.Println("Utils Error: WriteControl cant connect", worker.Name) + log.Println("Utils WorkerDisconnected: closing websocket for", worker.Name) } - // Close connection - config.WebSockets[worker.Name].Close() + // Close the socket if still present + if ws, ok := config.WebSockets[worker.Name]; ok { + ws.Close() + } delete(config.WebSockets, worker.Name) - err := database.SetWorkerUPto(false, db, worker, verbose, debug, wg) - if err != nil { - return err + // Mark as down + if err := database.SetWorkerUPto(db, worker.Name, false, verbose, debug); err != nil { + // ignore “not found” since it may have been removed already + if !strings.Contains(err.Error(), "not found") { + return err + } + if debug { + log.Printf("Utils WorkerDisconnected: %v", err) + } } - // Set as 'pending' all workers tasks to REDO - err = database.SetTasksWorkerPending(db, worker.Name, verbose, debug, wg) - if err != nil { + // Re-queue any in-flight tasks + if err := database.SetTasksWorkerPending(db, worker.Name, verbose, debug); err != nil { return err } diff --git a/manager/websockets/websockets.go b/manager/websockets/websockets.go index aeca45f..68abe41 100644 --- a/manager/websockets/websockets.go +++ b/manager/websockets/websockets.go @@ -3,270 +3,338 @@ package websockets import ( "database/sql" "encoding/json" - "fmt" "log" "sync" + "time" - "github.com/go-sql-driver/mysql" "github.com/gorilla/websocket" "github.com/r4ulcl/nTask/globalstructs" 
"github.com/r4ulcl/nTask/manager/database" "github.com/r4ulcl/nTask/manager/utils" ) -func GetWorkerMessage(conn *websocket.Conn, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup, writeLock *sync.Mutex) { +// GetWorkerMessage processes worker messages with robust heartbeat and write synchronization. +func GetWorkerMessage(conn *websocket.Conn, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { var worker globalstructs.Worker - for { - response := globalstructs.WebsocketMessage{ - Type: "", - JSON: "", + // configure timing and retries + const ( + pongWait = 120 * time.Second + pingInterval = 30 * time.Second + maxRecovery = 5 + recoveryBackoff = pingInterval * 2 + writeTimeout = 60 * time.Second + ) + + // protect writes + var writeMu sync.Mutex + // heartbeat tracking + var lastPong time.Time = time.Now() + var lastPongMu sync.Mutex + + // read limits and initial deadline + conn.SetReadLimit(globalstructs.MaxMessageSize) + conn.SetReadDeadline(time.Now().Add(globalstructs.PongWait)) + conn.SetPongHandler(func(appData string) error { + // update lastPong under lock + lastPongMu.Lock() + lastPong = time.Now() + lastPongMu.Unlock() + // extend read deadline + conn.SetReadDeadline(time.Now().Add(globalstructs.PongWait)) + if debug { + log.Println("Received Pong from worker", worker.Name) } + return nil + }) - _, p, err := conn.ReadMessage() - if err != nil { - // if the clients conexion is down, this is the first error - if debug { - log.Println("WebSockets client conexion down error: ", err) + // handle client Close frames + conn.SetCloseHandler(func(code int, text string) error { + if debug { + log.Printf("Received Close frame (code=%d): %s", code, text) + } + // immediate shutdown + return conn.Close() + }) + + // ping ticker + recovery + pingTicker := time.NewTicker(pingInterval) + defer pingTicker.Stop() + + go func() { + for range pingTicker.C { + // send ping under write lock + writeMu.Lock() + 
conn.SetWriteDeadline(time.Now().Add(writeTimeout)) + err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(writeTimeout)) + writeMu.Unlock() + if err != nil { + if debug { + log.Println("Ping write failed, closing:", err) + } + conn.Close() + return } - // check if worker not init - if worker != (globalstructs.Worker{}) { - err = utils.WorkerDisconnected(db, config, &worker, verbose, debug, wg) - if err != nil { + + // check time since lastPong + lastPongMu.Lock() + elapsed := time.Since(lastPong) + lastPongMu.Unlock() + if elapsed > globalstructs.PongWait { + if debug { + log.Println("Missed heartbeat—entering recovery retries") + } + // recovery loop + for i := 1; i <= maxRecovery; i++ { + time.Sleep(recoveryBackoff) if debug { - log.Println("WebSockets WorkerDisconnected error: ", err) + log.Printf("Recovery ping #%d", i) + } + writeMu.Lock() + conn.SetWriteDeadline(time.Now().Add(writeTimeout)) + err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(writeTimeout)) + writeMu.Unlock() + if err != nil { + if debug { + log.Println("Recovery ping failed, closing:", err) + } + conn.Close() + return + } + lastPongMu.Lock() + elapsed = time.Since(lastPong) + lastPongMu.Unlock() + if elapsed <= globalstructs.PongWait { + if debug { + log.Println("Heartbeat recovered on attempt", i) + } + break } } - } else { - if debug { - log.Println("WebSockets Worker empty") + // final check + lastPongMu.Lock() + elapsed = time.Since(lastPong) + lastPongMu.Unlock() + if elapsed > globalstructs.PongWait { + if debug { + log.Println("No heartbeat after recovery—disconnecting") + } + conn.Close() + return } } - return } + }() - var msg globalstructs.WebsocketMessage - err = json.Unmarshal(p, &msg) + // main read loop + for { + _, payload, err := conn.ReadMessage() if err != nil { if debug { - log.Println("WebSockets Error decoding JSON:", err) + log.Println("ReadMessage error (disconnect):", err) } - continue + handleConnectionError(err, db, config, &worker, 
verbose, debug) + return } - switch msg.Type { - case "addWorker": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } - - err = json.Unmarshal([]byte(msg.JSON), &worker) - if err != nil { - log.Println("WebSockets addWorker Unmarshal error: ", err) - } - // add con to worker - err = addWorker(worker, db, verbose, debug, wg) - if err != nil { - log.Println("WebSockets addWorker error: ", err) - response.Type = "FAILED" - } else { - response.Type = "OK" - config.WebSockets[worker.Name] = conn - } - - case "deleteWorker": + msg, err := parseMessage(payload, debug) + if err != nil { if debug { - log.Println(msg.Type) - } - err = json.Unmarshal([]byte(msg.JSON), &worker) - if err != nil { - log.Println("WebSockets deleteWorker Unmarshal error: ", err) + log.Println("parseMessage error:", err) } + continue + } + handleMessage(msg, conn, config, db, &worker, verbose, debug) + } +} - err = database.RmWorkerName(db, worker.Name, verbose, debug, wg) - if err != nil { - log.Println("WebSockets RmWorkerName error: ", err) - response.Type = "FAILED" - } else { - response.Type = "OK" - config.WebSockets[worker.Name].Close() - delete(config.WebSockets, worker.Name) - } +func handleConnectionError( + err error, + db *sql.DB, + config *utils.ManagerConfig, + worker *globalstructs.Worker, + verbose, debug bool, +) { + if debug { + log.Println("WebSocket connection error:", err) + } + // only call WorkerDisconnected if worker has been initialized + if *worker != (globalstructs.Worker{}) { + if err := utils.WorkerDisconnected(db, config, worker, verbose, debug); err != nil && debug { + log.Println("WorkerDisconnected error:", err) + } + } else if debug { + log.Println("Worker is uninitialized; nothing to clean up") + } +} - // Set the tasks as failed - err := database.SetTasksWorkerPending(db, worker.Name, verbose, debug, wg) - if err != nil { - log.Println("WebSockets SetTasksWorkerFailed error: ", err) - } - case 
"callbackTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } +func parseMessage(p []byte, debug bool) (globalstructs.WebsocketMessage, error) { + var msg globalstructs.WebsocketMessage + if err := json.Unmarshal(p, &msg); err != nil { + if debug { + log.Println("Error decoding JSON:", err) + } + return msg, err + } + return msg, nil +} - var result globalstructs.Task - err = json.Unmarshal([]byte(msg.JSON), &result) - if err != nil { - log.Println("WebSockets addWorker Unmarshal error: ", err) - } +func handleMessage(msg globalstructs.WebsocketMessage, conn *websocket.Conn, config *utils.ManagerConfig, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool) { + switch msg.Type { + case "addWorker": + handleAddWorker(msg, conn, config, db, worker, verbose, debug) + case "deleteWorker": + handleDeleteWorker(msg, config, db, worker, verbose, debug) + case "callbackTask": + handleCallbackTask(msg, config, db, verbose, debug) + case "status": + handleWorkerStatus(msg, db, verbose, debug) + case "OK;addTask": + if debug { + log.Println("Receive message OK;addTask from worker") + } + // Set here as working? 
+ case "OK;deleteTask": + if debug { + log.Println("Received OK;deleteTask — marking task as deleted") + } + /*var completedTask globalstructs.Task + if err := json.Unmarshal([]byte(msg.JSON), &completedTask); err != nil { + log.Println("Error unmarshaling OK;deleteTask JSON:", err) + break + } - err = callback(result, config, db, verbose, debug, wg) + if err := database.SetTaskStatus(db, completedTask.ID, "deleted", verbose, debug, wg); err != nil { + log.Println("Error setting task status to deleted:", err) + }*/ - if err != nil { - log.Println("WebSockets callbackTask error: ", err) - } + case "FAILED;deleteTask": + if debug { + log.Println("Received FAILED;deleteTask — deletion failed, leaving state or retrying") + } + var failedDelTask globalstructs.Task + if err := json.Unmarshal([]byte(msg.JSON), &failedDelTask); err != nil { + log.Println("Error unmarshaling FAILED;deleteTask JSON:", err) + break + } + log.Printf("Task %d could not be killed on worker %q", failedDelTask.ID, failedDelTask.WorkerName) - //Responses + case "FAILED;addTask": + if debug { + log.Println("Receive message FAILED;addTask from worker - re‐queueing", msg.JSON) + } + var failedTask globalstructs.Task + if err := json.Unmarshal([]byte(msg.JSON), &failedTask); err != nil { + log.Println("Error unmarshaling FAILED;addTask JSON:", err) + break + } - case "OK;addTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } - var result globalstructs.Task - err = json.Unmarshal([]byte(msg.JSON), &result) - if err != nil { - log.Println("WebSockets addWorker Unmarshal error: ", err) - } + // Revert the task to “pending” so it can be retried + // Wait to avoid updating the time at the same second + time.Sleep(1 * time.Second) - // Set task as executed - err = database.SetTaskExecutedAtNow(db, result.ID, verbose, debug, wg) - if err != nil { - log.Println("WebSockets Error SetTaskExecutedAt in request:", err) - } + if err := 
database.SetTaskStatus(db, failedTask.ID, "pending", verbose, debug); err != nil { + log.Println("Error setting task status back to pending:", err) + } + default: + if debug { + log.Printf("--------- Unhandled message type: %s\n", msg.Type) + } + } +} - // Set workerName in DB and in object - err = database.SetTaskWorkerName(db, result.ID, result.WorkerName, verbose, debug, wg) - if err != nil { - log.Println("WebSockets Error SetWorkerNameTask in request:", err) - } +func handleAddWorker(msg globalstructs.WebsocketMessage, conn *websocket.Conn, config *utils.ManagerConfig, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool) { + if err := handleWorkerMessage(msg, worker, db, verbose, debug, func() error { + config.WebSockets[worker.Name] = conn + return addWorker(*worker, db, verbose, debug) + }); err != nil { + log.Println("Error handling addWorker:", err) + } +} - if verbose { - log.Println("WebSockets Task send successfully") - } - case "FAILED;addTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } +func handleDeleteWorker(msg globalstructs.WebsocketMessage, config *utils.ManagerConfig, db *sql.DB, worker *globalstructs.Worker, verbose, debug bool) { + if err := handleWorkerMessage(msg, worker, db, verbose, debug, func() error { + return database.RmWorkerName(db, worker.Name, verbose, debug) + }); err != nil { + log.Println("Error handling deleteWorker:", err) + } +} - var result globalstructs.Task - err = json.Unmarshal([]byte(msg.JSON), &result) - if err != nil { - log.Println("WebSockets addWorker Unmarshal error: ", err) - } +func handleWorkerMessage(msg globalstructs.WebsocketMessage, worker *globalstructs.Worker, db *sql.DB, verbose, debug bool, workerAction func() error) error { + if debug { + log.Println("Handling worker message") + } + if err := json.Unmarshal([]byte(msg.JSON), worker); err != nil { + log.Println("Error unmarshaling worker message:", err) + return err + } - // 
Set the task as pending because the worker return error in add, so its not been procesed - err = database.SetTaskStatus(db, result.ID, "pending", verbose, debug, wg) - if err != nil { - if verbose { - log.Println("WebSockets HandleCallback { \"error\" : \"Error SetTaskStatus: " + err.Error() + "\"}") - } - log.Println("WebSockets Error SetTaskStatus in request:", err) - } - case "OK;deleteTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } - case "FAILED;deleteTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } - log.Println("WebSockets ------------------ TODO FAILED;deleteTask") - case "status": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - log.Println("WebSockets msg.JSON", msg.JSON) - } - if msg.Type == "status" { - // Unmarshal the JSON into a WorkerStatus struct - var status globalstructs.WorkerStatus - err = json.Unmarshal([]byte(msg.JSON), &status) - if err != nil { - log.Println("WebSockets status Unmarshal error: ", err) - } + if err := workerAction(); err != nil { + return err + } - if verbose { - log.Println("WebSockets Response status from worker", status.Name, msg.JSON) - } - worker, err := database.GetWorker(db, status.Name, verbose, debug) - if err != nil { - log.Println("WebSockets GetWorker error: ", err) - } - // If there is no error in making the request, assume worker is online - err = database.SetWorkerUPto(true, db, &worker, verbose, debug, wg) - if err != nil { - log.Println("WebSockets status error: ", err) - } + return nil +} - // If worker IddleThreads is not the same as stored in the DB, update the DB - if status.IddleThreads != worker.IddleThreads { - err := database.SetIddleThreadsTo(status.IddleThreads, db, worker.Name, verbose, debug, wg) - if err != nil { - log.Println("WebSockets status SetIddleThreadsTo error: ", err) - } - } - } - } +func handleCallbackTask(msg 
globalstructs.WebsocketMessage, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) { + if debug { + log.Println("Handling callbackTask message") + } + var task globalstructs.Task + if err := json.Unmarshal([]byte(msg.JSON), &task); err != nil { + log.Println("Error unmarshaling callbackTask message:", err) + return + } + if err := callback(task, config, db, verbose, debug); err != nil { + log.Println("Error handling callback task:", err) + } +} - if debug { - fmt.Printf("Received message type: %s\n", msg.Type) - fmt.Printf("Received message json: %s\n", msg.JSON) - } +func handleWorkerStatus(msg globalstructs.WebsocketMessage, db *sql.DB, verbose, debug bool) { + if debug { + log.Println("Handling status message") + } + var status globalstructs.WorkerStatus + if err := json.Unmarshal([]byte(msg.JSON), &status); err != nil { + log.Println("Error unmarshaling status message:", err) + return + } + worker, err := database.GetWorker(db, status.Name, verbose, debug) + if err != nil { + log.Println("Error retrieving worker from database:", err) + return + } + if err := database.SetWorkerUPto(db, worker.Name, true, verbose, debug); err != nil { + log.Println("Error setting worker status to UP:", err) + } - if response.Type != "" { - jsonData, err := json.Marshal(response) - if err != nil { - log.Println("WebSockets Marshal error: ", err) - } - err = utils.SendMessage(conn, jsonData, verbose, debug, writeLock) - if err != nil { - log.Println("WebSockets SendMessage error: ", err) - } + if err := database.SetWorkerDownCount(db, worker.Name, 0, verbose, debug); err != nil { + log.Println("Error setting worker status to UP:", err) + } + if status.IddleThreads != worker.IddleThreads { + if err := database.SetIddleThreadsTo(db, worker.Name, status.IddleThreads, verbose, debug); err != nil { + log.Println("Error updating idle threads in database:", err) } } } -func addWorker(worker globalstructs.Worker, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) error { +func 
addWorker(worker globalstructs.Worker, db *sql.DB, verbose, debug bool) error { if debug { log.Println("WebSockets worker.Name", worker.Name) } - err := database.AddWorker(db, &worker, verbose, debug, wg) + err := database.AddWorker(db, &worker, verbose, debug) if err != nil { - if mysqlErr, ok := err.(*mysql.MySQLError); ok { - if mysqlErr.Number == 1062 { // MySQL error number for duplicate entry - // Set as 'pending' all workers tasks to REDO - err = database.SetTasksWorkerPending(db, worker.Name, verbose, debug, wg) - if err != nil { - return err - } - - // set worker up - err = database.SetWorkerUPto(true, db, &worker, verbose, debug, wg) - if err != nil { - return err - } - - // reset down count - err = database.SetWorkerDownCount(0, db, &worker, verbose, debug, wg) - if err != nil { - return err - } - } + err = utils.HandleAddWorkerError(err, db, &worker, verbose, debug) + if err != nil { return err - } - } return nil } -func callback(result globalstructs.Task, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool, wg *sync.WaitGroup) error { +func callback(result globalstructs.Task, config *utils.ManagerConfig, db *sql.DB, verbose, debug bool) error { if debug { log.Println("WebSockets result: ", result) @@ -274,35 +342,15 @@ func callback(result globalstructs.Task, config *utils.ManagerConfig, db *sql.DB } // Update task with the worker one - err := database.UpdateTask(db, result, verbose, debug, wg) + err := database.UpdateTask(db, result, verbose, debug) if err != nil { - if verbose { + if debug || verbose { log.Println("WebSockets HandleCallback { \"error\" : \"Error UpdateTask: " + err.Error() + "\"}") } return err } - // force set task to status receive - // Set the task as done - if result.Status == "failed" { - err = database.SetTaskStatus(db, result.ID, result.Status, verbose, debug, wg) - if err != nil { - if verbose { - log.Println("WebSockets HandleCallback { \"error\" : \"Error SetTaskStatus: " + err.Error() + "\"}") - } - return err - } 
- } else { - err = database.SetTaskStatus(db, result.ID, "done", verbose, debug, wg) - if err != nil { - if verbose { - log.Println("WebSockets HandleCallback { \"error\" : \"Error SetTaskStatus: " + err.Error() + "\"}") - } - return err - } - } - // if callbackURL is not empty send the request to the client if result.CallbackURL != "" { utils.CallbackUserTaskMessage(config, &result, verbose, debug) @@ -321,14 +369,5 @@ func callback(result globalstructs.Task, config *utils.ManagerConfig, db *sql.DB } } - // Handle the result as needed - - //Add 1 to Iddle thread in worker - // add 1 when finish - err = database.AddWorkerIddleThreads1(db, result.WorkerName, verbose, debug, wg) - if err != nil { - return err - } - return nil } diff --git a/status.log b/status.log new file mode 100644 index 0000000..32574b3 --- /dev/null +++ b/status.log @@ -0,0 +1,356 @@ +Mon Jul 14 10:21:16 AM CEST 2025 +{"task":{"pending":2816,"running":448,"done":1647,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:17 AM CEST 2025 +{"task":{"pending":2780,"running":457,"done":1674,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:19 AM CEST 2025 +{"task":{"pending":2737,"running":476,"done":1698,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:20 AM CEST 2025 +{"task":{"pending":2694,"running":500,"done":1717,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:21 AM CEST 2025 +{"task":{"pending":2653,"running":503,"done":1756,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:22 AM CEST 2025 +{"task":{"pending":2608,"running":496,"done":1807,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:23 AM CEST 2025 +{"task":{"pending":2570,"running":487,"done":1854,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:24 AM CEST 2025 +{"task":{"pending":2535,"running":467,"done":1910,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:25 AM CEST 
2025 +{"task":{"pending":2494,"running":453,"done":1969,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:26 AM CEST 2025 +{"task":{"pending":2474,"running":374,"done":2064,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:28 AM CEST 2025 +{"task":{"pending":2447,"running":302,"done":2164,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:29 AM CEST 2025 +{"task":{"pending":2405,"running":313,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:30 AM CEST 2025 +{"task":{"pending":2358,"running":360,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:31 AM CEST 2025 +{"task":{"pending":2310,"running":408,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:32 AM CEST 2025 +{"task":{"pending":2269,"running":449,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:33 AM CEST 2025 +{"task":{"pending":2226,"running":492,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:34 AM CEST 2025 +{"task":{"pending":2178,"running":540,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:35 AM CEST 2025 +{"task":{"pending":2128,"running":591,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:36 AM CEST 2025 +{"task":{"pending":2083,"running":635,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:37 AM CEST 2025 +{"task":{"pending":2036,"running":682,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:38 AM CEST 2025 +{"task":{"pending":1992,"running":726,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:40 AM CEST 2025 +{"task":{"pending":1956,"running":762,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:41 AM CEST 2025 
+{"task":{"pending":1911,"running":807,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:42 AM CEST 2025 +{"task":{"pending":1874,"running":844,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:43 AM CEST 2025 +{"task":{"pending":1844,"running":874,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:44 AM CEST 2025 +{"task":{"pending":1799,"running":919,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:45 AM CEST 2025 +{"task":{"pending":1749,"running":969,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:46 AM CEST 2025 +{"task":{"pending":1718,"running":1000,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:47 AM CEST 2025 +{"task":{"pending":1718,"running":1000,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:48 AM CEST 2025 +{"task":{"pending":1718,"running":1000,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:49 AM CEST 2025 +{"task":{"pending":1718,"running":1000,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:50 AM CEST 2025 +{"task":{"pending":1718,"running":1000,"done":2193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:52 AM CEST 2025 +{"task":{"pending":1716,"running":948,"done":2247,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:53 AM CEST 2025 +{"task":{"pending":1678,"running":926,"done":2308,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:54 AM CEST 2025 +{"task":{"pending":1637,"running":917,"done":2357,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:55 AM CEST 2025 +{"task":{"pending":1609,"running":876,"done":2426,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:56 AM CEST 2025 
+{"task":{"pending":1567,"running":898,"done":2447,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:57 AM CEST 2025 +{"task":{"pending":1535,"running":868,"done":2510,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:58 AM CEST 2025 +{"task":{"pending":1497,"running":804,"done":2613,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:21:59 AM CEST 2025 +{"task":{"pending":1460,"running":756,"done":2696,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:00 AM CEST 2025 +{"task":{"pending":1425,"running":685,"done":2802,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:01 AM CEST 2025 +{"task":{"pending":1394,"running":619,"done":2898,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:03 AM CEST 2025 +{"task":{"pending":1356,"running":586,"done":2970,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:04 AM CEST 2025 +{"task":{"pending":1319,"running":547,"done":3046,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:05 AM CEST 2025 +{"task":{"pending":1282,"running":510,"done":3119,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:06 AM CEST 2025 +{"task":{"pending":1239,"running":494,"done":3178,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:07 AM CEST 2025 +{"task":{"pending":1195,"running":524,"done":3193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:08 AM CEST 2025 +{"task":{"pending":1145,"running":573,"done":3193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:09 AM CEST 2025 +{"task":{"pending":1100,"running":618,"done":3193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:10 AM CEST 2025 +{"task":{"pending":1055,"running":663,"done":3193,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:11 AM CEST 2025 
+{"task":{"pending":1013,"running":694,"done":3204,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:12 AM CEST 2025 +{"task":{"pending":983,"running":661,"done":3267,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:14 AM CEST 2025 +{"task":{"pending":952,"running":629,"done":3330,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:15 AM CEST 2025 +{"task":{"pending":915,"running":632,"done":3365,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:16 AM CEST 2025 +{"task":{"pending":1140,"running":351,"done":3421,"failed":0,"deleted":0},"worker":{"up":5,"down":5}} +Mon Jul 14 10:22:17 AM CEST 2025 +{"task":{"pending":1411,"running":67,"done":3434,"failed":0,"deleted":0},"worker":{"up":1,"down":9}} +Mon Jul 14 10:22:18 AM CEST 2025 +{"task":{"pending":1373,"running":97,"done":3441,"failed":0,"deleted":0},"worker":{"up":1,"down":9}} +Mon Jul 14 10:22:19 AM CEST 2025 +{"task":{"pending":1373,"running":95,"done":3443,"failed":0,"deleted":0},"worker":{"up":1,"down":9}} +Mon Jul 14 10:22:20 AM CEST 2025 +{"task":{"pending":1373,"running":88,"done":3450,"failed":0,"deleted":0},"worker":{"up":1,"down":9}} +Mon Jul 14 10:22:21 AM CEST 2025 +{"task":{"pending":1402,"running":10,"done":3501,"failed":0,"deleted":0},"worker":{"up":1,"down":9}} +Mon Jul 14 10:22:22 AM CEST 2025 +{"task":{"pending":1293,"running":34,"done":3584,"failed":0,"deleted":0},"worker":{"up":8,"down":2}} +Mon Jul 14 10:22:23 AM CEST 2025 +{"task":{"pending":1209,"running":69,"done":3633,"failed":0,"deleted":0},"worker":{"up":8,"down":2}} +Mon Jul 14 10:22:25 AM CEST 2025 +{"task":{"pending":1123,"running":98,"done":3690,"failed":0,"deleted":0},"worker":{"up":8,"down":2}} +Mon Jul 14 10:22:26 AM CEST 2025 +{"task":{"pending":1048,"running":128,"done":3735,"failed":0,"deleted":0},"worker":{"up":9,"down":1}} +Mon Jul 14 10:22:27 AM CEST 2025 
+{"task":{"pending":972,"running":112,"done":3828,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:28 AM CEST 2025 +{"task":{"pending":919,"running":91,"done":3901,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:29 AM CEST 2025 +{"task":{"pending":881,"running":74,"done":3956,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:30 AM CEST 2025 +{"task":{"pending":841,"running":48,"done":4023,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:31 AM CEST 2025 +{"task":{"pending":836,"running":45,"done":4030,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:32 AM CEST 2025 +{"task":{"pending":835,"running":72,"done":4004,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:34 AM CEST 2025 +{"task":{"pending":818,"running":120,"done":3973,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:35 AM CEST 2025 +{"task":{"pending":811,"running":168,"done":3932,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:36 AM CEST 2025 +{"task":{"pending":768,"running":209,"done":3935,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:37 AM CEST 2025 +{"task":{"pending":733,"running":232,"done":3946,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:38 AM CEST 2025 +{"task":{"pending":703,"running":262,"done":3946,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:39 AM CEST 2025 +{"task":{"pending":663,"running":302,"done":3946,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:40 AM CEST 2025 +{"task":{"pending":625,"running":340,"done":3946,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:42 AM CEST 2025 +{"task":{"pending":594,"running":360,"done":3959,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:43 AM CEST 2025 
+{"task":{"pending":578,"running":316,"done":4018,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:44 AM CEST 2025 +{"task":{"pending":553,"running":253,"done":4105,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:45 AM CEST 2025 +{"task":{"pending":519,"running":209,"done":4184,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:46 AM CEST 2025 +{"task":{"pending":486,"running":182,"done":4243,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:47 AM CEST 2025 +{"task":{"pending":447,"running":192,"done":4273,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:48 AM CEST 2025 +{"task":{"pending":406,"running":177,"done":4328,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:49 AM CEST 2025 +{"task":{"pending":365,"running":205,"done":4341,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:50 AM CEST 2025 +{"task":{"pending":327,"running":197,"done":4389,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:52 AM CEST 2025 +{"task":{"pending":291,"running":177,"done":4446,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:53 AM CEST 2025 +{"task":{"pending":252,"running":144,"done":4515,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:54 AM CEST 2025 +{"task":{"pending":214,"running":138,"done":4560,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:55 AM CEST 2025 +{"task":{"pending":188,"running":104,"done":4619,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:56 AM CEST 2025 +{"task":{"pending":152,"running":91,"done":4668,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:57 AM CEST 2025 +{"task":{"pending":119,"running":72,"done":4720,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:58 AM CEST 2025 
+{"task":{"pending":83,"running":66,"done":4763,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:22:59 AM CEST 2025 +{"task":{"pending":40,"running":53,"done":4818,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:00 AM CEST 2025 +{"task":{"pending":8,"running":33,"done":4870,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:02 AM CEST 2025 +{"task":{"pending":0,"running":7,"done":4904,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:03 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":4911,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:04 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":4911,"failed":0,"deleted":0},"worker":{"up":9,"down":1}} +Mon Jul 14 10:23:05 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":4911,"failed":0,"deleted":0},"worker":{"up":4,"down":4}} +Mon Jul 14 10:23:06 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":4911,"failed":0,"deleted":0},"worker":{"up":0,"down":8}} +Mon Jul 14 10:23:07 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":4911,"failed":0,"deleted":0},"worker":{"up":0,"down":8}} +Mon Jul 14 10:23:09 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":0,"down":8}} +Mon Jul 14 10:23:10 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":3,"down":8}} +Mon Jul 14 10:23:11 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:12 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:13 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:15 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:16 AM CEST 2025 
+{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:17 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:18 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:19 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:20 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:21 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:22 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:23 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:24 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:25 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:26 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:27 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:28 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:29 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:30 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:31 AM CEST 2025 
+{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:32 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:33 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:34 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:35 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:36 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:37 AM CEST 2025 +{"task":{"pending":0,"running":0,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:39 AM CEST 2025 +{"task":{"pending":71,"running":14,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:40 AM CEST 2025 +{"task":{"pending":144,"running":28,"done":1000,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:41 AM CEST 2025 +{"task":{"pending":248,"running":45,"done":1002,"failed":0,"deleted":0},"worker":{"up":10,"down":8}} +Mon Jul 14 10:23:42 AM CEST 2025 +{"task":{"pending":357,"running":65,"done":1002,"failed":0,"deleted":0},"worker":{"up":10,"down":6}} +Mon Jul 14 10:23:44 AM CEST 2025 +{"task":{"pending":471,"running":79,"done":1003,"failed":0,"deleted":0},"worker":{"up":10,"down":2}} +Mon Jul 14 10:23:45 AM CEST 2025 +{"task":{"pending":587,"running":97,"done":1003,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:46 AM CEST 2025 +{"task":{"pending":698,"running":119,"done":1003,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:47 AM CEST 2025 +{"task":{"pending":790,"running":138,"done":1005,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 
10:23:48 AM CEST 2025 +{"task":{"pending":886,"running":159,"done":1007,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:49 AM CEST 2025 +{"task":{"pending":989,"running":180,"done":1009,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:51 AM CEST 2025 +{"task":{"pending":1111,"running":196,"done":1016,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:52 AM CEST 2025 +{"task":{"pending":1212,"running":210,"done":1022,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:53 AM CEST 2025 +{"task":{"pending":1314,"running":229,"done":1025,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:54 AM CEST 2025 +{"task":{"pending":1351,"running":256,"done":1027,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:56 AM CEST 2025 +{"task":{"pending":1297,"running":309,"done":1028,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:57 AM CEST 2025 +{"task":{"pending":1251,"running":354,"done":1030,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:58 AM CEST 2025 +{"task":{"pending":1196,"running":408,"done":1032,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:23:59 AM CEST 2025 +{"task":{"pending":1137,"running":463,"done":1034,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:00 AM CEST 2025 +{"task":{"pending":1085,"running":512,"done":1037,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:01 AM CEST 2025 +{"task":{"pending":1031,"running":563,"done":1040,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:02 AM CEST 2025 +{"task":{"pending":970,"running":621,"done":1043,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:03 AM CEST 2025 +{"task":{"pending":907,"running":683,"done":1044,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:04 AM CEST 2025 
+{"task":{"pending":843,"running":741,"done":1050,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:05 AM CEST 2025 +{"task":{"pending":778,"running":804,"done":1052,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:06 AM CEST 2025 +{"task":{"pending":719,"running":862,"done":1053,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:07 AM CEST 2025 +{"task":{"pending":664,"running":901,"done":1069,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:09 AM CEST 2025 +{"task":{"pending":606,"running":937,"done":1091,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:10 AM CEST 2025 +{"task":{"pending":551,"running":967,"done":1116,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:11 AM CEST 2025 +{"task":{"pending":517,"running":967,"done":1150,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:12 AM CEST 2025 +{"task":{"pending":491,"running":973,"done":1170,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:13 AM CEST 2025 +{"task":{"pending":465,"running":966,"done":1203,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:14 AM CEST 2025 +{"task":{"pending":437,"running":973,"done":1224,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:15 AM CEST 2025 +{"task":{"pending":410,"running":965,"done":1259,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:16 AM CEST 2025 +{"task":{"pending":381,"running":969,"done":1284,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:17 AM CEST 2025 +{"task":{"pending":355,"running":958,"done":1321,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:18 AM CEST 2025 +{"task":{"pending":309,"running":989,"done":1336,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:19 AM CEST 2025 
+{"task":{"pending":281,"running":982,"done":1371,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:20 AM CEST 2025 +{"task":{"pending":258,"running":986,"done":1390,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:21 AM CEST 2025 +{"task":{"pending":238,"running":982,"done":1414,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:22 AM CEST 2025 +{"task":{"pending":207,"running":976,"done":1451,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:23 AM CEST 2025 +{"task":{"pending":172,"running":973,"done":1489,"failed":0,"deleted":0},"worker":{"up":10,"down":0}} +Mon Jul 14 10:24:24 AM CEST 2025 +{"task":{"pending":252,"running":874,"done":1508,"failed":0,"deleted":0},"worker":{"up":7,"down":3}} +Mon Jul 14 10:24:25 AM CEST 2025 +{"task":{"pending":421,"running":680,"done":1533,"failed":0,"deleted":0},"worker":{"up":7,"down":3}} +Mon Jul 14 10:24:27 AM CEST 2025 +{"task":{"pending":1094,"running":0,"done":1540,"failed":0,"deleted":0},"worker":{"up":0,"down":10}} +Mon Jul 14 10:24:28 AM CEST 2025 +{"task":{"pending":1091,"running":0,"done":1543,"failed":0,"deleted":0},"worker":{"up":0,"down":10}} +Mon Jul 14 10:24:29 AM CEST 2025 +{"task":{"pending":1088,"running":0,"done":1546,"failed":0,"deleted":0},"worker":{"up":0,"down":10}} +Mon Jul 14 10:24:30 AM CEST 2025 +{"task":{"pending":1082,"running":0,"done":1552,"failed":0,"deleted":0},"worker":{"up":2,"down":8}} +Mon Jul 14 10:24:31 AM CEST 2025 +{"task":{"pending":1037,"running":41,"done":1557,"failed":0,"deleted":0},"worker":{"up":2,"down":8}} +Mon Jul 14 10:24:32 AM CEST 2025 +{"task":{"pending":1012,"running":49,"done":1573,"failed":0,"deleted":0},"worker":{"up":2,"down":8}} diff --git a/test.sh b/test.sh new file mode 100644 index 0000000..3df6b28 --- /dev/null +++ b/test.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# infinite_curl.sh – endlessly enqueue “sleep X” tasks with X ∈ [1,20] + +AUTH="WLJ2xVQZ5TXVw4qEznZDnmEEV" 
+URL="http://127.0.0.1:8080/task" + +MAX_ITER=10000 + +for ((i=1; i<=MAX_ITER; i++)); do + # Pick a random integer 1-20 + X=$(( RANDOM % 60 + 1 )) + + # Compose the JSON payload with that X + read -r -d '' payload < /dev/null & + + #echo # newline after each response + #sleep 0 # brief pause so you don’t hammer the endpoint too hard +done + +wait diff --git a/worker.conf b/worker.conf index 33ef8a0..2ae1bd8 100644 --- a/worker.conf +++ b/worker.conf @@ -1,11 +1,12 @@ { "name": "", - "iddleThreads": 2, + "defaultThreads": 100, "managerIP" : "nTask_manager", - "managerPort" : "8080", + "managerPort" : 8443, "managerOauthToken": "IeH0vpYFz2Yol6RdLvYZz62TFMv5FF", "CA": "./certs/ca-cert.pem", - "insecureModules": false, + "deleteFiles": true, + "insecureModules": true, "modules": { "sleep": "/usr/bin/sleep", "curl": "/usr/bin/curl", diff --git a/worker/Dockerfile b/worker/Dockerfile index 663ed73..40d3536 100644 --- a/worker/Dockerfile +++ b/worker/Dockerfile @@ -1,46 +1,44 @@ +# STEP 1: Compile a static Go binary +FROM golang:1.23-alpine AS builder + +# Install git for fetching dependencies, and ca-certificates for HTTPS +RUN apk add --no-cache git ca-certificates + +WORKDIR /go/src/github.com/r4ulcl/nTask + +# Cache modules +COPY go.mod go.sum ./ +RUN go mod download + +# Copy only necessary files (dockerignore excludes extras) +COPY . . 
+ +# Build static binary +RUN CGO_ENABLED=0 \ + GOOS=linux GOARCH=amd64 \ + go build -trimpath -ldflags="-s -w" -o /nTask ./main.go -# STEP 1 build executable binary -FROM golang:alpine as builder - -# copy files for compile -COPY ./certs $GOPATH/src/github.com/r4ulcl/nTask/certs -COPY ./docs $GOPATH/src/github.com/r4ulcl/nTask/docs -COPY ./globalstructs $GOPATH/src/github.com/r4ulcl/nTask/globalstructs -COPY ./go.mod $GOPATH/src/github.com/r4ulcl/nTask/go.mod -COPY ./go.sum $GOPATH/src/github.com/r4ulcl/nTask/go.sum -COPY ./main.go $GOPATH/src/github.com/r4ulcl/nTask/main.go -COPY ./manager $GOPATH/src/github.com/r4ulcl/nTask/manager -COPY ./worker $GOPATH/src/github.com/r4ulcl/nTask/worker - -WORKDIR $GOPATH/src/github.com/r4ulcl/nTask -#get dependancies -#RUN apk -U add alpine-sdk -#RUN go get -d -v -#build the binary -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -installsuffix cgo -ldflags '-w -s' -o /go/bin/nTask - -#create config folder -RUN mkdir /config # STEP 2 build a small image # start from kali for worker -FROM kalilinux/kali-rolling +FROM kalilinux/kali-rolling:latest #GOPATH doesn-t exists in scratch ENV GOPATH='/go' -RUN apt-get update && apt-get install procps net-tools curl nmap -y -RUN apt-get install -y kali-tools-fuzzing +RUN apt-get update && apt-get install -y procps net-tools curl nmap \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* -# Copy our static executable -COPY --from=builder /$GOPATH/bin/nTask /$GOPATH/bin/nTask -# Copy modules -COPY --from=builder $GOPATH/src/github.com/r4ulcl/nTask/worker/modules/ /config/modules/ +# Add root certificates +COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -# Copy swagger -COPY --from=builder $GOPATH/src/github.com/r4ulcl/nTask/docs/ /config/docs/ +# Copy binary and assets +COPY --from=builder /nTask /nTask +COPY --from=builder /go/src/github.com/r4ulcl/nTask/worker/modules /config/modules +COPY --from=builder /go/src/github.com/r4ulcl/nTask/docs /config/docs -# 
Set config folder -WORKDIR /config +# Create config directory and set workdir +WORKDIR /config -ENTRYPOINT ["/go/bin/nTask"] \ No newline at end of file +ENTRYPOINT ["/nTask"] \ No newline at end of file diff --git a/worker/managerrequest/managerRequest.go b/worker/managerrequest/managerRequest.go index 20692d2..774fd78 100644 --- a/worker/managerrequest/managerRequest.go +++ b/worker/managerrequest/managerRequest.go @@ -4,6 +4,7 @@ import ( "encoding/json" "log" "net/http" + "strconv" "sync" "github.com/gorilla/websocket" @@ -11,6 +12,7 @@ import ( "github.com/r4ulcl/nTask/worker/utils" ) +// CreateWebsocket func to create the websocketrs func CreateWebsocket(config *utils.WorkerConfig, caCertPath string, verifyAltName, verbose, debug bool) (*websocket.Conn, error) { @@ -18,14 +20,15 @@ func CreateWebsocket(config *utils.WorkerConfig, caCertPath string, headers.Set("Authorization", config.ManagerOauthToken) var serverAddr string + portStr := strconv.Itoa(config.ManagerPort) if transport, ok := config.ClientHTTP.Transport.(*http.Transport); ok { if transport.TLSClientConfig != nil { - serverAddr = "wss://" + config.ManagerIP + ":" + config.ManagerPort + "/worker/websocket" + serverAddr = "wss://" + config.ManagerIP + ":" + portStr + "/worker/websocket" } else { - serverAddr = "ws://" + config.ManagerIP + ":" + config.ManagerPort + "/worker/websocket" + serverAddr = "ws://" + config.ManagerIP + ":" + portStr + "/worker/websocket" } } else { - serverAddr = "wss://" + config.ManagerIP + ":" + config.ManagerPort + "/worker/websocket" + serverAddr = "wss://" + config.ManagerIP + ":" + portStr + "/worker/websocket" } if debug { @@ -55,107 +58,80 @@ func CreateWebsocket(config *utils.WorkerConfig, caCertPath string, return conn, nil } +// SendMessage funct to send message to a websocket from a worker func SendMessage(conn *websocket.Conn, message []byte, verbose, debug bool, writeLock *sync.Mutex) error { writeLock.Lock() defer writeLock.Unlock() if debug { - 
log.Println("sendMessage", string(message)) + log.Println("SendMessage:", string(message)) } err := conn.WriteMessage(websocket.TextMessage, message) - if err != nil { - return err + if err != nil && (verbose || debug) { + log.Println("SendMessage error:", err) } - return nil + return err } -// AddWorker sends a POST request to add a worker to the manager -func AddWorker(config *utils.WorkerConfig, verbose, debug bool, writeLock *sync.Mutex) error { - // Create a Worker object with the provided configuration - worker := globalstructs.Worker{ - Name: config.Name, - IddleThreads: config.IddleThreads, - UP: true, - DownCount: 0, +// sendWebSocketMessage is a helper function to send a WebSocket message to the manager +func sendWebSocketMessage(config *utils.WorkerConfig, messageType string, payload interface{}, verbose, debug bool, writeLock *sync.Mutex) error { + // Marshal the payload into JSON + payloadData, err := json.Marshal(payload) + if err != nil { + if verbose || debug { + log.Println("Error encoding JSON payload:", err) + } + return err } - // Marshal the worker object into JSON - payload, _ := json.Marshal(worker) - + // Create the WebSocket message msg := globalstructs.WebsocketMessage{ - Type: "addWorker", - JSON: string(payload), + Type: messageType, + JSON: string(payloadData), } - jsonData, err := json.Marshal(msg) - if err != nil { - log.Println("Error encoding JSON:", err) - return err + if debug { + log.Printf("ManagerRequest message [%s]: %+v", messageType, msg) } - err = SendMessage(config.Conn, jsonData, verbose, debug, writeLock) + // Marshal WebsocketMessage + jsonData, err := json.Marshal(msg) if err != nil { + if verbose || debug { + log.Println("Error encoding WebSocket message:", err) + } return err } - return nil + // Send the message + return SendMessage(config.Conn, jsonData, verbose, debug, writeLock) } // AddWorker sends a POST request to add a worker to the manager +func AddWorker(config *utils.WorkerConfig, verbose, debug bool, 
writeLock *sync.Mutex) error { + worker := globalstructs.Worker{ + Name: config.Name, + DefaultThreads: config.DefaultThreads, + IddleThreads: config.DefaultThreads, + UP: true, + DownCount: 0, + } + + return sendWebSocketMessage(config, "addWorker", worker, verbose, debug, writeLock) +} + +// DeleteWorker sends a POST request to delete a worker from the manager func DeleteWorker(config *utils.WorkerConfig, verbose, debug bool, writeLock *sync.Mutex) error { - // Create a Worker object with the provided configuration worker := globalstructs.Worker{ Name: config.Name, - IddleThreads: config.IddleThreads, + IddleThreads: -1, UP: true, DownCount: 0, } - // Marshal the worker object into JSON - payload, _ := json.Marshal(worker) - - msg := globalstructs.WebsocketMessage{ - Type: "deleteWorker", - JSON: string(payload), - } - - jsonData, err := json.Marshal(msg) - if err != nil { - log.Println("Error encoding JSON:", err) - return err - } - - err = SendMessage(config.Conn, jsonData, verbose, debug, writeLock) - if err != nil { - return err - } - - return nil + return sendWebSocketMessage(config, "deleteWorker", worker, verbose, debug, writeLock) } -// CallbackTaskMessage sends a POST request to the manager to callback with a task message +// CallbackTaskMessage sends a POST request to the manager with a task message func CallbackTaskMessage(config *utils.WorkerConfig, task *globalstructs.Task, verbose, debug bool, writeLock *sync.Mutex) error { - // Marshal the task object into JSON - payload, _ := json.Marshal(task) - - msg := globalstructs.WebsocketMessage{ - Type: "callbackTask", - JSON: string(payload), - } - - if debug { - log.Println("ManagerRequest msg callback:", msg) - } - - jsonData, err := json.Marshal(msg) - if err != nil { - log.Println("Error encoding JSON:", err) - return err - } - - err = SendMessage(config.Conn, jsonData, verbose, debug, writeLock) - if err != nil { - return err - } - - return nil + return sendWebSocketMessage(config, "callbackTask", 
task, verbose, debug, writeLock) } diff --git a/worker/modules/module1.py b/worker/modules/module1.py index 234285f..ba7d04d 100644 --- a/worker/modules/module1.py +++ b/worker/modules/module1.py @@ -1,10 +1,9 @@ -import csv import time -import random +import secrets import sys def generate_random_data(): - return [random.randint(1, 100), random.uniform(0.0, 1.0), random.choice(['A', 'B', 'C'])] + return [secrets.randbelow(100) + 1, secrets.uniform(0.0, 1.0), secrets.choice(['A', 'B', 'C'])] def main(): @@ -20,7 +19,7 @@ def main(): print(data) # Sleep for a random time between 1 and 30 seconds - sleep_time = random.uniform(5, 15) + sleep_time = 5 + (secrets.randbelow(100) / 10) print(f"Sleeping for {sleep_time:.2f} seconds...") time.sleep(sleep_time) diff --git a/worker/modules/modules.go b/worker/modules/modules.go index d12471d..2f62345 100644 --- a/worker/modules/modules.go +++ b/worker/modules/modules.go @@ -2,10 +2,13 @@ package modules import ( "bytes" + "context" + "encoding/base64" "fmt" "log" "os" "os/exec" + "path/filepath" "runtime" "strings" "sync" @@ -18,126 +21,154 @@ import ( var mutex sync.Mutex +// setWorkingID ensures the map is initialized and sets the PID +func setWorkingID(status *globalstructs.WorkerStatus, id string, pid int) { + mutex.Lock() + defer mutex.Unlock() + if status.WorkingIDs == nil { + status.WorkingIDs = make(map[string]int) + } + status.WorkingIDs[id] = pid +} + +// deleteWorkingID removes the entry for id +func deleteWorkingID(status *globalstructs.WorkerStatus, id string) { + mutex.Lock() + defer mutex.Unlock() + delete(status.WorkingIDs, id) +} + func runModule(config *utils.WorkerConfig, command string, arguments string, status *globalstructs.WorkerStatus, id string, verbose, debug bool) (string, error) { - // if command is empty, like in the example "exec" to exec any binary - // the first argument is the command + // mark as starting (-1) + setWorkingID(status, id, -1) + defer cleanupWorkerStatus(status, id) + + cmd, err 
:= prepareCommand(config, command, arguments, debug) + if err != nil { + return "", err + } + + output, err := executeCommand(cmd, status, id, verbose, debug) + return strings.TrimRight(output, "\n"), err +} + +func cleanupWorkerStatus(status *globalstructs.WorkerStatus, id string) { + deleteWorkingID(status, id) +} + +func prepareCommand(config *utils.WorkerConfig, command, arguments string, debug bool) (*exec.Cmd, error) { var cmd *exec.Cmd + if config.InsecureModules { - cmdStr := command + " " + arguments - if debug { - log.Println("Modules cmdStr: ", cmdStr) + cmd = createInsecureCommand(command, arguments, debug) + } else { + var err error + cmd, err = createSecureCommand(command, arguments, debug) + if err != nil { + return nil, err } + } - if runtime.GOOS == "windows" { - cmd = exec.Command("cmd", "/c", cmdStr) - } else if runtime.GOOS == "linux" { - cmd = exec.Command("sh", "-c", cmdStr) - } else { - log.Fatal("Unsupported operating system") - } + return cmd, nil +} - } else { - // Convert arguments to array - argumentsArray := strings.Split(arguments, " ") - if command == "" && len(arguments) > 0 { - command = argumentsArray[0] - argumentsArray = argumentsArray[1:] - } +func createInsecureCommand(command, arguments string, debug bool) *exec.Cmd { + cmdStr := command + " " + arguments + if debug { + log.Println("Modules cmdStr: ", cmdStr) + } - // Check if module has space, to separate it in command and args - if strings.Contains(command, " ") { - parts := strings.SplitN(command, " ", 2) - argumentsArray = append([]string{parts[1]}, argumentsArray...) 
+ if runtime.GOOS == "windows" { + return exec.Command("cmd", "/c", cmdStr) + } else if runtime.GOOS == "linux" { + // use --login to load bashrc + return exec.Command("bash", "--login", "-c", cmdStr) + } - // Update the inputString to contain only the first part - command = parts[0] - } + log.Fatal("Unsupported operating system") + return nil +} - if debug { - log.Println("Modules command: ", command) - log.Println("Modules argumentsArray: ", argumentsArray) - } +func createSecureCommand(command, arguments string, debug bool) (*exec.Cmd, error) { + argumentsArray := strings.Split(arguments, " ") + if command == "" && len(arguments) > 0 { + command = argumentsArray[0] + argumentsArray = argumentsArray[1:] + } - // Command to run the module - cmd = exec.Command(command, argumentsArray...) + if strings.Contains(command, " ") { + parts := strings.SplitN(command, " ", 2) + argumentsArray = append([]string{parts[1]}, argumentsArray...) + command = parts[0] + } + + if debug { + log.Println("Modules command: ", command) + log.Println("Modules argumentsArray: ", argumentsArray) } - // Create a buffer to store the command output - var stdout, stderr bytes.Buffer - // Set the output and error streams to the buffers + return exec.Command(command, argumentsArray...), nil +} + +func executeCommand(cmd *exec.Cmd, status *globalstructs.WorkerStatus, id string, verbose, debug bool) (string, error) { + var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr - // Start the command - err := cmd.Start() - if err != nil { - // Check if the error is an ExitError - if exitError, ok := err.(*exec.ExitError); ok { - // The command exited with a non-zero status - fmt.Printf("Command exited with error: %v\n", exitError) - - // Print the captured standard error - log.Println("Standard Error:") - fmt.Print(stderr.String()) - } else { - // Some other error occurred - fmt.Printf("Command finished with unexpected error: %v\n", err) - } - return err.Error(), err + if err := 
cmd.Start(); err != nil { + logCommandError(err, &stderr, verbose, debug) + return "", err } - mutex.Lock() - status.WorkingIDs[id] = cmd.Process.Pid - mutex.Unlock() - - defer func() { - mutex.Lock() - delete(status.WorkingIDs, id) - mutex.Unlock() - }() + // update with actual PID + setWorkingID(status, id, cmd.Process.Pid) - // Create a channel to signal when the process is done done := make(chan error, 1) - - // Monitor the process in a goroutine go func() { - // Wait for the command to finish - err := cmd.Wait() - done <- err + done <- cmd.Wait() }() - // Check every 30 minutes if the process is still running - ticker := time.NewTicker(30 * time.Minute) + return monitorCommandExecution(cmd, &stdout, &stderr, done, verbose, debug) +} + +func logCommandError(err error, stderr *bytes.Buffer, verbose, debug bool) { + if exitError, ok := err.(*exec.ExitError); ok { + if verbose || debug { + log.Printf("Command exited with error: %v", exitError) + log.Println("Standard Error:") + log.Print(stderr.String()) + } + } else { + if verbose || debug { + log.Printf("Command finished with unexpected error: %v", err) + } + } +} + +func monitorCommandExecution(cmd *exec.Cmd, stdout, stderr *bytes.Buffer, done chan error, verbose, debug bool) (string, error) { + ticker := time.NewTicker(1 * time.Minute) defer ticker.Stop() for { select { case <-ticker.C: - // Check if the process is still running - if err := isProcessRunning(cmd.Process.Pid); err != nil { - // Process is not running, break the loop - return "", err + if err := isProcessRunning(cmd.Process.Pid, verbose, debug); err != nil { + return stdout.String() + stderr.String(), err } case err := <-done: - // Process has finished - if err != nil { - if debug { - log.Println("Modules Error waiting for command:", err) - } - return err.Error(), err + if err != nil && debug { + log.Println("Modules Error waiting for command:", err) } - - // Process completed successfully - // Capture the output of the script - output := 
stdout.String() + stderr.String() - output = strings.TrimRight(output, "\n") - return output, nil + return stdout.String() + stderr.String(), err } } } // Function to check if a process with a given PID is still running -func isProcessRunning(pid int) error { +func isProcessRunning(pid int, verbose, debug bool) error { + if debug || verbose { + log.Println("isProcessRunning", pid) + } process, err := os.FindProcess(pid) if err != nil { return err @@ -154,55 +185,163 @@ func isProcessRunning(pid int) error { return nil } +// ProcessFiles decodes the base64 content of each file in task.Files and saves it to its RemoteFilePath. +// It updates the WorkerStatus and handles verbose and debug logging as needed. +func ProcessFiles(task *globalstructs.Task, config *utils.WorkerConfig, status *globalstructs.WorkerStatus, id string, verbose, debug bool) error { + for num, file := range task.Files { + // FileContentB64 carries the file content as a base64-encoded string; + // RemoteFilePath is where the decoded file is written on the worker. + contentB64 := file.FileContentB64 + path := file.RemoteFilePath + + // Decode the base64 content + decodedBytes, err := base64.StdEncoding.DecodeString(contentB64) + if err != nil { + return fmt.Errorf("file %d: failed to decode base64 content: %w", num+1, err) + } + + // Ensure the directory exists + dir := getDirectory(path) + const dirPerm = 0600 // NOTE(review): 0600 on a directory drops the execute bit needed to traverse it — confirm 0700 was intended + if err := os.MkdirAll(dir, dirPerm); err != nil { + return fmt.Errorf("file %d: failed to create directories for %s: %w", num+1, path, err) + } + // Write the decoded content to the specified path + const filePerm = 0600 + if err := os.WriteFile(path, decodedBytes, filePerm); err != nil { + return fmt.Errorf("file %d: failed to write file %s: %w", num+1, path, err) + } + + // Update the worker status if applicable + // (Assuming WorkerStatus has a method or field to update progress) + // status.UpdateProgress(num + 1, len(task.Files)) + + // Verbose
logging + if verbose { + fmt.Printf("Saved file %d to %s\n", num+1, path) + } + + // Debug logging + if debug { + fmt.Printf("Debug: File %d details - Path: %s, Content Length: %d bytes\n", num+1, path, len(decodedBytes)) + } + } + + return nil +} + +// DeleteFiles removes from disk the files that were sent along with the task +func DeleteFiles(task *globalstructs.Task, verbose, debug bool) error { + for num, file := range task.Files { + path := file.RemoteFilePath + + // Attempt to delete the file + err := os.Remove(path) + if err != nil { + fmt.Printf("Error deleting file: %v\n", err) + } + + // Verbose logging + if verbose { + fmt.Printf("Deleted file %d to %s\n", num+1, path) + } + + // Debug logging + if debug { + fmt.Printf("Debug: File %d details - Path: %s, Content\n", num+1, path) + } + } + + return nil +} + +// getDirectory extracts the directory part from a file path using filepath.Dir +func getDirectory(filePath string) string { + return filepath.Dir(filePath) +} + // ProcessModule processes a task by iterating through its commands and executing corresponding modules func ProcessModule(task *globalstructs.Task, config *utils.WorkerConfig, status *globalstructs.WorkerStatus, id string, verbose, debug bool) error { - for num, command := range task.Commands { - module := command.Module - arguments := command.Args + // Define a context with timeout for the entire task + var ctx context.Context + var cancel context.CancelFunc + if task.Timeout > 0 { + ctx, cancel = context.WithTimeout(context.Background(), time.Duration(task.Timeout)*time.Second) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + defer cancel() - // Check if the module exists in the worker configuration - commandAux, found := config.Modules[module] - if !found { - // Return an error if the module is not found - return fmt.Errorf("unknown command: %s", module) - } + // Channel to signal a timeout or completion + done := make(chan error, 1) - // If there is a file in the command, save to disk - if
command.FileContent != "" { - if command.RemoteFilePath == "" { - return fmt.Errorf("RemoteFilePath empty") + // Run the task processing in a separate goroutine + go func() { + // Start timer to measure the command execution time + startTime := time.Now() + for num, command := range task.Commands { + module := command.Module + arguments := command.Args + + // Check if the module exists in the worker configuration + commandAux, found := config.Modules[module] + if !found { + // Send an error if the module is not found + done <- fmt.Errorf("unknown command: %s", module) + return } - err := SaveStringToFile(command.RemoteFilePath, command.FileContent) + if verbose { + log.Println("Modules commandAux: ", commandAux) + log.Println("Modules arguments: ", arguments) + } + + // Execute the module and get the output and any error + outputCommand, err := runModule(config, commandAux, arguments, status, id, verbose, debug) if err != nil { - return err + // Save the text error in the task output to review + task.Commands[num].Output = outputCommand + ";" + err.Error() + // Send an error if there is an issue running the module + done <- fmt.Errorf("error running %s task: %v", commandAux, err) + return } + // Store the output in the task struct for the current command + task.Commands[num].Output = outputCommand } + // Calculate and save the duration in seconds + duration := time.Since(startTime).Seconds() + task.Duration = duration - if verbose { - log.Println("Modules commandAux: ", commandAux) - log.Println("Modules arguments: ", arguments) - } + // Signal successful completion + done <- nil + }() - // Execute the module and get the output and any error - outputCommand, err := runModule(config, commandAux, arguments, status, id, verbose, debug) + // Wait for the task to complete or timeout + select { + case err := <-done: if err != nil { - // Save the text error in the task output to review - task.Commands[num].Output = outputCommand + err.Error() - // Return an error if there 
is an issue running the module - return fmt.Errorf("error running %s task: %v", commandAux, err) + return err } - - // Store the output in the task struct for the current command - task.Commands[num].Output = outputCommand + // Return nil if the task is processed successfully + return nil + case <-ctx.Done(): + // Set a timeout error for all commands if the context times out + for i := range task.Commands { + task.Commands[i].Output = "Timeout error: task exceeded the time limit" + } + return fmt.Errorf("timeout processing task: exceeded %d seconds", task.Timeout) } - - // Return nil if the task is processed successfully - return nil } func stringList(list []string, verbose, debug bool) string { + if verbose || debug { + log.Println("Executing stringList") + if debug { + log.Println(" with params:", list) + + } + } stringList := "" for _, item := range list { stringList += item + "\n" @@ -211,8 +350,8 @@ func stringList(list []string, verbose, debug bool) string { return stringList } -// SaveStringToFile saves a string to a file. -func SaveStringToFile(filename string, content string) error { +// saveStringToFile saves a string to a file. 
+func saveStringToFile(filename string, content string) error { // Write the string content to the file err := os.WriteFile(filename, []byte(content), 0600) if err != nil { diff --git a/worker/modules/nmapIPs.sh b/worker/modules/nmapIPs.sh index fd48aaf..867068d 100644 --- a/worker/modules/nmapIPs.sh +++ b/worker/modules/nmapIPs.sh @@ -2,4 +2,4 @@ scanRange=$1 -nmap -sn $scanRange | grep -E -o "([0-9]{1,3}\.){3}[0-9]{1,3}" \ No newline at end of file +nmap -sn "$scanRange" | grep -E -o "([0-9]{1,3}\.){3}[0-9]{1,3}" \ No newline at end of file diff --git a/worker/process/processTask.go b/worker/process/processTask.go index 8652b34..94c7f7c 100644 --- a/worker/process/processTask.go +++ b/worker/process/processTask.go @@ -11,9 +11,7 @@ import ( "github.com/r4ulcl/nTask/worker/utils" ) -var mutex sync.Mutex - -// processTask is a helper function that processes the given task in the background. +// Task is a helper function that processes the given task in the background. // It sets the worker status to indicate that it is currently working on the task. // It calls the ProcessModule function to execute the task's module. // If an error occurs, it sets the task status to "failed". @@ -21,22 +19,29 @@ var mutex sync.Mutex // Finally, it calls the CallbackTaskMessage function to send the task result to the configured callback endpoint. // After completing the task, it resets the worker status to indicate that it is no longer working. 
func Task(status *globalstructs.WorkerStatus, config *utils.WorkerConfig, task *globalstructs.Task, verbose, debug bool, writeLock *sync.Mutex) { - //Remove one from working threads - sustract1IddleThreads(status) - - //Add one from working threads - defer add1IddleThreads(status) - if verbose { - log.Println("Process Start processing task", task.ID, " workCount: ", status.IddleThreads) + log.Println("Process Start processing task", task.ID, " defaultThreads: ", config.DefaultThreads, " lenWorkCount: ", len(status.WorkingIDs)) } - err := modules.ProcessModule(task, config, status, task.ID, verbose, debug) + err := modules.ProcessFiles(task, config, status, task.ID, verbose, debug) if err != nil { - log.Println("Process Error ProcessModule:", err) + log.Println("Process Error ProcessFiles:", err) task.Status = "failed" } else { - task.Status = "done" + err := modules.ProcessModule(task, config, status, task.ID, verbose, debug) + if err != nil { + log.Println("Process Error ProcessModule:", err) + task.Status = "failed" + } else { + task.Status = "done" + } + } + + if config.DeleteFiles { + err = modules.DeleteFiles(task, verbose, debug) + if err != nil { + log.Println("Process Error DeleteFiles:", err) + } } // While manager doesnt responds loop @@ -49,22 +54,3 @@ func Task(status *globalstructs.WorkerStatus, config *utils.WorkerConfig, task * } } - -func add1IddleThreads(status *globalstructs.WorkerStatus) { - modifyIddleThreads(true, status) -} - -func sustract1IddleThreads(status *globalstructs.WorkerStatus) { - modifyIddleThreads(false, status) -} - -func modifyIddleThreads(add bool, status *globalstructs.WorkerStatus) { - mutex.Lock() - defer mutex.Unlock() - - if add { - status.IddleThreads++ - } else { - status.IddleThreads-- - } -} diff --git a/worker/utils/structs.go b/worker/utils/structs.go index 589b8a2..57dfc38 100644 --- a/worker/utils/structs.go +++ b/worker/utils/structs.go @@ -7,12 +7,14 @@ import ( "github.com/gorilla/websocket" ) +// WorkerConfig 
Worker Config file struct type WorkerConfig struct { Name string `json:"name"` - IddleThreads int `json:"iddleThreads"` + DefaultThreads int `json:"defaultThreads"` ManagerIP string `json:"managerIP"` - ManagerPort string `json:"managerPort"` + ManagerPort int `json:"managerPort"` ManagerOauthToken string `json:"managerOauthToken"` + DeleteFiles bool `json:"deleteFiles"` CA string `json:"ca"` InsecureModules bool `json:"insecureModules"` Modules map[string]string `json:"modules"` @@ -20,6 +22,7 @@ type WorkerConfig struct { Conn *websocket.Conn `json:"Conn"` } +// Task Task struct type Task struct { ID string Module string diff --git a/worker/utils/utils.go b/worker/utils/utils.go index 7e5443e..3c0e524 100644 --- a/worker/utils/utils.go +++ b/worker/utils/utils.go @@ -31,6 +31,7 @@ func CreateTLSClientWithCACert(caCertPath string, verifyAltName, verbose, debug return client, nil } +// LoadWorkerConfig funct to load worker config file func LoadWorkerConfig(filename string, verbose, debug bool) (*WorkerConfig, error) { var config WorkerConfig content, err := os.ReadFile(filename) @@ -81,6 +82,7 @@ func LoadWorkerConfig(filename string, verbose, debug bool) (*WorkerConfig, erro return &config, nil } +// GenerateTLSConfig Function to generate the TLS config func GenerateTLSConfig(caCertPath string, verifyAltName, verbose, debug bool) (*tls.Config, error) { var tlsConfig *tls.Config @@ -101,6 +103,7 @@ func GenerateTLSConfig(caCertPath string, verifyAltName, verbose, debug bool) (* tlsConfig = &tls.Config{ InsecureSkipVerify: true, // Enable server verification RootCAs: certPool, + MinVersion: tls.VersionTLS12, // Minimum version set to TLS 1.2 VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { if len(rawCerts) == 0 { return fmt.Errorf("no certificates provided by the server") diff --git a/worker/websockets/websockets.go b/worker/websockets/websockets.go index bb301d4..03e68dd 100644 --- a/worker/websockets/websockets.go +++ 
b/worker/websockets/websockets.go @@ -3,7 +3,9 @@ package websockets import ( "bytes" "encoding/json" + "fmt" "log" + "net" "os/exec" "strconv" "sync" @@ -16,167 +18,265 @@ import ( "github.com/r4ulcl/nTask/worker/utils" ) -func GetMessage(config *utils.WorkerConfig, status *globalstructs.WorkerStatus, verbose, debug bool, writeLock *sync.Mutex) { - for { +func initConnDeadlines(c *websocket.Conn) { + c.SetReadLimit(globalstructs.MaxMessageSize) + c.SetReadDeadline(time.Now().Add(globalstructs.PongWait)) +} - response := globalstructs.WebsocketMessage{ - Type: "", - JSON: "", +// attachPongHandler resets deadlines and signals when a Pong arrives +func attachPongHandler(conn *websocket.Conn, pongRec chan struct{}, debug bool) { + conn.SetPongHandler(func(appData string) error { + if debug { + log.Println("Received Pong:", appData) + } + conn.SetReadDeadline(time.Now().Add(globalstructs.PongWait)) + // non-blocking notify + select { + case pongRec <- struct{}{}: + default: } + return nil + }) +} - _, p, err := config.Conn.ReadMessage() //messageType +func GetMessage(config *utils.WorkerConfig, status *globalstructs.WorkerStatus, verbose, debug bool, writeLock *sync.Mutex) { + for { + // blocking read → any error bubbles up + _, p, err := config.Conn.ReadMessage() if err != nil { - log.Println("WebSockets config.Conn.ReadMessage()", err) - time.Sleep(time.Second * 5) - + log.Println("Conn.ReadMessage error:", err) + time.Sleep(5 * time.Second) continue } var msg globalstructs.WebsocketMessage - err = json.Unmarshal(p, &msg) - if err != nil { - log.Println("WebSockets Error decoding JSON:", err) - + if err := json.Unmarshal(p, &msg); err != nil { + log.Println("JSON decode error:", err) continue } - switch msg.Type { + if debug { + log.Printf("Received message type=%q json=%s\n", msg.Type, msg.JSON) + } + var ( + response globalstructs.WebsocketMessage + handlerErr error + ) + switch msg.Type { case "status": + response, handlerErr = messageStatusTask(config, status, 
msg, verbose, debug) + case "addTask": + response, handlerErr = messageAddTask(config, status, msg, verbose, debug, writeLock) + case "deleteTask": + response, handlerErr = messageDeleteTask(status, msg, verbose, debug) + default: if debug { - if debug { - log.Println("WebSockets msg.Type", msg.Type) - } - } - jsonData, err := json.Marshal(status) - if err != nil { - response.Type = "FAILED" - } else { - response.Type = "status" - response.JSON = string(jsonData) + log.Printf("Unhandled message type: %s", msg.Type) } + } + if handlerErr != nil { + log.Println("Handler error:", handlerErr) + } - if debug { - // Print the JSON data - log.Println(string(jsonData)) + if response.Type != "" { + jsonData, _ := json.Marshal(response) + if err := managerrequest.SendMessage(config.Conn, jsonData, verbose, debug, writeLock); err != nil { + log.Println("SendMessage error:", err) } + } + } +} - case "addTask": - if debug { - log.Println("WebSockets msg.Type", msg.Type) - } - var requestTask globalstructs.Task - err = json.Unmarshal([]byte(msg.JSON), &requestTask) - if err != nil { - log.Println("WebSockets addWorker Unmarshal error: ", err) - } - // if executing task skip and return error - if status.IddleThreads <= 0 { - response.Type = "FAILED;addTask" - response.JSON = msg.JSON - - requestTask.Status = "failed" - } else { - // Process task in background - if debug { - log.Println("WebSockets Task") - } - go process.Task(status, config, &requestTask, verbose, debug, writeLock) - response.Type = "OK;addTask" - response.JSON = msg.JSON - requestTask.Status = "running" - } +// RecreateConnection keeps the connection healthy: it sends pings every 5 s and +// reconnects if a Pong is not received within 5 s. 
+func RecreateConnection(config *utils.WorkerConfig, verifyAltName, verbose, debug bool, writeLock *sync.Mutex) { + pongReceived := make(chan struct{}, 1) + // ensure handler on first conn + attachPongHandler(config.Conn, pongReceived, debug) - //return task - jsonData, err := json.Marshal(requestTask) - if err != nil { - log.Println("WebSockets Marshal error: ", err) - } - response.JSON = string(jsonData) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() - case "deleteTask": + for range ticker.C { + if debug { + log.Println("Heartbeat check") + } + // send Ping under lock + writeLock.Lock() + err := config.Conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second)) + writeLock.Unlock() + if err != nil { + log.Println("Error sending Ping, reconnecting:", err) + config.Conn.Close() + CreateConnection(config, verifyAltName, verbose, debug, writeLock) + attachPongHandler(config.Conn, pongReceived, debug) + continue + } + + // wait for Pong or timeout + timeout := time.NewTimer(5 * time.Second) + select { + case <-pongReceived: if debug { - log.Println("WebSockets msg.Type", msg.Type) + log.Println("pongReceived – connection healthy") } + case <-timeout.C: + log.Println("Pong timeout – reconnecting") + config.Conn.Close() + CreateConnection(config, verifyAltName, verbose, debug, writeLock) + attachPongHandler(config.Conn, pongReceived, debug) + } + timeout.Stop() + } +} - var requestTask globalstructs.Task - err = json.Unmarshal([]byte(msg.JSON), &requestTask) - if err != nil { - log.Println("WebSockets deleteTask Unmarshal error: ", err) - } +// CreateConnection dials the manager, stores it in config.Conn, installs +// deadlines, and registers the worker. 
+func CreateConnection(config *utils.WorkerConfig, verifyAltName, verbose, debug bool, writeLock *sync.Mutex) { + for { + if debug { + log.Println("Attempting to connect...") + } + conn, err := managerrequest.CreateWebsocket(config, config.CA, verifyAltName, verbose, debug) + if err != nil { + log.Println("CreateWebsocket error:", err) + time.Sleep(5 * time.Second) + continue + } - cmdID := status.WorkingIDs[requestTask.ID] + initConnDeadlines(conn) + writeLock.Lock() + config.Conn = conn + writeLock.Unlock() - if cmdID < 0 { - log.Println("Invalid cmdID") - continue - } - cmdIDString := strconv.Itoa(cmdID) - - // Kill the process using cmdID - cmd := exec.Command("kill", "-9", cmdIDString) - - var stderr bytes.Buffer - cmd.Stderr = &stderr - - err := cmd.Run() - - if err != nil { - if debug { - log.Println("WebSockets Error killing process:", err) - log.Println("WebSockets Error details:", stderr.String()) - } - response.Type = "FAILED;deleteTask" - response.JSON = msg.JSON - } else { - response.Type = "OK;deleteTask" - response.JSON = msg.JSON - } + // optional TCP keep-alive + if tcpConn, ok := conn.UnderlyingConn().(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) } - if debug { - log.Printf("Received message type: %s\n", msg.Type) - log.Printf("Received message json: %s\n", msg.JSON) + + if err := managerrequest.AddWorker(config, verbose, debug, writeLock); err != nil { + log.Println("AddWorker error:", err) + time.Sleep(5 * time.Second) + continue } - if response.Type != "" { - jsonData, err := json.Marshal(response) - if err != nil { - log.Println("WebSockets Marshal error: ", err) - } - err = managerrequest.SendMessage(config.Conn, jsonData, verbose, debug, writeLock) - if err != nil { - log.Println("WebSockets SendMessage error: ", err) - } + if verbose { + log.Println("Connected to manager ✓") } + return } } -func RecreateConnection(config *utils.WorkerConfig, verifyAltName, verbose, debug bool, writeLock 
*sync.Mutex) { - for { - time.Sleep(1 * time.Second) // Adjust the interval based on your requirements - if err := config.Conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(1*time.Second)); err != nil { - conn, err := managerrequest.CreateWebsocket(config, config.CA, verifyAltName, verbose, debug) - if err != nil { - if verbose { - log.Println("WebSockets Error CreateWebsocket: ", err) - } - } else { - config.Conn = conn - - err = managerrequest.AddWorker(config, verbose, debug, writeLock) - if err != nil { - if verbose { - log.Println("WebSockets Error worker RecreateConnection AddWorker: ", err) - } - } else { - if verbose { - log.Println("WebSockets Worker connected to manager. ") - } - - continue - } +func messageAddTask(config *utils.WorkerConfig, status *globalstructs.WorkerStatus, msg globalstructs.WebsocketMessage, verbose, debug bool, writeLock *sync.Mutex) (globalstructs.WebsocketMessage, error) { - } + response := globalstructs.WebsocketMessage{ + Type: "", + JSON: "", + } + + if debug { + log.Println("WebSockets msg.Type", msg.Type) + } + var requestTask globalstructs.Task + err := json.Unmarshal([]byte(msg.JSON), &requestTask) + if err != nil { + return response, fmt.Errorf("WebSockets addWorker Unmarshal error: %s", err.Error()) + } + // if executing task skip and return error + if (config.DefaultThreads - len(status.WorkingIDs)) <= 0 { + response.Type = "FAILED;addTask" + response.JSON = msg.JSON + } else { + // Process task in background + if debug { + log.Println("WebSockets Task") } + go process.Task(status, config, &requestTask, verbose, debug, writeLock) + response.Type = "OK;addTask" + response.JSON = msg.JSON + requestTask.Status = "running" + } + + //return task + jsonData, err := json.Marshal(requestTask) + if err != nil { + return response, fmt.Errorf("WebSockets Marshal error: %s", err.Error()) + } + response.JSON = string(jsonData) + + return response, nil +} + +func messageDeleteTask(status *globalstructs.WorkerStatus, msg 
globalstructs.WebsocketMessage, verbose, debug bool) (globalstructs.WebsocketMessage, error) { + response := globalstructs.WebsocketMessage{ + Type: "", + JSON: "", + } + if debug || verbose { + log.Println("WebSockets msg.Type", msg.Type) + } + + var requestTask globalstructs.Task + err := json.Unmarshal([]byte(msg.JSON), &requestTask) + if err != nil { + return response, fmt.Errorf("WebSockets deleteTask Unmarshal error: %s", err.Error()) + } + + cmdID := status.WorkingIDs[requestTask.ID] + + if cmdID < 0 { + log.Println("Invalid cmdID") + return response, fmt.Errorf("Invalid cmdID") + } + cmdIDString := strconv.Itoa(cmdID) + + // Kill the process using cmdID + cmd := exec.Command("kill", "-9", cmdIDString) + + var stderr bytes.Buffer + cmd.Stderr = &stderr + + err = cmd.Run() + + if err != nil { + if debug { + log.Println("WebSockets Error killing process:", err) + log.Println("WebSockets Error details:", stderr.String()) + } + response.Type = "FAILED;deleteTask" + response.JSON = msg.JSON + } else { + response.Type = "OK;deleteTask" + response.JSON = msg.JSON + } + + return response, nil +} + +func messageStatusTask(config *utils.WorkerConfig, status *globalstructs.WorkerStatus, msg globalstructs.WebsocketMessage, verbose, debug bool) (globalstructs.WebsocketMessage, error) { + response := globalstructs.WebsocketMessage{ + Type: "", + JSON: "", + } + status.IddleThreads = config.DefaultThreads - len(status.WorkingIDs) + + if debug || verbose { + log.Println("WebSockets msg.Type", msg.Type, "status:", status) + } + + jsonData, err := json.Marshal(status) + if err != nil { + response.Type = "FAILED" + } else { + response.Type = "status" + response.JSON = string(jsonData) + } + + if debug { + // Print the JSON data + log.Println("messageStatusTask:", string(jsonData)) } + return response, nil } diff --git a/worker/worker.go b/worker/worker.go index 895b0ec..b19e8fe 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1,4 +1,4 @@ -// workerouter.go +// Package 
worker for all the workers data package worker import ( @@ -8,7 +8,6 @@ import ( "os/signal" "sync" "syscall" - "time" globalstructs "github.com/r4ulcl/nTask/globalstructs" "github.com/r4ulcl/nTask/worker/managerrequest" @@ -16,6 +15,7 @@ import ( "github.com/r4ulcl/nTask/worker/websockets" ) +// StartWorker main function to start Worker func StartWorker(swagger bool, configFile string, verifyAltName, verbose, debug bool) { log.Println("Worker Running as worker router...") @@ -28,7 +28,7 @@ func StartWorker(swagger bool, configFile string, verifyAltName, verbose, debug status := globalstructs.WorkerStatus{ Name: config.Name, - IddleThreads: config.IddleThreads, + IddleThreads: config.DefaultThreads, WorkingIDs: make(map[string]int), } @@ -69,31 +69,7 @@ func StartWorker(swagger bool, configFile string, verifyAltName, verbose, debug config.ClientHTTP = &http.Client{} } - // Loop until connects - for { - if debug { - log.Println("Worker Trying to conenct to manager") - } - conn, err := managerrequest.CreateWebsocket(config, config.CA, verifyAltName, verbose, debug) - if err != nil { - log.Println("Worker Error worker CreateWebsocket: ", err) - } else { - config.Conn = conn - - err = managerrequest.AddWorker(config, verbose, debug, &writeLock) - if err != nil { - if verbose { - log.Println("Worker Error worker AddWorker: ", err) - } - } else { - if verbose { - log.Println("Worker connected to manager. 
") - } - break - } - } - time.Sleep(time.Second * 5) - } + websockets.CreateConnection(config, verifyAltName, verbose, debug, &writeLock) go websockets.GetMessage(config, &status, verbose, debug, &writeLock) diff --git a/workersBin/dnsvalidator b/workersBin/dnsvalidator new file mode 100755 index 0000000..46d5100 --- /dev/null +++ b/workersBin/dnsvalidator @@ -0,0 +1,33 @@ +#!/config/dnsvalidator/venv/bin/python3 +# EASY-INSTALL-ENTRY-SCRIPT: 'DNSValidator==0.1','console_scripts','dnsvalidator' +import re +import sys + +# for compatibility with easy_install; see #2198 +__requires__ = 'DNSValidator==0.1' + +try: + from importlib.metadata import distribution +except ImportError: + try: + from importlib_metadata import distribution + except ImportError: + from pkg_resources import load_entry_point + + +def importlib_load_entry_point(spec, group, name): + dist_name, _, _ = spec.partition('==') + matches = ( + entry_point + for entry_point in distribution(dist_name).entry_points + if entry_point.group == group and entry_point.name == name + ) + return next(matches).load() + + +globals().setdefault('load_entry_point', importlib_load_entry_point) + + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(load_entry_point('DNSValidator==0.1', 'console_scripts', 'dnsvalidator')()) diff --git a/workersBin/dnsx b/workersBin/dnsx new file mode 100755 index 0000000..904adbb Binary files /dev/null and b/workersBin/dnsx differ diff --git a/workersBin/massdns b/workersBin/massdns new file mode 100755 index 0000000..e1c596c Binary files /dev/null and b/workersBin/massdns differ diff --git a/workersBin/puredns b/workersBin/puredns new file mode 100755 index 0000000..d21bfad Binary files /dev/null and b/workersBin/puredns differ diff --git a/workersBin/subfinder b/workersBin/subfinder new file mode 100755 index 0000000..7f521c1 Binary files /dev/null and b/workersBin/subfinder differ diff --git a/workersBin/vt b/workersBin/vt new 
file mode 100755 index 0000000..ddbef6f Binary files /dev/null and b/workersBin/vt differ