diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d9b4988b2..666e4dc06 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -625,6 +625,42 @@ jobs: asset_name: OctoBot_macos_arm64 asset_content_type: application/x-binary + sync-deploy: + name: Deploy Sync Server + needs: [docker] + if: false # Temporarily skipped + runs-on: ubuntu-latest + + env: + DEPLOY_ENV: ${{ startsWith(github.ref, 'refs/tags/') && 'production' || github.ref == 'refs/heads/master' && 'staging' || 'development' }} + + steps: + - uses: actions/checkout@v6 + + - name: Install Ansible + run: pip install ansible + + - name: Install Galaxy requirements + working-directory: infra/sync/ansible + run: ansible-galaxy install -r requirements.yml + + - name: Set up SSH key + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SYNC_DEPLOY_SSH_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + for ip in ${{ secrets.SYNC_NODE_IPS }}; do + ssh-keyscan -H "$ip" >> ~/.ssh/known_hosts 2>/dev/null + done + + - name: Deploy to ${{ env.DEPLOY_ENV }} + working-directory: infra/sync/ansible + env: + ANSIBLE_VAULT_PASSWORD: ${{ secrets.SYNC_ANSIBLE_VAULT_PASSWORD }} + run: | + ansible-playbook playbooks/deploy-octobot-sync.yml \ + -i inventories/${{ env.DEPLOY_ENV }} + notify: if: ${{ failure() }} needs: diff --git a/.gitignore b/.gitignore index 035cbf6c6..fd35a23ab 100644 --- a/.gitignore +++ b/.gitignore @@ -141,6 +141,13 @@ letsencrypt/ # dev env .env +# Ansible decrypted temp files, SSH keys, and production collections config +infra/**/*.dec.yml +infra/**/.ssh/ + +# Ansible Galaxy installed roles (installed via requirements.yml) +infra/**/roles/geerlingguy.*/ + # Pants build system /.pants.d/ /dist/ diff --git a/infra/sync/README.md b/infra/sync/README.md new file mode 100644 index 000000000..5a0e72a50 --- /dev/null +++ b/infra/sync/README.md @@ -0,0 +1,246 @@ +# OctoBot Sync Server — Infrastructure + +Deploys the OctoBot sync server stack across multiple VPS 
nodes with zero-downtime rolling updates. + +**Stack per node:** Garage (S3 storage) + OctoBot sync server + Nginx (reverse proxy with caching) + +## Prerequisites + +- Python 3.10+ with pip (`pip install -r infra/sync/requirements.txt` installs `ansible-core`) +- SSH access to target nodes (key-based, user `deploy` with sudo) +- OctoBot Docker image (`drakkarsoftware/octobot`) — the sync server runs via `OctoBot sync` CLI command (no separate image needed) + +## Quick start + +```bash +# 1. Install Ansible +pip install -r infra/sync/requirements.txt +cd infra/sync/ansible + +# 2. Install Ansible Galaxy roles +ansible-galaxy install -r requirements.yml + +# 3. Set up credentials for your environment +cp vault.yml.example inventories/development/group_vars/all/vault.yml +cp hosts.yml.example inventories/development/hosts.yml + +# 4. Set up SSH key +mkdir -p inventories/development/.ssh +cp ~/.ssh/id_rsa inventories/development/.ssh/id_rsa +chmod 600 inventories/development/.ssh/id_rsa + +# 5. Fill in real values +vim inventories/development/hosts.yml # node IPs, zones, capacity +vim inventories/development/group_vars/all/vault.yml # secrets + +# 6. Encrypt sensitive files +ansible-vault encrypt inventories/development/group_vars/all/vault.yml +ansible-vault encrypt inventories/development/hosts.yml + +# 7. Deploy +ansible-playbook playbooks/site.yml -i inventories/development +``` + +## Environments + +| Environment | Branch/Trigger | Image Tag | Inventory | +|---|---|---|---| +| development | push to `dev` | `latest` | `inventories/development` | +| staging | push to `master` | `stable` | `inventories/staging` | +| production | git tag | version | `inventories/production` | + +Deploy to a specific environment: + +```bash +ansible-playbook playbooks/site.yml -i inventories/staging +ansible-playbook playbooks/site.yml -i inventories/production +``` + +Bare `ansible-playbook` without `-i` defaults to development (configured in `ansible.cfg`). 
+ +## Playbooks + +| Playbook | Purpose | When to use | +|---|---|---| +| `site.yml` | Full stack rolling deploy | First deploy, infra changes, Garage config changes | +| `deploy-octobot-sync.yml` | App-only rolling update | New app version (fast — only restarts OctoBot Sync) | +| `setup-garage.yml` | Cluster bootstrap | Once after first `site.yml` — creates bucket + API key | + +### First-time setup + +```bash +# 1. Deploy the full stack (Garage + OctoBot Sync + Nginx) +ansible-playbook playbooks/site.yml -i inventories/production + +# 2. Bootstrap the Garage cluster (connects nodes, assigns layout, creates bucket + key) +ansible-playbook playbooks/setup-garage.yml -i inventories/production + +# 3. Save the S3 credentials output by step 2 into vault.yml +ansible-vault edit inventories/production/group_vars/all/vault.yml + +# 4. Save the node IDs into hosts.yml (garage_node_id per host) +ansible-vault edit inventories/production/hosts.yml + +# 5. Re-deploy with real S3 credentials +ansible-playbook playbooks/site.yml -i inventories/production +``` + +### Routine app deploy + +```bash +ansible-playbook playbooks/deploy-octobot-sync.yml -i inventories/production +``` + +## Credentials + +All secrets are managed via [Ansible Vault](https://docs.ansible.com/ansible/latest/vault_guide/). 
+### SSH keys per environment + +Each environment has its own SSH key at `inventories/{env}/.ssh/id_rsa` (gitignored): + +```bash +mkdir -p inventories/production/.ssh +ssh-keygen -t ed25519 -f inventories/production/.ssh/id_rsa -N "" +# Copy the public key to your nodes: +ssh-copy-id -i inventories/production/.ssh/id_rsa.pub deploy@node-ip +``` + +When deploying to a non-default environment, pass the key explicitly: + +```bash +ansible-playbook playbooks/site.yml -i inventories/production \ + --private-key inventories/production/.ssh/id_rsa +``` + +### Encrypted files per environment + +| File | Contents | +|---|---| +| `inventories/{env}/hosts.yml` | Node IPs, garage node IDs | +| `inventories/{env}/group_vars/all/vault.yml` | S3 keys, encryption secrets, Garage tokens | +| `inventories/{env}/.ssh/` | SSH private key for the `deploy` user (gitignored) | + +### Editing encrypted files + +```bash +# Edit in-place (opens $EDITOR) +ansible-vault edit inventories/production/group_vars/all/vault.yml + +# Or decrypt to a gitignored temp file, edit, then re-encrypt +ansible-vault decrypt inventories/production/group_vars/all/vault.yml \ + --output inventories/production/group_vars/all/vault.dec.yml +vim inventories/production/group_vars/all/vault.dec.yml +ansible-vault encrypt inventories/production/group_vars/all/vault.dec.yml \ + --output inventories/production/group_vars/all/vault.yml +rm inventories/production/group_vars/all/vault.dec.yml + +# Same for hosts +ansible-vault decrypt inventories/production/hosts.yml \ + --output inventories/production/hosts.dec.yml +vim inventories/production/hosts.dec.yml +ansible-vault encrypt inventories/production/hosts.dec.yml \ + --output inventories/production/hosts.yml +rm inventories/production/hosts.dec.yml + +# Re-encrypt with a new password +ansible-vault rekey inventories/production/group_vars/all/vault.yml +``` + +### Pre-commit hook + +Prevents accidentally committing unencrypted `vault.yml` or `hosts.yml`: + +```bash +# Unix / macOS +cp 
infra/sync/ansible/scripts/pre-commit-vault-check.py .git/hooks/pre-commit +chmod +x .git/hooks/pre-commit + +# Windows (Git Bash) +cp infra/sync/ansible/scripts/pre-commit-vault-check.py .git/hooks/pre-commit +``` + +### Vault password + +The vault password is read from the `ANSIBLE_VAULT_PASSWORD` environment variable (via `scripts/vault-password.sh`). Set it before running playbooks: + +```bash +export ANSIBLE_VAULT_PASSWORD="your-vault-password" +``` + +Or pass it interactively: + +```bash +ansible-playbook playbooks/site.yml -i inventories/production --ask-vault-pass +``` + +### Generating secrets + +```bash +# Garage RPC secret +openssl rand -hex 32 + +# Garage admin/metrics tokens +openssl rand -base64 32 + +# Encryption secrets +openssl rand -base64 48 +``` + +### Required vault variables + +See `vault.yml.example` for the full list: + +| Variable | Purpose | +|---|---| +| `vault_garage_rpc_secret` | Shared secret for Garage inter-node RPC | +| `vault_garage_admin_token` | Garage admin API authentication | +| `vault_garage_metrics_token` | Garage metrics endpoint authentication | +| `vault_s3_access_key` | S3 API access key (from `setup-garage.yml`) | +| `vault_s3_secret_key` | S3 API secret key (from `setup-garage.yml`) | +| `vault_platform_pubkey_evm` | Platform EVM address (identity) | +| `vault_encryption_secret` | User data encryption key | +| `vault_platform_encryption_secret` | Platform data encryption key | + +## Adding a new node + +1. Edit the environment's `hosts.yml` — add a new entry under `sync_nodes` +2. Run `site.yml` with `--limit` to deploy only to the new node: + ```bash + ansible-playbook playbooks/site.yml -i inventories/production --limit new-node.example.com + ``` +3. 
Run `setup-garage.yml` to assign the new node in the Garage layout (bucket/key creation is skipped — they replicate automatically) + +## Zero-downtime guarantee + +- `serial: 1` — one node updated at a time +- Garage `replication_factor=3` — quorum needs 2/3, losing 1 is safe +- OctoBot sync is stateless — restart loses nothing +- Health checks must pass before moving to next node +- 10s pause between nodes for data re-sync + +## CI/CD + +Automated via GitHub Actions (`.github/workflows/main.yml`): + +1. **`docker`** (existing) — builds the OctoBot image (`drakkarsoftware/octobot`), which includes the sync server +2. **`sync-deploy`** — after `docker` succeeds, runs Ansible `deploy-octobot-sync.yml` against the right environment + +The sync server uses the same OctoBot image with `OctoBot sync` as the entry point — no separate build step needed. + +Required GitHub secrets: + +| Secret | Purpose | +|---|---| +| `SYNC_DEPLOY_SSH_KEY` | Ed25519 private key for the `deploy` user on VPS nodes | +| `SYNC_ANSIBLE_VAULT_PASSWORD` | Vault password for decrypting secrets | +| `SYNC_NODE_IPS` | Space-separated list of node IPs (for ssh-keyscan) | + +## Nginx caching + +Nginx config is auto-generated from `collections.json` (via `generate_nginx_conf.py`): + +- **Public + pull_only** collections — cached 1h +- **Public + writable** collections — cached 30s +- **Private** collections — no cache, proxied directly +- `X-Cache-Status` header on cached routes for debugging diff --git a/infra/sync/ansible/ansible.cfg b/infra/sync/ansible/ansible.cfg new file mode 100644 index 000000000..549d04e37 --- /dev/null +++ b/infra/sync/ansible/ansible.cfg @@ -0,0 +1,16 @@ +[defaults] +inventory = inventories/development +roles_path = roles +vault_password_file = scripts/vault-password.sh +# SSH key per environment: inventories//.ssh/id_rsa +private_key_file = inventories/development/.ssh/id_rsa +remote_tmp = /tmp/.ansible/tmp +host_key_checking = False +retry_files_enabled = False +# Ignore 
.example files so Ansible doesn't try to parse them as inventory +inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .example +deprecation_warnings = False + +[privilege_escalation] +become = True +become_method = sudo diff --git a/infra/sync/ansible/hosts.yml.example b/infra/sync/ansible/hosts.yml.example new file mode 100644 index 000000000..34eb60bf5 --- /dev/null +++ b/infra/sync/ansible/hosts.yml.example @@ -0,0 +1,28 @@ +# Copy to inventories//hosts.yml and fill in real values +# Then encrypt: ansible-vault encrypt inventories//hosts.yml +# +# For a single-node dev setup, one host is enough. +# For staging/production, use 3+ nodes across different zones for redundancy. + +all: + children: + sync_nodes: + hosts: + sync-1.example.com: + ansible_host: 203.0.113.10 + ansible_user: deploy + garage_rpc_public_addr: "203.0.113.10:3901" + garage_capacity: 100 # GB of storage to allocate + garage_zone: "dc1" + # sync-2.example.com: + # ansible_host: 203.0.113.11 + # ansible_user: deploy + # garage_rpc_public_addr: "203.0.113.11:3901" + # garage_capacity: 100 + # garage_zone: "dc2" + # sync-3.example.com: + # ansible_host: 203.0.113.12 + # ansible_user: deploy + # garage_rpc_public_addr: "203.0.113.12:3901" + # garage_capacity: 100 + # garage_zone: "dc3" diff --git a/infra/sync/ansible/inventories/development/group_vars/all/vars.yml b/infra/sync/ansible/inventories/development/group_vars/all/vars.yml new file mode 100644 index 000000000..a76d1fab1 --- /dev/null +++ b/infra/sync/ansible/inventories/development/group_vars/all/vars.yml @@ -0,0 +1,36 @@ +# Docker images +octobot_sync_image: "drakkarsoftware/octobot" +octobot_image_tag: "latest" +garage_image: "dxflrs/garage:v2.2.0" +nginx_image: "nginx:1-alpine" + +# Deployment +stack_deploy_dir: "/opt/octobot-sync" +s3_bucket: "octobot-sync-dev" +s3_region: "garage" +octobot_sync_port: 3000 +nginx_port: 8080 +garage_replication_factor: 1 + +# Map vault → app vars +garage_rpc_secret: "{{ 
vault_garage_rpc_secret }}" +garage_admin_token: "{{ vault_garage_admin_token }}" +garage_metrics_token: "{{ vault_garage_metrics_token }}" +s3_access_key: "{{ vault_s3_access_key }}" +s3_secret_key: "{{ vault_s3_secret_key }}" +platform_pubkey_evm: "{{ vault_platform_pubkey_evm }}" +encryption_secret: "{{ vault_encryption_secret }}" +platform_encryption_secret: "{{ vault_platform_encryption_secret }}" +evm_base_rpc: "{{ vault_evm_base_rpc | default('') }}" +evm_contract_base: "{{ vault_evm_contract_base | default('') }}" + +# Firewall (geerlingguy.firewall) +firewall_allowed_tcp_ports: + - "22" + - "8080" + # Port 3901 (Garage RPC) restricted to peer IPs only — see sync_nodes group vars + +# Docker (geerlingguy.docker) +docker_install_compose_plugin: true +docker_users: + - deploy diff --git a/infra/sync/ansible/inventories/development/group_vars/all/vault.yml b/infra/sync/ansible/inventories/development/group_vars/all/vault.yml new file mode 100644 index 000000000..268b584e7 --- /dev/null +++ b/infra/sync/ansible/inventories/development/group_vars/all/vault.yml @@ -0,0 +1,34 @@ +$ANSIBLE_VAULT;1.1;AES256 +38396633616531373263656566303465356634396366336261383766316563333132326235633138 +6436313432343632343331633432623937386531386162660a653432346263326537373436376537 +66336561663461643638663062633866376131353762393034316331383232393634303930303833 +3234653330316338620a623061393639333238613837373064363532306639356239393863333138 +39306133643464383530653237613934396334316163636335333166616333356437613131643738 +30373765306363626165623632303233383863666535383762303266333636656136346536396639 +62653262386566373562643034623135626637376330303635366564383465343365393739383466 +38653865316233306361366463353262343534303833303130316665626237636131393930663464 +36636364306234346461323234636634626136396631333831373532666535363731623037386533 +38336630316636616230393066653730366366393063636637353131376438636637636365316265 
+35306339343963636138326139653431343138626539316564336132316531393730303635356462 +63363564356637373764373938623538366534656135343163376235316131666535633062313736 +35633038376662303533626234323166663231313565616361306665633538306635376236656632 +35663935336333396264306464386531623537343165323736333533656566636262306132306531 +38383237623139353661666232626437323366323864386264303731623339343330616132313365 +36623634353334653835643234373536666134366535656339623235626131643161343232313561 +62303731646663343335313531313736353734346362343461616432326238343665666436376363 +38396431643034313531396662663639343863373663353234656437633834633030653263306339 +63376339356665623664616237303238303065356161353465376533626337336365346266656137 +66363864333161373936356666336235306565353833636563363565373638623932383238626439 +66643964636566613562336138323436306530303332626139343137356339303265643864323561 +32343431363538656464643434623165366632616263386363623236393632663866623537376232 +66393337376139323437363963303032313035393664313435346163306230386235386431386235 +62626339656262386237633462383466313063303637643362313262633562393161373738653861 +32393038363835643234653836353730366333363432663839393164396135626533393263616532 +63323063323364383737356235623365303637323037653333623235623065386535316431383233 +33626163343434333062343030376430366238353836646663393465363038633039646561313534 +62333432663864393062323461303230636261643663626432383838613835363639653836323265 +62663336653865633839346635303833303061306437663831353662306332373230373234303735 +38363538363436313530353435326532333263323431343438373737613366656330656330383630 +36666463313330356331373366303861666332373130356532646564316537346235626236636462 +62656430623131643163623937316636663962626163613466656332316132366462363765313438 +38376166326565316139656530306135633339636533333264333830343934636566 diff --git a/infra/sync/ansible/inventories/development/group_vars/sync_nodes/vars.yml 
b/infra/sync/ansible/inventories/development/group_vars/sync_nodes/vars.yml new file mode 100644 index 000000000..29591f40b --- /dev/null +++ b/infra/sync/ansible/inventories/development/group_vars/sync_nodes/vars.yml @@ -0,0 +1,16 @@ +garage_bootstrap_peers: >- + {% set peers = [] %} + {% for host in groups['sync_nodes'] if host != inventory_hostname %} + {% set _ = peers.append('"' ~ hostvars[host]['garage_node_id'] ~ '@' ~ hostvars[host]['garage_rpc_public_addr'] ~ '"') %} + {% endfor %} + {{ peers | join(', ') }} + +# Allow Garage RPC (3901) only from peer sync nodes +firewall_additional_rules: >- + {{ + groups['sync_nodes'] + | reject('equalto', inventory_hostname) + | map('extract', hostvars, 'ansible_host') + | map('regex_replace', '^(.*)$', 'iptables -A INPUT -p tcp --dport 3901 -s \1 -j ACCEPT') + | list + }} diff --git a/infra/sync/ansible/inventories/development/hosts.yml b/infra/sync/ansible/inventories/development/hosts.yml new file mode 100644 index 000000000..3e36bd028 --- /dev/null +++ b/infra/sync/ansible/inventories/development/hosts.yml @@ -0,0 +1,18 @@ +$ANSIBLE_VAULT;1.1;AES256 +31386665663465333536336665326134353434653239303465663331636135626237336635336232 +3864653134613237313235626566343434356533396632620a326337356633363531666365306464 +36643734333235316364393836356539373564366562346335373939356333333336633563373862 +3436376238643734370a313737316233353164376563656336383666653933396534666162326330 +35666238353061363530363933623839666136383138643438343537663537313939373235313864 +39323661386362353066323432333265336162373236343162363465386661636661643835376633 +65376633363137376165633638323262303839323834373030366661333933346532643733656263 +38306264643631376337306130666137633237623832623466616335326263396138643663663533 +65383638383862616465366338316233656136393233333230303136383262636639613135653930 +32353838343037613961396231643235343361386630663165656638306266343563356466663630 
+37353464643661353535623839363666396266663164336262646532613336313062383062363061 +36613139636534323664343930383339303536303533613435343133363666623035663163653732 +66623939383661326564326361346361363235326261613864363734303666616364326131613665 +31386263666639316134376232396166336234386464366130663164363964323238343461383363 +61663462613932346339646566306663363435643834363463626336346165333563363235343638 +61346337323766616162356264313565633330336665373937663437633366313436626561633836 +36396432343562376639663962623535623634383739636362313939366136346638 diff --git a/infra/sync/ansible/inventories/production/group_vars/all/vars.yml b/infra/sync/ansible/inventories/production/group_vars/all/vars.yml new file mode 100644 index 000000000..f0e8fd7d3 --- /dev/null +++ b/infra/sync/ansible/inventories/production/group_vars/all/vars.yml @@ -0,0 +1,36 @@ +# Docker images +octobot_sync_image: "drakkarsoftware/octobot" +octobot_image_tag: "stable" +garage_image: "dxflrs/garage:v2.2.0" +nginx_image: "nginx:1-alpine" + +# Deployment +stack_deploy_dir: "/opt/octobot-sync" +s3_bucket: "octobot-sync" +s3_region: "garage" +octobot_sync_port: 3000 +nginx_port: 80 +garage_replication_factor: 3 + +# Map vault → app vars +garage_rpc_secret: "{{ vault_garage_rpc_secret }}" +garage_admin_token: "{{ vault_garage_admin_token }}" +garage_metrics_token: "{{ vault_garage_metrics_token }}" +s3_access_key: "{{ vault_s3_access_key }}" +s3_secret_key: "{{ vault_s3_secret_key }}" +platform_pubkey_evm: "{{ vault_platform_pubkey_evm }}" +encryption_secret: "{{ vault_encryption_secret }}" +platform_encryption_secret: "{{ vault_platform_encryption_secret }}" +evm_base_rpc: "{{ vault_evm_base_rpc | default('') }}" +evm_contract_base: "{{ vault_evm_contract_base | default('') }}" + +# Firewall (geerlingguy.firewall) +firewall_allowed_tcp_ports: + - "22" + - "80" + # Port 3901 (Garage RPC) restricted to peer IPs only — see sync_nodes group vars + +# Docker (geerlingguy.docker) 
+docker_install_compose_plugin: true +docker_users: + - deploy diff --git a/infra/sync/ansible/inventories/production/group_vars/sync_nodes/vars.yml b/infra/sync/ansible/inventories/production/group_vars/sync_nodes/vars.yml new file mode 100644 index 000000000..29591f40b --- /dev/null +++ b/infra/sync/ansible/inventories/production/group_vars/sync_nodes/vars.yml @@ -0,0 +1,16 @@ +garage_bootstrap_peers: >- + {% set peers = [] %} + {% for host in groups['sync_nodes'] if host != inventory_hostname %} + {% set _ = peers.append('"' ~ hostvars[host]['garage_node_id'] ~ '@' ~ hostvars[host]['garage_rpc_public_addr'] ~ '"') %} + {% endfor %} + {{ peers | join(', ') }} + +# Allow Garage RPC (3901) only from peer sync nodes +firewall_additional_rules: >- + {{ + groups['sync_nodes'] + | reject('equalto', inventory_hostname) + | map('extract', hostvars, 'ansible_host') + | map('regex_replace', '^(.*)$', 'iptables -A INPUT -p tcp --dport 3901 -s \1 -j ACCEPT') + | list + }} diff --git a/infra/sync/ansible/inventories/staging/group_vars/all/vars.yml b/infra/sync/ansible/inventories/staging/group_vars/all/vars.yml new file mode 100644 index 000000000..f2320416c --- /dev/null +++ b/infra/sync/ansible/inventories/staging/group_vars/all/vars.yml @@ -0,0 +1,36 @@ +# Docker images +octobot_sync_image: "drakkarsoftware/octobot" +octobot_image_tag: "staging" +garage_image: "dxflrs/garage:v2.2.0" +nginx_image: "nginx:1-alpine" + +# Deployment +stack_deploy_dir: "/opt/octobot-sync" +s3_bucket: "octobot-sync-staging" +s3_region: "garage" +octobot_sync_port: 3000 +nginx_port: 80 +garage_replication_factor: 3 + +# Map vault → app vars +garage_rpc_secret: "{{ vault_garage_rpc_secret }}" +garage_admin_token: "{{ vault_garage_admin_token }}" +garage_metrics_token: "{{ vault_garage_metrics_token }}" +s3_access_key: "{{ vault_s3_access_key }}" +s3_secret_key: "{{ vault_s3_secret_key }}" +platform_pubkey_evm: "{{ vault_platform_pubkey_evm }}" +encryption_secret: "{{ vault_encryption_secret }}" 
+platform_encryption_secret: "{{ vault_platform_encryption_secret }}" +evm_base_rpc: "{{ vault_evm_base_rpc | default('') }}" +evm_contract_base: "{{ vault_evm_contract_base | default('') }}" + +# Firewall (geerlingguy.firewall) +firewall_allowed_tcp_ports: + - "22" + - "80" + # Port 3901 (Garage RPC) restricted to peer IPs only — see sync_nodes group vars + +# Docker (geerlingguy.docker) +docker_install_compose_plugin: true +docker_users: + - deploy diff --git a/infra/sync/ansible/inventories/staging/group_vars/sync_nodes/vars.yml b/infra/sync/ansible/inventories/staging/group_vars/sync_nodes/vars.yml new file mode 100644 index 000000000..29591f40b --- /dev/null +++ b/infra/sync/ansible/inventories/staging/group_vars/sync_nodes/vars.yml @@ -0,0 +1,16 @@ +garage_bootstrap_peers: >- + {% set peers = [] %} + {% for host in groups['sync_nodes'] if host != inventory_hostname %} + {% set _ = peers.append('"' ~ hostvars[host]['garage_node_id'] ~ '@' ~ hostvars[host]['garage_rpc_public_addr'] ~ '"') %} + {% endfor %} + {{ peers | join(', ') }} + +# Allow Garage RPC (3901) only from peer sync nodes +firewall_additional_rules: >- + {{ + groups['sync_nodes'] + | reject('equalto', inventory_hostname) + | map('extract', hostvars, 'ansible_host') + | map('regex_replace', '^(.*)$', 'iptables -A INPUT -p tcp --dport 3901 -s \1 -j ACCEPT') + | list + }} diff --git a/infra/sync/ansible/playbooks/deploy-octobot-sync.yml b/infra/sync/ansible/playbooks/deploy-octobot-sync.yml new file mode 100644 index 000000000..4c3cb9729 --- /dev/null +++ b/infra/sync/ansible/playbooks/deploy-octobot-sync.yml @@ -0,0 +1,30 @@ +--- +# App-only rolling update — fast path after CI pushes a new image +- name: Rolling deploy of OctoBot sync + hosts: sync_nodes + serial: 1 + max_fail_percentage: 0 + become: true + + tasks: + - name: Pull new OctoBot sync image + ansible.builtin.command: + cmd: docker compose pull octobot-sync + chdir: "{{ stack_deploy_dir }}" + changed_when: false + + - name: Restart 
OctoBot sync only + ansible.builtin.command: + cmd: docker compose up -d --no-deps --force-recreate octobot-sync + chdir: "{{ stack_deploy_dir }}" + changed_when: true + + - name: Run health checks + ansible.builtin.include_role: + name: stack + tasks_from: healthcheck + + - name: Pause between nodes + ansible.builtin.pause: + seconds: 10 + when: ansible_play_hosts_all | length > 1 diff --git a/infra/sync/ansible/playbooks/setup-garage.yml b/infra/sync/ansible/playbooks/setup-garage.yml new file mode 100644 index 000000000..d25ea28a8 --- /dev/null +++ b/infra/sync/ansible/playbooks/setup-garage.yml @@ -0,0 +1,18 @@ +--- +# One-time: bootstrap the Garage cluster after first deploy +# Run AFTER site.yml has deployed all nodes with Garage running +# +# Usage: +# ansible-playbook playbooks/setup-garage.yml +# +# After this completes: +# 1. Copy the S3 credentials into vault.yml +# 2. Copy the node IDs into the environment's hosts.yml (garage_node_id per host) +# 3. Re-run site.yml to deploy with real credentials + +- name: Bootstrap Garage Cluster + hosts: sync_nodes + become: true + + roles: + - garage_init diff --git a/infra/sync/ansible/playbooks/site.yml b/infra/sync/ansible/playbooks/site.yml new file mode 100644 index 000000000..2697e7cf7 --- /dev/null +++ b/infra/sync/ansible/playbooks/site.yml @@ -0,0 +1,18 @@ +--- +# Full stack rolling deploy — one node at a time, health-checked +- name: Deploy OctoBot Sync Stack + hosts: sync_nodes + serial: 1 + max_fail_percentage: 0 + become: true + + roles: + - geerlingguy.firewall + - geerlingguy.docker + - stack + + post_tasks: + - name: Pause between nodes + ansible.builtin.pause: + seconds: 10 + when: ansible_play_hosts_all | length > 1 diff --git a/infra/sync/ansible/requirements.yml b/infra/sync/ansible/requirements.yml new file mode 100644 index 000000000..cdbc717b0 --- /dev/null +++ b/infra/sync/ansible/requirements.yml @@ -0,0 +1,9 @@ +roles: + - name: geerlingguy.docker + version: "7.4.1" + - name: geerlingguy.firewall + version: "2.7.0" + 
+collections: + - name: community.docker + version: ">=3.0.0" diff --git a/infra/sync/ansible/roles/garage_init/tasks/main.yml b/infra/sync/ansible/roles/garage_init/tasks/main.yml new file mode 100644 index 000000000..483e2bd65 --- /dev/null +++ b/infra/sync/ansible/roles/garage_init/tasks/main.yml @@ -0,0 +1,140 @@ +--- +# One-time cluster bootstrap — run via setup-garage.yml +# Idempotent: skips bucket/key creation if they already exist (replicated metadata) + +- name: Ensure Garage is running + ansible.builtin.command: + cmd: docker compose exec -T garage wget -qO- http://127.0.0.1:3903/health + chdir: "{{ stack_deploy_dir }}" + register: garage_health + retries: 10 + delay: 5 + until: garage_health.rc == 0 + changed_when: false + +- name: Discover Garage node ID + ansible.builtin.command: + cmd: docker compose exec -T garage /garage node id -q + chdir: "{{ stack_deploy_dir }}" + register: node_id_result + changed_when: false + +- name: Set node ID fact + ansible.builtin.set_fact: + discovered_garage_node_id: "{{ node_id_result.stdout | trim }}" + +- name: Display node ID + ansible.builtin.debug: + msg: "{{ inventory_hostname }}: {{ discovered_garage_node_id }}" + +# --- Cluster setup (delegated to first node) --- + +- name: Connect to all peers + ansible.builtin.command: + cmd: >- + docker compose exec -T garage /garage node connect + {{ hostvars[item]['discovered_garage_node_id'] }}@{{ hostvars[item]['garage_rpc_public_addr'] }} + chdir: "{{ stack_deploy_dir }}" + loop: "{{ groups['sync_nodes'] }}" + when: + - inventory_hostname == groups['sync_nodes'][0] + - item != inventory_hostname + changed_when: false + +- name: Assign layout for each node + ansible.builtin.command: + cmd: >- + docker compose exec -T garage /garage layout assign + -z {{ hostvars[item]['garage_zone'] }} + -c {{ hostvars[item]['garage_capacity'] }}G + -t {{ item }} + {{ hostvars[item]['discovered_garage_node_id'] }} + chdir: "{{ stack_deploy_dir }}" + loop: "{{ groups['sync_nodes'] }}" + 
when: inventory_hostname == groups['sync_nodes'][0] + changed_when: true + +- name: Get current layout version + ansible.builtin.shell: + cmd: >- + docker compose exec -T garage /garage layout show 2>&1 + | grep -oP 'Current layout version: \K\d+' || echo 0 + chdir: "{{ stack_deploy_dir }}" + register: layout_version + changed_when: false + when: inventory_hostname == groups['sync_nodes'][0] + +- name: Apply layout + ansible.builtin.command: + cmd: >- + docker compose exec -T garage /garage layout apply + --version {{ (layout_version.stdout | int) + 1 }} + chdir: "{{ stack_deploy_dir }}" + when: inventory_hostname == groups['sync_nodes'][0] + changed_when: true + +# --- Bucket (idempotent — skipped if already exists via replication) --- + +- name: Check if bucket exists + ansible.builtin.command: + cmd: docker compose exec -T garage /garage bucket info {{ s3_bucket }} + chdir: "{{ stack_deploy_dir }}" + register: bucket_check + failed_when: false + changed_when: false + when: inventory_hostname == groups['sync_nodes'][0] + +- name: Create S3 bucket + ansible.builtin.command: + cmd: docker compose exec -T garage /garage bucket create {{ s3_bucket }} + chdir: "{{ stack_deploy_dir }}" + when: + - inventory_hostname == groups['sync_nodes'][0] + - bucket_check.rc != 0 + +# --- API key (idempotent — skipped if already exists via replication) --- + +- name: Check if API key exists + ansible.builtin.command: + cmd: docker compose exec -T garage /garage key info octobot-sync-key + chdir: "{{ stack_deploy_dir }}" + register: key_check + failed_when: false + changed_when: false + when: inventory_hostname == groups['sync_nodes'][0] + +- name: Create S3 API key + ansible.builtin.command: + cmd: docker compose exec -T garage /garage key create octobot-sync-key + chdir: "{{ stack_deploy_dir }}" + when: + - inventory_hostname == groups['sync_nodes'][0] + - key_check.rc != 0 + register: key_output + no_log: true + +- name: Grant key access to bucket + ansible.builtin.command: + 
cmd: >- + docker compose exec -T garage /garage bucket allow + --read --write --owner {{ s3_bucket }} --key octobot-sync-key + chdir: "{{ stack_deploy_dir }}" + when: + - inventory_hostname == groups['sync_nodes'][0] + - key_check.rc != 0 + +- name: Display new credentials + ansible.builtin.debug: + msg: | + Save these to vault.yml for this environment! + cp vault.yml.example inventories//group_vars/all/vault.yml + ansible-vault edit inventories//group_vars/all/vault.yml + + {{ key_output.stdout | default('(key already existed)') }} + + Node IDs for host_vars: + {% for host in groups['sync_nodes'] %} + {{ host }}: {{ hostvars[host]['discovered_garage_node_id'] }} + {% endfor %} + when: inventory_hostname == groups['sync_nodes'][0] + no_log: "{{ key_output is changed }}" diff --git a/infra/sync/ansible/roles/stack/files/collections.json b/infra/sync/ansible/roles/stack/files/collections.json new file mode 100644 index 000000000..ce85a18e5 --- /dev/null +++ b/infra/sync/ansible/roles/stack/files/collections.json @@ -0,0 +1,276 @@ +{ + "version": 1, + "collections": [ + { + "name": "signals", + "storagePath": "products/{productId}/signals/{version}", + "readRoles": ["member"], + "writeRoles": ["owner"], + "encryption": "none", + "maxBodyBytes": 65536, + "rateLimit": true + }, + { + "name": "product-profiles", + "storagePath": "products/{productId}/profile", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "description": { "type": "string" }, + "website": { "type": "string" }, + "twitter": { "type": "string" }, + "tags": { + "type": "array", + "items": { "type": "string", "maxLength": 100 }, + "maxItems": 50 + } + }, + "additionalProperties": false + }, + "rateLimit": true + }, + { + "name": "product-logos", + "storagePath": "products/{productId}/logo", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + 
"encryption": "none", + "maxBodyBytes": 2097152, + "allowedMimeTypes": ["image/png", "image/jpeg", "image/gif", "image/webp"], + "rateLimit": true + }, + { + "name": "product-versions", + "storagePath": "products/{productId}/versions/{version}/document", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "description": { "type": "string" } + }, + "additionalProperties": false + } + }, + { + "name": "bots", + "storagePath": "users/{identity}", + "bundle": "user-data", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "accounts", + "storagePath": "users/{identity}", + "bundle": "user-data", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "settings", + "storagePath": "users/{identity}", + "bundle": "user-data", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "notifications", + "storagePath": "users/{identity}", + "bundle": "user-data", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "referrals", + "storagePath": "users/{identity}/referrals", + "readRoles": ["self", "admin"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "invoices", + "storagePath": "users/{identity}/invoices", + "readRoles": ["self", "admin"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "affiliate", + "storagePath": "users/{identity}/affiliate", + "readRoles": ["self", "admin"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "donations", + "storagePath": "users/{identity}/donations", + "readRoles": ["self", "admin"], + "writeRoles": 
["admin"], + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "recovery", + "storagePath": "users/{identity}/recovery", + "readRoles": ["self", "admin"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 65536 + }, + { + "name": "courses-user", + "storagePath": "users/{identity}/courses", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 65536 + }, + { + "name": "platform-affiliates", + "storagePath": "platform/affiliates", + "readRoles": ["admin"], + "writeRoles": ["admin"], + "encryption": "server", + "maxBodyBytes": 65536 + }, + { + "name": "platform-referrals", + "storagePath": "platform/referrals", + "readRoles": ["admin"], + "writeRoles": ["admin"], + "encryption": "server", + "maxBodyBytes": 65536 + }, + { + "name": "errors", + "storagePath": "users/{identity}/errors/{errorId}", + "readRoles": ["self", "admin"], + "writeRoles": ["self"], + "encryption": "delegated", + "maxBodyBytes": 500000 + }, + { + "name": "profiles", + "storagePath": "public/profiles/{identity}", + "readRoles": ["public"], + "writeRoles": ["self", "admin"], + "pullOnly": false, + "encryption": "none", + "maxBodyBytes": 65536, + "rateLimit": true + }, + { + "name": "news", + "storagePath": "public/news/{lang}/{month}", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "forceFullFetch": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "courses", + "storagePath": "public/courses", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "exchanges", + "storagePath": "public/exchanges", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "cryptocurrencies", + "storagePath": "public/cryptocurrencies", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 
65536 + }, + { + "name": "cryptocurrency-detail", + "storagePath": "public/cryptocurrencies/{cryptocurrency}", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "highlights", + "storagePath": "public/products/highlights", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "categories", + "storagePath": "public/products/categories", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "plans", + "storagePath": "public/products/plans", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "products-index", + "storagePath": "public/products/index", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + }, + { + "name": "performance", + "storagePath": "products/{productId}/performance/{version}", + "readRoles": ["public"], + "writeRoles": [], + "pullOnly": true, + "encryption": "none", + "maxBodyBytes": 65536 + } + ], + "rateLimit": { + "windowMs": 60000, + "maxRequests": 100 + } +} diff --git a/infra/sync/ansible/roles/stack/handlers/main.yml b/infra/sync/ansible/roles/stack/handlers/main.yml new file mode 100644 index 000000000..283488ae0 --- /dev/null +++ b/infra/sync/ansible/roles/stack/handlers/main.yml @@ -0,0 +1,31 @@ +--- +- name: restart stack + ansible.builtin.command: + cmd: docker compose up -d + chdir: "{{ stack_deploy_dir }}" + +- name: restart garage + ansible.builtin.command: + cmd: docker compose restart garage + chdir: "{{ stack_deploy_dir }}" + +- name: restart octobot-sync + ansible.builtin.command: + cmd: docker compose up -d --no-deps --force-recreate octobot-sync + chdir: "{{ stack_deploy_dir }}" + +- name: restart nginx + ansible.builtin.command: + cmd: docker 
compose restart nginx + chdir: "{{ stack_deploy_dir }}" + +- name: regenerate nginx config + ansible.builtin.shell: + cmd: >- + docker run --rm --entrypoint python3 + -v {{ stack_deploy_dir }}/collections.json:/tmp/collections.json:ro + {{ octobot_sync_image }}:{{ octobot_image_tag }} + -m octobot_sync.util.nginx_conf /tmp/collections.json > nginx.conf + chdir: "{{ stack_deploy_dir }}" + # stdout is written to nginx.conf; task reports changed so the notify fires + notify: restart nginx diff --git a/infra/sync/ansible/roles/stack/tasks/healthcheck.yml b/infra/sync/ansible/roles/stack/tasks/healthcheck.yml new file mode 100644 index 000000000..af6475e83 --- /dev/null +++ b/infra/sync/ansible/roles/stack/tasks/healthcheck.yml @@ -0,0 +1,36 @@ +--- +- name: Wait for Garage health + ansible.builtin.uri: + url: "http://127.0.0.1:3903/health" + return_content: true + status_code: [200] + register: garage_health + until: garage_health.status == 200 + retries: 10 + delay: 5 + +- name: Wait for OctoBot sync health + ansible.builtin.uri: + url: "http://127.0.0.1:{{ octobot_sync_port }}/health" + return_content: true + status_code: [200] + register: octobot_health + until: octobot_health.status == 200 + retries: 30 + delay: 5 + +- name: Verify OctoBot sync responds ok + ansible.builtin.assert: + that: + - octobot_health.json.ok == true + fail_msg: "OctoBot sync health check failed on {{ inventory_hostname }}" + +- name: Wait for Nginx health (proxied) + ansible.builtin.uri: + url: "http://127.0.0.1:{{ nginx_port }}/health" + return_content: true + status_code: [200] + register: nginx_health + until: nginx_health.status == 200 + retries: 10 + delay: 5 diff --git a/infra/sync/ansible/roles/stack/tasks/main.yml b/infra/sync/ansible/roles/stack/tasks/main.yml new file mode 100644 index 000000000..2b01ed474 --- /dev/null +++ b/infra/sync/ansible/roles/stack/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: Create deploy directory + ansible.builtin.file: + path: "{{ stack_deploy_dir }}" + state: directory + owner: deploy + group: deploy + mode: "0755" + +- name: 
Render docker-compose.yml + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ stack_deploy_dir }}/docker-compose.yml" + owner: deploy + group: deploy + mode: "0644" + notify: restart stack + +- name: Render .env + ansible.builtin.template: + src: env.j2 + dest: "{{ stack_deploy_dir }}/.env" + owner: deploy + group: deploy + mode: "0600" + notify: restart octobot-sync + no_log: true + +- name: Render garage.toml + ansible.builtin.template: + src: garage.toml.j2 + dest: "{{ stack_deploy_dir }}/garage.toml" + owner: deploy + group: deploy + mode: "0644" + notify: restart garage + +- name: Copy collections.json + ansible.builtin.copy: + src: collections.json + dest: "{{ stack_deploy_dir }}/collections.json" + owner: deploy + group: deploy + mode: "0644" + notify: regenerate nginx config + +- name: Pull Docker images + ansible.builtin.command: + cmd: docker compose pull + chdir: "{{ stack_deploy_dir }}" + changed_when: false + +- name: Generate nginx.conf via OctoBot sync image + ansible.builtin.command: + cmd: >- + docker run --rm --entrypoint python3 + -v {{ stack_deploy_dir }}/collections.json:/tmp/collections.json:ro + {{ octobot_sync_image }}:{{ octobot_image_tag }} + -m octobot_sync.util.nginx_conf /tmp/collections.json + register: nginx_conf_output + changed_when: false + +- name: Write nginx.conf + ansible.builtin.copy: + content: "{{ nginx_conf_output.stdout }}" + dest: "{{ stack_deploy_dir }}/nginx.conf" + owner: deploy + group: deploy + mode: "0644" + notify: restart nginx + +- name: Start stack + ansible.builtin.command: + cmd: docker compose up -d + chdir: "{{ stack_deploy_dir }}" + changed_when: false + +- name: Flush handlers before health check + ansible.builtin.meta: flush_handlers + +- name: Run health checks + ansible.builtin.include_tasks: healthcheck.yml diff --git a/infra/sync/ansible/roles/stack/templates/docker-compose.yml.j2 b/infra/sync/ansible/roles/stack/templates/docker-compose.yml.j2 new file mode 100644 index 
000000000..2281597b1 --- /dev/null +++ b/infra/sync/ansible/roles/stack/templates/docker-compose.yml.j2 @@ -0,0 +1,66 @@ +services: + garage: + image: {{ garage_image }} + ports: + - "127.0.0.1:3900:3900" + - "0.0.0.0:3901:3901" + - "127.0.0.1:3902:3902" + - "127.0.0.1:3903:3903" + restart: unless-stopped + mem_limit: 2g + security_opt: + - no-new-privileges:true + volumes: + - garage_meta:/var/lib/garage/meta + - garage_data:/var/lib/garage/data + - ./garage.toml:/etc/garage.toml:ro + healthcheck: + test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3903/health"] + interval: 10s + retries: 5 + networks: + - backend + + octobot-sync: + image: {{ octobot_sync_image }}:{{ octobot_image_tag }} + command: ["OctoBot", "sync", "--host", "0.0.0.0", "--port", "{{ octobot_sync_port }}"] + env_file: .env + expose: + - "{{ octobot_sync_port }}" + restart: unless-stopped + mem_limit: 1g + security_opt: + - no-new-privileges:true + depends_on: + garage: + condition: service_healthy + networks: + - frontend + - backend + + nginx: + image: {{ nginx_image }} + ports: + - "{{ nginx_port }}:80" + volumes: + - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro + depends_on: + - octobot-sync + restart: unless-stopped + mem_limit: 256m + security_opt: + - no-new-privileges:true + healthcheck: + test: ["CMD", "wget", "-qO-", "http://127.0.0.1/health"] + interval: 10s + retries: 5 + networks: + - frontend + +volumes: + garage_meta: + garage_data: + +networks: + frontend: + backend: diff --git a/infra/sync/ansible/roles/stack/templates/env.j2 b/infra/sync/ansible/roles/stack/templates/env.j2 new file mode 100644 index 000000000..a9d23a55a --- /dev/null +++ b/infra/sync/ansible/roles/stack/templates/env.j2 @@ -0,0 +1,11 @@ +S3_ACCESS_KEY={{ s3_access_key }} +S3_SECRET_KEY={{ s3_secret_key }} +S3_ENDPOINT=http://garage:3900 +S3_BUCKET={{ s3_bucket }} +S3_REGION={{ s3_region }} +PLATFORM_PUBKEY_EVM={{ platform_pubkey_evm }} +ENCRYPTION_SECRET={{ encryption_secret }} +PLATFORM_ENCRYPTION_SECRET={{ 
platform_encryption_secret }} +PORT={{ octobot_sync_port }} +EVM_BASE_RPC={{ evm_base_rpc }} +EVM_CONTRACT_BASE={{ evm_contract_base }} diff --git a/infra/sync/ansible/roles/stack/templates/garage.toml.j2 b/infra/sync/ansible/roles/stack/templates/garage.toml.j2 new file mode 100644 index 000000000..ce8d0b353 --- /dev/null +++ b/infra/sync/ansible/roles/stack/templates/garage.toml.j2 @@ -0,0 +1,31 @@ +metadata_dir = "/var/lib/garage/meta" +data_dir = "/var/lib/garage/data" +db_engine = "lmdb" +metadata_auto_snapshot_interval = "6h" + +replication_factor = {{ garage_replication_factor }} +compression_level = 2 + +rpc_bind_addr = "[::]:3901" +rpc_public_addr = "{{ garage_rpc_public_addr }}" +rpc_secret = "{{ garage_rpc_secret }}" + +bootstrap_peers = [{{ garage_bootstrap_peers }}] + +[s3_api] +s3_region = "{{ s3_region }}" +api_bind_addr = "[::]:3900" +root_domain = ".s3.garage" + +[s3_web] +bind_addr = "[::]:3902" +root_domain = ".web.garage" +index = "index.html" + +[k2v_api] +api_bind_addr = "[::]:3904" + +[admin] +api_bind_addr = "[::]:3903" +admin_token = "{{ garage_admin_token }}" +metrics_token = "{{ garage_metrics_token }}" diff --git a/infra/sync/ansible/scripts/pre-commit-vault-check.py b/infra/sync/ansible/scripts/pre-commit-vault-check.py new file mode 100644 index 000000000..427ff5d50 --- /dev/null +++ b/infra/sync/ansible/scripts/pre-commit-vault-check.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +"""Pre-commit hook: ensure vault.yml and hosts.yml are encrypted before committing. + +Install: + cp infra/sync/ansible/scripts/pre-commit-vault-check.py .git/hooks/pre-commit + chmod +x .git/hooks/pre-commit # Unix only, not needed on Windows + +Works on Linux, macOS, and Windows. 
+""" + +import fnmatch +import subprocess +import sys + +SENSITIVE_PATTERNS = [ + "infra/sync/ansible/inventories/*/group_vars/all/vault.yml", + "infra/sync/ansible/inventories/*/hosts.yml", +] + +VAULT_HEADER = "$ANSIBLE_VAULT" + + +def get_staged_files(): + result = subprocess.run( + ["git", "diff", "--cached", "--name-only"], + capture_output=True, text=True, check=True, + ) + return result.stdout.strip().splitlines() + + +def matches_any_pattern(filepath): + # Normalize to forward slashes for cross-platform matching + normalized = filepath.replace("\\", "/") + return any(fnmatch.fnmatch(normalized, p) for p in SENSITIVE_PATTERNS) + + +def is_encrypted(filepath): + # Check the STAGED blob (what will actually be committed), not the working tree. + try: + staged = subprocess.run(["git", "show", f":{filepath}"], + capture_output=True, text=True, check=True) + return staged.stdout.lstrip().startswith(VAULT_HEADER) + except (subprocess.CalledProcessError, UnicodeDecodeError): + return True + + +def main(): + staged = get_staged_files() + failed = [] + + for filepath in staged: + if matches_any_pattern(filepath) and not is_encrypted(filepath): + failed.append(filepath) + + if failed: + print("ERROR: The following files are NOT encrypted:") + for f in failed: + print(f" {f}") + print() + print("Encrypt them before committing:") + for f in failed: + print(f" ansible-vault encrypt {f}") + return 1 + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/infra/sync/ansible/scripts/vault-password.sh b/infra/sync/ansible/scripts/vault-password.sh new file mode 100755 index 000000000..eb09d1b8d --- /dev/null +++ b/infra/sync/ansible/scripts/vault-password.sh @@ -0,0 +1,2 @@ +#!/bin/bash +echo "${ANSIBLE_VAULT_PASSWORD}" diff --git a/infra/sync/ansible/vault.yml.example b/infra/sync/ansible/vault.yml.example new file mode 100644 index 000000000..cbcbcfb97 --- /dev/null +++ b/infra/sync/ansible/vault.yml.example @@ -0,0 +1,25 @@ +# Copy to inventories//group_vars/all/vault.yml and fill in secrets +# Then encrypt: ansible-vault encrypt 
inventories//group_vars/all/vault.yml +# +# Generate secrets: +# openssl rand -hex 32 → vault_garage_rpc_secret +# openssl rand -base64 32 → vault_garage_admin_token, vault_garage_metrics_token +# openssl rand -base64 64 → vault_encryption_secret, vault_platform_encryption_secret +# +# S3 credentials come from setup-garage.yml (garage key create) +# Platform EVM address: derive from your private key or use an existing wallet address + +vault_garage_rpc_secret: "CHANGE_ME" +vault_garage_admin_token: "CHANGE_ME" +vault_garage_metrics_token: "CHANGE_ME" + +vault_s3_access_key: "CHANGE_ME" +vault_s3_secret_key: "CHANGE_ME" + +vault_platform_pubkey_evm: "0xCHANGE_ME" +vault_encryption_secret: "CHANGE_ME" # openssl rand -base64 64 +vault_platform_encryption_secret: "CHANGE_ME" # openssl rand -base64 64 + +# Optional +# vault_evm_base_rpc: "https://mainnet.base.org" +# vault_evm_contract_base: "0x..." diff --git a/infra/sync/requirements.txt b/infra/sync/requirements.txt new file mode 100644 index 000000000..0cb2ed597 --- /dev/null +++ b/infra/sync/requirements.txt @@ -0,0 +1 @@ +ansible-core>=2.16,<2.18 diff --git a/packages/sync/CHANGELOG.md b/packages/sync/CHANGELOG.md new file mode 100644 index 000000000..dfc664d1a --- /dev/null +++ b/packages/sync/CHANGELOG.md @@ -0,0 +1,23 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [0.1.0] - 2026-03-20 +### Added +- [Collections] `product-profiles` collection with JSON Schema validation (name, description, website, twitter, tags) +- [Collections] `product-logos` binary collection with MIME type validation (PNG, JPEG, GIF, WebP) +- [Collections] `product-versions` collection with JSON Schema validation for version documents +- [Signals] `member` role for signal reads — public products allow all authenticated users, private products require on-chain `has_access` +- [RoleEnricher] Assign `member` role via on-chain `has_access` check (owner gets both `owner` and `member`) +- [NginxConf] Escape regex metacharacters in storage paths to prevent nginx config injection +- [NginxConf] Validate collection names (alphanumeric, hyphens, underscores only) +- [NginxConf] Reject zero/negative rate limit values +- [Security] Auth failure logging via `octobot_sync.security` logger +### Changed +- [Constants] Reduce auth timestamp window from 30s to 10s +### Removed +- [Routes] Remove manual product routes (GET/PUT) — replaced by declarative Starfish collections +- [Routes] Remove unused `/verify` endpoint (auth handled by starfish role_resolver) +- [App] Remove `app.state` dependencies (object_store, registry, platform_pubkey) — all handled by Starfish router diff --git a/packages/sync/full_requirements.txt b/packages/sync/full_requirements.txt index ba76dc1c1..d27e23815 100644 --- a/packages/sync/full_requirements.txt +++ b/packages/sync/full_requirements.txt @@ -1,2 +1,2 @@ -starfish-server==1.0.0 +starfish-server==1.1.0 cachetools diff --git a/packages/sync/octobot_sync/app.py b/packages/sync/octobot_sync/app.py index 5e19b0622..d5df2d45a 100644 --- a/packages/sync/octobot_sync/app.py +++ b/packages/sync/octobot_sync/app.py @@ -27,7 +27,6 @@ import octobot_sync.auth as auth import octobot_sync.chain as chain import octobot_sync.constants as constants -import octobot_sync.routes as routes import octobot_sync.sync as sync @@ -47,20 +46,8 @@ def 
create_app( encryption_secret = os.environ["ENCRYPTION_SECRET"] platform_encryption_secret = os.environ["PLATFORM_ENCRYPTION_SECRET"] - # Store shared deps on app.state for route handlers - app.state.object_store = object_store - app.state.nonce = nonce - app.state.registry = registry - app.state.platform_pubkey = platform_pubkey - app.state.encryption_secret = encryption_secret - app.state.platform_encryption_secret = platform_encryption_secret - sync_config = sync.load_sync_config(collections_path) - # Health + verify (unversioned) - app.include_router(routes.health.router) - app.include_router(routes.verify.router) - replica_manager = None if primary_url: sync_config = sync.make_replica_config( @@ -98,10 +85,6 @@ def create_app( ) app.include_router(sync_router, prefix="/v1") - # Manual routes (non-sync) - app.include_router(routes.product_meta.router, prefix="/v1") - app.include_router(routes.product.router, prefix="/v1") - if replica_manager: @app.on_event("startup") async def _start_replica(): diff --git a/packages/sync/octobot_sync/constants.py b/packages/sync/octobot_sync/constants.py index 24b4e647e..695e0bdae 100644 --- a/packages/sync/octobot_sync/constants.py +++ b/packages/sync/octobot_sync/constants.py @@ -14,7 +14,6 @@ # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see . 
-MAX_BODY_SIZE_META = 2 * 1024 * 1024 # 2 MB — product meta + logo upload MAX_BODY_SIZE_SIGNAL = 64 * 1024 # 64 KB — signal payload MAX_BODY_SIZE_PERFORMANCE = 64 * 1024 # 64 KB — live performance snapshot MAX_BODY_SIZE_PRIVATE = 10 * 1024 * 1024 # 10 MB — private documents @@ -25,16 +24,11 @@ MAX_NONCE_LENGTH = 128 MAX_PUBKEY_LENGTH = 256 MAX_SIGNATURE_LENGTH = 512 -TIMESTAMP_WINDOW_MS = 30_000 +TIMESTAMP_WINDOW_MS = 10_000 HEADER_PUBKEY = "X-OctoBot-Pubkey" HEADER_SIGNATURE = "X-OctoBot-Signature" HEADER_TIMESTAMP = "X-OctoBot-Timestamp" HEADER_NONCE = "X-OctoBot-Nonce" HEADER_CHAIN = "X-OctoBot-Chain" -HEADER_ORIGINAL_METHOD = "X-Original-Method" -HEADER_ORIGINAL_URI = "X-Original-URI" - COLLECTIONS_FILE = "collections.json" - -DEFAULT_VERSION = 0 diff --git a/packages/sync/octobot_sync/routes/__init__.py b/packages/sync/octobot_sync/routes/__init__.py deleted file mode 100644 index 9e4057a61..000000000 --- a/packages/sync/octobot_sync/routes/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Drakkar-Software OctoBot-Sync -# Copyright (c) Drakkar-Software, All rights reserved. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library. 
- -from octobot_sync.routes import health -from octobot_sync.routes import verify -from octobot_sync.routes import product -from octobot_sync.routes import product_meta - -__all__ = [ - "health", - "verify", - "product", - "product_meta", -] diff --git a/packages/sync/octobot_sync/routes/health.py b/packages/sync/octobot_sync/routes/health.py deleted file mode 100644 index c562f72ec..000000000 --- a/packages/sync/octobot_sync/routes/health.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) -# Copyright (c) 2025 Drakkar-Software, All rights reserved. -# -# OctoBot is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# OctoBot is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with OctoBot. If not, see . - -import time - -from fastapi import APIRouter - -router = APIRouter() - - -@router.get("/health") -async def health(): - return {"ok": True, "ts": int(time.time() * 1000)} diff --git a/packages/sync/octobot_sync/routes/product.py b/packages/sync/octobot_sync/routes/product.py deleted file mode 100644 index dad45b993..000000000 --- a/packages/sync/octobot_sync/routes/product.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) -# Copyright (c) 2025 Drakkar-Software, All rights reserved. 
-# -# OctoBot is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# OctoBot is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with OctoBot. If not, see . - -import json - -from fastapi import APIRouter, Request -from fastapi.responses import JSONResponse - -import octobot_sync.sync as sync - -router = APIRouter() - - -async def _get_doc(store, prefix: str, doc_id: str) -> dict | None: - raw = await store.get_string(f"{prefix}/{doc_id}.json") - if not raw: - return None - return json.loads(raw) - - -@router.get("/product/{product_pubkey}") -async def get_product(product_pubkey: str, request: Request): - registry = request.app.state.registry - object_store = request.app.state.object_store - - product = await sync.find_item(registry, product_pubkey) - if product is None: - return JSONResponse({"error": "Product not found"}, status_code=404) - - profile = await _get_doc(object_store, f"products/{product_pubkey}", "profile") - - return { - "product": {"id": product.id, "owner": product.owner}, - "profile": profile or {}, - } diff --git a/packages/sync/octobot_sync/routes/product_meta.py b/packages/sync/octobot_sync/routes/product_meta.py deleted file mode 100644 index e7b715d00..000000000 --- a/packages/sync/octobot_sync/routes/product_meta.py +++ /dev/null @@ -1,112 +0,0 @@ -# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) -# Copyright (c) 2025 Drakkar-Software, All rights reserved. 
-# -# OctoBot is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# OctoBot is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with OctoBot. If not, see . - -import json - -from fastapi import APIRouter, Request -from fastapi.responses import JSONResponse - -import octobot_sync.constants as constants -import octobot_sync.sync as sync - -router = APIRouter() - -UNSAFE_KEYS = frozenset({"__proto__", "constructor", "prototype"}) - - -async def _get_doc(store, prefix: str, doc_id: str) -> dict | None: - raw = await store.get_string(f"{prefix}/{doc_id}.json") - if not raw: - return None - return json.loads(raw) - - -async def _put_doc(store, prefix: str, doc_id: str, doc: dict) -> None: - await store.put(f"{prefix}/{doc_id}.json", json.dumps(doc), content_type="application/json") - - -@router.put("/product/{product_pubkey}/{version}/meta") -async def put_product_meta(product_pubkey: str, version: str, request: Request): - """Upload product metadata (multipart form: profile fields + logo + version_description).""" - object_store = request.app.state.object_store - registry = request.app.state.registry - - # Parse version - raw_version = version.lstrip("v") or str(constants.DEFAULT_VERSION) - try: - ver = int(raw_version) - except ValueError: - return JSONResponse({"error": "Invalid version"}, status_code=400) - - form_data = await request.form() - - # Profile fields — whitelist only - profile_fields: dict = {} - for field in ("name", "description", "website", "twitter"): - value = form_data.get(field) - if value and str(value): - 
profile_fields[field] = value - - tags = form_data.get("tags") - if tags and str(tags): - try: - profile_fields["tags"] = json.loads(tags) - except json.JSONDecodeError: - return JSONResponse({"error": "Invalid tags JSON"}, status_code=400) - - # Merge with existing profile — sanitize both sides - if profile_fields: - raw = await _get_doc(object_store, f"products/{product_pubkey}", "profile") or {} - existing = {k: v for k, v in raw.items() if k not in UNSAFE_KEYS} - await _put_doc( - object_store, f"products/{product_pubkey}", "profile", {**existing, **profile_fields} - ) - - # Logo upload - logo = form_data.get("logo") - if logo and hasattr(logo, "read"): - logo_data = await logo.read() - await object_store.put( - f"products/{product_pubkey}/logo.png", - logo_data.decode("utf-8") if bytes(logo_data) else logo_data, - content_type="image/png", - ) - - # Version description - version_desc = form_data.get("version_description") - if version_desc and str(version_desc): - await _put_doc( - object_store, - f"products/{product_pubkey}/v{ver}", - "document", - {"description": version_desc}, - ) - - return {"ok": True} - - -@router.get("/product/{product_pubkey}/info") -async def get_product_info(product_pubkey: str, request: Request): - object_store = request.app.state.object_store - registry = request.app.state.registry - - product = await sync.find_item(registry, product_pubkey) - profile = await _get_doc(object_store, f"products/{product_pubkey}", "profile") - - return { - "product": {"id": product.id, "owner": product.owner} if product else None, - "profile": profile or {}, - } diff --git a/packages/sync/octobot_sync/routes/verify.py b/packages/sync/octobot_sync/routes/verify.py deleted file mode 100644 index f99a6aef4..000000000 --- a/packages/sync/octobot_sync/routes/verify.py +++ /dev/null @@ -1,118 +0,0 @@ -# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) -# Copyright (c) 2025 Drakkar-Software, All rights reserved. 
-# -# OctoBot is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# OctoBot is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with OctoBot. If not, see . - -import re -import time - -from fastapi import APIRouter, Request, Response - -import octobot_sync.auth as auth -import octobot_sync.chain as chain -import octobot_sync.constants as constants - -router = APIRouter() - - -def _strip_bucket_prefix(uri: str) -> str: - without_leading_slash = uri.lstrip("/") - slash_idx = without_leading_slash.find("/") - return "" if slash_idx == -1 else without_leading_slash[slash_idx + 1 :] - - -def _is_path_authorized( - s3_path: str, method: str, pubkey: str, platform_pubkey: str -) -> bool: - is_read = method in ("GET", "HEAD") - - if s3_path.startswith("products/"): - return is_read or pubkey == platform_pubkey - - if s3_path.startswith("public/"): - return is_read or pubkey == platform_pubkey - - if s3_path.startswith("users/"): - parts = s3_path.split("/") - path_pubkey = parts[1] if len(parts) > 1 else "" - return pubkey == path_pubkey or pubkey == platform_pubkey - - if s3_path.startswith("platform/"): - return pubkey == platform_pubkey - - return False - - -@router.get("/verify") -async def verify(request: Request) -> Response: - registry: chain.ChainRegistry = request.app.state.registry - nonce_store: auth.NonceStore = request.app.state.nonce - platform_pubkey: str = request.app.state.platform_pubkey - - pubkey = request.headers.get(constants.HEADER_PUBKEY) - signature = request.headers.get(constants.HEADER_SIGNATURE) - 
timestamp = request.headers.get(constants.HEADER_TIMESTAMP) - nonce_header = request.headers.get(constants.HEADER_NONCE) - chain_id = request.headers.get(constants.HEADER_CHAIN) - - original_method = request.headers.get(constants.HEADER_ORIGINAL_METHOD) - original_uri = request.headers.get(constants.HEADER_ORIGINAL_URI) - - # Public reads on products/ and public/ don't require auth - if original_uri and original_method == "GET": - path = _strip_bucket_prefix(original_uri) - if path.startswith("products/") or path.startswith("public/"): - return Response(status_code=200) - - if not all([pubkey, signature, timestamp, nonce_header, chain_id]): - return Response(status_code=401) - - if len(pubkey) > constants.MAX_PUBKEY_LENGTH: - return Response(status_code=401) - if len(signature) > constants.MAX_SIGNATURE_LENGTH: - return Response(status_code=401) - if len(nonce_header) > constants.MAX_NONCE_LENGTH: - return Response(status_code=401) - - if not re.match(r"^\d+$", timestamp): - return Response(status_code=401) - ts = int(timestamp) - if abs(ts - int(time.time() * 1000)) > constants.TIMESTAMP_WINDOW_MS: - return Response(status_code=401) - - try: - chain = registry.get(chain_id) - except Exception: - return Response(status_code=401) - - body_hash = auth.hash_body("") - method = original_method or "GET" - path = original_uri or "/" - canonical = auth.build_canonical(method, path, timestamp, nonce_header, body_hash) - - valid = await chain.verify_signature(canonical, signature, pubkey) - if not valid: - return Response(status_code=401) - - fresh = await nonce_store.nonce_insert(nonce_header, pubkey) - if not fresh: - return Response(status_code=401) - - # Path-based authorization - if original_uri: - s3_path = _strip_bucket_prefix(original_uri) - if not _is_path_authorized(s3_path, method, pubkey, platform_pubkey): - return Response(status_code=401) - - return Response(status_code=200) diff --git a/packages/sync/octobot_sync/sync/__init__.py 
b/packages/sync/octobot_sync/sync/__init__.py index e906eb14f..9b4646c9b 100644 --- a/packages/sync/octobot_sync/sync/__init__.py +++ b/packages/sync/octobot_sync/sync/__init__.py @@ -26,7 +26,6 @@ create_role_resolver, create_role_enricher, create_signature_verifier, - find_item, ) __all__ = [ @@ -36,5 +35,4 @@ "create_role_resolver", "create_role_enricher", "create_signature_verifier", - "find_item", ] diff --git a/packages/sync/octobot_sync/sync/role_resolver.py b/packages/sync/octobot_sync/sync/role_resolver.py index 37aa3797d..b1570a5a5 100644 --- a/packages/sync/octobot_sync/sync/role_resolver.py +++ b/packages/sync/octobot_sync/sync/role_resolver.py @@ -92,11 +92,13 @@ async def role_resolver(request: Request) -> AuthResult: def create_role_enricher(registry: chain.ChainRegistry): async def role_enricher(auth: AuthResult, params: dict[str, str]) -> list[str]: product_id = params.get("productId") - if product_id: - for chain in registry.list(): - is_owner = await chain.is_item_owner(product_id, auth.identity) - if is_owner: - return ["owner"] + if not product_id: + return [] + for c in registry.list(): + if await c.is_item_owner(product_id, auth.identity): + return ["owner", "member"] + if await c.has_access(product_id, auth.identity): + return ["member"] return [] return role_enricher @@ -113,11 +115,3 @@ async def signature_verifier(data: str, signature: str, pubkey: str) -> bool: return False return signature_verifier - - -async def find_item(registry: chain.ChainRegistry, item_id: str) -> chain.Item | None: - for chain in registry.list(): - item = await chain.get_item(item_id) - if item is not None: - return item - return None diff --git a/packages/sync/octobot_sync/util/__init__.py b/packages/sync/octobot_sync/util/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/sync/octobot_sync/util/nginx_conf.py b/packages/sync/octobot_sync/util/nginx_conf.py new file mode 100644 index 000000000..e71a5de78 --- /dev/null +++ 
b/packages/sync/octobot_sync/util/nginx_conf.py @@ -0,0 +1,217 @@ +"""Generate an nginx config from an OctoBot sync collections.json. + +Produces an nginx server block where: +- Public + pull_only collections → cached with long TTL (1h) +- Public + writable collections → cached with short TTL (30s) +- Everything else → proxy_pass straight to OctoBot sync, no cache +- Rate limiting from global rateLimit config + per-collection rateLimit flag + +CLI usage: + python -m octobot_sync.nginx_conf collections.json > nginx.conf + python -m octobot_sync.nginx_conf collections.json --upstream octobot-sync:3000 --listen 80 +""" + +import argparse +import json +import math +import re +import sys +import textwrap + + +_COLLECTION_NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$") + + +def storage_path_to_regex(storage_path: str) -> str: + """Convert a storagePath to an nginx location regex. + + "items/{itemId}/feed/{version}" → "items/[^/]+/feed/[^/]+" + "public/catalog" → "public/catalog" + + Literal path segments are escaped so that regex metacharacters in + collection paths cannot inject arbitrary nginx location patterns. + """ + parts = re.split(r"(\{[^}]+\})", storage_path) + result = [] + for part in parts: + if part.startswith("{") and part.endswith("}"): + result.append("[^/]+") + else: + result.append(re.escape(part)) + return "".join(result) + + +def rate_to_nginx(max_requests: int, window_ms: int) -> tuple[str, int]: + """Convert maxRequests/windowMs to nginx rate string and burst. + + Returns (rate_str, burst) e.g. ("2r/s", 20). 
+ """ + if max_requests <= 0 or window_ms <= 0: + raise ValueError(f"Rate limit values must be positive: maxRequests={max_requests}, windowMs={window_ms}") + window_s = window_ms / 1000 + rps = max_requests / window_s + if rps >= 1: + rate_str = f"{math.ceil(rps)}r/s" + else: + rpm = max_requests / (window_s / 60) + rate_str = f"{math.ceil(rpm)}r/m" + burst = max(1, max_requests // 2) + return rate_str, burst + + +def generate(collections_path: str, upstream: str, listen: int) -> str: + with open(collections_path) as f: + config = json.load(f) + + collections = config.get("collections", []) + for col in collections: + name = col.get("name", "") + if not _COLLECTION_NAME_RE.match(name): + raise ValueError(f"Invalid collection name (must be alphanumeric/hyphens/underscores): {name!r}") + global_rate_limit = config.get("rateLimit") + + # Rate limit zones + rate_limit_block = "" + global_rate_str = "" + global_burst = 0 + strict_rate_str = "" + strict_burst = 0 + if global_rate_limit: + window_ms = global_rate_limit["windowMs"] + max_requests = global_rate_limit["maxRequests"] + global_rate_str, global_burst = rate_to_nginx(max_requests, window_ms) + strict_rate_str, strict_burst = rate_to_nginx( + max(1, max_requests // 2), window_ms + ) + rate_limit_block = textwrap.dedent(f"""\ + limit_req_zone $binary_remote_addr zone=sync_global:10m rate={global_rate_str}; + limit_req_zone $binary_remote_addr zone=sync_strict:10m rate={strict_rate_str}; + limit_req_status 429; + """) + + # Cached pull locations for public collections + cached_locations = [] + for col in collections: + read_roles = col.get("readRoles", []) + if "public" not in read_roles: + continue + + path_re = storage_path_to_regex(col["storagePath"]) + pull_only = col.get("pullOnly", False) + name = col["name"] + + ttl = "1h" if pull_only else "30s" + + rate_line = "" + if global_rate_limit: + rate_line = f"\n limit_req zone=sync_global burst={global_burst} nodelay;" + + cached_locations.append( + 
textwrap.dedent(f"""\ + # {name} (public, {"pull_only" if pull_only else "writable"}) + location ~* ^/v1/pull/{path_re}$ {{ + proxy_pass http://octobot_sync; + proxy_cache sync_cache; + proxy_cache_valid 200 {ttl}; + proxy_cache_use_stale error timeout updating; + proxy_cache_lock on; + add_header X-Cache-Status $upstream_cache_status;{rate_line} + }}""") + ) + + # Strict rate-limited push locations for collections with rateLimit: true + rate_limited_push_locations = [] + if global_rate_limit: + for col in collections: + if not col.get("rateLimit"): + continue + if col.get("pullOnly"): + continue + + path_re = storage_path_to_regex(col["storagePath"]) + name = col["name"] + + rate_limited_push_locations.append( + textwrap.dedent(f"""\ + # {name} (rate limited push) + location ~* ^/v1/push/{path_re}$ {{ + proxy_pass http://octobot_sync; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + limit_req zone=sync_strict burst={strict_burst} nodelay; + }}""") + ) + + cached_block = "\n\n".join(cached_locations) if cached_locations else "" + push_block = "\n\n".join(rate_limited_push_locations) if rate_limited_push_locations else "" + + # Global rate limit on catch-all + catchall_rate_line = "" + if global_rate_limit: + catchall_rate_line = f"\n limit_req zone=sync_global burst={global_burst} nodelay;" + + return textwrap.dedent(f"""\ +proxy_cache_path /var/cache/nginx/sync + levels=1:2 + keys_zone=sync_cache:10m + max_size=1g + inactive=60m + use_temp_path=off; + +{rate_limit_block}upstream octobot_sync {{ + server {upstream}; +}} + +server {{ + listen {listen}; + server_name _; + + client_max_body_size 10m; + + # ── Health (no cache, no rate limit) ── + location = /health {{ + proxy_pass http://octobot_sync; + }} + +{textwrap.indent(cached_block, " ") if cached_block else " # (no public collections found)"} + +{textwrap.indent(push_block, " ") if push_block else ""} + + # ── Catch-all: 
proxy to OctoBot sync, no cache ── + location /v1/ {{ + proxy_pass http://octobot_sync; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme;{catchall_rate_line} + }} + + # Reject anything outside /v1 and /health + location / {{ + return 404; + }} +}} +""") + + +def main(): + parser = argparse.ArgumentParser( + description="Generate nginx config from OctoBot sync collections.json" + ) + parser.add_argument("collections", help="Path to collections.json") + parser.add_argument( + "--upstream", default="octobot-sync:3000", + help="OctoBot sync upstream host:port (default: octobot-sync:3000)", + ) + parser.add_argument( + "--listen", type=int, default=80, + help="nginx listen port (default: 80)", + ) + args = parser.parse_args() + + sys.stdout.write(generate(args.collections, args.upstream, args.listen)) + + +if __name__ == "__main__": + main() diff --git a/packages/sync/requirements.txt b/packages/sync/requirements.txt index 7888065a9..85f5ce6f5 100644 --- a/packages/sync/requirements.txt +++ b/packages/sync/requirements.txt @@ -1,3 +1,3 @@ -starfish-sdk==1.0.0 +starfish-sdk==1.1.0 python-multipart web3 diff --git a/packages/sync/tests/e2e/conftest.py b/packages/sync/tests/e2e/conftest.py index 5eb3e3537..c10da20d9 100644 --- a/packages/sync/tests/e2e/conftest.py +++ b/packages/sync/tests/e2e/conftest.py @@ -17,7 +17,6 @@ """Shared fixtures for e2e tests.""" import os -import time from pathlib import Path import pytest @@ -26,7 +25,6 @@ import octobot_sync.app as sync_app import octobot_sync.auth as auth import octobot_sync.chain as chain -import octobot_sync.constants as constants import tests.mock_chain as mock_chain_module ADMIN_PUBKEY = "0xE2eAdminPubkey" @@ -81,30 +79,3 @@ async def client(app): transport = ASGITransport(app=app) async with AsyncClient(transport=transport, base_url="http://test") as ac: yield ac - - -def 
make_verify_headers( - mock_chain: mock_chain_module.MockChain, - pubkey: str, - original_method: str, - original_uri: str, -) -> dict[str, str]: - """Build valid auth + nginx headers for the /verify endpoint.""" - ts = str(int(time.time() * 1000)) - nonce = f"e2e-nonce-{time.time()}" - body_hash = auth.hash_body("") - canonical = auth.build_canonical( - original_method, original_uri, ts, nonce, body_hash - ) - signature = f"e2e-sig-{ts}" - mock_chain.set_signature_valid(canonical, signature, pubkey, True) - - return { - constants.HEADER_PUBKEY: pubkey, - constants.HEADER_SIGNATURE: signature, - constants.HEADER_TIMESTAMP: ts, - constants.HEADER_NONCE: nonce, - constants.HEADER_CHAIN: CHAIN_ID, - constants.HEADER_ORIGINAL_METHOD: original_method, - constants.HEADER_ORIGINAL_URI: original_uri, - } diff --git a/packages/sync/tests/e2e/test_product_e2e.py b/packages/sync/tests/e2e/test_product_e2e.py deleted file mode 100644 index b6674bb27..000000000 --- a/packages/sync/tests/e2e/test_product_e2e.py +++ /dev/null @@ -1,177 +0,0 @@ -# Drakkar-Software OctoBot-Sync -# Copyright (c) Drakkar-Software, All rights reserved. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library. 
- -"""E2E tests — product lifecycle with real S3 storage.""" - -import json -import os - -import pytest - -import octobot_sync.chain as chain - -pytestmark = pytest.mark.skipif( - not os.environ.get("S3_ENDPOINT"), - reason="S3_ENDPOINT not set — skipping e2e tests", -) - - -async def test_full_product_lifecycle(client, mock_chain, s3_store): - """Create product → upload meta → read info → update meta → read again → verify S3 keys.""" - pid = "e2e-lifecycle" - mock_chain.set_item(pid, chain.Item(id=pid, owner="0xLifecycleOwner")) - - resp = await client.put( - f"/v1/product/{pid}/v1/meta", - data={ - "name": "Lifecycle Bot", - "description": "Initial description", - "website": "https://octobot.cloud", - "tags": json.dumps(["defi", "arbitrage"]), - }, - ) - assert resp.status_code == 200 - - resp = await client.get(f"/v1/product/{pid}/info") - info = resp.json() - assert info["product"]["owner"] == "0xLifecycleOwner" - assert info["profile"]["name"] == "Lifecycle Bot" - assert info["profile"]["tags"] == ["defi", "arbitrage"] - - resp = await client.put( - f"/v1/product/{pid}/v1/meta", - data={ - "name": "Lifecycle Bot v2", - "description": "Updated description", - "twitter": "@lifecycle", - }, - ) - assert resp.status_code == 200 - - resp = await client.get(f"/v1/product/{pid}/info") - profile = resp.json()["profile"] - assert profile["name"] == "Lifecycle Bot v2" - assert profile["description"] == "Updated description" - assert profile["website"] == "https://octobot.cloud" - assert profile["twitter"] == "@lifecycle" - assert profile["tags"] == ["defi", "arbitrage"] - - raw = await s3_store.get_string(f"products/{pid}/profile.json") - assert raw is not None - stored = json.loads(raw) - assert stored["name"] == "Lifecycle Bot v2" - - -async def test_multiple_products_isolated(client, mock_chain): - """Two products with separate profiles don't interfere with each other.""" - for suffix, owner, name in [("alpha", "0xA", "Alpha"), ("beta", "0xB", "Beta")]: - pid = 
f"e2e-iso-{suffix}" - mock_chain.set_item(pid, chain.Item(id=pid, owner=owner)) - await client.put(f"/v1/product/{pid}/v1/meta", data={"name": name}) - - resp_a = await client.get("/v1/product/e2e-iso-alpha/info") - resp_b = await client.get("/v1/product/e2e-iso-beta/info") - assert resp_a.json()["profile"]["name"] == "Alpha" - assert resp_a.json()["product"]["owner"] == "0xA" - assert resp_b.json()["profile"]["name"] == "Beta" - assert resp_b.json()["product"]["owner"] == "0xB" - - -async def test_version_descriptions_across_versions(client, s3_store): - """Version descriptions are stored separately per version.""" - pid = "e2e-versions" - - await client.put( - f"/v1/product/{pid}/v1/meta", - data={"version_description": "First release"}, - ) - await client.put( - f"/v1/product/{pid}/v2/meta", - data={"version_description": "Major update"}, - ) - - v1_raw = await s3_store.get_string(f"products/{pid}/v1/document.json") - v2_raw = await s3_store.get_string(f"products/{pid}/v2/document.json") - assert json.loads(v1_raw)["description"] == "First release" - assert json.loads(v2_raw)["description"] == "Major update" - - -async def test_product_endpoint_returns_404_without_chain_item(client): - """GET /product/{id} returns 404 when the product doesn't exist on chain.""" - resp = await client.get("/v1/product/e2e-ghost") - assert resp.status_code == 404 - assert resp.json()["error"] == "Product not found" - - -async def test_product_info_returns_null_product_without_chain_item(client): - """GET /product/{id}/info returns null product when not on chain, empty profile.""" - resp = await client.get("/v1/product/e2e-ghost/info") - assert resp.status_code == 200 - data = resp.json() - assert data["product"] is None - assert data["profile"] == {} - - -async def test_product_info_with_profile_but_no_chain_item(client, s3_store): - """Profile can exist in S3 even if product isn't on chain (e.g. 
delisted).""" - pid = "e2e-delisted" - await s3_store.put( - f"products/{pid}/profile.json", - json.dumps({"name": "Delisted Bot"}), - content_type="application/json", - ) - - resp = await client.get(f"/v1/product/{pid}/info") - data = resp.json() - assert data["product"] is None - assert data["profile"]["name"] == "Delisted Bot" - - -async def test_meta_invalid_version_rejected(client): - resp = await client.put("/v1/product/e2e-bad/invalid/meta") - assert resp.status_code == 400 - assert resp.json()["error"] == "Invalid version" - - -async def test_meta_invalid_tags_json_rejected(client): - resp = await client.put( - "/v1/product/e2e-bad/v1/meta", - data={"tags": "not-valid-json["}, - ) - assert resp.status_code == 400 - assert resp.json()["error"] == "Invalid tags JSON" - - -async def test_meta_empty_form_is_noop(client, s3_store): - """Submitting meta with no fields doesn't create a profile document.""" - pid = "e2e-empty-meta" - resp = await client.put(f"/v1/product/{pid}/v1/meta", data={}) - assert resp.status_code == 200 - - raw = await s3_store.get_string(f"products/{pid}/profile.json") - assert raw is None - - -async def test_product_endpoint_no_profile_returns_empty(client, mock_chain): - """GET /product/{id} works even when no profile has been uploaded.""" - pid = "e2e-no-profile" - mock_chain.set_item(pid, chain.Item(id=pid, owner="0xNoProfile")) - - resp = await client.get(f"/v1/product/{pid}") - assert resp.status_code == 200 - data = resp.json() - assert data["product"]["id"] == pid - assert data["profile"] == {} diff --git a/packages/sync/tests/e2e/test_verify_e2e.py b/packages/sync/tests/e2e/test_verify_e2e.py deleted file mode 100644 index 4faa871e0..000000000 --- a/packages/sync/tests/e2e/test_verify_e2e.py +++ /dev/null @@ -1,290 +0,0 @@ -# Drakkar-Software OctoBot-Sync -# Copyright (c) Drakkar-Software, All rights reserved. 
-# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library. - -"""E2E tests — /verify endpoint: auth flow, path authorization, edge cases.""" - -import os -import time - -import pytest - -import octobot_sync.constants as constants -from tests.e2e.conftest import ( - ADMIN_PUBKEY, - USER_PUBKEY, - OTHER_PUBKEY, - CHAIN_ID, - make_verify_headers, -) - -pytestmark = pytest.mark.skipif( - not os.environ.get("S3_ENDPOINT"), - reason="S3_ENDPOINT not set — skipping e2e tests", -) - - -async def test_public_read_products_no_auth_needed(client): - """GET on products/ passes without any auth headers.""" - resp = await client.get( - "/verify", - headers={ - constants.HEADER_ORIGINAL_METHOD: "GET", - constants.HEADER_ORIGINAL_URI: "/bucket/products/prod-1/signals.json", - }, - ) - assert resp.status_code == 200 - - -async def test_public_read_public_path_no_auth_needed(client): - """GET on public/ passes without any auth headers.""" - resp = await client.get( - "/verify", - headers={ - constants.HEADER_ORIGINAL_METHOD: "GET", - constants.HEADER_ORIGINAL_URI: "/bucket/public/news/2026-03.json", - }, - ) - assert resp.status_code == 200 - - -async def test_no_headers_at_all_rejected(client): - resp = await client.get("/verify") - assert resp.status_code == 401 - - -async def test_missing_one_auth_header_rejected(client): - """Providing only some auth headers is still rejected.""" - resp = await client.get( - "/verify", - 
headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_TIMESTAMP: str(int(time.time() * 1000)), - }, - ) - assert resp.status_code == 401 - - -async def test_admin_can_write_products(client, mock_chain): - """Admin pubkey is authorized to PUT on products/.""" - headers = make_verify_headers( - mock_chain, ADMIN_PUBKEY, "PUT", "/bucket/products/prod-1/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 200 - - -async def test_user_cannot_write_products(client, mock_chain): - """Regular user is not authorized to PUT on products/.""" - headers = make_verify_headers( - mock_chain, USER_PUBKEY, "PUT", "/bucket/products/prod-1/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 401 - - -async def test_admin_can_write_public(client, mock_chain): - """Admin pubkey can PUT on public/ paths.""" - headers = make_verify_headers( - mock_chain, ADMIN_PUBKEY, "PUT", "/bucket/public/highlights.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 200 - - -async def test_user_cannot_write_public(client, mock_chain): - """Regular user cannot PUT on public/ paths.""" - headers = make_verify_headers( - mock_chain, USER_PUBKEY, "PUT", "/bucket/public/highlights.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 401 - - -async def test_user_can_access_own_path(client, mock_chain): - """User can read and write their own users/{pubkey}/ path.""" - for method in ("GET", "PUT"): - headers = make_verify_headers( - mock_chain, USER_PUBKEY, method, f"/bucket/users/{USER_PUBKEY}/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 200, f"{method} own path should be allowed" - - -async def test_user_cannot_access_other_user_path(client, mock_chain): - """User cannot read or write another user's path.""" - for method in ("GET", "PUT"): - headers = make_verify_headers( 
- mock_chain, USER_PUBKEY, method, f"/bucket/users/{OTHER_PUBKEY}/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 401, f"{method} other user's path should be denied" - - -async def test_admin_can_access_any_user_path(client, mock_chain): - """Admin can read and write any user's path.""" - for method in ("GET", "PUT"): - headers = make_verify_headers( - mock_chain, ADMIN_PUBKEY, method, f"/bucket/users/{USER_PUBKEY}/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 200, f"Admin {method} any user path should work" - - -async def test_only_admin_can_access_platform(client, mock_chain): - """Only admin can read or write platform/ paths.""" - headers_admin = make_verify_headers( - mock_chain, ADMIN_PUBKEY, "GET", "/bucket/platform/config.json" - ) - resp = await client.get("/verify", headers=headers_admin) - assert resp.status_code == 200 - - headers_user = make_verify_headers( - mock_chain, USER_PUBKEY, "GET", "/bucket/platform/config.json" - ) - resp = await client.get("/verify", headers=headers_user) - assert resp.status_code == 401 - - -async def test_unknown_path_prefix_rejected(client, mock_chain): - """Paths not under products/public/users/platform are rejected.""" - headers = make_verify_headers( - mock_chain, ADMIN_PUBKEY, "GET", "/bucket/unknown/file.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 401 - - -async def test_expired_timestamp_rejected(client): - """Timestamp older than TIMESTAMP_WINDOW_MS is rejected.""" - old_ts = str(int(time.time() * 1000) - 120_000) - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_SIGNATURE: "sig", - constants.HEADER_TIMESTAMP: old_ts, - constants.HEADER_NONCE: "e2e-expired-nonce", - constants.HEADER_CHAIN: CHAIN_ID, - constants.HEADER_ORIGINAL_METHOD: "PUT", - constants.HEADER_ORIGINAL_URI: "/bucket/users/x/data.json", - }, 
- ) - assert resp.status_code == 401 - - -async def test_non_numeric_timestamp_rejected(client): - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_SIGNATURE: "sig", - constants.HEADER_TIMESTAMP: "not-a-number", - constants.HEADER_NONCE: "nonce", - constants.HEADER_CHAIN: CHAIN_ID, - }, - ) - assert resp.status_code == 401 - - -async def test_invalid_signature_rejected(client): - """Valid headers but wrong signature is rejected.""" - ts = str(int(time.time() * 1000)) - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_SIGNATURE: "wrong-signature", - constants.HEADER_TIMESTAMP: ts, - constants.HEADER_NONCE: "e2e-badsig-nonce", - constants.HEADER_CHAIN: CHAIN_ID, - constants.HEADER_ORIGINAL_METHOD: "PUT", - constants.HEADER_ORIGINAL_URI: f"/bucket/users/{USER_PUBKEY}/data.json", - }, - ) - assert resp.status_code == 401 - - -async def test_nonce_replay_rejected(client, mock_chain): - """Same nonce+pubkey used twice is rejected on the second request.""" - headers = make_verify_headers( - mock_chain, ADMIN_PUBKEY, "PUT", "/bucket/platform/config.json" - ) - resp1 = await client.get("/verify", headers=headers) - assert resp1.status_code == 200 - - resp2 = await client.get("/verify", headers=headers) - assert resp2.status_code == 401 - - -async def test_unknown_chain_rejected(client): - """Request referencing a non-registered chain ID is rejected.""" - ts = str(int(time.time() * 1000)) - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_SIGNATURE: "sig", - constants.HEADER_TIMESTAMP: ts, - constants.HEADER_NONCE: "e2e-unknown-chain", - constants.HEADER_CHAIN: "nonexistent-chain", - constants.HEADER_ORIGINAL_METHOD: "PUT", - constants.HEADER_ORIGINAL_URI: f"/bucket/users/{USER_PUBKEY}/data.json", - }, - ) - assert resp.status_code == 401 - - -async def test_oversized_pubkey_rejected(client): - 
"""Pubkey exceeding MAX_PUBKEY_LENGTH is rejected.""" - ts = str(int(time.time() * 1000)) - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: "x" * (constants.MAX_PUBKEY_LENGTH + 1), - constants.HEADER_SIGNATURE: "sig", - constants.HEADER_TIMESTAMP: ts, - constants.HEADER_NONCE: "e2e-oversized", - constants.HEADER_CHAIN: CHAIN_ID, - }, - ) - assert resp.status_code == 401 - - -async def test_oversized_signature_rejected(client): - """Signature exceeding MAX_SIGNATURE_LENGTH is rejected.""" - ts = str(int(time.time() * 1000)) - resp = await client.get( - "/verify", - headers={ - constants.HEADER_PUBKEY: USER_PUBKEY, - constants.HEADER_SIGNATURE: "s" * (constants.MAX_SIGNATURE_LENGTH + 1), - constants.HEADER_TIMESTAMP: ts, - constants.HEADER_NONCE: "e2e-oversized-sig", - constants.HEADER_CHAIN: CHAIN_ID, - }, - ) - assert resp.status_code == 401 - - -async def test_head_method_is_read(client, mock_chain): - """HEAD requests are treated as reads (same as GET).""" - headers = make_verify_headers( - mock_chain, USER_PUBKEY, "HEAD", "/bucket/products/prod-1/data.json" - ) - resp = await client.get("/verify", headers=headers) - assert resp.status_code == 200 - - diff --git a/packages/sync/tests/fixtures/collections.json b/packages/sync/tests/fixtures/collections.json index 3d351f593..3588c8d3f 100644 --- a/packages/sync/tests/fixtures/collections.json +++ b/packages/sync/tests/fixtures/collections.json @@ -30,12 +30,54 @@ { "name": "delta-feed", "storagePath": "items/{itemId}/feed/{version}", - "readRoles": ["public"], + "readRoles": ["member"], "writeRoles": ["owner"], "encryption": "none", "maxBodyBytes": 65536, "rateLimit": true }, + { + "name": "theta-profiles", + "storagePath": "items/{itemId}/profile", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "description": { "type": "string" } + 
}, + "additionalProperties": false + }, + "rateLimit": true + }, + { + "name": "theta-logos", + "storagePath": "items/{itemId}/logo", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 2097152, + "allowedMimeTypes": ["image/png", "image/jpeg", "image/gif", "image/webp"], + "rateLimit": true + }, + { + "name": "theta-versions", + "storagePath": "items/{itemId}/versions/{version}/document", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "description": { "type": "string" } + }, + "additionalProperties": false + } + }, { "name": "epsilon-catalog", "storagePath": "public/catalog", diff --git a/packages/sync/tests/test_generate_nginx_conf.py b/packages/sync/tests/test_generate_nginx_conf.py new file mode 100644 index 000000000..edf4fcb19 --- /dev/null +++ b/packages/sync/tests/test_generate_nginx_conf.py @@ -0,0 +1,270 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. 
+ +"""Tests for octobot_sync.nginx_conf.""" + +import json +import os +import tempfile +from pathlib import Path + +import pytest + +from octobot_sync.util.nginx_conf import generate, storage_path_to_regex, rate_to_nginx + +FIXTURES_DIR = Path(__file__).resolve().parent / "fixtures" +COLLECTIONS_PATH = str(FIXTURES_DIR / "collections.json") + + +def test_static_path_unchanged(): + assert storage_path_to_regex("public/catalog") == r"public/catalog" + + +def test_single_template_replaced(): + assert storage_path_to_regex("users/{identity}") == "users/[^/]+" + + +def test_multiple_templates_replaced(): + result = storage_path_to_regex("items/{itemId}/feed/{version}") + assert result == "items/[^/]+/feed/[^/]+" + + +def test_rate_100_per_60s(): + rate, burst = rate_to_nginx(100, 60_000) + assert rate == "2r/s" + assert burst == 50 + + +def test_rate_10_per_60s(): + rate, burst = rate_to_nginx(10, 60_000) + assert rate == "10r/m" # 10/60s < 1r/s → use r/m + assert burst == 5 + + +def test_rate_1_per_60s(): + rate, burst = rate_to_nginx(1, 60_000) + assert rate == "1r/m" + assert burst == 1 + + +def test_rate_30_per_30s(): + rate, burst = rate_to_nginx(30, 30_000) + assert rate == "1r/s" + assert burst == 15 + + +def test_output_contains_upstream(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "upstream octobot_sync" in output + assert "server octobot-sync:3000;" in output + + +def test_output_contains_cache_path(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "proxy_cache_path" in output + assert "sync_cache" in output + + +def test_listen_port(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 8080) + assert "listen 8080;" in output + + +def test_health_endpoint(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "location = /health" in output + + +def test_public_pull_only_cached_1h(): + """epsilon-catalog is public + pullOnly → 1h cache.""" + output = generate(COLLECTIONS_PATH, 
"octobot-sync:3000", 80) + assert "# epsilon-catalog (public, pull_only)" in output + assert "proxy_cache_valid 200 1h;" in output + + +def test_public_writable_cached_30s(): + """theta-profiles is public + writable → 30s cache.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "# theta-profiles (public, writable)" in output + assert "proxy_cache_valid 200 30s;" in output + + +def test_private_collections_not_cached(): + """alpha-docs, beta-prefs, gamma-logs, zeta-internal are private → no cache location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "alpha-docs" not in output + assert "beta-prefs" not in output + assert "gamma-logs" not in output + assert "zeta-internal" not in output + + +def test_cache_status_header(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "X-Cache-Status" in output + + +def test_catchall_location(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "location /v1/" in output + + +def test_reject_root(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "return 404;" in output + + +def test_rate_limit_zones_present(): + """Global rateLimit config → limit_req_zone directives.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "limit_req_zone" in output + assert "zone=sync_global" in output + assert "zone=sync_strict" in output + assert "limit_req_status 429;" in output + + +def test_public_locations_have_global_rate_limit(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + # delta-feed pull location should have global rate limit + lines = output.split("\n") + in_delta = False + for line in lines: + if "delta-feed" in line and "writable" in line: + in_delta = True + if in_delta and "limit_req zone=sync_global" in line: + break + if in_delta and line.strip() == "}": + pytest.fail("delta-feed pull location missing global rate limit") + + +def test_rate_limited_push_location(): + 
"""delta-feed has rateLimit: true → strict push location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "# delta-feed (rate limited push)" in output + assert "/v1/push/items/[^/]+/feed/[^/]+" in output + assert "zone=sync_strict" in output + + +def test_pull_only_no_push_rate_limit(): + """epsilon-catalog is pullOnly → no push rate limit location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "epsilon-catalog (rate limited push)" not in output + + +def test_catchall_has_rate_limit(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + lines = output.split("\n") + in_catchall = False + for line in lines: + if "Catch-all" in line: + in_catchall = True + if in_catchall and "limit_req zone=sync_global" in line: + break + if in_catchall and line.strip() == "}": + pytest.fail("Catch-all location missing rate limit") + + +def test_no_rate_limit_config(): + """When global rateLimit is absent, no rate limiting directives.""" + config = { + "version": 1, + "collections": [ + { + "name": "public-col", + "storagePath": "public/data", + "readRoles": ["public"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536, + } + ], + } + with tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) as f: + json.dump(config, f) + f.flush() + try: + output = generate(f.name, "octobot-sync:3000", 80) + finally: + os.unlink(f.name) + + assert "limit_req_zone" not in output + assert "limit_req" not in output + assert "public-col" in output + + +# ── Security: input sanitization (#24) ── + + +def test_storage_path_escapes_regex_special_chars(): + """Regex metacharacters in literal path segments must be escaped.""" + result = storage_path_to_regex("public/data.v2") + assert result == r"public/data\.v2" + + result = storage_path_to_regex("items/{id}/feed+extra") + assert result == r"items/[^/]+/feed\+extra" + + +def test_storage_path_escapes_pipe(): + """Pipe in path could create regex 
    OR — must be escaped."""
    result = storage_path_to_regex("public/a|b")
    assert result == r"public/a\|b"


def test_storage_path_escapes_dollar():
    """Dollar sign in path must be escaped."""
    result = storage_path_to_regex("public/price$")
    assert result == r"public/price\$"


def test_storage_path_escapes_parentheses():
    # Unescaped parentheses would open a regex capture group.
    result = storage_path_to_regex("public/data(v1)")
    assert result == r"public/data\(v1\)"


def test_invalid_collection_name_rejected():
    """Collection names with special chars should raise ValueError."""
    config = {
        "version": 1,
        "collections": [
            {
                "name": "bad name; injection",
                "storagePath": "public/data",
                "readRoles": ["public"],
                "writeRoles": ["admin"],
                "encryption": "none",
                "maxBodyBytes": 65536,
            }
        ],
    }
    # delete=False so the path survives the with-block; cleaned up manually.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
        json.dump(config, f)
        f.flush()
    try:
        with pytest.raises(ValueError, match="Invalid collection name"):
            generate(f.name, "octobot-sync:3000", 80)
    finally:
        os.unlink(f.name)


def test_rate_to_nginx_rejects_zero():
    with pytest.raises(ValueError, match="must be positive"):
        rate_to_nginx(0, 60_000)


def test_rate_to_nginx_rejects_negative():
    # A negative window is as invalid as a zero count.
    with pytest.raises(ValueError, match="must be positive"):
        rate_to_nginx(10, -1)
diff --git a/packages/sync/tests/test_role_resolver.py b/packages/sync/tests/test_role_resolver.py
index 3b761fa65..f7b3cf3a3 100644
--- a/packages/sync/tests/test_role_resolver.py
+++ b/packages/sync/tests/test_role_resolver.py
@@ -147,9 +147,20 @@ async def test_role_enricher_owner(mock_chain, registry):
     extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {"productId": "product-123"})
     assert "owner" in extra
+    assert "member" in extra


-async def test_role_enricher_not_owner(mock_chain, registry):
+async def test_role_enricher_member(mock_chain, registry):
+    enricher = sync.create_role_enricher(registry)
+    mock_chain.set_owner("product-123", "0xSomeoneElse")
+    
mock_chain.set_access("product-123", PUBKEY, 0) + + extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {"productId": "product-123"}) + assert "member" in extra + assert "owner" not in extra + + +async def test_role_enricher_not_owner_no_access(mock_chain, registry): enricher = sync.create_role_enricher(registry) mock_chain.set_owner("product-123", "0xSomeoneElse") @@ -210,15 +221,3 @@ async def test_role_enricher_no_product_id(mock_chain, registry): enricher = sync.create_role_enricher(registry) extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {}) assert extra == [] - - -async def test_find_item(mock_chain, registry): - mock_chain.set_item("item-1", chain.Item(id="item-1", owner="0xOwner")) - result = await sync.find_item(registry, "item-1") - assert result is not None - assert result.id == "item-1" - - -async def test_find_item_not_found(registry): - result = await sync.find_item(registry, "nonexistent") - assert result is None diff --git a/packages/sync/tests/test_routes.py b/packages/sync/tests/test_routes.py deleted file mode 100644 index 8875c6991..000000000 --- a/packages/sync/tests/test_routes.py +++ /dev/null @@ -1,156 +0,0 @@ -# Drakkar-Software OctoBot-Sync -# Copyright (c) Drakkar-Software, All rights reserved. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library. 
- -"""Integration tests — full app with mock deps, hit all manual routes.""" - -from pathlib import Path - -import pytest -from httpx import AsyncClient, ASGITransport - -import octobot_sync.app as sync_app -import octobot_sync.auth as auth -import octobot_sync.chain as chain -import octobot_sync.constants as constants -import tests.mock_chain as mock_chain_module -from tests.conftest import MemoryObjectStore - - -ADMIN_PUBKEY = "0xAdminPubkey" -USER_PUBKEY = "0xUserPubkey" -CHAIN_ID = "mock" - -COLLECTIONS_PATH = str(Path(__file__).resolve().parent / "fixtures" / "collections.json") - - -@pytest.fixture -def mock_chain(): - return mock_chain_module.MockChain(CHAIN_ID) - - -@pytest.fixture -def app(mock_chain, monkeypatch): - monkeypatch.setenv("PLATFORM_PUBKEY_EVM", ADMIN_PUBKEY) - monkeypatch.setenv("ENCRYPTION_SECRET", "test-encryption-secret") - monkeypatch.setenv("PLATFORM_ENCRYPTION_SECRET", "test-platform-secret") - registry = chain.ChainRegistry() - registry.register(mock_chain) - nonce = auth.NonceStore(auth.MemoryStorageAdapter()) - object_store = MemoryObjectStore() - return sync_app.create_app(nonce, object_store, registry, collections_path=COLLECTIONS_PATH) - - -@pytest.fixture -async def client(app): - transport = ASGITransport(app=app) - async with AsyncClient(transport=transport, base_url="http://test") as ac: - yield ac - - - - -async def test_health(client): - resp = await client.get("/health") - assert resp.status_code == 200 - data = resp.json() - assert data["ok"] is True - assert "ts" in data - - - - -async def test_verify_public_read(client): - """Public GETs on products/ should pass without auth.""" - resp = await client.get( - "/verify", - headers={ - constants.HEADER_ORIGINAL_METHOD: "GET", - constants.HEADER_ORIGINAL_URI: "/octobot-sync-dev/products/some-product/data.json", - }, - ) - assert resp.status_code == 200 - - -async def test_verify_missing_headers(client): - resp = await client.get("/verify") - assert resp.status_code == 401 - - 
- - -async def test_get_product_not_found(client): - resp = await client.get("/v1/product/nonexistent") - assert resp.status_code == 404 - - -async def test_get_product_found(client, mock_chain): - mock_chain.set_item("prod-1", chain.Item(id="prod-1", owner="0xOwner")) - resp = await client.get("/v1/product/prod-1") - assert resp.status_code == 200 - data = resp.json() - assert data["product"]["id"] == "prod-1" - assert data["product"]["owner"] == "0xOwner" - - - - -async def test_get_product_info(client, mock_chain): - mock_chain.set_item("prod-2", chain.Item(id="prod-2", owner="0xOwner2")) - resp = await client.get("/v1/product/prod-2/info") - assert resp.status_code == 200 - data = resp.json() - assert data["product"]["id"] == "prod-2" - - - - -async def test_verify_public_path_read(client): - resp = await client.get( - "/verify", - headers={ - constants.HEADER_ORIGINAL_METHOD: "GET", - constants.HEADER_ORIGINAL_URI: "/bucket/public/some-file.json", - }, - ) - assert resp.status_code == 200 - - - - -async def test_put_product_meta_invalid_version(client): - resp = await client.put("/v1/product/prod-1/invalid/meta") - assert resp.status_code == 400 - - -async def test_put_product_meta_invalid_tags(client): - resp = await client.put( - "/v1/product/prod-1/v1/meta", - data={"tags": "not-json"}, - ) - assert resp.status_code == 400 - - -async def test_put_product_meta_profile_fields(client): - resp = await client.put( - "/v1/product/prod-1/v1/meta", - data={"name": "My Product", "description": "A test product"}, - ) - assert resp.status_code == 200 - - resp = await client.get("/v1/product/prod-1/info") - profile = resp.json()["profile"] - assert profile["name"] == "My Product" - assert profile["description"] == "A test product" diff --git a/packages/sync/tests/test_sync_collections.py b/packages/sync/tests/test_sync_collections.py index 323c11e10..82c5412e6 100644 --- a/packages/sync/tests/test_sync_collections.py +++ b/packages/sync/tests/test_sync_collections.py 
@@ -33,7 +33,7 @@ def test_sync_config_version(): def test_sync_config_has_collections(): - assert len(_load().collections) == 7 + assert len(_load().collections) == 10 def test_all_collections_have_names(): @@ -46,10 +46,10 @@ def test_all_collections_have_names(): def test_rate_limited_collection(): col = next(c for c in _load().collections if c.name == "delta-feed") assert col.storage_path == "items/{itemId}/feed/{version}" - assert "public" in col.read_roles + assert "member" in col.read_roles assert "owner" in col.write_roles assert col.encryption == "none" - assert col.rate_limit is True + assert col.rate_limit def test_bundled_collections():