diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..c91c3f3 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[net] +git-fetch-with-cli = true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de35d32..5819094 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - run: cargo check --all-targets @@ -25,6 +28,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - run: cargo test @@ -44,6 +50,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable with: components: clippy diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 116808f..80b3a44 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,6 +41,9 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/setup-cross-toolchain-action@v1 diff --git a/.github/workflows/update-discovery.yml b/.github/workflows/update-discovery.yml new file mode 100644 index 0000000..20332ce --- /dev/null +++ b/.github/workflows/update-discovery.yml @@ -0,0 +1,96 @@ +name: Update Discovery Documents + +on: + schedule: + - cron: '0 * * * *' # hourly + workflow_dispatch: {} # manual trigger + +permissions: + contents: write + pull-requests: 
write + +env: + SERVICES: | + analyzer=https://analyzer.exein.dev/analyzer-discovery.json + # isaac=https://analyzer.exein.dev/isaac-discovery.json + # vuln-tracker=https://analyzer.exein.dev/vuln-tracker-discovery.json + +jobs: + update: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Fetch all discovery documents + run: | + mkdir -p discovery + while IFS='=' read -r name url; do + [ -z "$name" ] && continue + [[ "$name" == \#* ]] && continue + echo "Fetching $name from $url" + curl -sf "$url" \ + -H "Authorization: Bearer ${{ secrets.ANALYZER_API_KEY }}" \ + -o "discovery/${name}.json.new" || echo "WARN: failed to fetch $name" + done <<< "$SERVICES" + + - name: Check for changes + id: diff + run: | + changed=false + for f in discovery/*.json.new; do + [ ! -f "$f" ] && continue + base="${f%.new}" + if ! diff -q "$base" "$f" > /dev/null 2>&1; then + mv "$f" "$base" + changed=true + else + rm "$f" + fi + done + echo "changed=$changed" >> "$GITHUB_OUTPUT" + + - name: Configure SSH for private dependencies + if: steps.diff.outputs.changed == 'true' + uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} + + - name: Install Rust toolchain + if: steps.diff.outputs.changed == 'true' + uses: dtolnay/rust-toolchain@stable + + - name: Cache cargo + if: steps.diff.outputs.changed == 'true' + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }} + + - name: Regenerate skills + if: steps.diff.outputs.changed == 'true' + run: | + cargo build --release + for f in discovery/*.json; do + [ ! 
-f "$f" ] && continue + name="$(basename "$f" .json)" + echo "Generating skills for $name" + ./target/release/analyzer --discovery "$f" generate-skills \ + || echo "WARN: failed to generate skills for $name" + done + + - name: Create PR + if: steps.diff.outputs.changed == 'true' + uses: peter-evans/create-pull-request@v6 + with: + title: "update: Discovery Documents + skills" + body: | + Auto-generated from upstream API changes. + + Discovery documents in `discovery/` have changed. + Skill files in `skills/` have been regenerated. + branch: update-discovery + commit-message: "update: discovery documents + regenerated skills" + delete-branch: true diff --git a/.gitignore b/.gitignore index 96ef6c0..ab3a068 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ /target Cargo.lock +usage_examples.txt +*-discovery.json +*20*.txt +.env \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..e58f2d2 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +When using or contributing to this repository, follow the guidelines in [CONTEXT.md](CONTEXT.md). diff --git a/CONTEXT.md b/CONTEXT.md new file mode 100644 index 0000000..770b3e7 --- /dev/null +++ b/CONTEXT.md @@ -0,0 +1,145 @@ +# Analyzer CLI (`analyzer`) Context + +The `analyzer` CLI provides dynamic access to firmware and software security APIs by parsing Discovery Documents at runtime. It supports multiple API services through a compile-time service registry, with each service backed by its own Discovery Document fetched and cached automatically. 
+ +## Registered Services + +| Alias | Description | +|-------|-------------| +| `analyzer` | Firmware and software image security analysis | + +## Agent Integration (Claude Code) + +To use the `analyzer` CLI with Claude Code (or other AI agents), run the one-time setup: + +```bash +analyzer init-agent +``` + +This installs skills, usage context, and permissions into `~/.claude/` so that Claude Code automatically discovers the `analyzer` CLI in every project — no source code or manual configuration needed. + +What it writes: + +| File | Purpose | +|------|---------| +| `~/.claude/skills/` | Per-resource API skill files (generated from discovery documents) | +| `~/.claude/settings.json` | Allowlists the `analyzer` binary for Claude Code | + +Skills are loaded on-demand by Claude Code — no global `CLAUDE.md` or `CONTEXT.md` is written, so the analyzer context only appears when relevant. + +Re-run `analyzer init-agent` after upgrading to refresh skills. + +## Rules of Engagement for Agents + +* **Schema First:** *If you don't know the exact JSON payload structure, run `analyzer schema ..` first to inspect the schema before executing.* +* **Context Window Protection:** *API responses can be large. ALWAYS use `--fields` when listing or getting resources to avoid overwhelming your context window.* +* **Dry-Run Safety:** *Always use the `--dry-run` flag for mutating operations (create, update, delete) to validate your JSON payload before actual execution.* +* **Poll, Don't Guess:** *After scheduling a scan, poll the status endpoint until it completes. Do not assume timing or make further requests against incomplete scans.* + +## Core Syntax + +```bash +# API commands (service name is first positional arg) +analyzer api [sub-resource...] [flags] + +# Schema introspection (service name is first dotted segment) +analyzer schema .. 
+ +# Navigation +analyzer api analyzer --help +analyzer api analyzer scans --help +``` + +### Key Flags + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters (e.g., `{"id": "..."}`) | +| `--json ''` | Request body for POST/PUT/PATCH methods | +| `--fields ''` | Comma-separated response fields to include (context window protection) | +| `--page-all` | Auto-paginate results as NDJSON (one JSON line per page) | +| `--dry-run` | Validate and print the request without executing | +| `--format human\|json\|table` | Output format (default: `human`) | +| `--discovery ` | Override discovery source for dev/testing | + +## Schema Introspection + +The CLI is self-documenting. Use `analyzer schema` to discover parameters, request/response schemas, and types at runtime — no static docs needed. + +```bash +# Browse all resources for a service +analyzer schema analyzer.api + +# Inspect a method's params, types, and defaults +analyzer schema analyzer.scans.create +analyzer schema analyzer.objects.get + +# Browse a sub-resource tree +analyzer schema analyzer.scans.compliance-check +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Usage Patterns + +### Reading Data + +Always use `--fields` to minimize tokens. + +```bash +# List objects (efficient) +analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name,tags" + +# Get scan details +analyzer api analyzer scans get --params '{"id": "SCAN_ID"}' --fields "id,status,score" + +# Check service health +analyzer api analyzer health list +``` + +### Writing Data + +Use `--json` for the request body. Always `--dry-run` first. + +```bash +analyzer schema analyzer.objects.create +analyzer api analyzer objects create --json '{"name": "Router FW v2.1"}' --dry-run +``` + +### Pagination (NDJSON) + +Use `--page-all` for large result sets. Output is one JSON line per page. 
+ +```bash +analyzer api analyzer scans results get \ + --params '{"scan_id": "SCAN_ID", "analysis_id": "cve"}' \ + --page-all --fields "id,severity,score" +``` + +### Deleting Data + +```bash +analyzer api analyzer objects delete --params '{"id": "OBJ_ID"}' --dry-run +``` + +## Human-Friendly Commands + +Classic CLI subcommands (unchanged by multi-service routing): + +```bash +analyzer login +analyzer whoami +analyzer object list +analyzer scan new --object OBJ_ID -f firmware.bin -t linux --wait +analyzer scan score --scan SCAN_ID +``` + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{"error": {"code": 404, "message": "Object not found"}} +``` + +Exit `0` = success, non-zero = failure. Parse error JSON to decide next steps. diff --git a/Cargo.toml b/Cargo.toml index bee6d43..4de5a99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,11 +15,36 @@ name = "analyzer" path = "src/main.rs" [dependencies] -clap = { version = "4", features = ["derive", "env", "color", "help", "usage", "error-context", "suggestions", "wrap_help"] } -reqwest = { version = "0.12", default-features = false, features = ["json", "multipart", "stream", "rustls-tls"] } + +# internal +openapi-to-discovery = { git = "ssh://git@github.com/exein-io/openapi-to-discovery" } + +# 3rdparty +clap = { version = "4", features = [ + "derive", + "env", + "color", + "help", + "usage", + "error-context", + "suggestions", + "wrap_help", + "string", +] } +reqwest = { version = "0.12", default-features = false, features = [ + "json", + "multipart", + "stream", + "rustls-tls", +] } serde = { version = "1", features = ["derive"] } serde_json = "1" -tokio = { version = "1", features = ["rt-multi-thread", "macros", "fs", "io-util"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "macros", + "fs", + "io-util", +] } tokio-util = { version = "0.7", features = ["io"] } tokio-stream = "0.1" futures = "0.3" @@ -39,8 +64,10 @@ clap_complete = "4" [dev-dependencies] 
assert_cmd = "2" +filetime = "0.2" predicates = "3" tempfile = "3" +test-case = "3" wiremock = "0.6" [profile.release] diff --git a/README.md b/README.md index 963e8ed..a1065c7 100644 --- a/README.md +++ b/README.md @@ -83,14 +83,22 @@ analyzer scan sbom --object a1b2c3d4-... -O sbom.json # Interactive login (prompts for API key, validates, saves) analyzer login -# Use a specific server URL -analyzer login --url https://my-analyzer.example.com/api/ +# Login to a specific environment +analyzer login --env dev # https://analyzer.exein.dev/api/ +analyzer login --env stage # https://analyzer.exein.live/api/ +analyzer login --env prod # https://analyzer.exein.io/api/ (default) + +# Or use an explicit URL (for self-hosted instances) +analyzer login --url https://my-instance.example.com/api/ # Login to a named profile -analyzer login --profile staging +analyzer login --env dev --profile dev -# Check your current identity +# Check your current identity and environment analyzer whoami +# Profile: default +# URL: https://analyzer.exein.dev/api/ (Dev) +# API Key: r4ZW...VZOC ``` ### Objects @@ -251,11 +259,13 @@ default_profile = "default" api_key = "your-api-key" url = "https://analyzer.exein.io/api/" -[profiles.staging] -api_key = "staging-key" -url = "https://staging.analyzer.exein.io/api/" +[profiles.dev] +api_key = "dev-api-key" +url = "https://analyzer.exein.dev/api/" ``` +Switch profiles with `--profile dev` or `export ANALYZER_PROFILE=dev`. + ### Precedence Settings are resolved in this order (highest priority first): @@ -273,6 +283,90 @@ Settings are resolved in this order (highest priority first): | `docker` | info, cve, password-hash, crypto, software-bom, malware, hardening, capabilities | | `idf` | info, cve, software-bom, symbols, tasks, stack-overflow | +--- + +## Agent mode + +The CLI includes a discovery-driven layer designed for AI agents (Claude Code, etc.). 
While the commands above are human-friendly (named flags, progress bars, `--wait`), the agent layer exposes the full API surface dynamically — no hardcoded commands. + +The CLI fetches each service's OpenAPI spec, converts it to a [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) in-process (via [openapi-to-discovery](https://github.com/exein-io/openapi-to-discovery)), and uses that to generate CLI commands, schema introspection, and agent skills at runtime. + +### Setup (Claude Code) + +```bash +# 1. Login to your target environment +analyzer login --env dev + +# 2. Install agent integration (fetches OpenAPI, generates skills, configures permissions) +analyzer init-agent + +# 3. Start a new Claude Code session — skills are loaded automatically +``` + +For local development (API running on localhost): + +```bash +# Point at a local OpenAPI spec (auto-detected and converted) +analyzer --discovery ./openapi.json init-agent + +# Or point at a pre-converted discovery file +analyzer --discovery ./analyzer-discovery.json init-agent +``` + +### `analyzer api` — dynamic API access + +All API resources and methods are generated at runtime from the discovery document: + +```bash +# List objects +analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name" + +# Create a scan (dry-run first) +analyzer api analyzer scans create --json '{"name": "test", ...}' --dry-run + +# Get scan results +analyzer api analyzer scans results get \ + --params '{"scan_id": "ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}' +``` + +**Flags for `api` commands:** + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--fields ''` | Limit response fields (protects agent context window) | +| `--dry-run` | Print the request without executing | +| `--format ` | Output: `human` (default), `json`, `table`, `csv` | +| `--page-all` | Auto-paginate 
results as NDJSON | + +### `analyzer schema` — introspect method signatures + +Agents use this to discover parameters, types, and request/response schemas at runtime: + +```bash +# Inspect a specific method +analyzer schema analyzer.scans.create + +# Browse available methods under a resource +analyzer schema analyzer.scans + +# Full tree +analyzer schema analyzer.api +``` + +### `analyzer generate-skills` — generate skill files + +Reads the discovery document and writes markdown skill files to `skills/`: + +```bash +analyzer generate-skills +ls skills/ +# analyzer-objects/ analyzer-scans/ analyzer-health/ analyzer-shared/ +``` + +See [CONTEXT.md](CONTEXT.md) for the full agent reference. + ## License Apache-2.0 diff --git a/Schema_introspection_flow.md b/Schema_introspection_flow.md new file mode 100644 index 0000000..442e1be --- /dev/null +++ b/Schema_introspection_flow.md @@ -0,0 +1,55 @@ +## When an agent runs analyzer schema analyzer.scans.compliance-check: + +1. Service "analyzer" resolves from the registry +2. Path becomes api.scans.compliance-check (the "api." prefix is prepended by main.rs) +3. Segments: ["api", "scans", "compliance-check"] +4. resolve_method tries first — fails (compliance-check is a resource, not a method) +5. resolve_resource tries next — succeeds, finds the container resource +6. print_resource_tree renders it + +The agent would see this output: + +``` +cyber-resilience-act/ + list (GET) — Computes compliance with Cyber Resilience Act + ai-suggestion/ + begin/ + create (POST) — Triggers CRA AI suggestion using user-provided documen + status/ + list (GET) — Returns status of the CRA AI suggestion. + overwrite/ + overwrite_compliance_check_requirement (PUT) — Overwrites compliance check requirement + report/ + list (GET) — Downloads Cyber Resilience Act compliance report as PDF +``` + +The agent now knows it can drill deeper. 
For example, to get the full method signature: + +analyzer schema analyzer.scans.compliance-check.cyber-resilience-act.list + +Which would output the JSON method schema: + +```json +{ + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act", + "description": "Computes compliance with Cyber Resilience Act", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"], + "response": { + "$ref": "CyberResilienceActReport" + } +} +``` + +That's the full schema first loop: `browse the tree` → `pick a method` → `get the exact signature` → `build the command`. An agent goes from knowing nothing +to calling the API in 2-3 schema lookups. \ No newline at end of file diff --git a/docs/skills.md b/docs/skills.md new file mode 100644 index 0000000..94ceabb --- /dev/null +++ b/docs/skills.md @@ -0,0 +1,11 @@ +# Skills Index + +> Auto-generated by `analyzer generate-skills`. Do not edit manually. + +| Skill | Description | +|-------|-------------| +| [analyzer-health](../skills/analyzer-health/SKILL.md) | API operations for analyzer-health. | +| [analyzer-objects](../skills/analyzer-objects/SKILL.md) | API operations for analyzer-objects. | +| [analyzer-scans](../skills/analyzer-scans/SKILL.md) | API operations for analyzer-scans. | +| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling (analyzer-shared). | + diff --git a/example.env b/example.env new file mode 100644 index 0000000..9e261ac --- /dev/null +++ b/example.env @@ -0,0 +1,18 @@ +# Copy to .env and adjust values for local development. +# +# The CLI reads these via --flags and env vars (see `analyzer --help`). +# Precedence: CLI flags > env vars > config file > defaults. 
+# +# Environments (use `analyzer login --env ` to switch): +# dev = https://analyzer.exein.dev/api/ +# stage = https://analyzer.exein.live/api/ +# prod = https://analyzer.exein.io/api/ (default) + +# Override the API base URL for local development +ANALYZER_URL=http://localhost:8000/ + +# API key (or use `analyzer login` to save interactively) +# ANALYZER_API_KEY=your-api-key + +# Override the discovery/OpenAPI source (file path or URL) +# ANALYZER_DISCOVERY_URL=./openapi.json diff --git a/gemini-extension.json b/gemini-extension.json new file mode 100644 index 0000000..7903cdf --- /dev/null +++ b/gemini-extension.json @@ -0,0 +1,6 @@ +{ + "name": "analyzer-cli", + "version": "0.2.0", + "description": "CLI for firmware and software image security analysis, with discovery-driven agent mode.", + "contextFileName": "CONTEXT.md" +} diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md new file mode 100644 index 0000000..a06eb30 --- /dev/null +++ b/skills/analyzer-health/SKILL.md @@ -0,0 +1,39 @@ +--- +name: analyzer-health +version: 0.5.0 +description: "List health via the analyzer CLI — Firmware and software image security analysis" +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api analyzer health --help" +--- + +# health (0.5.0) + +> **Before unfamiliar calls:** run `analyzer schema analyzer.health.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. +> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` + +```bash +analyzer api analyzer health [flags] +``` + +## API Resources + + - `list` — Returns if the service is in an healthy state. 
+ +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api analyzer health --help + +# Inspect a method's required params, types, and defaults +analyzer schema analyzer.health. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md new file mode 100644 index 0000000..deb237c --- /dev/null +++ b/skills/analyzer-objects/SKILL.md @@ -0,0 +1,47 @@ +--- +name: analyzer-objects +version: 0.5.0 +description: "Create, Delete, Get, List, Update objects; plus scans via the analyzer CLI — Firmware and software image security analysis" +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api analyzer objects --help" +--- + +# objects (0.5.0) + +> **Before unfamiliar calls:** run `analyzer schema analyzer.objects.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. +> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` + +```bash +analyzer api analyzer objects [flags] +``` + +## API Resources + + - `create` — Create new object + - `delete` — Deletes a object and all related scans. + - `get` — Retrieve an object by its ID. + - `list` — Retrieve a list of all objects of the current user. + - `update` — Update an object + +### scans + + - `list` — Those scans could be + +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api analyzer objects --help + +# Inspect a method's required params, types, and defaults +analyzer schema analyzer.objects. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. 
diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md new file mode 100644 index 0000000..4c6e6e1 --- /dev/null +++ b/skills/analyzer-scans/SKILL.md @@ -0,0 +1,106 @@ +--- +name: analyzer-scans +version: 0.5.0 +description: "Create, Delete, Get, List scans; plus cancel, compliance-check, documents, and 7 more via the analyzer CLI — Firmware and software image security analysis" +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api analyzer scans --help" +--- + +# scans (0.5.0) + +> **Before unfamiliar calls:** run `analyzer schema analyzer.scans.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. +> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` + +```bash +analyzer api analyzer scans [flags] +``` + +## API Resources + + - `create` — Schedule a new scan. + - `delete` — Delete a scan. + - `get` — Returns a scan. + - `list` — Retrieve a list of scans. + +### cancel + + - `create` — This can be used to cancel an already pending or running scan. + +### compliance-check + + - `cyber-resilience-act` — Operations on the 'cyber-resilience-act' resource + +### documents + + - `create` — Upload a document for a scan. + - `delete` — Delete a single document for a scan. + - `delete_documents` — Delete all documents for a scan. + - `list` — List documents for a scan. + +### overview + + - `get` — Returns an overview of one analysis. + - `list` — Returns an aggregated overview of all analysis executed for one scan. + +### report + + - `list` — Downloads a PDF security report for a scan. + +### results + + - `get` — Retrieve the results of one specific analysis of a scan. + +### sbom + + - `list` — Downloads the SBOM (CycloneDX JSON) for a scan. + +### score + + - `list` — Returns a security score of all successful finished analyses with their individual scores included. 
+ +### status + + - `list` — Returns the status of a scan. + +### types + + - `list` — Returns a list of all available analysis types for each different image. + +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api analyzer scans --help + +# Inspect a method's required params, types, and defaults +analyzer schema analyzer.scans. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Workflow: Fetching Analysis Results + +`analysis_id` parameters are UUIDs, not type names like "cve". The `query` parameter is required. + +1. Get analysis UUIDs: `analyzer api analyzer scans status list --params '{"id": "SCAN_ID"}'` +2. Find the UUID under the analysis type key (e.g., `cve.id`) +3. Fetch results with required `query` param: + ```bash + analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}' + ``` + +The `query` value contains filter params. Required fields per analysis type (all need `sort-ord=asc|desc`): +- **CVE, hardening, capabilities, password-hash:** `sort-by=severity` +- **malware:** `sort-by=filename` +- **crypto:** `sort-by=type` +- **kernel:** `sort-by=features` +- **software-bom:** `sort-by=name` + +Optional: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. diff --git a/skills/analyzer-shared/SKILL.md b/skills/analyzer-shared/SKILL.md new file mode 100644 index 0000000..acf8835 --- /dev/null +++ b/skills/analyzer-shared/SKILL.md @@ -0,0 +1,39 @@ +--- +name: analyzer-shared +description: "analyzer CLI: auth, introspection, and agent rules for firmware/container security scanning." +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- + +# analyzer CLI — Agent Rules + +## Discover before you act + +The CLI is self-documenting. 
Use these before every unfamiliar call: + +```bash +analyzer api analyzer --help # browse resources +analyzer api analyzer --help # browse methods +analyzer schema analyzer.. # inspect params, types, defaults +``` + +## Rules + +1. **Schema first** — run `analyzer schema` before building `--params` or `--json`. +2. **Protect context** — add `--fields` to every `list` and `get` call. +3. **Dry-run mutations** — use `--dry-run` for create/update/delete, then confirm with the user before executing. +4. **Poll, don't guess** — after scheduling a scan, poll status until complete. + +## Auth + +```bash +analyzer login # interactive, saves to ~/.config/analyzer/ +export ANALYZER_API_KEY="..." # or set env var +``` + +## Errors + +All errors: JSON on stderr, non-zero exit. Parse before retrying. diff --git a/src/agent_api/executor.rs b/src/agent_api/executor.rs new file mode 100644 index 0000000..b82871d --- /dev/null +++ b/src/agent_api/executor.rs @@ -0,0 +1,579 @@ +//! Generic API executor for discovery-driven methods. +//! +//! Substitutes path parameters, collects query parameters, and dispatches +//! HTTP requests through the existing [`AnalyzerClient`]. +//! Supports `--page-all` for auto-pagination with NDJSON output. + +use anyhow::{Context, Result}; +use serde_json::Value; +use std::collections::BTreeMap; + +use crate::client::AnalyzerClient; +use crate::discovery::{DiscoveryMethod, DiscoveryParameter}; +use crate::output::Format; + +/// Pagination configuration from `--page-all`, `--page-limit`, `--page-delay`. +pub struct PaginationConfig { + /// Whether to auto-paginate through all pages. + pub page_all: bool, + /// Maximum number of pages to fetch (default: 10). + pub page_limit: u32, + /// Delay between page fetches in milliseconds (default: 100). 
+ pub page_delay_ms: u64, +} + +impl Default for PaginationConfig { + fn default() -> Self { + Self { + page_all: false, + page_limit: 10, + page_delay_ms: 100, + } + } +} + +/// What the next pagination step looks like. +#[derive(Debug)] +enum NextPage { + /// Follow an absolute URL (from `_links.next.href`). + Url(String), + /// Increment the `page` query param (offset-based pagination). + IncrementPage { next_page: u32 }, +} + +/// Execute a single discovery method against the API. +/// +/// `client` is `None` when `dry_run` is true (no auth required). +/// `fields` is an optional comma-separated list of response field names +/// to keep (client-side filtering for context window protection). +/// `pagination` controls auto-pagination with NDJSON output. +#[allow(clippy::too_many_arguments)] +pub async fn execute_method( + client: Option<&AnalyzerClient>, + method: &DiscoveryMethod, + params_json: Option<&str>, + body_json: Option<&str>, + fields: Option<&str>, + pagination: &PaginationConfig, + dry_run: bool, + format: Format, +) -> Result<()> { + let params: serde_json::Map = match params_json { + Some(s) => serde_json::from_str(s).context("invalid --params JSON")?, + None => serde_json::Map::new(), + }; + + let url_path = substitute_path_params(&method.path, &method.parameters, ¶ms)?; + let mut query_params = collect_query_params(&method.parameters, ¶ms); + + // Parse request body + let body: Option = match body_json { + Some(s) => Some(serde_json::from_str(s).context("invalid --json body")?), + None => None, + }; + + // Parse field mask + let field_list: Option> = fields.map(|f| f.split(',').map(str::trim).collect()); + + if dry_run { + let base = client + .map(|c| c.base_url().as_str()) + .unwrap_or("/"); + println!("{} {base}{url_path}", method.http_method); + if !query_params.is_empty() { + for (k, v) in &query_params { + println!(" ?{k}={v}"); + } + } + if let Some(ref fl) = field_list { + println!(" Fields: {}", fl.join(", ")); + } + if 
pagination.page_all { + println!( + " Pagination: --page-all (limit: {}, delay: {}ms)", + pagination.page_limit, pagination.page_delay_ms + ); + } + if let Some(b) = &body { + println!("{}", serde_json::to_string_pretty(b)?); + } + return Ok(()); + } + + let client = client.context("API client required for non-dry-run execution")?; + + // -- Pagination loop (or single request) ---------------------------------- + + let mut pages_fetched: u32 = 0; + let mut next_url: Option = None; + + loop { + let response = match &next_url { + Some(url) => client.execute_raw_url(url).await?, + None => { + client + .execute_raw(&method.http_method, &url_path, &query_params, body.as_ref()) + .await? + } + }; + + pages_fetched += 1; + + let filtered = match &field_list { + Some(fl) => filter_fields(response.clone(), fl), + None => response.clone(), + }; + + if pagination.page_all { + let out = crate::output::format_value_paginated(&filtered, &format, pages_fetched == 1); + print!("{out}"); + } else { + let out = crate::output::format_value(&filtered, &format); + println!("{out}"); + break; + } + + // Determine next page (try link-based, then offset-based) + match extract_next_page(&response, &query_params) { + Some(NextPage::Url(url)) if pages_fetched < pagination.page_limit => { + next_url = Some(url); + if pagination.page_delay_ms > 0 { + tokio::time::sleep(std::time::Duration::from_millis(pagination.page_delay_ms)) + .await; + } + } + Some(NextPage::IncrementPage { next_page }) + if pages_fetched < pagination.page_limit => + { + // Update or insert the page query param for the next request + if let Some(p) = query_params.iter_mut().find(|(k, _)| k == "page") { + p.1 = next_page.to_string(); + } else { + query_params.push(("page".to_string(), next_page.to_string())); + } + next_url = None; // re-use normal path with updated query params + if pagination.page_delay_ms > 0 { + tokio::time::sleep(std::time::Duration::from_millis(pagination.page_delay_ms)) + .await; + } + } + _ => 
break, + } + } + + Ok(()) +} + +/// Substitute path parameters in the URL template. +/// +/// `api/scans/{id}/score` + `{"id": "abc-123"}` → `api/scans/abc-123/score` +fn substitute_path_params( + path: &str, + parameters: &BTreeMap, + params: &serde_json::Map, +) -> Result { + let mut url_path = path.to_string(); + for (name, param_def) in parameters { + if param_def.location == "path" { + let value = params + .get(name) + .map(|v| match v { + Value::String(s) => s.clone(), + other => other.to_string().trim_matches('"').to_string(), + }) + .or_else(|| param_def.default.clone()) + .with_context(|| format!("required path parameter '{name}' not provided"))?; + url_path = url_path.replace(&format!("{{{name}}}"), &value); + } + } + Ok(url_path) +} + +/// Collect query parameters from user-provided params that match discovery query params. +fn collect_query_params( + parameters: &BTreeMap, + params: &serde_json::Map, +) -> Vec<(String, String)> { + let mut query_params = Vec::new(); + for (name, param_def) in parameters { + if param_def.location == "query" { + if let Some(value) = params.get(name) { + let val_str = match value { + Value::String(s) => s.clone(), + other => other.to_string(), + }; + // The "query" parameter is a passthrough query string + // (e.g. "sort-by=severity&sort-ord=asc"). Expand it into + // individual query params instead of sending ?query=... + if name == "query" && val_str.contains('=') { + for pair in val_str.split('&') { + if let Some((k, v)) = pair.split_once('=') { + query_params.push((k.to_string(), v.to_string())); + } + } + } else { + query_params.push((name.clone(), val_str)); + } + } + } + } + query_params +} + +/// Determine how to fetch the next page of results. +/// +/// Tries two strategies in order: +/// 1. **Link-based** (`_links.next.href`) — follow the absolute URL directly. +/// Used by endpoints that return `Page` wrappers. +/// 2. 
**Offset-based** (`total-findings` + `page`/`per-page`) — increment the +/// `page` query param. Used by `scans.results.get` and similar endpoints. +fn extract_next_page( + response: &Value, + current_query_params: &[(String, String)], +) -> Option { + // 1. Try _links.next.href (link-based pagination) + if let Some(url) = response + .get("_links") + .and_then(|l| l.get("next")) + .and_then(|n| n.get("href")) + .and_then(|h| h.as_str()) + { + if !url.is_empty() { + return Some(NextPage::Url(url.to_string())); + } + } + + // 2. Try offset-based: total-findings + page/per-page + let total = response + .get("total-findings") + .and_then(|v| v.as_u64()) + .unwrap_or(0); + if total == 0 { + return None; + } + + let current_page: u32 = current_query_params + .iter() + .find(|(k, _)| k == "page") + .and_then(|(_, v)| v.parse().ok()) + .unwrap_or(1); + let per_page: u32 = current_query_params + .iter() + .find(|(k, _)| k == "per-page") + .and_then(|(_, v)| v.parse().ok()) + .unwrap_or(25); + + if (current_page as u64) * (per_page as u64) < total { + Some(NextPage::IncrementPage { + next_page: current_page + 1, + }) + } else { + None + } +} + +/// Client-side field filtering for context window protection. +/// +/// Keeps only the specified top-level keys from JSON objects. +/// Arrays are filtered element-wise. Primitives pass through unchanged. +/// Metadata keys that wrap paginated data (not user-facing fields). +const WRAPPER_SKIP_KEYS: &[&str] = &[ + "nextPageToken", + "kind", + "total-findings", + "total_findings", + "_links", + "_embedded", + "links", +]; + +fn filter_fields(value: Value, fields: &[&str]) -> Value { + match value { + Value::Object(map) => { + let filtered: serde_json::Map<_, _> = map + .iter() + .filter(|(k, _)| fields.contains(&k.as_str())) + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + + // If direct filtering found matches, use them. 
+ if !filtered.is_empty() { + return Value::Object(filtered); + } + + // Otherwise, look for a paginated wrapper array (e.g. {"data": [...]}) + // and filter each element inside it. + let mut new_map = serde_json::Map::new(); + for (k, v) in &map { + if WRAPPER_SKIP_KEYS.contains(&k.as_str()) || k.starts_with('_') { + continue; + } + if let Value::Array(arr) = v { + let filtered_arr: Vec = arr + .iter() + .map(|item| filter_fields(item.clone(), fields)) + .collect(); + new_map.insert(k.clone(), Value::Array(filtered_arr)); + return Value::Object(new_map); + } + } + + // No wrapper found either — return the (empty) direct filter result. + Value::Object(filtered) + } + Value::Array(arr) => { + Value::Array(arr.into_iter().map(|v| filter_fields(v, fields)).collect()) + } + other => other, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + fn param(location: &str, required: bool, default: Option<&str>) -> DiscoveryParameter { + DiscoveryParameter { + param_type: "string".to_string(), + required, + location: location.to_string(), + description: None, + format: None, + enum_values: None, + default: default.map(String::from), + } + } + + mod filter_fields { + use super::*; + + #[test] + fn should_keep_only_specified_keys() { + let input = json!({"id": "abc", "name": "test", "score": 72, "tags": []}); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!({"id": "abc", "name": "test"}) + ); + } + + #[test] + fn should_filter_each_array_element() { + let input = json!([ + {"id": "1", "name": "a", "extra": true}, + {"id": "2", "name": "b", "extra": false} + ]); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!([{"id": "1", "name": "a"}, {"id": "2", "name": "b"}]) + ); + } + + #[test] + fn should_pass_through_primitives() { + let input = json!("hello"); + assert_eq!(super::super::filter_fields(input.clone(), &["id"]), input); + } + + #[test] + fn should_descend_into_paginated_wrapper() { + let input 
= json!({ + "data": [ + {"id": "1", "name": "obj1", "score": 72, "tags": ["a"]}, + {"id": "2", "name": "obj2", "score": 85, "tags": ["b"]} + ], + "links": {"next": "/api/objects?page=2"} + }); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!({"data": [{"id": "1", "name": "obj1"}, {"id": "2", "name": "obj2"}]}) + ); + } + } + + mod substitute_path_params { + use super::*; + + #[test] + fn should_replace_single_param() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + let params = + serde_json::from_str::>(r#"{"id": "scan-123"}"#) + .unwrap(); + + let result = + super::super::substitute_path_params("api/scans/{id}", ¶meters, ¶ms) + .unwrap(); + assert_eq!(result, "api/scans/scan-123"); + } + + #[test] + fn should_replace_multiple_params() { + let mut parameters = BTreeMap::new(); + parameters.insert("scan_id".to_string(), param("path", true, None)); + parameters.insert("analysis_id".to_string(), param("path", true, None)); + let params = serde_json::from_str::>( + r#"{"scan_id": "s-1", "analysis_id": "cve"}"#, + ) + .unwrap(); + + let result = super::super::substitute_path_params( + "api/scans/{scan_id}/results/{analysis_id}", + ¶meters, + ¶ms, + ) + .unwrap(); + assert_eq!(result, "api/scans/s-1/results/cve"); + } + + #[test] + fn should_error_on_missing_required() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + + let result = super::super::substitute_path_params( + "api/scans/{id}", + ¶meters, + &serde_json::Map::new(), + ); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("id")); + } + + #[test] + fn should_use_default_value() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, Some("default-id"))); + + let result = super::super::substitute_path_params( + "api/scans/{id}", + ¶meters, + &serde_json::Map::new(), + ) + .unwrap(); + 
assert_eq!(result, "api/scans/default-id"); + } + } + + mod collect_query_params { + use super::*; + + #[test] + fn should_include_query_params() { + let mut parameters = BTreeMap::new(); + parameters.insert("limit".to_string(), param("query", false, None)); + parameters.insert("page".to_string(), param("query", false, None)); + let params = serde_json::from_str::>( + r#"{"limit": "10", "page": "2"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 2); + assert!(result.contains(&("limit".to_string(), "10".to_string()))); + } + + #[test] + fn should_exclude_path_params() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + parameters.insert("limit".to_string(), param("query", false, None)); + let params = serde_json::from_str::>( + r#"{"id": "scan-1", "limit": "10"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 1); + assert_eq!(result[0].0, "limit"); + } + + #[test] + fn should_expand_query_passthrough_param() { + let mut parameters = BTreeMap::new(); + parameters.insert("query".to_string(), param("query", true, None)); + let params = serde_json::from_str::>( + r#"{"query": "sort-by=severity&sort-ord=asc&per-page=25"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 3); + assert!(result.contains(&("sort-by".to_string(), "severity".to_string()))); + assert!(result.contains(&("sort-ord".to_string(), "asc".to_string()))); + assert!(result.contains(&("per-page".to_string(), "25".to_string()))); + } + } + + mod extract_next_page { + use super::*; + + #[test] + fn should_follow_links_href() { + let response = json!({ + "data": [{"id": "1"}], + "_links": {"next": {"href": "https://api.example.com/scans?page=2"}} + }); + match super::super::extract_next_page(&response, &[]) { + Some(NextPage::Url(url)) => { + assert_eq!(url, 
"https://api.example.com/scans?page=2"); + } + other => panic!("expected NextPage::Url, got {other:?}"), + } + } + + #[test] + fn should_increment_page_for_offset() { + let response = json!({"findings": [], "total-findings": 100}); + let qp = vec![ + ("page".to_string(), "1".to_string()), + ("per-page".to_string(), "25".to_string()), + ]; + match super::super::extract_next_page(&response, &qp) { + Some(NextPage::IncrementPage { next_page }) => assert_eq!(next_page, 2), + other => panic!("expected IncrementPage, got {other:?}"), + } + + // Also works with default page/per-page (no query params) + match super::super::extract_next_page(&response, &[]) { + Some(NextPage::IncrementPage { next_page }) => assert_eq!(next_page, 2), + other => panic!("expected IncrementPage with defaults, got {other:?}"), + } + } + + #[test] + fn should_stop_on_last_page() { + let response = json!({"findings": [], "total-findings": 25}); + let qp = vec![ + ("page".to_string(), "1".to_string()), + ("per-page".to_string(), "25".to_string()), + ]; + assert!(super::super::extract_next_page(&response, &qp).is_none()); + } + + #[test] + fn should_return_none_without_signals() { + // No _links, no total-findings + assert!(super::super::extract_next_page(&json!({"data": []}), &[]).is_none()); + // Empty _links + assert!(super::super::extract_next_page(&json!({"_links": {}}), &[]).is_none()); + // Null next + assert!( + super::super::extract_next_page(&json!({"_links": {"next": null}}), &[]).is_none() + ); + } + } + + mod pagination_config { + use super::*; + + #[test] + fn should_have_sensible_defaults() { + let cfg = PaginationConfig::default(); + assert!(!cfg.page_all); + assert_eq!(cfg.page_limit, 10); + assert_eq!(cfg.page_delay_ms, 100); + } + } +} diff --git a/src/agent_api/generate_skills.rs b/src/agent_api/generate_skills.rs new file mode 100644 index 0000000..31f0d28 --- /dev/null +++ b/src/agent_api/generate_skills.rs @@ -0,0 +1,457 @@ +//! Skill file generator. +//! +//! 
`analyzer generate-skills` reads discovery documents for all registered +//! services and writes markdown skill files to `skills/` — one per top-level +//! API resource per service, plus a shared skill for global flags, auth, and +//! error handling. +//! +//! Skills are intentionally minimal: method names + one-line descriptions only. +//! Agents use `analyzer schema ..` at runtime to +//! discover parameters, request/response schemas, and types — schema +//! introspection replaces static documentation. + +use std::fmt::Write as FmtWrite; +use std::path::Path; + +use anyhow::{Context, Result}; + +use crate::discovery::{DiscoveryDocument, DiscoveryResource}; +use crate::services::ServiceEntry; + +struct SkillIndexEntry { + name: String, + description: String, +} + +/// Generate skill files for a single registered service. +/// +/// Writes `skills/{alias}-/SKILL.md` for each top-level resource +/// under `api` in the discovery document. +pub fn generate_for_service( + doc: &DiscoveryDocument, + entry: &ServiceEntry, + output_dir: &Path, +) -> Result<()> { + let alias = entry.aliases[0]; + let api_resource = doc + .resources + .get("api") + .context("discovery document must have an 'api' resource")?; + + std::fs::create_dir_all(output_dir).with_context(|| { + format!( + "failed to create output directory: {}", + output_dir.display() + ) + })?; + + for (resource_name, resource) in &api_resource.resources { + let skill_name = format!("{alias}-{resource_name}"); + let skill_dir = output_dir.join(&skill_name); + std::fs::create_dir_all(&skill_dir)?; + + let content = render_resource_skill(alias, resource_name, resource, entry, &doc.version); + let skill_path = skill_dir.join("SKILL.md"); + std::fs::write(&skill_path, &content) + .with_context(|| format!("failed to write {}", skill_path.display()))?; + + println!(" wrote {}", skill_path.display()); + } + + Ok(()) +} + +/// Generate the shared skill file for a specific service. 
+/// +/// Writes `skills/{alias}-shared/SKILL.md` following the same `{alias}-{name}` +/// pattern used by resource skills, so each service (analyzer, isaac, …) gets +/// its own shared skill. +pub fn generate_shared(entry: &ServiceEntry, output_dir: &Path) -> Result<()> { + let alias = entry.aliases[0]; + std::fs::create_dir_all(output_dir).with_context(|| { + format!( + "failed to create output directory: {}", + output_dir.display() + ) + })?; + + let shared_dir = output_dir.join(format!("{alias}-shared")); + std::fs::create_dir_all(&shared_dir)?; + let content = generate_shared_skill(entry); + let shared_path = shared_dir.join("SKILL.md"); + std::fs::write(&shared_path, &content) + .with_context(|| format!("failed to write {}", shared_path.display()))?; + println!(" wrote {}", shared_path.display()); + Ok(()) +} + +/// Write `docs/skills.md` index file listing all generated skills. +pub fn write_skills_index(output_dir: &Path) -> Result<()> { + let mut entries: Vec = Vec::new(); + + if output_dir.exists() { + let mut dirs: Vec<_> = std::fs::read_dir(output_dir)? + .filter_map(|e| e.ok()) + .filter(|e| e.file_type().map(|t| t.is_dir()).unwrap_or(false)) + .collect(); + dirs.sort_by_key(|e| e.file_name()); + + for entry in dirs { + let name = entry.file_name().to_string_lossy().to_string(); + let skill_file = entry.path().join("SKILL.md"); + if skill_file.exists() { + let description = if name.ends_with("-shared") { + format!( + "Shared patterns for authentication, global flags, and error handling ({name})." + ) + } else { + format!("API operations for {name}.") + }; + entries.push(SkillIndexEntry { name, description }); + } + } + } + + let mut out = String::new(); + writeln!(out, "# Skills Index").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "> Auto-generated by `analyzer generate-skills`. Do not edit manually." 
+ ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "| Skill | Description |").unwrap(); + writeln!(out, "|-------|-------------|").unwrap(); + for entry in &entries { + writeln!( + out, + "| [{}](../skills/{}/SKILL.md) | {} |", + entry.name, entry.name, entry.description + ) + .unwrap(); + } + writeln!(out).unwrap(); + + let docs_dir = Path::new("docs"); + std::fs::create_dir_all(docs_dir).context("failed to create docs directory")?; + let path = docs_dir.join("skills.md"); + std::fs::write(&path, &out).with_context(|| format!("failed to write {}", path.display()))?; + println!(" wrote {}", path.display()); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Rendering — resource skills (minimal, gws pattern) +// --------------------------------------------------------------------------- + +/// Render a SKILL.md for a single top-level resource. +/// +/// Intentionally minimal: frontmatter, syntax, method names with descriptions, +/// and a "Discovering Commands" section pointing to schema introspection. +/// No examples, no flags tables, no parameter details — agents use +/// `analyzer schema` to discover those at runtime. +fn render_resource_skill( + service_alias: &str, + name: &str, + resource: &DiscoveryResource, + entry: &ServiceEntry, + version: &str, +) -> String { + let skill_name = format!("{service_alias}-{name}"); + + let description = build_resource_description(service_alias, name, resource, entry); + + let mut out = format!( + r#"--- +name: {skill_name} +version: {version} +description: "{description}" +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api {service_alias} {name} --help" +--- + +# {name} ({version}) + +> **Before unfamiliar calls:** run `analyzer schema {service_alias}.{name}.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. 
+> **Auth/config/connection errors?** Read `../{service_alias}-shared/SKILL.md` + +```bash +analyzer api {service_alias} {name} [flags] +``` + +## API Resources + +"#, + ); + + // Direct methods on this resource + render_methods(&mut out, &resource.methods); + + // Sub-resources (gws pattern: "name — Operations on the 'name' resource") + render_sub_resources(&mut out, resource); + + // Discovering Commands — the key section that replaces bloated examples + write!( + out, + r#"## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api {service_alias} {name} --help + +# Inspect a method's required params, types, and defaults +analyzer schema {service_alias}.{name}. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. +"# + ) + .unwrap(); + + // Add workflow hints for resources with non-obvious ID lookup patterns + if name == "scans" { + write!( + out, + r#" +## Workflow: Fetching Analysis Results + +`analysis_id` parameters are UUIDs, not type names like "cve". The `query` parameter is required. + +1. Get analysis UUIDs: `analyzer api {service_alias} scans status list --params '{{"id": "SCAN_ID"}}'` +2. Find the UUID under the analysis type key (e.g., `cve.id`) +3. Fetch results with required `query` param: + ```bash + analyzer api {service_alias} scans results get --params '{{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}}' + ``` + +The `query` value contains filter params. Required fields per analysis type (all need `sort-ord=asc|desc`): +- **CVE, hardening, capabilities, password-hash:** `sort-by=severity` +- **malware:** `sort-by=filename` +- **crypto:** `sort-by=type` +- **kernel:** `sort-by=features` +- **software-bom:** `sort-by=name` + +Optional: `&page=1&per-page=25` (both required together, never use one without the other), `&search=openssl`, `&severity-filter=critical`. 
+"# + ) + .unwrap(); + } + + out +} + +/// Render method names with one-line descriptions. +fn render_methods( + out: &mut String, + methods: &std::collections::BTreeMap, +) { + for (method_name, method) in methods { + let desc = truncate_desc(method.description.as_deref().unwrap_or("")); + writeln!(out, " - `{method_name}` — {desc}").unwrap(); + } +} + +/// Render sub-resources. Deep nesting is listed as a pointer, not fully expanded. +fn render_sub_resources(out: &mut String, resource: &DiscoveryResource) { + for (sub_name, sub_resource) in &resource.resources { + writeln!(out).unwrap(); + writeln!(out, "### {sub_name}").unwrap(); + writeln!(out).unwrap(); + if sub_resource.methods.is_empty() && !sub_resource.resources.is_empty() { + // Container resource — list nested resources as pointers + for nested_name in sub_resource.resources.keys() { + writeln!( + out, + " - `{nested_name}` — Operations on the '{nested_name}' resource" + ) + .unwrap(); + } + } else { + render_methods(out, &sub_resource.methods); + // Nested sub-resources listed as pointers + for nested_name in sub_resource.resources.keys() { + writeln!( + out, + " - `{nested_name}` — Operations on the '{nested_name}' resource" + ) + .unwrap(); + } + } + } + writeln!(out).unwrap(); +} + +/// Build a skill description dynamically from the discovery doc's method and +/// sub-resource names, so new services work without code changes. +/// +/// Produces strings like: +/// "Create, Delete, Get, List, Update objects; plus scans via the analyzer CLI" +fn build_resource_description( + service_alias: &str, + name: &str, + resource: &DiscoveryResource, + entry: &ServiceEntry, +) -> String { + let methods_part: String = { + let actions: Vec<_> = resource + .methods + .keys() + .map(|k| capitalize_first(k)) + .collect(); + if actions.is_empty() { + capitalize_first(name) + } else { + actions.join(", ") + } + }; + + // List sub-resources for context; cap at 3 to keep the description short. 
+ let sub_names: Vec<_> = resource.resources.keys().collect(); + let sub_part = match sub_names.len() { + 0 => String::new(), + 1..=4 => format!( + "; plus {}", + sub_names + .iter() + .map(|k| k.as_str()) + .collect::>() + .join(", ") + ), + _ => format!( + "; plus {}, and {} more", + sub_names[..3] + .iter() + .map(|k| k.as_str()) + .collect::>() + .join(", "), + sub_names.len() - 3 + ), + }; + + truncate_desc(&format!( + "{methods_part} {name}{sub_part} via the {service_alias} CLI — {}", + entry.description + )) +} + +fn capitalize_first(s: &str) -> String { + let mut c = s.chars(); + match c.next() { + None => String::new(), + Some(first) => { + let upper: String = first.to_uppercase().collect(); + format!("{upper}{}", c.as_str()) + } + } +} + +/// Truncate a description to its first sentence/line. +/// +/// Limit is 160 chars — long enough for auto-generated descriptions that +/// include method names, sub-resources, and the service tagline, while +/// still fitting comfortably in Claude Code's skill frontmatter index. +const DESCRIPTION_MAX_LEN: usize = 160; + +fn truncate_desc(desc: &str) -> String { + let first_line = desc.lines().next().unwrap_or(""); + // Cut at first sentence end if within limit + let truncated = first_line + .find(". ") + .map(|i| &first_line[..=i]) + .unwrap_or(first_line); + if truncated.len() > DESCRIPTION_MAX_LEN { + format!("{}...", &truncated[..DESCRIPTION_MAX_LEN - 3]) + } else { + truncated.to_string() + } +} + +// --------------------------------------------------------------------------- +// Rendering — shared skill (raw string for readability) +// --------------------------------------------------------------------------- + +/// Generate the shared SKILL.md for a specific service. +/// +/// Uses the service alias to produce `{alias}-shared` naming and +/// `{alias}`-specific schema examples, so each service gets its own +/// shared skill when multiple discovery documents are registered. 
+fn generate_shared_skill(entry: &ServiceEntry) -> String { + let alias = entry.aliases[0]; + + format!( + r#"--- +name: {alias}-shared +description: "{alias} CLI: auth, introspection, and agent rules for firmware/container security scanning." +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- + +# {alias} CLI — Agent Rules + +## Discover before you act + +The CLI is self-documenting. Use these before every unfamiliar call: + +```bash +analyzer api {alias} --help # browse resources +analyzer api {alias} --help # browse methods +analyzer schema {alias}.. # inspect params, types, defaults +``` + +## Rules + +1. **Schema first** — run `analyzer schema` before building `--params` or `--json`. +2. **Protect context** — add `--fields` to every `list` and `get` call. +3. **Dry-run mutations** — use `--dry-run` for create/update/delete, then confirm with the user before executing. +4. **Poll, don't guess** — after scheduling a scan, poll status until complete. + +## Auth + +```bash +analyzer login # interactive, saves to ~/.config/analyzer/ +export ANALYZER_API_KEY="..." # or set env var +``` + +## Errors + +All errors: JSON on stderr, non-zero exit. Parse before retrying. +"# + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn truncate_short_desc() { + assert_eq!(truncate_desc("Returns a scan."), "Returns a scan."); + } + + #[test] + fn truncate_multi_sentence() { + assert_eq!( + truncate_desc("Returns a scan. This includes all metadata and results."), + "Returns a scan." + ); + } + + #[test] + fn truncate_long_desc() { + let long = "a".repeat(200); + let result = truncate_desc(&long); + assert!(result.len() <= DESCRIPTION_MAX_LEN); + assert!(result.ends_with("...")); + } +} diff --git a/src/agent_api/mod.rs b/src/agent_api/mod.rs new file mode 100644 index 0000000..6b81e40 --- /dev/null +++ b/src/agent_api/mod.rs @@ -0,0 +1,216 @@ +//! Discovery-driven dynamic command tree. +//! +//! 
Builds a `clap::Command` tree at runtime from the Discovery Document's +//! resource hierarchy, then dispatches matched methods through the executor. + +pub mod executor; +pub mod generate_skills; +pub mod schema; + +use anyhow::{Context, Result}; + +use crate::client::AnalyzerClient; +use crate::config; +use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; +use crate::output::Format; + +/// Build the `api` clap command tree from the discovery document. +/// +/// Skips the top-level `"api"` resource wrapper since the CLI already +/// prefixes with `analyzer api ...`. +pub fn build_api_command(doc: &DiscoveryDocument) -> clap::Command { + let api_resource = doc + .resources + .get("api") + .expect("discovery document must have an 'api' resource"); + + let mut cmd = clap::Command::new("api") + .about("Discovery-driven API access (dynamically generated)") + .arg_required_else_help(true) + .arg( + clap::Arg::new("params") + .long("params") + .help("Path and query parameters as JSON") + .global(true), + ) + .arg( + clap::Arg::new("json") + .long("json") + .help("Request body as JSON (for POST/PUT/PATCH)") + .global(true), + ) + .arg( + clap::Arg::new("dry-run") + .long("dry-run") + .action(clap::ArgAction::SetTrue) + .help("Print the request without sending it") + .global(true), + ) + .arg( + clap::Arg::new("fields") + .long("fields") + .help("Comma-separated response fields to include (e.g. 
\"id,name,score\")") + .global(true), + ) + .arg( + clap::Arg::new("page-all") + .long("page-all") + .action(clap::ArgAction::SetTrue) + .help("Auto-paginate and output NDJSON (one JSON object per line)") + .global(true), + ) + .arg( + clap::Arg::new("page-limit") + .long("page-limit") + .default_value("10") + .help("Maximum number of pages to fetch (default: 10)") + .global(true), + ) + .arg( + clap::Arg::new("page-delay") + .long("page-delay") + .default_value("100") + .help("Delay between page fetches in milliseconds (default: 100)") + .global(true), + ) + .arg( + clap::Arg::new("format") + .long("format") + .value_parser(clap::builder::EnumValueParser::::new()) + .help("Output format: human, json, table, csv") + .global(true), + ); + + cmd = add_resource_subcommands(cmd, api_resource); + cmd +} + +/// Recursively add subcommands from the resource tree. +/// Names and descriptions are cloned to owned `String`s because clap requires `'static`. +fn add_resource_subcommands( + mut parent: clap::Command, + resource: &DiscoveryResource, +) -> clap::Command { + for (method_name, method) in &resource.methods { + let about = method.description.clone().unwrap_or_default(); + let leaf = clap::Command::new(method_name.clone()).about(about); + parent = parent.subcommand(leaf); + } + + for (resource_name, child_resource) in &resource.resources { + let mut child_cmd = clap::Command::new(resource_name.clone()).arg_required_else_help(true); + child_cmd = add_resource_subcommands(child_cmd, child_resource); + parent = parent.subcommand(child_cmd); + } + + parent +} + +/// Dispatch a matched `api` command to the executor. +/// +/// Client creation is deferred so that `--dry-run` works without auth. 
+pub async fn dispatch( + doc: &DiscoveryDocument, + matches: &clap::ArgMatches, + api_key: Option<&str>, + url: Option<&str>, + profile: Option<&str>, + format: Format, +) -> Result<()> { + let (path, leaf_matches) = extract_subcommand_path(matches); + + let api_resource = doc + .resources + .get("api") + .context("discovery document must have 'api' resource")?; + + let path_refs: Vec<&str> = path.iter().map(|s| s.as_str()).collect(); + let method = discovery::resolve_method(api_resource, &path_refs) + .with_context(|| format!("no method found at path: {}", path.join(".")))?; + + // --format from the api subcommand overrides the top-level value + let format = leaf_matches + .get_one::("format") + .or_else(|| matches.get_one::("format")) + .copied() + .unwrap_or(format); + + let params_json = get_global_arg(leaf_matches, matches, "params"); + let body_json = get_global_arg(leaf_matches, matches, "json"); + let fields = get_global_arg(leaf_matches, matches, "fields"); + let dry_run = leaf_matches.get_flag("dry-run") || matches.get_flag("dry-run"); + let page_all = leaf_matches.get_flag("page-all") || matches.get_flag("page-all"); + let page_limit: u32 = get_global_arg(leaf_matches, matches, "page-limit") + .unwrap_or_else(|| "10".into()) + .parse() + .unwrap_or(10); + let page_delay: u64 = get_global_arg(leaf_matches, matches, "page-delay") + .unwrap_or_else(|| "100".into()) + .parse() + .unwrap_or(100); + + let pagination = executor::PaginationConfig { + page_all, + page_limit, + page_delay_ms: page_delay, + }; + + if dry_run { + return executor::execute_method( + None, + method, + params_json.as_deref(), + body_json.as_deref(), + fields.as_deref(), + &pagination, + true, + format, + ) + .await; + } + + let cfg = config::resolve_for_discovery(api_key, url, profile)?; + let api_key = cfg.api_key.ok_or_else(|| { + anyhow::anyhow!( + "no API key provided\n\n\ + Set it with one of:\n \ + analyzer login --env dev\n \ + analyzer --api-key ...\n \ + export 
ANALYZER_API_KEY=" + ) + })?; + let client = AnalyzerClient::new(cfg.url, &api_key)?; + executor::execute_method( + Some(&client), + method, + params_json.as_deref(), + body_json.as_deref(), + fields.as_deref(), + &pagination, + false, + format, + ) + .await +} + +/// Extract the subcommand path by walking ArgMatches recursively. +fn extract_subcommand_path(matches: &clap::ArgMatches) -> (Vec, &clap::ArgMatches) { + let mut path = Vec::new(); + let mut current = matches; + while let Some((name, sub_matches)) = current.subcommand() { + path.push(name.to_string()); + current = sub_matches; + } + (path, current) +} + +/// Get a global arg that may be on the leaf or any parent matches. +fn get_global_arg( + leaf: &clap::ArgMatches, + parent: &clap::ArgMatches, + name: &str, +) -> Option { + leaf.get_one::(name) + .or_else(|| parent.get_one::(name)) + .cloned() +} diff --git a/src/agent_api/schema.rs b/src/agent_api/schema.rs new file mode 100644 index 0000000..f473f00 --- /dev/null +++ b/src/agent_api/schema.rs @@ -0,0 +1,105 @@ +//! Schema introspection command. +//! +//! `analyzer schema api.scans.score.list` dumps the method signature +//! as machine-readable JSON (httpMethod, path, parameters, request/response refs). + +use anyhow::{Context, Result, bail}; +use serde::Serialize; + +use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; + +/// Handle `analyzer schema `. +/// +/// The path is resolved from the root of the discovery document's resource tree. +/// Service-level routing (extracting the service name from the user's input) +/// is handled by main.rs, which prepends `"api."` before calling this function. 
+pub fn handle_schema_command(doc: &DiscoveryDocument, dotted_path: &str) -> Result<()> { + let segments: Vec<&str> = dotted_path.split('.').collect(); + + if segments.is_empty() { + bail!("path cannot be empty"); + } + + // First segment selects the top-level resource (typically "api") + let first = segments[0]; + let api_resource = doc + .resources + .get(first) + .with_context(|| format!("resource '{first}' not found in discovery document"))?; + + let rest = &segments[1..]; + + if rest.is_empty() { + print_resource_tree(api_resource, 0); + return Ok(()); + } + + // Try to resolve as a method first + if let Some(method) = discovery::resolve_method(api_resource, rest) { + let output = serde_json::to_string_pretty(&MethodSchema::from(method))?; + println!("{output}"); + return Ok(()); + } + + // Try to resolve as a resource and list its contents + if let Some(resource) = discovery::resolve_resource(api_resource, rest) { + print_resource_tree(resource, 0); + return Ok(()); + } + + bail!( + "path '{}' not found in discovery document", + segments.join(".") + ); +} + +/// Serializable view of a method for schema output. 
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct MethodSchema<'a> {
+    id: &'a str,
+    http_method: &'a str,
+    path: &'a str,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    description: Option<&'a str>,
+    parameters: &'a std::collections::BTreeMap<String, crate::discovery::DiscoveryParameter>,
+    parameter_order: &'a [String],
+    #[serde(skip_serializing_if = "Option::is_none")]
+    request: Option<&'a crate::discovery::SchemaRef>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    response: Option<&'a crate::discovery::SchemaRef>,
+}
+
+impl<'a> From<&'a crate::discovery::DiscoveryMethod> for MethodSchema<'a> {
+    fn from(m: &'a crate::discovery::DiscoveryMethod) -> Self {
+        Self {
+            id: &m.id,
+            http_method: &m.http_method,
+            path: &m.path,
+            description: m.description.as_deref(),
+            parameters: &m.parameters,
+            parameter_order: &m.parameter_order,
+            request: m.request.as_ref(),
+            response: m.response.as_ref(),
+        }
+    }
+}
+
+/// Print the resource tree for navigation.
+fn print_resource_tree(resource: &DiscoveryResource, indent: usize) {
+    let pad = "  ".repeat(indent);
+    for (name, method) in &resource.methods {
+        let desc = method
+            .description
+            .as_deref()
+            .unwrap_or("")
+            .chars()
+            .take(60)
+            .collect::<String>();
+        println!("{pad}{name} ({}) — {desc}", method.http_method);
+    }
+    for (name, child) in &resource.resources {
+        println!("{pad}{name}/");
+        print_resource_tree(child, indent + 1);
+    }
+}
diff --git a/src/client/mod.rs b/src/client/mod.rs
index 9423ed4..d66938d 100644
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1,12 +1,13 @@
 //! Typed HTTP client for the Analyzer API.
+#[allow(dead_code)] pub mod models; use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; -use anyhow::{Result, bail}; +use anyhow::{Context as _, Result, bail}; use futures::Stream; use indicatif::{ProgressBar, ProgressState, ProgressStyle}; use reqwest::{Body, Client, header, multipart}; @@ -244,6 +245,77 @@ impl AnalyzerClient { Self::bytes(resp).await } + // -- Generic execution (discovery-driven commands) ------------------------- + + /// Execute a raw API request from discovery method metadata. + /// Returns the response as a JSON value, or `{"status": "ok"}` for empty bodies. + pub async fn execute_raw( + &self, + http_method: &str, + path: &str, + query_params: &[(String, String)], + body: Option<&serde_json::Value>, + ) -> Result { + let mut url = self.base_url.join(path)?; + for (key, val) in query_params { + url.query_pairs_mut().append_pair(key, val); + } + + let builder = match http_method.to_uppercase().as_str() { + "GET" => self.client.get(url), + "POST" => self.client.post(url), + "PUT" => self.client.put(url), + "DELETE" => self.client.delete(url), + "PATCH" => self.client.patch(url), + other => bail!("unsupported HTTP method: {other}"), + }; + + let builder = if let Some(body) = body { + builder.json(body) + } else { + builder + }; + + let resp = builder.send().await?; + let status = resp.status(); + + if status.is_success() { + let text = resp.text().await?; + if text.is_empty() { + Ok(serde_json::json!({"status": "ok"})) + } else { + serde_json::from_str(&text).context("response is not valid JSON") + } + } else { + let body = resp.text().await.unwrap_or_default(); + bail!("API error (HTTP {status}): {body}"); + } + } + + /// Execute a raw GET request to an absolute URL (for following pagination links). + /// + /// Used by `--page-all` to follow `_links.next.href` URLs which are absolute. 
+    pub async fn execute_raw_url(&self, url: &str) -> Result<serde_json::Value> {
+        let resp = self.client.get(url).send().await?;
+        let status = resp.status();
+        if status.is_success() {
+            let text = resp.text().await?;
+            if text.is_empty() {
+                Ok(serde_json::json!({"status": "ok"}))
+            } else {
+                serde_json::from_str(&text).context("response is not valid JSON")
+            }
+        } else {
+            let body = resp.text().await.unwrap_or_default();
+            bail!("API error (HTTP {status}): {body}");
+        }
+    }
+
+    /// Expose the base URL for dry-run output.
+    pub fn base_url(&self) -> &Url {
+        &self.base_url
+    }
+
     // -- Response helpers -----------------------------------------------------
 
     async fn json(resp: reqwest::Response) -> Result {
diff --git a/src/client/models.rs b/src/client/models.rs
index 6ab9531..7a84632 100644
--- a/src/client/models.rs
+++ b/src/client/models.rs
@@ -2,6 +2,8 @@
 //!
 //! These types are owned by the CLI and match the API's serialization format.
 //! No dependency on the `analyzer-api` crate.
+//!
+//! Some finding types are defined ahead of their use in human-mode commands.
use std::collections::HashMap; use std::fmt; diff --git a/src/commands/auth.rs b/src/commands/auth.rs index 32a567e..da5b8af 100644 --- a/src/commands/auth.rs +++ b/src/commands/auth.rs @@ -117,10 +117,19 @@ pub fn run_whoami(api_key: Option<&str>, url: Option<&str>, profile: Option<&str None => "(not set)".to_string(), }; + let env_label = crate::config::Environment::from_url(&resolved_url) + .map(|e| format!(" ({e:?})")) + .unwrap_or_default(); + eprintln!("{}", style("Analyzer CLI").bold().underlined()); eprintln!(); eprintln!(" {:>12} {}", style("Profile:").bold(), profile_name); - eprintln!(" {:>12} {}", style("URL:").bold(), resolved_url); + eprintln!( + " {:>12} {}{}", + style("URL:").bold(), + resolved_url, + style(env_label).dim() + ); eprintln!(" {:>12} {}", style("API Key:").bold(), masked_key); if let Ok(path) = ConfigFile::path() { diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs new file mode 100644 index 0000000..af5d21b --- /dev/null +++ b/src/commands/init_agent.rs @@ -0,0 +1,156 @@ +//! `analyzer init-agent` — install Claude Code agent integration globally. +//! +//! Writes skills and settings.json into `~/.claude/` so that Claude Code +//! automatically discovers the `analyzer` CLI in every project via on-demand +//! skill loading — no global CLAUDE.md or CONTEXT.md pollution. + +use std::path::Path; + +use anyhow::{Context, Result}; + +use crate::agent_api::generate_skills; +use crate::discovery; +use crate::output; +use crate::services; + +const ANALYZER_PERMISSION: &str = "Bash(analyzer *)"; + +/// Run the init-agent command, writing all artifacts to `~/.claude/`. +pub async fn run_init_agent(discovery_flag: Option<&str>) -> Result<()> { + let claude_dir = resolve_claude_dir()?; + init_agent_to_dir(&claude_dir, discovery_flag).await +} + +/// Core logic, factored out for testing with arbitrary target directories. 
+pub async fn init_agent_to_dir(claude_dir: &Path, discovery_flag: Option<&str>) -> Result<()> { + std::fs::create_dir_all(claude_dir) + .with_context(|| format!("failed to create {}", claude_dir.display()))?; + + // 1. Generate skills + let skills_dir = claude_dir.join("skills"); + output::status("Skills", "generating from discovery documents..."); + for entry in services::SERVICES { + let doc = if let Some(flag) = discovery_flag { + let source = discovery::resolve_source(Some(flag))?; + let doc = discovery::load(&source).await?; + // Warm the cache so subsequent runs without --discovery use this version. + discovery::warm_cache(entry, &doc); + doc + } else { + discovery::load_for_service(entry).await? + }; + generate_skills::generate_for_service(&doc, entry, &skills_dir)?; + generate_skills::generate_shared(entry, &skills_dir)?; + } + + // 2. Merge settings.json + let settings_path = claude_dir.join("settings.json"); + merge_settings_json(&settings_path)?; + + // Summary + output::success("Claude Code agent integration configured"); + output::status("Skills", &format!("{}", skills_dir.display())); + output::status("Settings", &format!("{}", settings_path.display())); + + Ok(()) +} + +/// Merge `Bash(analyzer *)` into `settings.json` without clobbering existing permissions. +fn merge_settings_json(path: &Path) -> Result<()> { + let mut settings: serde_json::Value = if path.exists() { + let contents = std::fs::read_to_string(path) + .with_context(|| format!("failed to read {}", path.display()))?; + serde_json::from_str(&contents).context("settings.json is not valid JSON")? + } else { + serde_json::json!({}) + }; + + let permissions = settings + .as_object_mut() + .context("settings.json root must be an object")? + .entry("permissions") + .or_insert_with(|| serde_json::json!({})); + let allow = permissions + .as_object_mut() + .context("permissions must be an object")? 
+ .entry("allow") + .or_insert_with(|| serde_json::json!([])); + let allow_arr = allow + .as_array_mut() + .context("permissions.allow must be an array")?; + + if !allow_arr + .iter() + .any(|v| v.as_str() == Some(ANALYZER_PERMISSION)) + { + allow_arr.push(serde_json::Value::String(ANALYZER_PERMISSION.to_string())); + } + + std::fs::write(path, serde_json::to_string_pretty(&settings)?) + .with_context(|| format!("failed to write {}", path.display())) +} + +fn resolve_claude_dir() -> Result { + let home = dirs::home_dir().context("could not determine home directory")?; + Ok(home.join(".claude")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn merge_settings_creates_new_file() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 1); + assert_eq!(allow[0].as_str().unwrap(), ANALYZER_PERMISSION); + } + + #[test] + fn merge_settings_preserves_existing_permissions() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + let existing = serde_json::json!({ + "permissions": { + "allow": ["Read", "Bash(git *)"] + } + }); + std::fs::write(&path, serde_json::to_string_pretty(&existing).unwrap()).unwrap(); + + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 3); + assert!(allow.iter().any(|v| v.as_str() == Some("Read"))); + assert!(allow.iter().any(|v| v.as_str() == Some("Bash(git *)"))); + assert!( + allow + .iter() + .any(|v| v.as_str() == Some(ANALYZER_PERMISSION)) + ); + } + + #[test] + fn merge_settings_is_idempotent() { + let dir = 
tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + merge_settings_json(&path).unwrap(); + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 1, "should not duplicate the permission entry"); + } +} diff --git a/src/commands/mod.rs b/src/commands/mod.rs index d25aed7..8c21102 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -2,5 +2,6 @@ pub mod auth; pub mod config; +pub mod init_agent; pub mod object; pub mod scan; diff --git a/src/commands/object.rs b/src/commands/object.rs index d9d4a64..246e703 100644 --- a/src/commands/object.rs +++ b/src/commands/object.rs @@ -20,7 +20,7 @@ pub async fn run_list(client: &AnalyzerClient, format: Format) -> Result<()> { serde_json::to_string_pretty(&serde_json::to_value(&objects)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { if objects.is_empty() { output::status( "Objects", @@ -114,7 +114,7 @@ pub async fn run_new( serde_json::to_string_pretty(&serde_json::to_value(&object)?)? 
); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { output::success(&format!("Created object '{}' ({})", object.name, object.id)); } } diff --git a/src/commands/scan.rs b/src/commands/scan.rs index d93dd7b..d3ea075 100644 --- a/src/commands/scan.rs +++ b/src/commands/scan.rs @@ -10,11 +10,10 @@ use uuid::Uuid; use crate::client::AnalyzerClient; use crate::client::models::{ - AnalysisStatus, AnalysisStatusEntry, AnalysisType, CapabilityFinding, ComplianceReport, - ComplianceType, CryptoFinding, CveFinding, HardeningFinding, IdfSymbolFinding, IdfTaskFinding, - KernelFinding, MalwareFinding, PasswordFinding, ResultsQuery, SbomComponent, ScanTypeRequest, + AnalysisStatus, AnalysisStatusEntry, AnalysisType, ComplianceReport, ComplianceType, + ResultsQuery, ScanTypeRequest, }; -use crate::output::{self, Format, format_score, format_status}; +use crate::output::{self, Format, format_score, format_status, format_value}; /// Resolve a scan ID from either an explicit --scan or an --object flag. /// When --object is used, fetches the object and returns its last scan ID. @@ -190,7 +189,7 @@ pub async fn run_score(client: &AnalyzerClient, scan_id: Uuid, format: Format) - serde_json::to_string_pretty(&serde_json::to_value(&score)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!( "\n {} {}", style("Overall Score:").bold(), @@ -232,7 +231,7 @@ pub async fn run_types(client: &AnalyzerClient, format: Format) -> Result<()> { serde_json::to_string_pretty(&serde_json::to_value(&types)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { for st in &types { eprintln!("\n {}", style(&st.image_type).bold().underlined()); for a in &st.analyses { @@ -282,7 +281,7 @@ fn print_status( serde_json::to_string_pretty(&serde_json::Value::Object(map))? 
); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!( "\n {} {} ({})", style("Scan").bold(), @@ -397,7 +396,7 @@ pub async fn run_overview(client: &AnalyzerClient, scan_id: Uuid, format: Format Format::Json => { println!("{}", serde_json::to_string_pretty(&overview)?); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!("\n {} {}\n", style("Scan Overview").bold(), scan_id); if let Some(cve) = &overview.cve { @@ -544,350 +543,33 @@ pub async fn run_results( Format::Json => { println!("{}", serde_json::to_string_pretty(&results)?); } - Format::Human | Format::Table => { - let all_values: Vec<&serde_json::Value> = results.findings.iter().collect(); - - if all_values.is_empty() { - eprintln!("\n No findings.\n"); + Format::Human | Format::Table | Format::Csv => { + if results.findings.is_empty() { + if matches!(format, Format::Csv) { + // CSV: nothing to output + } else { + eprintln!("\n No findings.\n"); + } return Ok(()); } - match analysis_type { - AnalysisType::Cve => render_cve_table(&all_values)?, - AnalysisType::PasswordHash => render_password_table(&all_values)?, - AnalysisType::Malware => render_malware_table(&all_values)?, - AnalysisType::Hardening => render_hardening_table(&all_values)?, - AnalysisType::Capabilities => render_capabilities_table(&all_values)?, - AnalysisType::Crypto => render_crypto_table(&all_values)?, - AnalysisType::SoftwareBom => render_sbom_table(&all_values)?, - AnalysisType::Kernel => render_kernel_table(&all_values)?, - AnalysisType::Symbols => render_symbols_table(&all_values)?, - AnalysisType::Tasks => render_tasks_table(&all_values)?, - AnalysisType::Info => render_info(&all_values)?, - AnalysisType::StackOverflow => render_info(&all_values)?, - } - - let total_pages = results.total_findings.div_ceil(per_page as u64); - eprintln!( - "\n Page {}/{} ({} total) — use --page N to navigate\n", - page, total_pages, results.total_findings, - 
); - } - } - Ok(()) -} - -fn render_cve_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<15} {:<5} {:<14} {:<20} {}", - style("Severity").underlined(), - style("CVE ID").underlined(), - style("Score").underlined(), - style("Vendor").underlined(), - style("Product").underlined(), - style("Summary").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let score_str = f - .cvss - .as_ref() - .and_then(|c| c.v3.as_ref().or(c.v2.as_ref())) - .and_then(|d| d.base_score) - .map(|s| format!("{s:.1}")) - .unwrap_or_default(); - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - let product = f - .products - .first() - .and_then(|p| p.product.as_deref()) - .unwrap_or("-"); - let summary = f.summary.as_deref().unwrap_or(""); - let summary_trunc = if summary.len() > 40 { - format!("{}...", &summary[..37]) + let findings_value = serde_json::to_value(&results.findings)?; + let formatted = format_value(&findings_value, &format); + if matches!(format, Format::Csv) { + print!("{formatted}"); } else { - summary.to_string() - }; - eprintln!( - " {} {:<15} {:<5} {:<14} {:<20} {}", - sev, - f.cveid.as_deref().unwrap_or("-"), - score_str, - truncate_str(f.vendor.as_deref().unwrap_or("-"), 14), - truncate_str(product, 20), - summary_trunc, - ); - } - } - Ok(()) -} - -fn render_password_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<20} {}", - style("Severity").underlined(), - style("Username").underlined(), - style("Password").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {} {:<20} {}", - sev, - f.username.as_deref().unwrap_or("-"), - f.password.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_malware_table(values: &[&serde_json::Value]) -> Result<()> { - 
eprintln!(); - eprintln!( - " {:<30} {:<40} {}", - style("Filename").underlined(), - style("Description").underlined(), - style("Engine").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<30} {:<40} {}", - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - truncate_str(f.description.as_deref().unwrap_or("-"), 40), - f.detection_engine.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_hardening_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<30} {:<6} {:<3} {:<7} {:<7} {}", - style("Severity").underlined(), - style("Filename").underlined(), - style("Canary").underlined(), - style("NX").underlined(), - style("PIE").underlined(), - style("RELRO").underlined(), - style("Fortify").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {} {:<30} {} {} {:<7} {:<7} {}", - sev, - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - format_bool(f.canary.unwrap_or(false), 6), - format_bool(f.nx.unwrap_or(false), 3), - f.pie.as_deref().unwrap_or("-"), - f.relro.as_deref().unwrap_or("-"), - format_bool(f.fortify.unwrap_or(false), 7), - ); - } - } - Ok(()) -} - -fn render_capabilities_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {:<8} {:<9} {}", - style("Filename").underlined(), - style("Severity").underlined(), - style("Behaviors").underlined(), - style("Syscalls").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.level.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {:<30} {} {:<9} {}", - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - sev, - f.behaviors.len(), - f.syscalls.len(), - ); - } - } - Ok(()) -} - -/// Format a severity string with color and 
fixed-width padding. -fn format_severity(severity: &str, width: usize) -> String { - let padded = format!("{: style(padded).red().bold().to_string(), - "high" => style(padded).red().to_string(), - "medium" => style(padded).yellow().to_string(), - "low" => style(padded).green().to_string(), - _ => style(padded).dim().to_string(), - } -} - -/// Truncate a string to max chars, adding "..." if needed. -fn truncate_str(s: &str, max: usize) -> String { - if s.len() > max { - format!("{}...", &s[..max.saturating_sub(3)]) - } else { - format!("{: String { - if val { - style(format!("{: Result<()> { - eprintln!(); - eprintln!( - " {:<14} {:<20} {:<20} {:<8} {}", - style("Type").underlined(), - style("Filename").underlined(), - style("Path").underlined(), - style("Key Size").underlined(), - style("Aux").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let aux = if f.aux.is_empty() { - "-".to_string() - } else { - f.aux.join(", ") - }; - eprintln!( - " {:<14} {:<20} {:<20} {:<8} {}", - truncate_str(f.crypto_type.as_deref().unwrap_or("-"), 14), - truncate_str(f.filename.as_deref().unwrap_or("-"), 20), - truncate_str(f.parent.as_deref().unwrap_or("-"), 20), - f.pubsz.map(|s| s.to_string()).as_deref().unwrap_or("-"), - truncate_str(&aux, 30), - ); - } - } - Ok(()) -} - -fn render_sbom_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {:<14} {:<12} {}", - style("Name").underlined(), - style("Version").underlined(), - style("Type").underlined(), - style("Licenses").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let licenses = f - .licenses - .iter() - .filter_map(|l| { - l.get("license") - .and_then(|lic| lic.get("id").or_else(|| lic.get("name"))) - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) - }) - .collect::>() - .join(", "); - eprintln!( - " {:<30} {:<14} {:<12} {}", - truncate_str(f.name.as_deref().unwrap_or("-"), 30), 
- truncate_str(f.version.as_deref().unwrap_or("-"), 14), - f.component_type.as_deref().unwrap_or("-"), - if licenses.is_empty() { "-" } else { &licenses }, - ); - } - } - Ok(()) -} - -fn render_kernel_table(values: &[&serde_json::Value]) -> Result<()> { - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - if let Some(file) = &f.file { - eprintln!("\n {} {}", style("Kernel Config:").bold(), file); - } - if let Some(score) = f.score { - eprintln!(" Score: {}", score); - } - eprintln!(); - eprintln!( - " {:<40} {}", - style("Feature").underlined(), - style("Status").underlined(), - ); - for feat in &f.features { - eprintln!(" {:<40} {}", feat.name, format_bool(feat.enabled, 8),); + eprint!("\n{formatted}"); + let total_pages = results.total_findings.div_ceil(per_page as u64); + eprintln!( + "\n Page {}/{} ({} total) — use --page N to navigate\n", + page, total_pages, results.total_findings, + ); } } } Ok(()) } -fn render_symbols_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<40} {:<12} {}", - style("Name").underlined(), - style("Type").underlined(), - style("Bind").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<40} {:<12} {}", - truncate_str(f.symbol_name.as_deref().unwrap_or("-"), 40), - f.symbol_type.as_deref().unwrap_or("-"), - f.symbol_bind.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_tasks_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {}", - style("Name").underlined(), - style("Function").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<30} {}", - truncate_str(f.task_name.as_deref().unwrap_or("-"), 30), - f.task_fn.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_info(values: &[&serde_json::Value]) -> Result<()> { - for val in values { - eprintln!("\n{}", 
serde_json::to_string_pretty(val)?); - } - Ok(()) -} - // =========================================================================== // Compliance // =========================================================================== @@ -905,7 +587,7 @@ pub async fn run_compliance( Format::Json => { println!("{}", serde_json::to_string_pretty(&report)?); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { render_compliance_human(&report, ct); } } diff --git a/src/config.rs b/src/config.rs index 239af9d..45d32bf 100644 --- a/src/config.rs +++ b/src/config.rs @@ -18,10 +18,46 @@ const CONFIG_FILE_NAME: &str = "config.toml"; const DEFAULT_URL: &str = "https://analyzer.exein.io/api/"; const DEFAULT_PROFILE: &str = "default"; +/// Named environments with pre-configured API URLs. +/// +/// Use with `analyzer login --env dev` instead of remembering full URLs. +#[derive(Debug, Clone, Copy, clap::ValueEnum)] +pub enum Environment { + /// Development — https://analyzer.exein.dev/api/ + Dev, + /// Staging — https://analyzer.exein.live/api/ + Stage, + /// Production — https://analyzer.exein.io/api/ + Prod, +} + +impl Environment { + pub fn url(self) -> &'static str { + match self { + Self::Dev => "https://analyzer.exein.dev/api/", + Self::Stage => "https://analyzer.exein.live/api/", + Self::Prod => "https://analyzer.exein.io/api/", + } + } + + /// Detect which environment a URL belongs to, if any. + pub fn from_url(url: &str) -> Option { + if url.contains("exein.dev") { + Some(Self::Dev) + } else if url.contains("exein.live") { + Some(Self::Stage) + } else if url.contains("exein.io") { + Some(Self::Prod) + } else { + None + } + } +} + /// Resolved runtime configuration, ready to use. 
#[derive(Debug, Clone)] pub struct ResolvedConfig { - pub api_key: String, + pub api_key: Option, pub url: Url, #[allow(dead_code)] pub profile: String, @@ -129,26 +165,37 @@ pub fn resolve( .parse() .with_context(|| format!("invalid URL: {url_str}"))?; - // API key: flag > env > profile + // API key: flag > env > profile (optional for discovery-driven commands) let api_key = cli_api_key .map(String::from) .or_else(|| std::env::var("ANALYZER_API_KEY").ok()) .or_else(|| profile.api_key.clone()); - let api_key = match api_key { - Some(key) => key, - None => anyhow::bail!( - "no API key provided\n\n\ - Set it with one of:\n \ - analyzer login\n \ - analyzer --api-key ...\n \ - export ANALYZER_API_KEY=" - ), - }; - Ok(ResolvedConfig { api_key, url, profile: profile_name, }) } + +/// Resolve config for discovery-driven commands (`api`, `schema`). +/// +/// Discovery method paths already include the `api/` segment, so the base +/// URL must not contain it. This strips a trailing `/api/` (or `/api`) from +/// whichever URL the normal resolver produces. +pub fn resolve_for_discovery( + cli_api_key: Option<&str>, + cli_url: Option<&str>, + cli_profile: Option<&str>, +) -> Result { + let mut cfg = resolve(cli_api_key, cli_url, cli_profile)?; + let url_str = cfg.url.as_str(); + let trimmed = url_str + .strip_suffix("api/") + .or_else(|| url_str.strip_suffix("api")) + .unwrap_or(url_str); + cfg.url = trimmed + .parse() + .with_context(|| format!("invalid URL: {trimmed}"))?; + Ok(cfg) +} diff --git a/src/discovery.rs b/src/discovery.rs new file mode 100644 index 0000000..100b218 --- /dev/null +++ b/src/discovery.rs @@ -0,0 +1,384 @@ +//! Discovery Document models and loader. +//! +//! Reads a Google Discovery-style JSON document (produced by `openapi-to-discovery`) +//! and provides lookup helpers for resolving methods from the nested resource tree. 
+ +use std::collections::BTreeMap; +use std::path::PathBuf; +use std::time::Duration; + +use anyhow::{Context, Result, bail}; +use serde::{Deserialize, Serialize}; + +use crate::services::ServiceEntry; + +/// Top-level Discovery Document. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct DiscoveryDocument { + pub name: String, + pub version: String, + pub title: String, + pub description: Option, + pub root_url: String, + pub service_path: String, + #[serde(default)] + pub schemas: BTreeMap, + #[serde(default)] + pub resources: BTreeMap, +} + +/// A recursive resource node containing methods and child resources. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct DiscoveryResource { + #[serde(default)] + pub methods: BTreeMap, + #[serde(default)] + pub resources: BTreeMap, +} + +/// A single API method. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct DiscoveryMethod { + pub id: String, + pub http_method: String, + pub path: String, + pub description: Option, + #[serde(default)] + pub parameters: BTreeMap, + #[serde(default)] + pub parameter_order: Vec, + pub request: Option, + pub response: Option, + #[serde(default)] + pub scopes: Vec, +} + +/// Parameter definition. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DiscoveryParameter { + #[serde(rename = "type")] + pub param_type: String, + pub required: bool, + pub location: String, + pub description: Option, + pub format: Option, + #[serde(rename = "enum")] + pub enum_values: Option>, + pub default: Option, +} + +/// Schema reference (e.g. `{"$ref": "ScanScore"}`). +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SchemaRef { + #[serde(rename = "$ref")] + pub ref_name: String, +} + +/// Where the discovery document comes from. 
+pub enum DiscoverySource {
+    File(PathBuf),
+    Url(String),
+}
+
+/// Determine the discovery source from the `--discovery` flag or env var.
+pub fn resolve_source(flag: Option<&str>) -> Result<DiscoverySource> {
+    let value = flag
+        .map(String::from)
+        .or_else(|| std::env::var("ANALYZER_DISCOVERY_URL").ok());
+
+    match value {
+        Some(v) if v.starts_with("http://") || v.starts_with("https://") => {
+            Ok(DiscoverySource::Url(v))
+        }
+        Some(v) => Ok(DiscoverySource::File(PathBuf::from(v))),
+        None => bail!(
+            "no discovery document specified\n\n\
+            Provide one with:\n  \
+            analyzer --discovery  api ...\n  \
+            export ANALYZER_DISCOVERY_URL="
+        ),
+    }
+}
+
+/// Load and parse a discovery document from the resolved source.
+///
+/// Auto-detects whether the input is an OpenAPI spec (has `"openapi"` key) or
+/// an already-converted discovery document, and converts if necessary.
+pub async fn load(source: &DiscoverySource) -> Result<DiscoveryDocument> {
+    let json_str = match source {
+        DiscoverySource::File(path) => std::fs::read_to_string(path)
+            .with_context(|| format!("failed to read file: {}", path.display()))?,
+        DiscoverySource::Url(url) => {
+            let resp = reqwest::get(url)
+                .await
+                .with_context(|| format!("failed to fetch from {url}"))?;
+            let status = resp.status();
+            if !status.is_success() {
+                bail!("fetch {url} returned HTTP {status}");
+            }
+            resp.text().await?
+        }
+    };
+    parse_or_convert(&json_str)
+}
+
+/// Parse a JSON string as either a discovery document or an OpenAPI spec.
+///
+/// If the JSON contains an `"openapi"` key, it's treated as an OpenAPI spec
+/// and converted to discovery format. Otherwise it's parsed directly.
+fn parse_or_convert(json_str: &str) -> Result<DiscoveryDocument> {
+    let probe: serde_json::Value =
+        serde_json::from_str(json_str).context("input is not valid JSON")?;
+    if probe.get("openapi").is_some() {
+        let spec = openapi_to_discovery::parse_openapi_str(json_str)
+            .map_err(|e| anyhow::anyhow!("failed to parse OpenAPI spec: {e}"))?;
+        let doc = openapi_to_discovery::transform(&spec, None, None);
+        let discovery_json =
+            serde_json::to_string(&doc).context("failed to serialize discovery document")?;
+        serde_json::from_str(&discovery_json).context("failed to parse converted discovery doc")
+    } else {
+        serde_json::from_str(json_str).context("failed to parse discovery document")
+    }
+}
+
+/// Best-effort API key resolution for authenticated OpenAPI spec fetches.
+///
+/// Checks `ANALYZER_API_KEY` env var, then falls back to the default profile
+/// in the config file. Returns `None` if no key is configured (the fetch
+/// proceeds without auth — works for public endpoints or local dev).
+fn resolve_api_key() -> Option<String> {
+    std::env::var("ANALYZER_API_KEY").ok().or_else(|| {
+        crate::config::ConfigFile::load()
+            .ok()
+            .and_then(|cfg| cfg.profile(None).api_key.clone())
+    })
+}
+
+/// Convert an OpenAPI JSON string to discovery format JSON using `openapi-to-discovery`.
+fn convert_openapi_to_discovery_json(openapi_json: &str, entry: &ServiceEntry) -> Result<String> {
+    let spec = openapi_to_discovery::parse_openapi_str(openapi_json)
+        .map_err(|e| anyhow::anyhow!("failed to parse OpenAPI spec: {e}"))?;
+    let name_override = if entry.api_name.is_empty() {
+        None
+    } else {
+        Some(entry.api_name)
+    };
+    let version_override = if entry.version.is_empty() {
+        None
+    } else {
+        Some(entry.version)
+    };
+    let doc = openapi_to_discovery::transform(&spec, name_override, version_override);
+    serde_json::to_string(&doc).context("failed to serialize discovery document")
+}
+
+/// Write a discovery document to the service cache (best-effort).
+/// +/// Called when `--discovery` provides a local file so subsequent runs +/// without the flag pick up the fresh content. +/// +/// The cache filename includes the version (`{api_name}_{version}.json`), so a version bump in +/// `services.rs` creates a new entry and the old one becomes stale. +pub fn warm_cache(entry: &ServiceEntry, doc: &DiscoveryDocument) { + let cache_dir = dirs::cache_dir() + .unwrap_or_else(|| PathBuf::from(".cache")) + .join("analyzer"); + if std::fs::create_dir_all(&cache_dir).is_err() { + return; + } + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + if let Ok(json) = serde_json::to_string(doc) { + if let Err(e) = std::fs::write(&cache_file, json) { + eprintln!( + "warning: failed to warm cache {}: {e}", + cache_file.display() + ); + } + } +} + +/// Load discovery document for a registered service with 24h file cache. +/// +/// Cache location: `~/.cache/analyzer/{api_name}_{version}.json` +pub async fn load_for_service(entry: &ServiceEntry) -> Result { + let cache_dir = dirs::cache_dir() + .unwrap_or_else(|| PathBuf::from(".cache")) + .join("analyzer"); + load_for_service_with_cache(entry, &cache_dir).await +} + +/// Core logic for [`load_for_service`], factored out so tests can supply a +/// custom cache directory. 
+pub async fn load_for_service_with_cache( + entry: &ServiceEntry, + cache_dir: &std::path::Path, +) -> Result { + std::fs::create_dir_all(cache_dir) + .with_context(|| format!("failed to create cache dir {}", cache_dir.display()))?; + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + + // Check cache (24h TTL) + if let Ok(metadata) = std::fs::metadata(&cache_file) { + if let Ok(modified) = metadata.modified() { + if modified.elapsed().unwrap_or_default() < Duration::from_secs(86400) { + let json = std::fs::read_to_string(&cache_file) + .with_context(|| format!("failed to read cache {}", cache_file.display()))?; + return serde_json::from_str(&json) + .context("failed to parse cached discovery document"); + } + } + } + + // Fetch OpenAPI spec, convert to discovery format, cache the result. + // Falls back to stale cache on failure or non-200 response. + // The OpenAPI endpoint may require auth, so we attach a Bearer token + // if an API key is available from config/env. 
+ let fetch_result = async { + let mut req = reqwest::Client::new().get(entry.openapi_url); + if let Some(api_key) = resolve_api_key() { + req = req.bearer_auth(api_key); + } + let resp = req + .send() + .await + .with_context(|| format!("failed to fetch {}", entry.openapi_url))?; + let status = resp.status(); + if !status.is_success() { + bail!("fetch {} returned HTTP {status}", entry.openapi_url); + } + let openapi_json = resp.text().await.context("failed to read response body")?; + convert_openapi_to_discovery_json(&openapi_json, entry) + } + .await; + + let json = match fetch_result { + Ok(text) => { + // Write cache (best-effort) + if let Err(e) = std::fs::write(&cache_file, &text) { + eprintln!( + "warning: failed to write cache {}: {e}", + cache_file.display() + ); + } + text + } + Err(fetch_err) => { + if cache_file.exists() { + eprintln!("warning: {fetch_err} — using stale cache",); + std::fs::read_to_string(&cache_file).with_context(|| { + format!("failed to read stale cache {}", cache_file.display()) + })? + } else { + return Err(fetch_err); + } + } + }; + + serde_json::from_str(&json).context("failed to parse discovery document") +} + +/// Resolve a method by walking the resource tree with a path like `["scans", "score", "list"]`. +pub fn resolve_method<'a>( + resource: &'a DiscoveryResource, + segments: &[&str], +) -> Option<&'a DiscoveryMethod> { + match segments { + [] => None, + [method_name] => resource.methods.get(*method_name), + [resource_name, rest @ ..] => resource + .resources + .get(*resource_name) + .and_then(|child| resolve_method(child, rest)), + } +} + +/// Resolve a resource by walking the tree (for schema introspection of intermediate nodes). +pub fn resolve_resource<'a>( + resource: &'a DiscoveryResource, + segments: &[&str], +) -> Option<&'a DiscoveryResource> { + match segments { + [] => Some(resource), + [name, rest @ ..] 
=> resource + .resources + .get(*name) + .and_then(|child| resolve_resource(child, rest)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Minimal discovery doc fixture for unit tests (no external file needed). + const TEST_DISCOVERY: &str = r#"{ + "name": "test-api", + "version": "1.0.0", + "title": "Test", + "rootUrl": "", + "servicePath": "", + "schemas": { "Scan": { "id": "Scan" } }, + "resources": { + "api": { + "resources": { + "scans": { + "methods": { + "list": { "id": "test.scans.list", "httpMethod": "GET", "path": "api/scans" }, + "get": { "id": "test.scans.get", "httpMethod": "GET", "path": "api/scans/{id}" } + }, + "resources": { + "score": { + "methods": { + "list": { "id": "test.scans.score.list", "httpMethod": "GET", "path": "api/scans/{id}/score" } + } + } + } + } + } + } + } + }"#; + + fn test_doc() -> DiscoveryDocument { + serde_json::from_str(TEST_DISCOVERY).expect("test fixture should parse") + } + + #[test] + fn parses_discovery_document() { + let doc = test_doc(); + assert_eq!(doc.name, "test-api"); + assert!(!doc.resources.is_empty()); + assert!(!doc.schemas.is_empty()); + } + + #[test] + fn resolve_method_finds_nested() { + let doc = test_doc(); + let api = doc.resources.get("api").unwrap(); + let method = resolve_method(api, &["scans", "score", "list"]); + assert!(method.is_some()); + let m = method.unwrap(); + assert_eq!(m.http_method, "GET"); + assert!(m.path.contains("score")); + } + + #[test] + fn resolve_method_returns_none_for_bad_path() { + let doc = test_doc(); + let api = doc.resources.get("api").unwrap(); + assert!(resolve_method(api, &["nonexistent", "method"]).is_none()); + } + + #[test] + fn resolve_resource_finds_intermediate() { + let doc = test_doc(); + let api = doc.resources.get("api").unwrap(); + let scans = resolve_resource(api, &["scans"]); + assert!(scans.is_some()); + assert!(scans.unwrap().methods.contains_key("list")); + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 
0000000..0079495 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,10 @@ +//! Library re-exports for integration tests. +//! +//! The binary is in `main.rs`; this crate exposes modules needed by `tests/`. + +pub mod agent_api; +pub mod client; +pub mod config; +pub mod discovery; +pub mod output; +pub mod services; diff --git a/src/main.rs b/src/main.rs index a3ea2d3..758f654 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,21 +3,25 @@ //! Scan firmware and container images for vulnerabilities, generate SBOMs, //! check CRA compliance, and more. +mod agent_api; mod client; mod commands; mod config; +mod discovery; mod output; +mod services; use std::path::PathBuf; use std::process::ExitCode; use std::time::Duration; -use anyhow::Result; +use anyhow::{Context, Result, bail}; use clap::{Parser, Subcommand}; use uuid::Uuid; use crate::client::AnalyzerClient; use crate::client::models::{AnalysisType, ComplianceType}; +use crate::config::Environment; use crate::output::Format; /// Exein Analyzer CLI — firmware & container security scanning. @@ -50,6 +54,10 @@ struct Cli { #[arg(long, global = true, value_enum, default_value_t = Format::Human)] format: Format, + /// Path or URL to a discovery.json document for the `api` and `schema` subcommands. + #[arg(long, global = true, env = "ANALYZER_DISCOVERY_URL")] + discovery: Option, + #[command(subcommand)] command: Command, } @@ -59,8 +67,11 @@ enum Command { /// Authenticate and save your API key. Login { /// Server URL to authenticate against. - #[arg(long)] + #[arg(long, conflicts_with = "env")] url: Option, + /// Target environment (shortcut for --url). + #[arg(long, value_enum)] + env: Option, /// Profile name to save credentials under. #[arg(long)] profile: Option, @@ -87,9 +98,39 @@ enum Command { #[arg(value_enum)] shell: clap_complete::Shell, }, -} -// -- Config subcommands ------------------------------------------------------- + /// Discovery-driven API access — dynamically generated from a discovery document. 
+ /// + /// The first positional arg is the service name (e.g. "analyzer"). + /// Uses the service registry by default; --discovery overrides. + Api { + /// Service name followed by resource path and method + /// (e.g., analyzer scans list). + #[arg(trailing_var_arg = true, allow_hyphen_values = true, num_args = 0..)] + args: Vec, + }, + + /// Introspect method signatures from the discovery document. + /// + /// Path format: .. (e.g. "analyzer.scans.score.list"). + /// Uses the service registry by default; --discovery overrides. + Schema { + /// Dotted path: ... + path: String, + }, + + /// Generate skill files for all registered services. + /// + /// Fetches discovery documents from the service registry (or uses --discovery + /// override) and writes markdown skill files to `skills/`. + GenerateSkills, + + /// Install Claude Code agent integration globally. + /// + /// Generates skills, CONTEXT.md, CLAUDE.md, and settings.json in ~/.claude/ + /// so Claude Code automatically discovers the analyzer CLI in every project. + InitAgent, +} #[derive(Subcommand)] enum ConfigCommand { @@ -112,8 +153,6 @@ enum ConfigCommand { }, } -// -- Object subcommands ------------------------------------------------------- - #[derive(Subcommand)] enum ObjectCommand { /// List all objects. @@ -136,8 +175,6 @@ enum ObjectCommand { }, } -// -- Scan subcommands --------------------------------------------------------- - #[derive(Subcommand)] enum ScanCommand { /// Create a new scan. 
@@ -340,8 +377,6 @@ enum ScanCommand { }, } -// ============================================================================= - #[tokio::main] async fn main() -> ExitCode { let cli = Cli::parse(); @@ -355,18 +390,23 @@ async fn main() -> ExitCode { } async fn run(cli: Cli) -> Result<()> { - // Extract auth fields before moving cli.command + // Extract fields before moving cli.command let api_key = cli.api_key; let url = cli.url; let profile = cli.profile; let format = cli.format; + let discovery_flag = cli.discovery; match cli.command { // -- Auth (no API key required) ----------------------------------- Command::Login { url: login_url, + env: login_env, profile: login_profile, - } => commands::auth::run_login(login_url.as_deref(), login_profile.as_deref()).await, + } => { + let resolved_url = login_url.as_deref().or_else(|| login_env.map(|e| e.url())); + commands::auth::run_login(resolved_url, login_profile.as_deref()).await + } Command::Whoami => { commands::auth::run_whoami(api_key.as_deref(), url.as_deref(), profile.as_deref()) @@ -504,6 +544,100 @@ async fn run(cli: Cli) -> Result<()> { } } } + + // -- Discovery-driven commands (agent mode) ----------------------- + Command::Api { args } => { + // First arg is the service name + let (service_name, rest_args) = args + .split_first() + .context("usage: analyzer api [flags]")?; + + // --discovery flag overrides registry lookup + let doc = if let Some(flag) = discovery_flag.as_deref() { + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + let entry = services::resolve_service(service_name).with_context(|| { + format!( + "unknown service '{service_name}'\n\nAvailable: {}", + services::list_aliases().join(", ") + ) + })?; + discovery::load_for_service(entry).await? 
+ }; + + let api_cmd = agent_api::build_api_command(&doc); + let api_matches = match api_cmd.try_get_matches_from( + std::iter::once("api".to_string()).chain(rest_args.iter().cloned()), + ) { + Ok(m) => m, + Err(e) => { + // Let clap handle --help and --version display directly + e.exit(); + } + }; + // Client creation is deferred — dispatch will call make_client only + // if the request isn't a dry-run. + agent_api::dispatch( + &doc, + &api_matches, + api_key.as_deref(), + url.as_deref(), + profile.as_deref(), + format, + ) + .await + } + + Command::Schema { path } => { + // path = "analyzer.scans.create" — first segment is service name + let segments: Vec<&str> = path.splitn(2, '.').collect(); + let (service_name, rest_path) = match segments.as_slice() { + [svc, rest] => (*svc, *rest), + _ => bail!("path must be .."), + }; + + let doc = if let Some(flag) = discovery_flag.as_deref() { + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + let entry = services::resolve_service(service_name).with_context(|| { + format!( + "unknown service '{service_name}'\n\nAvailable: {}", + services::list_aliases().join(", ") + ) + })?; + discovery::load_for_service(entry).await? + }; + + // rest_path = "scans.create" — pass with "api." prefix for the resource tree + agent_api::schema::handle_schema_command(&doc, &format!("api.{rest_path}")) + } + + Command::GenerateSkills => { + let skills_dir = std::path::Path::new("skills"); + println!( + "Generating skills for {} service(s)...", + services::SERVICES.len() + ); + for entry in services::SERVICES { + println!("\n Service: {} ({})", entry.aliases[0], entry.api_name); + let doc = if let Some(flag) = discovery_flag.as_deref() { + // --discovery provided: use it (single service mode for dev) + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + discovery::load_for_service(entry).await? 
+ }; + agent_api::generate_skills::generate_for_service(&doc, entry, skills_dir)?; + agent_api::generate_skills::generate_shared(entry, skills_dir)?; + } + agent_api::generate_skills::write_skills_index(skills_dir)?; + println!("\nDone."); + Ok(()) + } + + Command::InitAgent => commands::init_agent::run_init_agent(discovery_flag.as_deref()).await, } } @@ -513,5 +647,14 @@ fn make_client( profile: Option<&str>, ) -> Result { let cfg = config::resolve(api_key, url, profile)?; - AnalyzerClient::new(cfg.url, &cfg.api_key) + let api_key = cfg.api_key.ok_or_else(|| { + anyhow::anyhow!( + "no API key provided\n\n\ + Set it with one of:\n \ + analyzer login\n \ + analyzer --api-key ...\n \ + export ANALYZER_API_KEY=" + ) + })?; + AnalyzerClient::new(cfg.url, &api_key) } diff --git a/src/output.rs b/src/output.rs index 83316bc..c9fe9a1 100644 --- a/src/output.rs +++ b/src/output.rs @@ -1,7 +1,11 @@ -//! Output formatting: human (colored), JSON, and table modes. +//! Output formatting: human (colored), JSON, table, and CSV modes. +//! +//! Provides a generic `format_value` function that auto-detects arrays, +//! flattens nested objects into dot-notation columns, and renders tables +//! or CSV from arbitrary JSON responses. No per-analysis-type renderers +//! needed — the formatter handles any shape from the discovery API. -use console::style; -use owo_colors::OwoColorize; +use serde_json::Value; /// Output format selected by the user. #[derive(Debug, Clone, Copy, Default, clap::ValueEnum)] @@ -13,46 +17,491 @@ pub enum Format { Json, /// ASCII table output. Table, + /// CSV output for export. + Csv, } -/// Print a success message to stderr. -pub fn success(msg: &str) { - eprintln!(" {} {msg}", style("OK").green().bold()); +mod status { + use console::style; + use owo_colors::OwoColorize; + + /// Print a success message to stderr. + pub fn success(msg: &str) { + eprintln!(" {} {msg}", style("OK").green().bold()); + } + + /// Print a warning message to stderr. 
+ pub fn warning(msg: &str) { + eprintln!(" {} {msg}", style("WARN").yellow().bold()); + } + + /// Print an error message to stderr. + pub fn error(msg: &str) { + eprintln!(" {} {msg}", style("ERR").red().bold()); + } + + /// Print a labelled status line to stderr. + pub fn status(label: &str, msg: &str) { + eprintln!("{} {msg}", style(format!("{label:>12}")).cyan().bold()); + } + + /// Format a score with colour coding. + pub fn format_score(score: Option) -> String { + match score { + Some(s) if s >= 80 => format!("{}", s.to_string().green()), + Some(s) if s >= 50 => format!("{}", s.to_string().yellow()), + Some(s) => format!("{}", s.to_string().red()), + None => style("--").dim().to_string(), + } + } + + /// Format an analysis status string with colour. + pub fn format_status(status: &str) -> String { + match status { + "success" => style(status).green().to_string(), + "pending" => style(status).dim().to_string(), + "in-progress" => style(status).cyan().to_string(), + "canceled" => style(status).yellow().to_string(), + "error" => style(status).red().to_string(), + other => other.to_string(), + } + } } -/// Print a warning message to stderr. -pub fn warning(msg: &str) { - eprintln!(" {} {msg}", style("WARN").yellow().bold()); +pub use status::*; + +/// Format a JSON value according to the selected output format. +/// +/// For `Table` and `Csv`: auto-detects arrays (including nested under a wrapper +/// key), flattens objects into dot-notation columns, and renders. +/// For `Json`: pretty-prints. +/// For `Human`: falls back to pretty JSON (callers may override with custom +/// rendering before calling this). +pub fn format_value(value: &Value, format: &Format) -> String { + match format { + Format::Json | Format::Human => serde_json::to_string_pretty(value).unwrap_or_default(), + Format::Table => table::format(value), + Format::Csv => csv::format(value), + } } -/// Print an error message to stderr. 
-pub fn error(msg: &str) { - eprintln!(" {} {msg}", style("ERR").red().bold()); +/// Format a paginated response. For NDJSON (Json), emits compact one-line JSON. +/// For Table/Csv, `is_first_page` controls whether headers are emitted. +pub fn format_value_paginated(value: &Value, format: &Format, is_first_page: bool) -> String { + match format { + Format::Json | Format::Human => { + // NDJSON: compact, one object per line + serde_json::to_string(value).unwrap_or_default() + } + Format::Table => table::format_page(value, is_first_page), + Format::Csv => csv::format_page(value, is_first_page), + } } -/// Print a labelled status line to stderr. -pub fn status(label: &str, msg: &str) { - eprintln!("{} {msg}", style(format!("{label:>12}")).cyan().bold()); +mod items { + use serde_json::Value; + use std::collections::BTreeMap; + + /// Extract items from a JSON value. If it's an array, use directly. + /// If it's an object with an array field (skipping metadata keys), extract that. + /// Otherwise wrap the single value as a one-element array. + pub fn extract(value: &Value) -> Vec<&Value> { + match value { + Value::Array(arr) => arr.iter().collect(), + Value::Object(map) => { + let skip_keys = [ + "nextPageToken", + "kind", + "total-findings", + "total_findings", + "_links", + "_embedded", + ]; + for (key, val) in map { + if skip_keys.contains(&key.as_str()) || key.starts_with('_') { + continue; + } + if let Value::Array(arr) = val { + if !arr.is_empty() { + return arr.iter().collect(); + } + } + } + vec![value] + } + _ => vec![value], + } + } + + /// Collect column names preserving insertion order from the first item, + /// then adding any extra keys from subsequent items. 
+ pub fn collect_columns(items: &[&Value]) -> Vec { + let mut seen = std::collections::HashSet::new(); + let mut columns = Vec::new(); + + for item in items { + let flat = flatten(item); + for key in flat.keys() { + if seen.insert(key.clone()) { + columns.push(key.clone()); + } + } + } + columns + } + + /// Flatten a JSON value into a string map with dot-notation keys. + /// `{"user": {"name": "Alice"}, "id": 1}` becomes `{"user.name": "Alice", "id": "1"}` + pub fn flatten(value: &Value) -> BTreeMap { + let mut map = BTreeMap::new(); + flatten_recursive(value, String::new(), &mut map); + map + } + + fn flatten_recursive(value: &Value, prefix: String, map: &mut BTreeMap) { + match value { + Value::Object(obj) => { + for (k, v) in obj { + let key = if prefix.is_empty() { + k.clone() + } else { + format!("{prefix}.{k}") + }; + flatten_recursive(v, key, map); + } + } + Value::Array(arr) => { + let cells: Vec = arr.iter().map(value_to_cell).collect(); + map.insert(prefix, cells.join(", ")); + } + _ => { + map.insert(prefix, value_to_cell(value)); + } + } + } + + /// Convert a JSON value to a display string. 
+ pub fn value_to_cell(value: &Value) -> String { + match value { + Value::Null => String::new(), + Value::String(s) => s.clone(), + Value::Bool(b) => b.to_string(), + Value::Number(n) => n.to_string(), + Value::Array(arr) => { + let cells: Vec = arr.iter().map(value_to_cell).collect(); + cells.join(", ") + } + Value::Object(_) => serde_json::to_string(value).unwrap_or_default(), + } + } +} + +mod table { + use super::items; + use serde_json::Value; + use std::fmt::Write as FmtWrite; + + const MAX_COL_WIDTH: usize = 50; + + pub fn format(value: &Value) -> String { + format_page(value, true) + } + + pub fn format_page(value: &Value, include_header: bool) -> String { + let extracted = items::extract(value); + + if extracted.is_empty() { + if include_header { + return "(no data)\n".to_string(); + } + return String::new(); + } + + let columns = items::collect_columns(&extracted); + let rows: Vec<_> = extracted.iter().map(|v| items::flatten(v)).collect(); + + let widths: Vec = columns + .iter() + .map(|col| { + let header_w = col.chars().count(); + let data_w = rows + .iter() + .map(|r| r.get(col).map(|v| v.chars().count()).unwrap_or(0)) + .max() + .unwrap_or(0); + header_w.max(data_w).min(MAX_COL_WIDTH) + }) + .collect(); + + let mut out = String::new(); + + if include_header { + let header: Vec = columns + .iter() + .zip(&widths) + .map(|(col, &w)| truncate_to_width(col, w)) + .collect(); + writeln!(out, " {}", header.join(" ")).unwrap(); + + let sep: Vec = widths.iter().map(|&w| "─".repeat(w)).collect(); + writeln!(out, " {}", sep.join(" ")).unwrap(); + } + + for row in &rows { + let cells: Vec = columns + .iter() + .zip(&widths) + .map(|(col, &w)| { + let val = row.get(col).map(|s| s.as_str()).unwrap_or(""); + truncate_to_width(val, w) + }) + .collect(); + writeln!(out, " {}", cells.join(" ")).unwrap(); + } + + out + } + + fn truncate_to_width(s: &str, width: usize) -> String { + let char_count = s.chars().count(); + if char_count <= width { + format!("{: 1 { + let 
truncated: String = s.chars().take(width - 1).collect(); + format!("{truncated}…") + } else { + "…".to_string() + } + } } -/// Format a score with colour coding. -pub fn format_score(score: Option) -> String { - match score { - Some(s) if s >= 80 => format!("{}", s.to_string().green()), - Some(s) if s >= 50 => format!("{}", s.to_string().yellow()), - Some(s) => format!("{}", s.to_string().red()), - None => style("--").dim().to_string(), +mod csv { + use super::items; + use serde_json::Value; + use std::fmt::Write as FmtWrite; + + pub fn format(value: &Value) -> String { + format_page(value, true) + } + + pub fn format_page(value: &Value, include_header: bool) -> String { + let extracted = items::extract(value); + + if extracted.is_empty() { + return String::new(); + } + + let columns = items::collect_columns(&extracted); + let rows: Vec<_> = extracted.iter().map(|v| items::flatten(v)).collect(); + + let mut out = String::new(); + + if include_header { + let header: Vec = columns.iter().map(|c| escape(c)).collect(); + writeln!(out, "{}", header.join(",")).unwrap(); + } + + for row in &rows { + let cells: Vec = columns + .iter() + .map(|col| { + let val = row.get(col).map(|s| s.as_str()).unwrap_or(""); + escape(val) + }) + .collect(); + writeln!(out, "{}", cells.join(",")).unwrap(); + } + + out + } + + fn escape(s: &str) -> String { + if s.contains(',') || s.contains('"') || s.contains('\n') { + format!("\"{}\"", s.replace('"', "\"\"")) + } else { + s.to_string() + } } } -/// Format an analysis status string with colour. 
-pub fn format_status(status: &str) -> String { - match status { - "success" => style(status).green().to_string(), - "pending" => style(status).dim().to_string(), - "in-progress" => style(status).cyan().to_string(), - "canceled" => style(status).yellow().to_string(), - "error" => style(status).red().to_string(), - other => other.to_string(), +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + use test_case::test_case; + + // -- format_value: all formats preserve data -- + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn format_value_array_contains_data(fmt: Format) { + let val = json!([ + {"id": "1", "name": "alice"}, + {"id": "2", "name": "bob"} + ]); + let out = format_value(&val, &fmt); + assert!(out.contains("alice"), "missing 'alice' in {fmt:?}"); + assert!(out.contains("bob"), "missing 'bob' in {fmt:?}"); + } + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn format_value_single_object(fmt: Format) { + let val = json!({"status": "success", "score": 85}); + let out = format_value(&val, &fmt); + assert!(out.contains("success"), "missing 'success' in {fmt:?}"); + assert!(out.contains("85"), "missing '85' in {fmt:?}"); + } + + // -- nested flattening: Table and Csv flatten, Json/Human keep nested -- + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn nested_objects_are_flattened(fmt: Format) { + let val = json!([{"user": {"name": "Alice"}, "score": 42}]); + let out = format_value(&val, &fmt); + assert!(out.contains("user.name"), "missing dot-notation in {fmt:?}"); + assert!(out.contains("Alice")); + assert!(out.contains("42")); + } + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + fn nested_objects_preserved_in_json(fmt: Format) { + let val = json!([{"user": {"name": 
"Alice"}}]); + let out = format_value(&val, &fmt); + // Json/Human keep the nested structure, no dot-notation + assert!(out.contains("\"name\": \"Alice\"")); + assert!(!out.contains("user.name")); + } + + // -- wrapper extraction: Table and Csv extract arrays from response wrappers -- + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn extracts_array_from_wrapper(fmt: Format) { + let val = json!({ + "total-findings": 100, + "findings": [{"id": "a"}, {"id": "b"}], + "_links": {"next": {"href": "http://..."}} + }); + let out = format_value(&val, &fmt); + assert!(out.contains("a"), "missing item 'a' in {fmt:?}"); + assert!(out.contains("b"), "missing item 'b' in {fmt:?}"); + assert!( + !out.contains("total-findings"), + "metadata leaked in {fmt:?}" + ); + assert!(!out.contains("_links"), "metadata leaked in {fmt:?}"); + } + + // -- table-specific formatting -- + + #[test] + fn table_has_header_and_separator() { + let val = json!([{"id": "1", "name": "alice"}]); + let out = format_value(&val, &Format::Table); + assert!(out.contains("id")); + assert!(out.contains("name")); + assert!(out.contains("──")); + } + + #[test] + fn table_truncates_long_values() { + let long = "x".repeat(100); + let val = json!([{"col": long}]); + let out = format_value(&val, &Format::Table); + assert!(out.contains('…'), "long value should be truncated"); + } + + // -- csv-specific formatting -- + + #[test] + fn csv_has_header_row_and_data_rows() { + let val = json!([ + {"id": "1", "name": "alice"}, + {"id": "2", "name": "bob"} + ]); + let out = format_value(&val, &Format::Csv); + let lines: Vec<&str> = out.lines().collect(); + assert_eq!(lines[0], "id,name"); + assert_eq!(lines[1], "1,alice"); + assert_eq!(lines[2], "2,bob"); + } + + #[test] + fn csv_escapes_special_characters() { + let val = json!([{"a": "hello,world"}, {"a": "say \"hi\""}]); + let out = format_value(&val, &Format::Csv); + let lines: Vec<&str> = out.lines().collect(); + assert_eq!(lines[1], 
"\"hello,world\""); + assert_eq!(lines[2], "\"say \"\"hi\"\"\""); + } + + // -- pagination: header behaviour across all formats -- + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn paginated_first_page_includes_header(fmt: Format) { + let val = json!([{"id": "1", "name": "a"}]); + let first = format_value_paginated(&val, &fmt, true); + assert!(first.contains("id"), "first page missing header in {fmt:?}"); + assert!( + first.contains("name"), + "first page missing header in {fmt:?}" + ); + } + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn paginated_subsequent_pages_skip_header(fmt: Format) { + let val = json!([{"id": "1", "name": "a"}]); + let first = format_value_paginated(&val, &fmt, true); + let second = format_value_paginated(&val, &fmt, false); + // Data present in both pages + assert!(second.contains("1"), "data missing on page 2 in {fmt:?}"); + // Header line count differs + let first_lines = first.lines().count(); + let second_lines = second.lines().count(); + assert!( + first_lines > second_lines, + "page 2 should have fewer lines (no header) in {fmt:?}" + ); + } + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + fn paginated_json_is_compact_ndjson(fmt: Format) { + let val = json!({"id": "1", "name": "test"}); + let out = format_value_paginated(&val, &fmt, true); + assert!( + !out.contains('\n'), + "NDJSON should be single-line in {fmt:?}" + ); + } + + // -- empty input -- + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn empty_array_does_not_panic(fmt: Format) { + let val = json!([]); + let _ = format_value(&val, &fmt); // should not panic + } + + #[test] + fn empty_array_table_shows_no_data() { + assert!(format_value(&json!([]), &Format::Table).contains("(no data)")); + } + + #[test] + fn empty_array_csv_is_empty() { + 
assert!(format_value(&json!([]), &Format::Csv).is_empty()); } } diff --git a/src/services.rs b/src/services.rs new file mode 100644 index 0000000..e03a237 --- /dev/null +++ b/src/services.rs @@ -0,0 +1,66 @@ +/// Compile-time service registry. +/// +/// Each entry maps one or more CLI aliases to an API name, version, +/// and the URL where its OpenAPI spec can be fetched. The CLI converts +/// the OpenAPI spec to Discovery format in-process using `openapi-to-discovery`. +pub struct ServiceEntry { + pub aliases: &'static [&'static str], + pub api_name: &'static str, + pub version: &'static str, + pub openapi_url: &'static str, + pub description: &'static str, +} + +pub const SERVICES: &[ServiceEntry] = &[ + ServiceEntry { + aliases: &["analyzer"], + api_name: "analyzer-api-discovery", + version: "0.5.0", + openapi_url: "https://analyzer.exein.dev/api-doc/openapi.json", + description: "Firmware and software image security analysis", + }, + // Future entries: + // ServiceEntry { + // aliases: &["isaac"], + // api_name: "isaac-api", + // version: "1.0.0", + // openapi_url: "https://analyzer.exein.dev/isaac/api-doc/openapi.json", + // description: "Device identity and attestation", + // }, +]; + +/// Resolve a service alias to its registry entry. +pub fn resolve_service(name: &str) -> Option<&'static ServiceEntry> { + SERVICES.iter().find(|s| s.aliases.contains(&name)) +} + +/// List all registered aliases (for help text and error messages). 
+pub fn list_aliases() -> Vec<&'static str> { + SERVICES + .iter() + .flat_map(|s| s.aliases.iter().copied()) + .collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn resolve_known_service() { + let entry = resolve_service("analyzer").expect("should resolve"); + assert_eq!(entry.api_name, "analyzer-api-discovery"); + assert!(entry.openapi_url.contains("openapi.json")); + } + + #[test] + fn resolve_unknown_returns_none() { + assert!(resolve_service("nonexistent").is_none()); + } + + #[test] + fn list_aliases_includes_analyzer() { + let aliases = list_aliases(); + assert!(aliases.contains(&"analyzer")); + } +} diff --git a/tests/api_executor.rs b/tests/api_executor.rs new file mode 100644 index 0000000..88ceb7c --- /dev/null +++ b/tests/api_executor.rs @@ -0,0 +1,369 @@ +//! Integration tests for the discovery-driven API executor. +//! +//! Uses wiremock to mock HTTP responses and validates the full execution path: +//! path param substitution, pagination (link-based + offset-based), --fields, +//! and --dry-run. 
+
+use serde_json::json;
+use wiremock::matchers::{method, path, query_param, query_param_is_missing};
+use wiremock::{Mock, MockServer, ResponseTemplate};
+
+use analyzer_cli::agent_api::executor::{PaginationConfig, execute_method};
+use analyzer_cli::client::AnalyzerClient;
+use analyzer_cli::discovery::{DiscoveryMethod, DiscoveryParameter};
+use analyzer_cli::output::Format;
+
+// NOTE(review): `execute_method` is called positionally throughout:
+// (client, method, params-JSON, <4th arg — always None here, confirm against
+// the executor signature>, fields, pagination config, dry-run flag, format).
+
+/// Build an `AnalyzerClient` pointed at the wiremock server, with a dummy key.
+fn test_client(server: &MockServer) -> AnalyzerClient {
+    let url: url::Url = server.uri().parse().expect("valid mock server URL");
+    AnalyzerClient::new(url, "test-key").expect("client creation")
+}
+
+/// Minimal `DiscoveryMethod` with no parameters, for the given verb and path.
+fn test_method(http_method: &str, api_path: &str) -> DiscoveryMethod {
+    DiscoveryMethod {
+        id: "test.method".to_string(),
+        http_method: http_method.to_string(),
+        path: api_path.to_string(),
+        description: Some("Test method".to_string()),
+        parameters: std::collections::BTreeMap::new(),
+        parameter_order: vec![],
+        request: None,
+        response: None,
+        scopes: vec![],
+    }
+}
+
+/// GET method with a single required path parameter named `id`.
+fn test_method_with_path_param(api_path: &str) -> DiscoveryMethod {
+    let mut params = std::collections::BTreeMap::new();
+    params.insert(
+        "id".to_string(),
+        DiscoveryParameter {
+            param_type: "string".to_string(),
+            required: true,
+            location: "path".to_string(),
+            description: None,
+            format: None,
+            enum_values: None,
+            default: None,
+        },
+    );
+    DiscoveryMethod {
+        id: "test.method".to_string(),
+        http_method: "GET".to_string(),
+        path: api_path.to_string(),
+        description: None,
+        parameters: params,
+        parameter_order: vec!["id".to_string()],
+        request: None,
+        response: None,
+        scopes: vec![],
+    }
+}
+
+/// GET method with a required `scan_id` path parameter plus optional
+/// `page`/`per-page` integer query parameters (offset-based pagination).
+fn test_method_with_pagination(api_path: &str) -> DiscoveryMethod {
+    let mut params = std::collections::BTreeMap::new();
+    params.insert(
+        "scan_id".to_string(),
+        DiscoveryParameter {
+            param_type: "string".to_string(),
+            required: true,
+            location: "path".to_string(),
+            description: None,
+            format: None,
+            enum_values: None,
+            default: None,
+        },
+    );
+    for name in ["page", "per-page"] {
+        params.insert(
+            name.to_string(),
+            DiscoveryParameter {
+                param_type: "integer".to_string(),
+                required: false,
+                location: "query".to_string(),
+                description: None,
+                format: None,
+                enum_values: None,
+                default: None,
+            },
+        );
+    }
+    DiscoveryMethod {
+        id: "test.results.get".to_string(),
+        http_method: "GET".to_string(),
+        path: api_path.to_string(),
+        description: None,
+        parameters: params,
+        parameter_order: vec!["scan_id".to_string()],
+        request: None,
+        response: None,
+        scopes: vec![],
+    }
+}
+
+// Path-parameter substitution: `{id}` in the method path is replaced from the
+// supplied params JSON, and missing required params are rejected.
+mod path_params {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_substitute_in_url() {
+        let server = MockServer::start().await;
+        // The mock only matches the fully substituted path, so `.expect(1)`
+        // proves the substitution happened.
+        Mock::given(method("GET"))
+            .and(path("/api/scans/scan-abc/score"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({"score": 72})))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method_with_path_param("api/scans/{id}/score"),
+            Some(r#"{"id": "scan-abc"}"#),
+            None,
+            None,
+            &PaginationConfig::default(),
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+
+    #[tokio::test]
+    async fn should_error_on_missing_required() {
+        let server = MockServer::start().await;
+        let client = test_client(&server);
+        // No params JSON supplied, so the required `id` cannot be resolved.
+        let result = execute_method(
+            Some(&client),
+            &test_method_with_path_param("api/scans/{id}"),
+            None,
+            None,
+            None,
+            &PaginationConfig::default(),
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_err());
+        // Error message should name the missing parameter.
+        assert!(result.unwrap_err().to_string().contains("id"));
+    }
+}
+
+// --dry-run: the executor succeeds without sending any HTTP request.
+mod dry_run {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_not_make_requests() {
+        let server = MockServer::start().await;
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method("GET", "api/scans"),
+            None,
+            None,
+            Some("id,name"),
+            &PaginationConfig {
+                page_all: true,
+                page_limit: 5,
+                page_delay_ms: 0,
+            },
+            true,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+
+        // The mock server must have seen zero requests.
+        assert_eq!(server.received_requests().await.unwrap().len(), 0);
+    }
+}
+
+// --fields: executing with a field filter completes successfully against a
+// response containing extra keys.
+mod fields {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_apply_filter_to_response() {
+        let server = MockServer::start().await;
+        Mock::given(method("GET"))
+            .and(path("/api/objects"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "id": "obj-1", "name": "Router FW", "description": "Long text", "score": 85
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method("GET", "api/objects"),
+            None,
+            None,
+            Some("id,name"),
+            &PaginationConfig::default(),
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+}
+
+// Link-based pagination: the executor follows `_links.next.href` URLs.
+mod pagination_link_based {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_follow_links_href_across_pages() {
+        let server = MockServer::start().await;
+
+        // Page 1: no `page` query param; advertises a `next` link.
+        Mock::given(method("GET"))
+            .and(path("/api/scans"))
+            .and(query_param_is_missing("page"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "data": [{"id": "s-1"}],
+                "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}}
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        // Page 2: empty `_links` terminates pagination.
+        Mock::given(method("GET"))
+            .and(path("/api/scans"))
+            .and(query_param("page", "2"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "data": [{"id": "s-2"}], "_links": {}
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method("GET", "api/scans"),
+            None,
+            None,
+            None,
+            &PaginationConfig {
+                page_all: true,
+                page_limit: 10,
+                page_delay_ms: 0,
+            },
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+
+    #[tokio::test]
+    async fn should_respect_page_limit() {
+        let server = MockServer::start().await;
+
+        // The response always advertises a next page; `page_limit: 1` plus
+        // `.expect(1)` proves the executor stops after one fetch.
+        Mock::given(method("GET"))
+            .and(path("/api/scans"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "data": [{"id": "s-1"}],
+                "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}}
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method("GET", "api/scans"),
+            None,
+            None,
+            None,
+            &PaginationConfig {
+                page_all: true,
+                page_limit: 1,
+                page_delay_ms: 0,
+            },
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+}
+
+// Offset-based pagination: the executor increments the `page` query param
+// until the item count reaches `total-findings`.
+mod pagination_offset_based {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_increment_page_param() {
+        let server = MockServer::start().await;
+
+        // total-findings (50) exceeds one page of 25, so page 2 is fetched.
+        Mock::given(method("GET"))
+            .and(path("/api/scans/s-1/results"))
+            .and(query_param("page", "1"))
+            .and(query_param("per-page", "25"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "findings": [{"id": "cve-1"}], "total-findings": 50
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        Mock::given(method("GET"))
+            .and(path("/api/scans/s-1/results"))
+            .and(query_param("page", "2"))
+            .and(query_param("per-page", "25"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "findings": [{"id": "cve-2"}], "total-findings": 50
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method_with_pagination("api/scans/{scan_id}/results"),
+            Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#),
+            None,
+            None,
+            &PaginationConfig {
+                page_all: true,
+                page_limit: 10,
+                page_delay_ms: 0,
+            },
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+
+    #[tokio::test]
+    async fn should_stop_on_last_page() {
+        let server = MockServer::start().await;
+
+        // total-findings (10) fits in one page of 25; `.expect(1)` proves no
+        // second request is made.
+        Mock::given(method("GET"))
+            .and(path("/api/scans/s-1/results"))
+            .respond_with(ResponseTemplate::new(200).set_body_json(json!({
+                "findings": [{"id": "cve-1"}], "total-findings": 10
+            })))
+            .expect(1)
+            .mount(&server)
+            .await;
+
+        let client = test_client(&server);
+        let result = execute_method(
+            Some(&client),
+            &test_method_with_pagination("api/scans/{scan_id}/results"),
+            Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#),
+            None,
+            None,
+            &PaginationConfig {
+                page_all: true,
+                page_limit: 10,
+                page_delay_ms: 0,
+            },
+            false,
+            Format::Json,
+        )
+        .await;
+        assert!(result.is_ok());
+    }
+}
diff --git a/tests/discovery_cache.rs b/tests/discovery_cache.rs
new file mode 100644
index 0000000..938130c
--- /dev/null
+++ b/tests/discovery_cache.rs
@@ -0,0 +1,164 @@
+//! Integration tests for discovery document cache fallback.
+//!
+//! Verifies that `load_for_service_with_cache` falls back to a stale (expired)
+//! cached discovery document when the network fetch fails or returns a non-200
+//! status, and that it errors correctly when no cache exists at all.
+
+use std::time::{Duration, SystemTime};
+
+use analyzer_cli::discovery::load_for_service_with_cache;
+use analyzer_cli::services::ServiceEntry;
+use wiremock::{Mock, MockServer, ResponseTemplate};
+
+/// Minimal valid discovery document used as a cache fixture.
+const MINIMAL_DISCOVERY: &str = r#"{
+    "name": "test-api",
+    "version": "1.0.0",
+    "title": "Test API",
+    "rootUrl": "https://example.com/",
+    "servicePath": "api/",
+    "schemas": {},
+    "resources": {}
+}"#;
+
+/// A service entry whose discovery URL will never resolve (port 1 is
+/// unreachable on localhost), so every fetch attempt fails quickly.
+fn unreachable_service() -> ServiceEntry {
+    ServiceEntry {
+        aliases: &["test"],
+        api_name: "test-api",
+        version: "1.0.0",
+        openapi_url: "http://127.0.0.1:1/never-reachable.json",
+        description: "test service",
+    }
+}
+
+/// Helper: write a cache file and backdate its mtime so it looks expired.
+fn write_stale_cache(cache_dir: &std::path::Path, entry: &ServiceEntry) { + std::fs::create_dir_all(cache_dir).unwrap(); + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + std::fs::write(&cache_file, MINIMAL_DISCOVERY).unwrap(); + + // Set mtime to 25 hours ago so it exceeds the 24h TTL. + let stale_time = SystemTime::now() - Duration::from_secs(25 * 3600); + filetime::FileTime::from_system_time(stale_time); + let ft = filetime::FileTime::from_system_time(stale_time); + filetime::set_file_mtime(&cache_file, ft).unwrap(); +} + +mod stale_cache_fallback { + use super::*; + + #[tokio::test] + async fn uses_stale_cache_when_fetch_fails() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + write_stale_cache(&cache_dir, &entry); + + let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(doc.is_ok(), "should fall back to stale cache: {doc:?}"); + let doc = doc.unwrap(); + assert_eq!(doc.name, "test-api"); + assert_eq!(doc.version, "1.0.0"); + } + + #[tokio::test] + async fn errors_when_no_cache_and_fetch_fails() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + // No cache file written — should fail with fetch error. + let result = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(result.is_err()); + let err_msg = format!("{:#}", result.unwrap_err()); + assert!( + err_msg.contains("failed to fetch"), + "error should mention fetch failure: {err_msg}" + ); + } +} + +mod non_200_response { + use super::*; + + /// Helper: create a service entry pointing at a wiremock server. + fn service_for_mock(url: &str) -> ServiceEntry { + // Leak the string so we get a &'static str for the ServiceEntry. 
+ let leaked: &'static str = Box::leak(url.to_string().into_boxed_str()); + ServiceEntry { + aliases: &["test"], + api_name: "test-api", + version: "1.0.0", + openapi_url: leaked, + description: "test service", + } + } + + #[tokio::test] + async fn falls_back_to_stale_cache_on_404() { + let server = MockServer::start().await; + Mock::given(wiremock::matchers::any()) + .respond_with(ResponseTemplate::new(404).set_body_string("Not Found")) + .mount(&server) + .await; + + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = service_for_mock(&format!("{}/discovery.json", server.uri())); + + write_stale_cache(&cache_dir, &entry); + + let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!( + doc.is_ok(), + "should fall back to stale cache on 404: {doc:?}" + ); + assert_eq!(doc.unwrap().name, "test-api"); + } + + #[tokio::test] + async fn errors_on_500_without_cache() { + let server = MockServer::start().await; + Mock::given(wiremock::matchers::any()) + .respond_with(ResponseTemplate::new(500).set_body_string("Internal Server Error")) + .mount(&server) + .await; + + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = service_for_mock(&format!("{}/discovery.json", server.uri())); + + let result = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(result.is_err()); + let err_msg = format!("{:#}", result.unwrap_err()); + assert!( + err_msg.contains("HTTP"), + "error should mention HTTP status: {err_msg}" + ); + } +} + +mod fresh_cache { + use super::*; + + #[tokio::test] + async fn returns_fresh_cache_without_fetching() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + // Write a cache file with current mtime (fresh, within 24h TTL). 
+ std::fs::create_dir_all(&cache_dir).unwrap(); + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + std::fs::write(&cache_file, MINIMAL_DISCOVERY).unwrap(); + + // Even though the URL is unreachable, fresh cache should be returned + // without attempting a fetch. + let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(doc.is_ok(), "fresh cache should be used: {doc:?}"); + assert_eq!(doc.unwrap().name, "test-api"); + } +}