From 2f9b92f5ac951eaa5e7b61d1d1bd6a921e4d85db Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Mon, 27 Apr 2026 13:18:22 +0800 Subject: [PATCH 01/15] =?UTF-8?q?feat(stage7):=20phase=202=20=E2=80=94=20O?= =?UTF-8?q?IDC=20issuer=20in=20Rust=20broker=20+=20provisioner-scripts=20A?= =?UTF-8?q?WS-cred=20wiring?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 2 of Stage 7 (Generalized OIDC Provider). Two slices: OIDC issuer absorption (replaces TS services/oidc-stub): - crates/agentkeys-broker-server/src/oidc.rs — ES256 P-256 keypair generation, on-disk persistence (mode 0600 at ~/.agentkeys/broker/oidc-keypair.json), JWK serialization, JWT signing. - New broker routes: GET /.well-known/openid-configuration, GET /.well-known/jwks.json, POST /v1/mint-oidc-jwt (bearer-gated against the backend's /session/validate). JWT mints land in the same audit log as mint-aws-creds with requested_role=oidc_jwt. - New env vars: BROKER_OIDC_ISSUER, BROKER_OIDC_KEYPAIR_PATH, BROKER_OIDC_JWT_TTL_SECONDS (default 300, bounded [60, 3600]). - TS services/oidc-stub deleted — Rust broker now owns the surface. Provisioner-scripts AWS-cred wiring (replaces stage6-demo-env.sh sourcing): - crates/agentkeys-provisioner/src/aws_creds.rs — fetch_via_broker helper + AwsTempCreds.to_env() rendering AWS_ACCESS_KEY_ID/SECRET_ACCESS_KEY/ SESSION_TOKEN (+ AWS_REGION when set). - CLI: --broker-url / AGENTKEYS_BROKER_URL flag on agentkeys; cmd_provision fetches creds via the broker before spawning the scraper subprocess. - MCP: McpHandler::with_broker_url builder + run_stdio_with_broker entry point; daemon threads its existing --broker-url through automatically. - When --broker-url is unset the legacy stage6-demo-env.sh sourcing path still works — wiring is purely additive. Tests: broker integration (mint_flow + oidc_flow), MCP broker-env injection, provisioner aws_creds unit + stub-server tests, existing unit suite. cargo clippy --no-deps clean. 
Still deferred (Phase 2 federation step): public TLS hosting of \$BROKER_OIDC_ISSUER so AWS IAM accepts create-open-id-connect-provider; TEE-derived signer at oidc/issuer/v1 (heima-gaps §3). Recipe preserved in docs/stage7-wip.md. Co-Authored-By: Claude Opus 4.7 (1M context) --- Cargo.lock | 193 +- crates/agentkeys-broker-server/Cargo.toml | 7 + crates/agentkeys-broker-server/src/config.rs | 38 + .../src/handlers/mod.rs | 1 + .../src/handlers/oidc.rs | 135 + crates/agentkeys-broker-server/src/lib.rs | 7 + crates/agentkeys-broker-server/src/main.rs | 11 + crates/agentkeys-broker-server/src/oidc.rs | 290 ++ crates/agentkeys-broker-server/src/state.rs | 2 + .../tests/mint_flow.rs | 13 + .../tests/oidc_flow.rs | 261 ++ crates/agentkeys-cli/src/lib.rs | 43 +- crates/agentkeys-cli/src/main.rs | 10 +- crates/agentkeys-daemon/src/main.rs | 8 +- crates/agentkeys-mcp/Cargo.toml | 1 + crates/agentkeys-mcp/src/lib.rs | 138 +- crates/agentkeys-mcp/src/server.rs | 13 +- crates/agentkeys-provisioner/Cargo.toml | 2 + crates/agentkeys-provisioner/src/aws_creds.rs | 199 ++ crates/agentkeys-provisioner/src/lib.rs | 2 + docs/dev-setup.md | 8 +- docs/operator-runbook.md | 18 +- docs/spec/plans/development-stages.md | 9 +- docs/stage7-wip.md | 96 +- services/oidc-stub/.gitignore | 5 - services/oidc-stub/README.md | 92 - services/oidc-stub/package-lock.json | 2921 ----------------- services/oidc-stub/package.json | 23 - services/oidc-stub/src/keys.ts | 108 - services/oidc-stub/src/server.ts | 93 - services/oidc-stub/tests/server.test.ts | 170 - services/oidc-stub/tsconfig.json | 14 - services/oidc-stub/vitest.config.ts | 8 - 33 files changed, 1446 insertions(+), 3493 deletions(-) create mode 100644 crates/agentkeys-broker-server/src/handlers/oidc.rs create mode 100644 crates/agentkeys-broker-server/src/oidc.rs create mode 100644 crates/agentkeys-broker-server/tests/oidc_flow.rs create mode 100644 crates/agentkeys-provisioner/src/aws_creds.rs delete mode 100644 
services/oidc-stub/.gitignore delete mode 100644 services/oidc-stub/README.md delete mode 100644 services/oidc-stub/package-lock.json delete mode 100644 services/oidc-stub/package.json delete mode 100644 services/oidc-stub/src/keys.ts delete mode 100644 services/oidc-stub/src/server.ts delete mode 100644 services/oidc-stub/tests/server.test.ts delete mode 100644 services/oidc-stub/tsconfig.json delete mode 100644 services/oidc-stub/vitest.config.ts diff --git a/Cargo.lock b/Cargo.lock index 2dfcda8..ecedda8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,14 +26,21 @@ dependencies = [ "aws-credential-types", "aws-sdk-sts", "axum", + "base64", "clap", + "getrandom 0.2.17", "hex", "http-body-util", + "jsonwebtoken", + "p256 0.13.2", + "pkcs8 0.10.2", + "rand_core", "reqwest", "rusqlite", "serde", "serde_json", "sha2 0.10.9", + "tempfile", "thiserror", "tokio", "tower 0.4.13", @@ -119,6 +126,7 @@ dependencies = [ "agentkeys-types", "anyhow", "async-trait", + "axum", "serde", "serde_json", "tokio", @@ -161,6 +169,8 @@ dependencies = [ "agentkeys-types", "anyhow", "async-trait", + "axum", + "reqwest", "serde", "serde_json", "tempfile", @@ -629,7 +639,7 @@ dependencies = [ "hmac 0.13.0", "http 0.2.12", "http 1.4.0", - "p256", + "p256 0.11.1", "percent-encoding", "ring", "sha2 0.11.0", @@ -893,6 +903,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.22.1" @@ -1226,8 +1242,10 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ + "generic-array", "rand_core", "subtle", + "zeroize", ] [[package]] @@ 
-1302,6 +1320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid 0.9.6", + "pem-rfc7468", "zeroize", ] @@ -1338,6 +1357,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid 0.9.6", "crypto-common 0.1.7", "subtle", ] @@ -1378,11 +1398,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der 0.6.1", - "elliptic-curve", - "rfc6979", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", "signature 1.6.4", ] +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der 0.7.10", + "digest 0.10.7", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -1420,16 +1454,36 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", + "base16ct 0.1.1", "crypto-bigint 0.4.9", "der 0.6.1", "digest 0.10.7", - "ff", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "pkcs8 0.9.0", "rand_core", - "sec1", + "sec1 0.3.0", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.5", + "digest 0.10.7", + "ff 0.13.1", + "generic-array", + "group 0.13.0", + "pem-rfc7468", + "pkcs8 0.10.2", + "rand_core", + 
"sec1 0.7.3", "subtle", "zeroize", ] @@ -1555,6 +1609,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -1715,6 +1779,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -1724,8 +1789,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -1759,7 +1826,18 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.1", "rand_core", "subtle", ] @@ -2293,6 +2371,21 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "keyring" version = "2.3.3" @@ -2642,8 +2735,20 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 
0.12.3", + "sha2 0.10.9", +] + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", "sha2 0.10.9", ] @@ -2676,6 +2781,25 @@ dependencies = [ "windows-link", ] +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64", + "serde_core", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -2845,6 +2969,15 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve 0.13.8", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -3012,6 +3145,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.17.14" @@ -3201,7 +3344,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", + "base16ct 0.1.1", "der 0.6.1", "generic-array", "pkcs8 0.9.0", @@ -3209,6 +3352,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sec1" +version = "0.7.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.10", + "generic-array", + "pkcs8 0.10.2", + "subtle", + "zeroize", +] + [[package]] name = "secret-service" version = "3.1.0" @@ -3421,9 +3578,22 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ + "digest 0.10.7", "rand_core", ] +[[package]] +name = "simple_asn1" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d585997b0ac10be3c5ee635f1bab02d512760d14b7c468801ac8a01d9ae5f1d" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time", +] + [[package]] name = "slab" version = "0.4.12" @@ -3618,6 +3788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", + "itoa", "num-conv", "powerfmt", "serde_core", diff --git a/crates/agentkeys-broker-server/Cargo.toml b/crates/agentkeys-broker-server/Cargo.toml index 8b1cedc..3f5e3d1 100644 --- a/crates/agentkeys-broker-server/Cargo.toml +++ b/crates/agentkeys-broker-server/Cargo.toml @@ -30,6 +30,12 @@ hex = "0.4" aws-config = { version = "1", features = ["behavior-version-latest"] } aws-credential-types = "1" aws-sdk-sts = "1" +jsonwebtoken = "9" +p256 = { version = "0.13", features = ["pkcs8", "pem", "ecdsa"] } +pkcs8 = { version = "0.10", features = ["pem"] } +base64 = "0.22" +rand_core = { version = "0.6", features = ["std"] } +getrandom = "0.2" [features] default = [] @@ -40,3 +46,4 @@ agentkeys-broker-server = { path = ".", features = ["test-stub"] } agentkeys-mock-server = { path = "../agentkeys-mock-server" } tower = { version = "0.4", features = ["util"] } http-body-util = "0.1" +tempfile = "3" diff --git 
a/crates/agentkeys-broker-server/src/config.rs b/crates/agentkeys-broker-server/src/config.rs index ebab6f6..433bd8e 100644 --- a/crates/agentkeys-broker-server/src/config.rs +++ b/crates/agentkeys-broker-server/src/config.rs @@ -15,6 +15,18 @@ pub struct BrokerConfig { /// Hard cap on graceful-shutdown drain time. After SIGTERM, in-flight /// requests get this many seconds before the process exits anyway. pub shutdown_grace_seconds: u64, + /// Public URL the broker advertises as the OIDC issuer (`iss` claim, + /// discovery doc `issuer` field, `jwks_uri` prefix). AWS IAM + /// `create-open-id-connect-provider` requires this to be a stable HTTPS + /// URL in production; localhost HTTP works for local dev. + pub oidc_issuer: String, + /// Path to the persisted ES256 keypair (mode 0600). Defaults to + /// `~/.agentkeys/broker/oidc-keypair.json`. + pub oidc_keypair_path: PathBuf, + /// Time-to-live (seconds) for minted OIDC JWTs. AWS STS requires the + /// token to be valid at the moment of exchange but no longer than the + /// role's max session duration; 300s mirrors the TS oidc-stub default. 
+ pub oidc_jwt_ttl_seconds: u64, } impl BrokerConfig { @@ -96,6 +108,29 @@ impl BrokerConfig { Err(_) => 30, }; + let oidc_issuer = std::env::var("BROKER_OIDC_ISSUER") + .unwrap_or_else(|_| "https://oidc.agentkeys.dev".to_string()); + let oidc_keypair_path = std::env::var("BROKER_OIDC_KEYPAIR_PATH") + .ok() + .map(PathBuf::from) + .unwrap_or_else(crate::oidc::OidcKeypair::default_path); + let oidc_jwt_ttl_seconds = match std::env::var("BROKER_OIDC_JWT_TTL_SECONDS") { + Ok(s) => s.parse::<u64>().map_err(|e| { + anyhow::anyhow!( + "BROKER_OIDC_JWT_TTL_SECONDS={:?} could not be parsed: {}", + s, + e + ) + })?, + Err(_) => 300, + }; + if !(60..=3_600).contains(&oidc_jwt_ttl_seconds) { + anyhow::bail!( + "BROKER_OIDC_JWT_TTL_SECONDS must be between 60 and 3600, got {}", + oidc_jwt_ttl_seconds + ); + } + Ok(Self { daemon_access_key_id, daemon_secret_access_key, @@ -106,6 +141,9 @@ impl BrokerConfig { session_duration_seconds, backend_request_timeout_seconds, shutdown_grace_seconds, + oidc_issuer, + oidc_keypair_path, + oidc_jwt_ttl_seconds, }) } } diff --git a/crates/agentkeys-broker-server/src/handlers/mod.rs b/crates/agentkeys-broker-server/src/handlers/mod.rs index 3aa9653..990c9c8 100644 --- a/crates/agentkeys-broker-server/src/handlers/mod.rs +++ b/crates/agentkeys-broker-server/src/handlers/mod.rs @@ -1,2 +1,3 @@ pub mod health; pub mod mint; +pub mod oidc; diff --git a/crates/agentkeys-broker-server/src/handlers/oidc.rs b/crates/agentkeys-broker-server/src/handlers/oidc.rs new file mode 100644 index 0000000..db9e913 --- /dev/null +++ b/crates/agentkeys-broker-server/src/handlers/oidc.rs @@ -0,0 +1,135 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +use axum::{ + extract::State, + http::HeaderMap, + response::IntoResponse, + Json, +}; +use serde_json::json; + +use crate::audit::{MintOutcome, MintRecord}; +use crate::auth::{extract_bearer_token, validate_bearer_token}; +use crate::error::{BrokerError, BrokerResult}; +use crate::state::SharedState; + +/// `GET 
/.well-known/openid-configuration` — OIDC discovery doc. +/// +/// Shaped to satisfy AWS IAM `create-open-id-connect-provider` and the +/// `sts:AssumeRoleWithWebIdentity` exchange. Mirrors the TS oidc-stub the +/// broker is replacing so existing test recipes keep working. +pub async fn discovery(State(state): State<SharedState>) -> impl IntoResponse { + let issuer = &state.config.oidc_issuer; + Json(json!({ + "issuer": issuer, + "jwks_uri": format!("{}/.well-known/jwks.json", issuer), + "response_types_supported": ["id_token"], + "subject_types_supported": ["public"], + "id_token_signing_alg_values_supported": ["ES256"], + "scopes_supported": ["openid"], + "token_endpoint_auth_methods_supported": ["none"], + "claims_supported": [ + "iss", + "sub", + "aud", + "iat", + "exp", + "nbf", + "agentkeys_attested_at", + "agentkeys_enclave_tier", + "agentkeys_child_wallet", + "agentkeys_grant_id", + "agentkeys_operation", + "agentkeys_user_wallet", + ], + })) +} + +/// `GET /.well-known/jwks.json` — JWK Set with our ES256 public key. +pub async fn jwks(State(state): State<SharedState>) -> impl IntoResponse { + Json(state.oidc.jwks_json()) +} + +#[derive(serde::Serialize)] +pub struct MintOidcJwtResponse { + pub jwt: String, + pub wallet: String, + pub expiration: i64, +} + +/// `POST /v1/mint-oidc-jwt` — bearer-token in (validated against the session +/// backend), short-lived ES256 JWT out, suitable for `sts:AssumeRoleWithWebIdentity`. +/// +/// Audited via the existing mint-audit log with a `requested_role=oidc_jwt` marker so +/// operators see one ledger for AWS-cred mints and OIDC-JWT mints. 
+#[tracing::instrument(skip_all, fields(wallet = tracing::field::Empty, outcome = tracing::field::Empty))] +pub async fn mint_oidc_jwt( + State(state): State<SharedState>, + headers: HeaderMap, +) -> BrokerResult<Json<MintOidcJwtResponse>> { + let token = headers + .get("authorization") + .and_then(|v| v.to_str().ok()) + .and_then(extract_bearer_token) + .ok_or_else(|| BrokerError::Unauthorized("missing Authorization header".into()))?; + + let session = match validate_bearer_token(&state.http, &state.config.backend_url, token).await { + Ok(s) => s, + Err(e) => { + let outcome = match &e { + BrokerError::Unauthorized(_) => MintOutcome::AuthFailed, + _ => MintOutcome::BackendError, + }; + let _ = state.audit.record_mint( + MintRecord { + requester_token: token, + requester_wallet: "unknown", + requested_role: "oidc_jwt", + session_duration_seconds: state.config.oidc_jwt_ttl_seconds as i32, + sts_session_name: "(unauthenticated)", + outcome, + }, + Some(&e.to_string()), + ); + return Err(e); + } + }; + + tracing::Span::current().record("wallet", session.wallet.as_str()); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs() as i64) + .unwrap_or(0); + let exp = now + state.config.oidc_jwt_ttl_seconds as i64; + + let claims = json!({ + "iss": state.config.oidc_issuer, + "sub": format!("agentkeys:agent:{}", session.wallet), + "aud": "sts.amazonaws.com", + "iat": now, + "exp": exp, + "agentkeys_user_wallet": session.wallet, + }); + + let jwt = state.oidc.sign_jwt(&claims)?; + + state.audit.record_mint( + MintRecord { + requester_token: token, + requester_wallet: &session.wallet, + requested_role: "oidc_jwt", + session_duration_seconds: state.config.oidc_jwt_ttl_seconds as i32, + sts_session_name: &state.oidc.kid, + outcome: MintOutcome::Ok, + }, + None, + )?; + tracing::Span::current().record("outcome", "ok"); + + Ok(Json(MintOidcJwtResponse { + jwt, + wallet: session.wallet, + expiration: exp, + })) +} diff --git a/crates/agentkeys-broker-server/src/lib.rs 
b/crates/agentkeys-broker-server/src/lib.rs index 0789c92..47bca81 100644 --- a/crates/agentkeys-broker-server/src/lib.rs +++ b/crates/agentkeys-broker-server/src/lib.rs @@ -3,6 +3,7 @@ pub mod auth; pub mod config; pub mod error; pub mod handlers; +pub mod oidc; pub mod state; pub mod sts; @@ -15,5 +16,11 @@ pub fn create_router(state: SharedState) -> Router { .route("/healthz", get(handlers::health::healthz)) .route("/readyz", get(handlers::health::readyz)) .route("/v1/mint-aws-creds", post(handlers::mint::mint_aws_creds)) + .route( + "/.well-known/openid-configuration", + get(handlers::oidc::discovery), + ) + .route("/.well-known/jwks.json", get(handlers::oidc::jwks)) + .route("/v1/mint-oidc-jwt", post(handlers::oidc::mint_oidc_jwt)) .with_state(state) } diff --git a/crates/agentkeys-broker-server/src/main.rs b/crates/agentkeys-broker-server/src/main.rs index 2e0f459..4d6eaaf 100644 --- a/crates/agentkeys-broker-server/src/main.rs +++ b/crates/agentkeys-broker-server/src/main.rs @@ -5,6 +5,7 @@ use agentkeys_broker_server::{ audit::AuditLog, config::BrokerConfig, create_router, + oidc::OidcKeypair, state::AppState, sts::{AwsStsClient, StsClient}, }; @@ -68,11 +69,21 @@ async fn main() -> anyhow::Result<()> { let grace_seconds = config.shutdown_grace_seconds; + let oidc = OidcKeypair::load_or_generate(&config.oidc_keypair_path) + .map_err(|e| anyhow::anyhow!("load OIDC keypair: {}", e))?; + tracing::info!( + kid = %oidc.kid, + issuer = %config.oidc_issuer, + path = %config.oidc_keypair_path.display(), + "OIDC signer ready" + ); + let state = Arc::new(AppState { config, http, audit, sts: Arc::new(sts), + oidc: Arc::new(oidc), }); let app = create_router(state); diff --git a/crates/agentkeys-broker-server/src/oidc.rs b/crates/agentkeys-broker-server/src/oidc.rs new file mode 100644 index 0000000..0ce5134 --- /dev/null +++ b/crates/agentkeys-broker-server/src/oidc.rs @@ -0,0 +1,290 @@ +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use 
base64::engine::general_purpose::URL_SAFE_NO_PAD; +use base64::Engine; +use jsonwebtoken::{encode, Algorithm, EncodingKey, Header}; +use p256::ecdsa::SigningKey; +use p256::pkcs8::{DecodePrivateKey, EncodePrivateKey, LineEnding}; +use serde::{Deserialize, Serialize}; + +use crate::error::{BrokerError, BrokerResult}; + +/// Persisted on-disk shape (mode 0600). Keeping the kid + PEM lets us add +/// rotation later (multiple kids in JWKS) without changing the file format. +#[derive(Serialize, Deserialize)] +struct PersistedKeypair { + kid: String, + private_key_pem: String, +} + +/// In-memory ES256 signing keypair plus the public-key components needed to +/// emit a JWK and a `kid` for JWT headers. +pub struct OidcKeypair { + pub kid: String, + pub private_key_pem: String, + /// base64url(no-pad)-encoded affine X coordinate (P-256, 32 bytes raw). + pub public_x_b64: String, + /// base64url(no-pad)-encoded affine Y coordinate. + pub public_y_b64: String, +} + +impl OidcKeypair { + /// Generate a fresh ES256 keypair and persist it at `path` (mode 0600 on Unix). + pub fn generate_and_persist(path: &Path) -> BrokerResult<Self> { + let signing_key = SigningKey::random(&mut rand_core_compat::OsRngWrapper); + let verifying_key = signing_key.verifying_key(); + + let private_key_pem = signing_key + .to_pkcs8_pem(LineEnding::LF) + .map_err(|e| BrokerError::Internal(format!("encode pkcs8 pem: {e}")))? 
+ .to_string(); + + let kid = format!( + "v1-{}", + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0) + ); + + let encoded_point = verifying_key.to_encoded_point(false); + let x_bytes = encoded_point + .x() + .ok_or_else(|| BrokerError::Internal("verifying key missing X coordinate".into()))?; + let y_bytes = encoded_point + .y() + .ok_or_else(|| BrokerError::Internal("verifying key missing Y coordinate".into()))?; + + let public_x_b64 = URL_SAFE_NO_PAD.encode(x_bytes); + let public_y_b64 = URL_SAFE_NO_PAD.encode(y_bytes); + + let persisted = PersistedKeypair { + kid: kid.clone(), + private_key_pem: private_key_pem.clone(), + }; + + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + .map_err(|e| BrokerError::Internal(format!("create dir {parent:?}: {e}")))?; + } + let json = serde_json::to_string_pretty(&persisted) + .map_err(|e| BrokerError::Internal(format!("serialize keypair: {e}")))?; + std::fs::write(path, json) + .map_err(|e| BrokerError::Internal(format!("write keypair {path:?}: {e}")))?; + set_owner_only(path)?; + + Ok(Self { + kid, + private_key_pem, + public_x_b64, + public_y_b64, + }) + } + + /// Load an already-persisted keypair from `path`. 
+ pub fn load(path: &Path) -> BrokerResult<Self> { + let raw = std::fs::read_to_string(path) + .map_err(|e| BrokerError::Internal(format!("read keypair {path:?}: {e}")))?; + let persisted: PersistedKeypair = serde_json::from_str(&raw) + .map_err(|e| BrokerError::Internal(format!("parse keypair {path:?}: {e}")))?; + + let signing_key = SigningKey::from_pkcs8_pem(&persisted.private_key_pem) + .map_err(|e| BrokerError::Internal(format!("decode pkcs8 pem: {e}")))?; + let verifying_key = signing_key.verifying_key(); + let encoded_point = verifying_key.to_encoded_point(false); + let x_bytes = encoded_point + .x() + .ok_or_else(|| BrokerError::Internal("verifying key missing X coordinate".into()))?; + let y_bytes = encoded_point + .y() + .ok_or_else(|| BrokerError::Internal("verifying key missing Y coordinate".into()))?; + + Ok(Self { + kid: persisted.kid, + private_key_pem: persisted.private_key_pem, + public_x_b64: URL_SAFE_NO_PAD.encode(x_bytes), + public_y_b64: URL_SAFE_NO_PAD.encode(y_bytes), + }) + } + + /// Load if the file exists, otherwise generate and persist. The dev-only + /// path the broker uses at startup before a TEE-derived key is wired in. + pub fn load_or_generate(path: &Path) -> BrokerResult<Self> { + if path.exists() { + Self::load(path) + } else { + Self::generate_and_persist(path) + } + } + + /// Default on-disk location: `~/.agentkeys/broker/oidc-keypair.json`. + pub fn default_path() -> PathBuf { + let home = std::env::var("HOME").unwrap_or_else(|_| ".".to_string()); + PathBuf::from(home) + .join(".agentkeys") + .join("broker") + .join("oidc-keypair.json") + } + + /// Return the JWK Set body that `/.well-known/jwks.json` serves. + pub fn jwks_json(&self) -> serde_json::Value { + serde_json::json!({ + "keys": [{ + "kty": "EC", + "crv": "P-256", + "x": self.public_x_b64, + "y": self.public_y_b64, + "kid": self.kid, + "alg": "ES256", + "use": "sig", + }] + }) + } + + /// Sign `claims` (a JSON object) into a compact JWS (ES256, with our kid). 
+ pub fn sign_jwt(&self, claims: &serde_json::Value) -> BrokerResult<String> { + let key = EncodingKey::from_ec_pem(self.private_key_pem.as_bytes()) + .map_err(|e| BrokerError::Internal(format!("load signing key: {e}")))?; + let mut header = Header::new(Algorithm::ES256); + header.kid = Some(self.kid.clone()); + encode(&header, claims, &key) + .map_err(|e| BrokerError::Internal(format!("sign jwt: {e}"))) + } +} + +#[cfg(unix)] +fn set_owner_only(path: &Path) -> BrokerResult<()> { + use std::os::unix::fs::PermissionsExt; + let mut perms = std::fs::metadata(path) + .map_err(|e| BrokerError::Internal(format!("metadata {path:?}: {e}")))? + .permissions(); + perms.set_mode(0o600); + std::fs::set_permissions(path, perms) + .map_err(|e| BrokerError::Internal(format!("chmod {path:?}: {e}")))?; + Ok(()) +} + +#[cfg(not(unix))] +fn set_owner_only(_path: &Path) -> BrokerResult<()> { + // On non-Unix, file ACLs aren't 0600-shaped. The README warns operators + // to run the broker on Linux; we don't fail startup on Windows just to + // make CI green. + Ok(()) +} + +/// Bridges `rand_core 0.6` (what `p256` 0.13 expects) to the system OS RNG. 
+mod rand_core_compat { + pub struct OsRngWrapper; + + impl rand_core::CryptoRng for OsRngWrapper {} + + impl rand_core::RngCore for OsRngWrapper { + fn next_u32(&mut self) -> u32 { + let mut b = [0u8; 4]; + self.fill_bytes(&mut b); + u32::from_le_bytes(b) + } + fn next_u64(&mut self) -> u64 { + let mut b = [0u8; 8]; + self.fill_bytes(&mut b); + u64::from_le_bytes(b) + } + fn fill_bytes(&mut self, dest: &mut [u8]) { + getrandom::getrandom(dest).expect("OS RNG failed"); + } + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand_core::Error> { + getrandom::getrandom(dest).map_err(|_| rand_core::Error::from(core::num::NonZeroU32::new(1).unwrap())) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use jsonwebtoken::{decode, DecodingKey, Validation}; + use tempfile::TempDir; + + #[test] + fn generate_and_load_round_trip() { + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("kp.json"); + + let kp1 = OidcKeypair::generate_and_persist(&path).unwrap(); + assert!(path.exists()); + assert!(!kp1.kid.is_empty()); + assert_eq!(URL_SAFE_NO_PAD.decode(&kp1.public_x_b64).unwrap().len(), 32); + assert_eq!(URL_SAFE_NO_PAD.decode(&kp1.public_y_b64).unwrap().len(), 32); + + let kp2 = OidcKeypair::load(&path).unwrap(); + assert_eq!(kp1.kid, kp2.kid); + assert_eq!(kp1.public_x_b64, kp2.public_x_b64); + assert_eq!(kp1.public_y_b64, kp2.public_y_b64); + } + + #[test] + fn load_or_generate_creates_then_reuses() { + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("kp.json"); + + let kp1 = OidcKeypair::load_or_generate(&path).unwrap(); + let kp2 = OidcKeypair::load_or_generate(&path).unwrap(); + assert_eq!(kp1.kid, kp2.kid, "second call must reuse the persisted keypair"); + } + + #[test] + fn jwks_shape_matches_aws_oidc_expectations() { + let tmp = TempDir::new().unwrap(); + let kp = OidcKeypair::generate_and_persist(&tmp.path().join("kp.json")).unwrap(); + let jwks = kp.jwks_json(); + let key = &jwks["keys"][0]; + assert_eq!(key["kty"], 
"EC"); + assert_eq!(key["crv"], "P-256"); + assert_eq!(key["alg"], "ES256"); + assert_eq!(key["use"], "sig"); + assert_eq!(key["kid"], kp.kid); + assert!(key["x"].is_string()); + assert!(key["y"].is_string()); + } + + #[test] + fn sign_jwt_round_trips_via_public_key() { + let tmp = TempDir::new().unwrap(); + let kp = OidcKeypair::generate_and_persist(&tmp.path().join("kp.json")).unwrap(); + + let claims = serde_json::json!({ + "iss": "https://oidc.agentkeys.dev", + "sub": "agentkeys:agent:0xabc", + "aud": "sts.amazonaws.com", + "exp": 9_999_999_999_u64, + "iat": 1_000_000_000_u64, + "agentkeys_user_wallet": "0xabc", + }); + let jwt = kp.sign_jwt(&claims).unwrap(); + assert_eq!(jwt.matches('.').count(), 2); + + // Verify with the public components we'd serve over the wire. + let decoding_key = + DecodingKey::from_ec_components(&kp.public_x_b64, &kp.public_y_b64).unwrap(); + let mut validation = Validation::new(Algorithm::ES256); + validation.set_audience(&["sts.amazonaws.com"]); + validation.set_issuer(&["https://oidc.agentkeys.dev"]); + + let token_data: jsonwebtoken::TokenData<serde_json::Value> = + decode(&jwt, &decoding_key, &validation).expect("public-key verify"); + assert_eq!(token_data.header.alg, Algorithm::ES256); + assert_eq!(token_data.header.kid.as_deref(), Some(kp.kid.as_str())); + assert_eq!(token_data.claims["agentkeys_user_wallet"], "0xabc"); + } + + #[cfg(unix)] + #[test] + fn persisted_file_is_owner_only() { + use std::os::unix::fs::PermissionsExt; + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("kp.json"); + OidcKeypair::generate_and_persist(&path).unwrap(); + let mode = std::fs::metadata(&path).unwrap().permissions().mode(); + assert_eq!(mode & 0o777, 0o600, "expected 0600, got {:o}", mode & 0o777); + } +} diff --git a/crates/agentkeys-broker-server/src/state.rs b/crates/agentkeys-broker-server/src/state.rs index fe7602f..63ec078 100644 --- a/crates/agentkeys-broker-server/src/state.rs +++ b/crates/agentkeys-broker-server/src/state.rs @@ -2,6 
+2,7 @@ use std::sync::Arc;
 
 use crate::audit::AuditLog;
 use crate::config::BrokerConfig;
+use crate::oidc::OidcKeypair;
 use crate::sts::StsClient;
 
 pub struct AppState {
@@ -9,6 +10,7 @@ pub struct AppState {
     pub http: reqwest::Client,
     pub audit: AuditLog,
     pub sts: Arc<dyn StsClient>,
+    pub oidc: Arc<OidcKeypair>,
 }
 
 pub type SharedState = Arc<AppState>;
diff --git a/crates/agentkeys-broker-server/tests/mint_flow.rs b/crates/agentkeys-broker-server/tests/mint_flow.rs
index 41ce0ed..fe52f46 100644
--- a/crates/agentkeys-broker-server/tests/mint_flow.rs
+++ b/crates/agentkeys-broker-server/tests/mint_flow.rs
@@ -10,9 +10,11 @@ use std::sync::Arc;
 use agentkeys_broker_server::audit::{hash_token, AuditLog};
 use agentkeys_broker_server::config::BrokerConfig;
 use agentkeys_broker_server::create_router;
+use agentkeys_broker_server::oidc::OidcKeypair;
 use agentkeys_broker_server::state::AppState;
 use agentkeys_broker_server::sts::{AssumedCredentials, StsClient, StubStsClient};
 use serde_json::Value;
+use tempfile::TempDir;
 
 const STUB_ROLE_ARN: &str = "arn:aws:iam::000000000000:role/agentkeys-agent";
 
@@ -43,6 +45,13 @@ async fn spawn_broker_with_sts(
     backend_url: String,
     sts: Arc<dyn StsClient>,
 ) -> (String, Arc<AppState>) {
+    // The TempDir is intentionally leaked (Box::leak) to obtain a 'static
+    // borrow, so the keypair file outlives the tokio task spawned below;
+    // integration tests are short-lived and the OS cleans /tmp on reboot.
+ let tmp = Box::leak(Box::new(TempDir::new().unwrap())); + let oidc = + OidcKeypair::generate_and_persist(&tmp.path().join("oidc-keypair.json")).unwrap(); + let config = BrokerConfig { daemon_access_key_id: "AKIA-fake".into(), daemon_secret_access_key: "fake-secret".into(), @@ -53,6 +62,9 @@ async fn spawn_broker_with_sts( session_duration_seconds: 3600, backend_request_timeout_seconds: 5, shutdown_grace_seconds: 5, + oidc_issuer: "https://oidc.test.invalid".into(), + oidc_keypair_path: tmp.path().join("oidc-keypair.json"), + oidc_jwt_ttl_seconds: 300, }; let http = reqwest::Client::builder() @@ -65,6 +77,7 @@ async fn spawn_broker_with_sts( http, audit: AuditLog::open_in_memory().unwrap(), sts, + oidc: Arc::new(oidc), }); let app = create_router(state.clone()); diff --git a/crates/agentkeys-broker-server/tests/oidc_flow.rs b/crates/agentkeys-broker-server/tests/oidc_flow.rs new file mode 100644 index 0000000..b47c03f --- /dev/null +++ b/crates/agentkeys-broker-server/tests/oidc_flow.rs @@ -0,0 +1,261 @@ +//! End-to-end tests for the broker's OIDC issuer surface (Stage 7 phase 2): +//! discovery doc, JWKS, and bearer-token-gated JWT mint. +//! +//! Mirrors the recipe operators run before `aws iam create-open-id-connect-provider`: +//! 1. fetch discovery → confirm issuer + jwks_uri +//! 2. fetch JWKS → confirm ES256 P-256 public key + kid +//! 3. 
mint a JWT for a real session → verify ES256 signature with the JWKS + +use std::path::PathBuf; +use std::sync::Arc; + +use agentkeys_broker_server::audit::AuditLog; +use agentkeys_broker_server::config::BrokerConfig; +use agentkeys_broker_server::create_router; +use agentkeys_broker_server::oidc::OidcKeypair; +use agentkeys_broker_server::state::AppState; +use agentkeys_broker_server::sts::{AssumedCredentials, StsClient, StubStsClient}; +use jsonwebtoken::{decode, decode_header, Algorithm, DecodingKey, Validation}; +use serde_json::Value; +use tempfile::TempDir; + +const STUB_ROLE_ARN: &str = "arn:aws:iam::000000000000:role/agentkeys-agent"; +const TEST_ISSUER: &str = "https://oidc.test.invalid"; + +fn stub_creds() -> AssumedCredentials { + AssumedCredentials { + access_key_id: "ASIA-stub-AKID".into(), + secret_access_key: "stub-secret".into(), + session_token: "stub-session-token".into(), + expiration_unix: 9_999_999_999, + } +} + +async fn spawn_mock_backend() -> String { + let conn = rusqlite::Connection::open_in_memory().unwrap(); + agentkeys_mock_server::db::init_schema(&conn).unwrap(); + let state = Arc::new(agentkeys_mock_server::state::AppState::new(conn)); + let app = agentkeys_mock_server::create_router(state); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + axum::serve(listener, app).await.unwrap(); + }); + format!("http://{}", addr) +} + +async fn spawn_broker(backend_url: String) -> (String, Arc) { + let tmp = Box::leak(Box::new(TempDir::new().unwrap())); + let keypair_path = tmp.path().join("oidc-keypair.json"); + let oidc = OidcKeypair::generate_and_persist(&keypair_path).unwrap(); + + let sts: Arc = Arc::new(StubStsClient::ok(stub_creds())); + let config = BrokerConfig { + daemon_access_key_id: "AKIA-fake".into(), + daemon_secret_access_key: "fake-secret".into(), + agent_role_arn: STUB_ROLE_ARN.into(), + backend_url, + audit_db_path: 
PathBuf::from(":memory:"), + aws_region: "us-east-1".into(), + session_duration_seconds: 3600, + backend_request_timeout_seconds: 5, + shutdown_grace_seconds: 5, + oidc_issuer: TEST_ISSUER.into(), + oidc_keypair_path: keypair_path, + oidc_jwt_ttl_seconds: 300, + }; + + let http = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(2)) + .connect_timeout(std::time::Duration::from_millis(500)) + .build() + .unwrap(); + let state = Arc::new(AppState { + config, + http, + audit: AuditLog::open_in_memory().unwrap(), + sts, + oidc: Arc::new(oidc), + }); + let app = create_router(state.clone()); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + axum::serve(listener, app).await.unwrap(); + }); + (format!("http://{}", addr), state) +} + +async fn mint_session_against_backend(backend_url: &str) -> (String, String) { + let client = reqwest::Client::new(); + let resp: Value = client + .post(format!("{}/session/create", backend_url)) + .json(&serde_json::json!({ "auth_token": "oidc-test-bearer" })) + .send() + .await + .unwrap() + .json() + .await + .unwrap(); + let session = resp["session"].as_str().unwrap().to_string(); + let wallet = resp["wallet"].as_str().unwrap().to_string(); + (session, wallet) +} + +#[tokio::test] +async fn discovery_returns_aws_compatible_shape() { + let backend_url = spawn_mock_backend().await; + let (broker_url, _) = spawn_broker(backend_url).await; + + let resp: Value = reqwest::Client::new() + .get(format!("{}/.well-known/openid-configuration", broker_url)) + .send() + .await + .unwrap() + .json() + .await + .unwrap(); + + assert_eq!(resp["issuer"], TEST_ISSUER); + assert_eq!( + resp["jwks_uri"], + format!("{}/.well-known/jwks.json", TEST_ISSUER) + ); + assert_eq!(resp["id_token_signing_alg_values_supported"][0], "ES256"); + assert_eq!(resp["subject_types_supported"][0], "public"); + 
assert_eq!(resp["token_endpoint_auth_methods_supported"][0], "none"); + + let claims = resp["claims_supported"] + .as_array() + .expect("claims_supported must be an array"); + let names: Vec<&str> = claims.iter().filter_map(|v| v.as_str()).collect(); + assert!(names.contains(&"agentkeys_user_wallet")); + assert!(names.contains(&"sub")); + assert!(names.contains(&"exp")); +} + +#[tokio::test] +async fn jwks_returns_p256_es256_with_kid() { + let backend_url = spawn_mock_backend().await; + let (broker_url, state) = spawn_broker(backend_url).await; + + let resp: Value = reqwest::Client::new() + .get(format!("{}/.well-known/jwks.json", broker_url)) + .send() + .await + .unwrap() + .json() + .await + .unwrap(); + + let key = &resp["keys"][0]; + assert_eq!(key["kty"], "EC"); + assert_eq!(key["crv"], "P-256"); + assert_eq!(key["alg"], "ES256"); + assert_eq!(key["use"], "sig"); + assert_eq!(key["kid"], state.oidc.kid); + assert_eq!(key["x"], state.oidc.public_x_b64); + assert_eq!(key["y"], state.oidc.public_y_b64); +} + +#[tokio::test] +async fn mint_oidc_jwt_signs_claims_for_session_wallet() { + let backend_url = spawn_mock_backend().await; + let (session_token, wallet) = mint_session_against_backend(&backend_url).await; + let (broker_url, state) = spawn_broker(backend_url).await; + + let resp = reqwest::Client::new() + .post(format!("{}/v1/mint-oidc-jwt", broker_url)) + .header("Authorization", format!("Bearer {}", session_token)) + .send() + .await + .unwrap(); + + assert_eq!(resp.status(), reqwest::StatusCode::OK); + let body: Value = resp.json().await.unwrap(); + let jwt = body["jwt"].as_str().expect("jwt must be a string"); + assert_eq!(body["wallet"], wallet); + let exp = body["expiration"].as_i64().unwrap(); + assert!(exp > chrono_utc_now() - 5 && exp < chrono_utc_now() + 600); + + let header = decode_header(jwt).unwrap(); + assert_eq!(header.alg, Algorithm::ES256); + assert_eq!(header.kid.as_deref(), Some(state.oidc.kid.as_str())); + + let decoding_key = + 
DecodingKey::from_ec_components(&state.oidc.public_x_b64, &state.oidc.public_y_b64) + .unwrap(); + let mut validation = Validation::new(Algorithm::ES256); + validation.set_audience(&["sts.amazonaws.com"]); + validation.set_issuer(&[TEST_ISSUER]); + + let token_data: jsonwebtoken::TokenData = + decode(jwt, &decoding_key, &validation).expect("public-key verify"); + assert_eq!(token_data.claims["agentkeys_user_wallet"], wallet); + assert_eq!( + token_data.claims["sub"], + format!("agentkeys:agent:{}", wallet) + ); + assert_eq!(token_data.claims["aud"], "sts.amazonaws.com"); + assert_eq!(token_data.claims["iss"], TEST_ISSUER); + + let row = state.audit.last_row().unwrap().expect("audit row missing"); + assert_eq!(row.outcome, "ok"); + assert_eq!(row.requester_wallet, wallet); + assert_eq!(row.requested_role, "oidc_jwt"); +} + +#[tokio::test] +async fn mint_oidc_jwt_rejects_missing_bearer() { + let backend_url = spawn_mock_backend().await; + let (broker_url, _) = spawn_broker(backend_url).await; + + let resp = reqwest::Client::new() + .post(format!("{}/v1/mint-oidc-jwt", broker_url)) + .send() + .await + .unwrap(); + + assert_eq!(resp.status(), reqwest::StatusCode::UNAUTHORIZED); +} + +#[tokio::test] +async fn mint_oidc_jwt_rejects_invalid_bearer_and_audits_auth_failed() { + let backend_url = spawn_mock_backend().await; + let (broker_url, state) = spawn_broker(backend_url).await; + + let resp = reqwest::Client::new() + .post(format!("{}/v1/mint-oidc-jwt", broker_url)) + .header("Authorization", "Bearer never-minted") + .send() + .await + .unwrap(); + + assert_eq!(resp.status(), reqwest::StatusCode::UNAUTHORIZED); + let row = state.audit.last_row().unwrap().expect("audit row missing"); + assert_eq!(row.outcome, "auth_failed"); + assert_eq!(row.requested_role, "oidc_jwt"); +} + +#[tokio::test] +async fn keypair_persists_across_broker_restarts() { + // Two brokers pointed at the same on-disk keypair must serve the same + // JWKS — otherwise an AWS OIDC provider registered 
against the first + // broker breaks every restart, which would be unusable in production. + let tmp = TempDir::new().unwrap(); + let keypair_path = tmp.path().join("oidc-keypair.json"); + let kp1 = OidcKeypair::generate_and_persist(&keypair_path).unwrap(); + let kp2 = OidcKeypair::load(&keypair_path).unwrap(); + assert_eq!(kp1.kid, kp2.kid); + assert_eq!(kp1.public_x_b64, kp2.public_x_b64); + assert_eq!(kp1.public_y_b64, kp2.public_y_b64); +} + +fn chrono_utc_now() -> i64 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs() as i64) + .unwrap_or(0) +} diff --git a/crates/agentkeys-cli/src/lib.rs b/crates/agentkeys-cli/src/lib.rs index 622d0c3..f77a11f 100644 --- a/crates/agentkeys-cli/src/lib.rs +++ b/crates/agentkeys-cli/src/lib.rs @@ -5,7 +5,26 @@ use agentkeys_core::backend::{BackendError, CredentialBackend}; use agentkeys_core::mock_client::MockHttpClient; pub use agentkeys_core::session_store; use agentkeys_core::session_store::SessionStore; -use agentkeys_provisioner::{run_provision, ProvisionError, Provisioner}; +use agentkeys_provisioner::{aws_creds::fetch_via_broker, run_provision, ProvisionError, Provisioner}; + +/// Stage-7 phase-2 helper: when a broker URL is configured, fetch 1-hour +/// scoped AWS creds and return them as an env-var map ready to merge into the +/// scraper subprocess. With no broker URL, returns an empty map and the +/// subprocess inherits whatever the operator already has in its environment +/// (legacy `stage6-demo-env.sh` path). 
+async fn broker_env_for_provision( + broker_url: Option<&str>, + session_token: &str, +) -> Result> { + let Some(url) = broker_url else { + return Ok(HashMap::new()); + }; + let creds = fetch_via_broker(url, session_token).await?; + let region = std::env::var("AWS_REGION") + .ok() + .or_else(|| std::env::var("AWS_DEFAULT_REGION").ok()); + Ok(creds.to_env(region.as_deref())) +} use agentkeys_types::{ AuditEvent, AuditFilter, AuthToken, Scope, ServiceName, Session, WalletAddress, }; @@ -54,6 +73,10 @@ pub struct CommandContext { /// to point at a tempdir in file-only mode without mutating /// process-global `$HOME` / `AGENTKEYS_SESSION_STORE` (issue #34). pub session_store_override: Option, + /// Stage-7 phase-2 wiring: when set, `agentkeys provision` fetches AWS + /// temp creds from this broker URL and injects them into the scraper + /// subprocess env (replacing the `stage6-demo-env.sh` sourcing pattern). + pub broker_url: Option, } impl CommandContext { @@ -66,9 +89,15 @@ impl CommandContext { session_override: None, backend_override: None, session_store_override: None, + broker_url: std::env::var("AGENTKEYS_BROKER_URL").ok().filter(|s| !s.is_empty()), } } + pub fn with_broker_url(mut self, broker_url: Option) -> Self { + self.broker_url = broker_url; + self + } + pub fn with_session(mut self, session: Session) -> Self { self.session_override = Some(session); self @@ -876,11 +905,21 @@ pub async fn cmd_provision( let mut stderr_lines: Vec = Vec::new(); + let env = match broker_env_for_provision(ctx.broker_url.as_deref(), &session.token).await { + Ok(env) => env, + Err(e) => { + return Err(anyhow!( + "Problem: Could not fetch AWS credentials from broker.\nCause: {}.\nFix: Verify --broker-url / AGENTKEYS_BROKER_URL is reachable, your session token is current, and the broker's /readyz endpoint returns 200.\nDocs: https://github.com/litentry/agentKeys/blob/main/docs/operator-runbook.md", + e + )); + } + }; + let result = run_provision( &provisioner, service, 
&cmd_refs, - HashMap::new(), + env, Some(&repo_root), backend, &session, diff --git a/crates/agentkeys-cli/src/main.rs b/crates/agentkeys-cli/src/main.rs index dcf2383..98739ee 100644 --- a/crates/agentkeys-cli/src/main.rs +++ b/crates/agentkeys-cli/src/main.rs @@ -24,6 +24,13 @@ struct Cli { #[arg(long, help = "Output machine-readable JSON where supported")] json: bool, + #[arg( + long, + env = "AGENTKEYS_BROKER_URL", + help = "Stage 7 broker URL — when set, `provision` fetches AWS temp creds from the broker (replaces stage6-demo-env.sh)" + )] + broker_url: Option, + #[command(subcommand)] command: Commands, } @@ -208,7 +215,8 @@ enum InboxAction { #[tokio::main] async fn main() { let cli = Cli::parse(); - let ctx = CommandContext::new(&cli.backend, cli.verbose, cli.json); + let ctx = CommandContext::new(&cli.backend, cli.verbose, cli.json) + .with_broker_url(cli.broker_url.clone()); let result: anyhow::Result = match &cli.command { Commands::Init { mock_token } => { diff --git a/crates/agentkeys-daemon/src/main.rs b/crates/agentkeys-daemon/src/main.rs index bb75f46..787245f 100644 --- a/crates/agentkeys-daemon/src/main.rs +++ b/crates/agentkeys-daemon/src/main.rs @@ -242,7 +242,13 @@ async fn main() -> anyhow::Result<()> { // 3. 
Serve MCP if args.stdio { let dyn_backend: Arc = backend; - agentkeys_mcp::server::run_stdio(dyn_backend, sess, agent_id).await?; + agentkeys_mcp::server::run_stdio_with_broker( + dyn_backend, + sess, + agent_id, + args.broker_url.clone(), + ) + .await?; } else { info!("no --stdio flag; daemon exiting (Unix socket mode not yet implemented)"); } diff --git a/crates/agentkeys-mcp/Cargo.toml b/crates/agentkeys-mcp/Cargo.toml index c2803de..de7b2f5 100644 --- a/crates/agentkeys-mcp/Cargo.toml +++ b/crates/agentkeys-mcp/Cargo.toml @@ -20,3 +20,4 @@ tracing = "0.1" [dev-dependencies] tokio = { workspace = true } +axum = { version = "0.7", features = ["json"] } diff --git a/crates/agentkeys-mcp/src/lib.rs b/crates/agentkeys-mcp/src/lib.rs index 3b8143f..ad64667 100644 --- a/crates/agentkeys-mcp/src/lib.rs +++ b/crates/agentkeys-mcp/src/lib.rs @@ -1,5 +1,5 @@ use agentkeys_core::backend::{BackendError, CredentialBackend}; -use agentkeys_provisioner::{run_provision, Provisioner}; +use agentkeys_provisioner::{aws_creds::fetch_via_broker, run_provision, Provisioner}; use agentkeys_types::{AuditFilter, ServiceName, Session, WalletAddress}; use serde_json::{json, Value}; use std::collections::HashMap; @@ -98,6 +98,11 @@ pub struct McpHandler { agent_id: WalletAddress, provisioner: Arc, repo_root: PathBuf, + /// Stage-7 phase-2 wiring: when `Some`, the provision tool fetches AWS + /// temp creds from this broker URL and injects them into the scraper + /// subprocess env. When `None`, the subprocess inherits whatever `AWS_*` + /// vars the operator sourced manually (legacy `stage6-demo-env.sh` path). 
+ broker_url: Option, } impl McpHandler { @@ -115,6 +120,7 @@ impl McpHandler { agent_id, provisioner: Arc::new(Provisioner::new()), repo_root, + broker_url: None, } } @@ -127,7 +133,21 @@ impl McpHandler { let repo_root = std::env::var("AGENTKEYS_REPO_ROOT") .map(PathBuf::from) .unwrap_or_else(|_| std::env::current_dir().unwrap_or_default()); - Self { backend, session, agent_id, provisioner, repo_root } + Self { + backend, + session, + agent_id, + provisioner, + repo_root, + broker_url: None, + } + } + + /// Builder-style setter so the daemon can pass `--broker-url` through + /// without forcing every caller to know about it. + pub fn with_broker_url(mut self, broker_url: Option) -> Self { + self.broker_url = broker_url; + self } pub async fn handle(&self, request: JsonRpcRequest) -> JsonRpcResponse { @@ -251,11 +271,26 @@ impl McpHandler { let cmd_refs: Vec<&str> = script_command.iter().map(|s| s.as_str()).collect(); let cwd = self.repo_root.clone(); + let env = match self.broker_env_for_provision().await { + Ok(env) => env, + Err(e) => { + return JsonRpcResponse::error( + id, + -32603, + json!({ + "code": "BROKER_FETCH_FAILED", + "message": e.to_string() + }) + .to_string(), + ); + } + }; + let result = run_provision( &self.provisioner, &service, &cmd_refs, - HashMap::new(), + env, Some(&cwd), self.backend.clone(), &self.session, @@ -290,6 +325,34 @@ impl McpHandler { } } +impl McpHandler { + /// Fetch AWS temp creds from the broker (if configured) and return them + /// as an env-var map ready to merge into the subprocess. With no broker + /// configured, returns an empty map and the subprocess inherits whatever + /// `AWS_*` vars the operator already exported (legacy path). 
+    async fn broker_env_for_provision(&self) -> Result<HashMap<String, String>, BrokerEnvError> {
+        let Some(broker_url) = self.broker_url.as_deref() else {
+            return Ok(HashMap::new());
+        };
+        let creds = fetch_via_broker(broker_url, &self.session.token)
+            .await
+            .map_err(|e| BrokerEnvError(e.to_string()))?;
+        let region = std::env::var("AWS_REGION")
+            .ok()
+            .or_else(|| std::env::var("AWS_DEFAULT_REGION").ok());
+        Ok(creds.to_env(region.as_deref()))
+    }
+}
+
+#[derive(Debug)]
+struct BrokerEnvError(String);
+
+impl std::fmt::Display for BrokerEnvError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "broker AWS-cred fetch failed: {}", self.0)
+    }
+}
+
 fn provision_error_to_mcp_code(err: &agentkeys_provisioner::ProvisionError) -> &'static str {
     use agentkeys_provisioner::ProvisionError;
     match err {
@@ -432,6 +495,75 @@ mod tests {
         );
     }
 
+    #[tokio::test]
+    async fn broker_env_for_provision_returns_empty_without_broker_url() {
+        let handler = make_handler();
+        let env = handler.broker_env_for_provision().await.unwrap();
+        assert!(
+            env.is_empty(),
+            "no broker_url ⇒ no AWS env injected (legacy stage6-demo path)"
+        );
+    }
+
+    #[tokio::test]
+    async fn broker_env_for_provision_injects_aws_creds_when_broker_url_set() {
+        use axum::{routing::post, Json, Router};
+
+        // Stub broker that returns canned creds; the real broker logic is
+        // covered in agentkeys-broker-server tests. Here we just verify the
+        // MCP handler hits /v1/mint-aws-creds with its session bearer and
+        // surfaces the response into the subprocess env.
+ let router = Router::new().route( + "/v1/mint-aws-creds", + post(|| async { + Json(json!({ + "access_key_id": "ASIA-mcp-test", + "secret_access_key": "mcp-secret", + "session_token": "mcp-token", + "expiration": 9_999_999_999_i64, + "wallet": "0xtest" + })) + }), + ); + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + axum::serve(listener, router).await.unwrap(); + }); + let broker_url = format!("http://{}", addr); + + let handler = McpHandler::new( + Arc::new(NoopBackend), + test_session(), + WalletAddress("0xtest".into()), + ) + .with_broker_url(Some(broker_url)); + + let env = handler.broker_env_for_provision().await.unwrap(); + assert_eq!(env.get("AWS_ACCESS_KEY_ID").unwrap(), "ASIA-mcp-test"); + assert_eq!(env.get("AWS_SECRET_ACCESS_KEY").unwrap(), "mcp-secret"); + assert_eq!(env.get("AWS_SESSION_TOKEN").unwrap(), "mcp-token"); + } + + #[tokio::test] + async fn broker_env_for_provision_surfaces_unreachable_broker() { + let handler = McpHandler::new( + Arc::new(NoopBackend), + test_session(), + WalletAddress("0xtest".into()), + ) + .with_broker_url(Some("http://127.0.0.1:1".into())); + + let err = handler + .broker_env_for_provision() + .await + .expect_err("unreachable broker must error"); + assert!( + err.to_string().contains("broker"), + "error should reference the broker: {err}" + ); + } + #[tokio::test] async fn provision_unknown_service_error() { let handler = make_handler(); diff --git a/crates/agentkeys-mcp/src/server.rs b/crates/agentkeys-mcp/src/server.rs index a3a1742..f613bc9 100644 --- a/crates/agentkeys-mcp/src/server.rs +++ b/crates/agentkeys-mcp/src/server.rs @@ -11,7 +11,18 @@ pub async fn run_stdio( session: Session, agent_id: WalletAddress, ) -> anyhow::Result<()> { - let handler = McpHandler::new(backend, session, agent_id); + let broker_url = std::env::var("AGENTKEYS_BROKER_URL").ok(); + run_stdio_with_broker(backend, session, agent_id, 
broker_url).await +} + +pub async fn run_stdio_with_broker( + backend: Arc, + session: Session, + agent_id: WalletAddress, + broker_url: Option, +) -> anyhow::Result<()> { + let handler = + McpHandler::new(backend, session, agent_id).with_broker_url(broker_url); let stdin = tokio::io::stdin(); let stdout = tokio::io::stdout(); let mut reader = BufReader::new(stdin); diff --git a/crates/agentkeys-provisioner/Cargo.toml b/crates/agentkeys-provisioner/Cargo.toml index 072362c..3c61834 100644 --- a/crates/agentkeys-provisioner/Cargo.toml +++ b/crates/agentkeys-provisioner/Cargo.toml @@ -13,6 +13,8 @@ async-trait = { workspace = true } thiserror = { workspace = true } anyhow = { workspace = true } tracing = "0.1" +reqwest = { version = "0.12", features = ["json"] } [dev-dependencies] tempfile = "3" +axum = { version = "0.7", features = ["json"] } diff --git a/crates/agentkeys-provisioner/src/aws_creds.rs b/crates/agentkeys-provisioner/src/aws_creds.rs new file mode 100644 index 0000000..3e0e5f7 --- /dev/null +++ b/crates/agentkeys-provisioner/src/aws_creds.rs @@ -0,0 +1,199 @@ +//! AWS-cred fetch helper for the Stage 7 broker. +//! +//! When the daemon (or CLI) is run with `--broker-url`, the operator no longer +//! has to source `scripts/stage6-demo-env.sh`. Instead, the provisioner asks the +//! broker for 1-hour scoped temp credentials right before spawning a scraper +//! subprocess, and injects them as `AWS_*` env vars into the child's environment. +//! +//! Behavior is opt-in: pass `BrokerCreds::None` (the default when no broker URL +//! is configured) and the subprocess inherits whatever `AWS_*` env the operator +//! already exported manually. + +use std::collections::HashMap; +use std::time::Duration; + +use serde::Deserialize; + +use crate::error::{ProvisionError, ProvisionResult}; + +/// Shape of the broker's `POST /v1/mint-aws-creds` response. Keep in sync with +/// `crates/agentkeys-broker-server/src/handlers/mint.rs::MintResponse`. 
+#[derive(Debug, Clone, Deserialize)] +pub struct AwsTempCreds { + pub access_key_id: String, + pub secret_access_key: String, + pub session_token: String, + /// Unix epoch seconds. The broker's session_duration_seconds caps this + /// (1h default). + pub expiration: i64, + pub wallet: String, +} + +impl AwsTempCreds { + /// Render the creds as a `HashMap` suitable for merging + /// into a `tokio::process::Command` env. Adds the AWS region only when + /// supplied — leaving it unset lets the subprocess fall back to `AWS_REGION` + /// already in its environment. + pub fn to_env(&self, region: Option<&str>) -> HashMap { + let mut m = HashMap::new(); + m.insert("AWS_ACCESS_KEY_ID".into(), self.access_key_id.clone()); + m.insert("AWS_SECRET_ACCESS_KEY".into(), self.secret_access_key.clone()); + m.insert("AWS_SESSION_TOKEN".into(), self.session_token.clone()); + if let Some(r) = region { + m.insert("AWS_REGION".into(), r.to_string()); + m.insert("AWS_DEFAULT_REGION".into(), r.to_string()); + } + m + } +} + +/// Caller-side fetch. Bearer token is the daemon's own session token, which the +/// broker validates against the backend's `/session/validate` endpoint before +/// minting. Errors are mapped to `ProvisionError::Internal` because they sit +/// upstream of the subprocess spawn — the per-step tripwire/store/error codes +/// don't apply here. 
+pub async fn fetch_via_broker(
+    broker_url: &str,
+    session_token: &str,
+) -> ProvisionResult<AwsTempCreds> {
+    let url = format!(
+        "{}/v1/mint-aws-creds",
+        broker_url.trim_end_matches('/')
+    );
+    let client = reqwest::Client::builder()
+        .timeout(Duration::from_secs(15))
+        .connect_timeout(Duration::from_secs(5))
+        .build()
+        .map_err(|e| ProvisionError::Internal(format!("build broker http client: {e}")))?;
+    let resp = client
+        .post(&url)
+        .header("Authorization", format!("Bearer {}", session_token))
+        .send()
+        .await
+        .map_err(|e| ProvisionError::Internal(format!("broker request to {url} failed: {e}")))?;
+
+    let status = resp.status();
+    if !status.is_success() {
+        let body = resp.text().await.unwrap_or_default();
+        return Err(ProvisionError::Internal(format!(
+            "broker {url} returned HTTP {}: {}",
+            status,
+            body
+        )));
+    }
+
+    resp.json::<AwsTempCreds>()
+        .await
+        .map_err(|e| ProvisionError::Internal(format!("parse broker response: {e}")))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn to_env_emits_three_aws_keys() {
+        let creds = AwsTempCreds {
+            access_key_id: "ASIA-test".into(),
+            secret_access_key: "secret".into(),
+            session_token: "tok".into(),
+            expiration: 0,
+            wallet: "0xabc".into(),
+        };
+        let env = creds.to_env(None);
+        assert_eq!(env.get("AWS_ACCESS_KEY_ID").unwrap(), "ASIA-test");
+        assert_eq!(env.get("AWS_SECRET_ACCESS_KEY").unwrap(), "secret");
+        assert_eq!(env.get("AWS_SESSION_TOKEN").unwrap(), "tok");
+        assert!(!env.contains_key("AWS_REGION"));
+    }
+
+    #[test]
+    fn to_env_includes_region_when_given() {
+        let creds = AwsTempCreds {
+            access_key_id: "k".into(),
+            secret_access_key: "s".into(),
+            session_token: "t".into(),
+            expiration: 0,
+            wallet: "0xabc".into(),
+        };
+        let env = creds.to_env(Some("us-east-1"));
+        assert_eq!(env.get("AWS_REGION").unwrap(), "us-east-1");
+        assert_eq!(env.get("AWS_DEFAULT_REGION").unwrap(), "us-east-1");
+    }
+
+    #[tokio::test]
+    async fn fetch_via_broker_happy_path() {
+        let server =
stub_broker_server(StubResponse::Ok).await; + let creds = fetch_via_broker(&server.url, "session-token").await.unwrap(); + assert_eq!(creds.access_key_id, "ASIA-stub"); + assert_eq!(creds.wallet, "0xtest"); + } + + #[tokio::test] + async fn fetch_via_broker_propagates_unauthorized() { + let server = stub_broker_server(StubResponse::Unauthorized).await; + let err = fetch_via_broker(&server.url, "bogus") + .await + .expect_err("expected error on 401"); + let msg = err.to_string(); + assert!(msg.contains("401") || msg.contains("Unauthorized"), "msg = {msg}"); + } + + #[tokio::test] + async fn fetch_via_broker_handles_unreachable_broker() { + // Port 1 is reserved; nothing listens there. + let err = fetch_via_broker("http://127.0.0.1:1", "tok") + .await + .expect_err("expected error on unreachable broker"); + assert!(err.to_string().contains("broker request")); + } + + enum StubResponse { + Ok, + Unauthorized, + } + + struct StubServer { + url: String, + _handle: tokio::task::JoinHandle<()>, + } + + async fn stub_broker_server(response: StubResponse) -> StubServer { + use axum::{routing::post, Json, Router}; + use serde_json::json; + + let router = match response { + StubResponse::Ok => Router::new().route( + "/v1/mint-aws-creds", + post(|| async { + Json(json!({ + "access_key_id": "ASIA-stub", + "secret_access_key": "stub-secret", + "session_token": "stub-token", + "expiration": 9_999_999_999_i64, + "wallet": "0xtest", + })) + }), + ), + StubResponse::Unauthorized => Router::new().route( + "/v1/mint-aws-creds", + post(|| async { + ( + axum::http::StatusCode::UNAUTHORIZED, + Json(json!({"error":"unauthorized","message":"bad bearer"})), + ) + }), + ), + }; + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + let handle = tokio::spawn(async move { + axum::serve(listener, router).await.unwrap(); + }); + StubServer { + url: format!("http://{}", addr), + _handle: handle, + } + } +} diff --git 
a/crates/agentkeys-provisioner/src/lib.rs b/crates/agentkeys-provisioner/src/lib.rs index 274f239..e732bef 100644 --- a/crates/agentkeys-provisioner/src/lib.rs +++ b/crates/agentkeys-provisioner/src/lib.rs @@ -1,9 +1,11 @@ +pub mod aws_creds; pub mod error; pub mod metrics; pub mod orchestrator; pub mod subprocess; pub mod tripwire; +pub use aws_creds::{fetch_via_broker, AwsTempCreds}; pub use error::{ProvisionError, ProvisionResult}; pub use orchestrator::{mask_key, run_provision, ActiveProvision, ProvisionSuccess, Provisioner}; pub use subprocess::{spawn_and_collect, SubprocessConfig, SubprocessOutcome}; diff --git a/docs/dev-setup.md b/docs/dev-setup.md index 4945e60..17fe1f9 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -104,13 +104,19 @@ When the daemon needs to access the operator's S3 vault (to read or store a cred ### 4.3 Provision a new service -The provisioner scripts run unchanged from your machine: +The provisioner scripts run unchanged from your machine. With `--broker-url` set, the daemon (or the `agentkeys` CLI directly) calls the broker's `POST /v1/mint-aws-creds` right before spawning the scraper subprocess and injects 1-hour scoped `AWS_*` env vars into the child process. **You no longer need to source `scripts/stage6-demo-env.sh`** — that path is the legacy fallback for ops who run without a broker. ```bash $BIN --broker-url "$AGENTKEYS_BROKER_URL" --session "$AGENTKEYS_BEARER_TOKEN" \ provision openrouter --identity bot-$(date +%s)@bots.example.dev ``` +Or via the CLI: + +```bash +agentkeys --broker-url "$AGENTKEYS_BROKER_URL" provision openrouter +``` + Success criteria: 1. The scraper exits 0 with a key on stdout. diff --git a/docs/operator-runbook.md b/docs/operator-runbook.md index e4444a6..63eeca4 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -4,19 +4,22 @@ **Scope:** start, supervise, rotate keys, monitor audit, and migrate from local to hosted. 
v0.1 deliberately avoids TEE / KMS / hosted-only paths — those land later. -> **WIP / scratchpad.** This runbook ships alongside the v0.1 broker (Stage 7 vertical slice — `mint-aws-creds` + audit only). Sections marked **(later)** describe surface that lands in Stage 7 phase 2 (OIDC federation) or Stage 8 (off-chain vault). Treat them as forward-looking, not load-bearing for v0.1 operators. +> **WIP / scratchpad.** This runbook ships alongside the v0.1 broker. Stage 7 phase 1 (broker mint-aws-creds + audit) and phase 2 (OIDC issuer surface + provisioner-scripts AWS-cred wiring) are both live. The `sts:AssumeRoleWithWebIdentity` federation step is still deferred — it needs public TLS hosting of the issuer URL, see [`stage7-wip.md`](./stage7-wip.md). Stage 8 (off-chain vault) sections are forward-looking. ## 1. What the broker is `agentkeys-broker-server` is the long-running HTTP service that holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. It is the boundary that lets app developers run daemons against your infrastructure **without holding any AWS credentials themselves**. -In v0.1 the broker exposes a single user-facing endpoint: +User-facing endpoints: -- `POST /v1/mint-aws-creds` — bearer-token in, temp AWS creds out. +- `POST /v1/mint-aws-creds` — bearer-token in, temp AWS creds out (phase 1). +- `POST /v1/mint-oidc-jwt` — bearer-token in, short-lived ES256 JWT out (phase 2). Suitable for `sts:AssumeRoleWithWebIdentity` once the issuer URL is publicly hosted. +- `GET /.well-known/openid-configuration` — OIDC discovery doc. +- `GET /.well-known/jwks.json` — JWK Set with the broker's ES256 P-256 public key + `kid`. -Plus operator-side health checks (`/healthz`, `/readyz`) and an audit log written to local SQLite. +Operator-side: `/healthz`, `/readyz` health checks, and an audit log written to local SQLite. 
Both `mint-aws-creds` and `mint-oidc-jwt` write to the same audit table — `requested_role = "oidc_jwt"` distinguishes JWT mints in the ledger. -The OIDC discovery surface (`/.well-known/openid-configuration`, `/.well-known/jwks.json`, `POST /v1/mint-oidc-jwt`) and `sts:AssumeRoleWithWebIdentity` exchange land in Stage 7 phase 2, alongside the public-hosting prereq from [`stage7-wip.md`](./stage7-wip.md). +The remaining federation step (`aws iam create-open-id-connect-provider --url $BROKER_OIDC_ISSUER` + `sts:AssumeRoleWithWebIdentity`) is the public-hosting recipe in [`stage7-wip.md` §"Phase 2 — federation step"](./stage7-wip.md). ## 2. Threat model — what the broker is and isn't defending against @@ -47,6 +50,9 @@ The broker reads its configuration from environment variables only — no config | `BROKER_SESSION_DURATION_SECONDS` | no | TTL for minted credentials. Default: `3600` (1 h). Min: `900`, max: `43200`. | | `BROKER_BACKEND_TIMEOUT_SECONDS` | no | HTTP timeout for backend `/session/validate` calls. Default: `10`. | | `BROKER_SHUTDOWN_GRACE_SECONDS` | no | Hard cap on graceful-shutdown drain. Default: `30`. | +| `BROKER_OIDC_ISSUER` | no | Public URL the broker advertises in the OIDC discovery doc and JWT `iss` claim. Must match the URL used at `aws iam create-open-id-connect-provider` time. Default: `https://oidc.agentkeys.dev`. | +| `BROKER_OIDC_KEYPAIR_PATH` | no | Path to the persisted ES256 keypair (mode 0600). Generated on first start, reused on subsequent restarts so the registered IAM OIDC provider stays valid. Default: `$HOME/.agentkeys/broker/oidc-keypair.json`. | +| `BROKER_OIDC_JWT_TTL_SECONDS` | no | TTL (seconds) for minted OIDC JWTs. Default: `300`. Bounded `[60, 3600]`. | Persist `DAEMON_ACCESS_KEY_ID` and `DAEMON_SECRET_ACCESS_KEY` in `~/.zshenv` (or the equivalent per-shell startup file for non-zsh shells) with file mode 0600 so the operator's shell has them on every login. 
The names match `scripts/stage6-demo-env.sh` so one persisted set of keys feeds both the legacy demo flow and the broker: @@ -177,7 +183,7 @@ Operator-side, the same binary runs. Configuration source changes from env vars - KMS-sealed configuration source. Env vars only. - Secret-manager integration as a config source (Vault, AWS Secrets Manager, GCP Secret Manager). Operator persists the daemon AWS keys in `~/.zshenv` (or supervisor-managed env) themselves. - Multi-tenant operator support. One broker process serves one operator's `agentkeys-daemon` key. -- OIDC `assume-role-with-web-identity` exchange. Direct `assume-role` with the static IAM trust path. The OIDC half lands when public hosting is also in motion (Stage 7 phase 2). +- `sts:AssumeRoleWithWebIdentity` exchange against the broker's issuer. The broker now serves a conforming OIDC discovery + JWKS surface and a bearer-gated `mint-oidc-jwt` endpoint, but the AWS-side `create-open-id-connect-provider` registration requires the issuer URL to be reachable over public TLS — that hosting step is the remaining blocker (Stage 7 phase 2 federation step). - Automatic key rotation. Rotate manually per §5. ## 10. Further reading diff --git a/docs/spec/plans/development-stages.md b/docs/spec/plans/development-stages.md index 0ac29ef..7f5f0f8 100644 --- a/docs/spec/plans/development-stages.md +++ b/docs/spec/plans/development-stages.md @@ -20,6 +20,7 @@ If you're looking for setup / demo instructions, go to [`../../dev-setup.md`](.. 
| 4 | Pair / Approve / Recover | OTP-gated auth requests; 2-terminal pair flow; alias / email / ENS recovery via identity-link table | 15/11 unit + 2-terminal E2E | | 5a | Provisioner (deterministic) | OpenRouter + OpenAI CDP scrapers; `signupEmailOtp` pattern library; HTML-strip + label-aware OTP extractor; mandatory post-provision verify; `agentkeys provision openrouter` | 59/59 unit + live provision | | 6 (interim, 2026-04) | Hosted email infra | SES domain verification on `bots.litentry.org`; `agentkeys-daemon` IAM user → `agentkeys-agent` assume-role; S3 inbound bucket; `ses-s3` email backend; end-to-end demo from signup → SES receipt → S3 poll → key extraction | `scripts/stage6-demo-run.sh` prints a valid `sk-or-v1-...` key | +| 7 phase 1 (2026-04) | Broker server | `agentkeys-broker-server` axum service: bearer-gated `POST /v1/mint-aws-creds`, audit SQLite, supervisor probes; daemon `--broker-url` flag wired up | 22/22 unit + integration | ### Non-stage work shipped alongside @@ -61,7 +62,13 @@ Today's Stage 6 still lists "interim" AWS-managed DKIM + static IAM user. To cal ### Stage 7 — Generalized OIDC provider -Expose `oidc.agentkeys.dev` as a conforming OIDC Identity Provider. Any cloud that accepts external OIDC federation (AWS, GCP, Azure, Snowflake, K8s) trusts AgentKeys once and gets per-user-wallet-tagged temp creds via standard federation. Unlocks bring-your-own-domain + per-user cloud-enforced isolation via `PrincipalTag`. Scratch notes: [`../../stage7-wip.md`](../../stage7-wip.md). Blocked on: public TLS for `oidc.agentkeys.dev`, TEE-held ES256 signer at `oidc/issuer/v1` (`heima-gaps §3`). +Expose `oidc.agentkeys.dev` as a conforming OIDC Identity Provider. Any cloud that accepts external OIDC federation (AWS, GCP, Azure, Snowflake, K8s) trusts AgentKeys once and gets per-user-wallet-tagged temp creds via standard federation. Unlocks bring-your-own-domain + per-user cloud-enforced isolation via `PrincipalTag`. 
Scratch notes: [`../../stage7-wip.md`](../../stage7-wip.md). + +**Phase 1 (shipped, PR #60):** broker server (`crates/agentkeys-broker-server/`) — bearer-validated `POST /v1/mint-aws-creds` against the operator's daemon AWS key, SQLite audit, `/healthz` + `/readyz` supervisor probes. + +**Phase 2 (shipping, this PR):** OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into the Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set, replacing the `stage6-demo-env.sh` sourcing pattern. + +**Still blocked:** public TLS hosting of the issuer URL so `aws iam create-open-id-connect-provider` accepts it; TEE-held ES256 signer at `oidc/issuer/v1` (`heima-gaps §3`). Stage 7 stops at the isolation primitive. **It does not commit a position on where credential ciphertext lives** — the previously-assumed `pallet-secrets-vault` (on-chain encrypted blob store) is superseded by Stage 8 below, per [`../threat-model-key-custody.md`](../threat-model-key-custody.md). diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index afb63cd..c8c16ea 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -1,13 +1,13 @@ # Stage 7 — WIP notes -> **WIP / scratchpad.** Phase 1 (broker server) ships in PR [#60](https://github.com/litentry/agentKeys/pull/60); the OIDC-federation half (phase 2) is preserved below for when its prereqs land. Not a finished guide. +> **WIP / scratchpad.** Phase 1 (broker server) shipped in PR [#60](https://github.com/litentry/agentKeys/pull/60). Phase 2 (OIDC issuer absorption + provisioner-scripts AWS-cred wiring) ships in this PR. The remaining federation prerequisites — public TLS hosting + IAM OIDC-provider registration — stay deferred and are documented below for when both prereqs land. ## What Stage 7 is Two halves that compose into the canonical "broker, not proxy" architecture: 1. 
**Phase 1 — Broker server (shipped).** A long-running HTTP service holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. Lets app developers run daemons against operator infrastructure without ever touching AWS keys themselves. -2. **Phase 2 — OIDC federation (deferred).** Expose the broker's TEE (or interim ES256 signer) as a conforming OIDC Identity Provider at a stable public URL. Any cloud that trusts the issuer can exchange our JWTs for scoped temp creds via standard federation. Replaces the static-IAM `sts:assume-role` path with `sts:assume-role-with-web-identity` + `sts:TagSession` for cloud-enforced per-user isolation. +2. **Phase 2 — OIDC issuer (in-progress).** The Rust broker now serves the conforming OIDC discovery + JWKS surface and a bearer-gated `POST /v1/mint-oidc-jwt` endpoint, replacing the standalone TS `services/oidc-stub/` package. Provisioner-scripts AWS-cred wiring is also live: `agentkeys provision <service>` (CLI) and the `agentkeys.provision` MCP tool fetch 1-hour temp creds from the broker and inject them into the scraper subprocess env when `--broker-url` is set. The remaining federation step (`sts:AssumeRoleWithWebIdentity` against a public-TLS-hosted issuer) stays deferred. Per [`docs/spec/plans/development-stages.md`](./spec/plans/development-stages.md), this is the "Generalized OIDC Provider" stage after Stage 6 (Federated Own Email). @@ -55,31 +55,68 @@ echo "$CREDS" | jq '{access_key_id, expiration, wallet}' Acceptance: `curl /healthz` → 200, `curl /readyz` → 200, `mint-aws-creds` returns creds, audit row appears in `~/.agentkeys/broker/audit.sqlite`. -**Out of phase 1 (deferred to phase 2):**
-- TS [`services/oidc-stub/`](../services/oidc-stub/) retirement (still ships its `/internal/sign` endpoint independently for the phase 2 test recipe below). -- Provisioner-scripts AWS-cred consumer rewiring — daemon flag is in place; the scraper-side fetch happens with phase 2. +- Rust-broker OIDC discovery / JWKS / `mint-oidc-jwt` (delivered — see §"Phase 2 — OIDC issuer (Rust broker)" below). +- TS [`services/oidc-stub/`](../services/oidc-stub/) retirement (deleted in this PR). +- Provisioner-scripts AWS-cred consumer rewiring (delivered — `agentkeys provision` and `agentkeys.provision` MCP tool now mint creds via the broker when `--broker-url` is set). + +**Still deferred:** + +- `aws iam create-open-id-connect-provider` against a public TLS endpoint + `sts:AssumeRoleWithWebIdentity` exchange — needs §"Phase 2 federation step" below. - Public hosting of the broker / KMS-sealed config source. +- TEE-derived signer (replaces the on-disk ES256 keypair). + +## Phase 2 — OIDC issuer (Rust broker) + +The Rust broker exposes three new endpoints. They are the same endpoints the TS oidc-stub used to serve; the schemas, JWT shape, JWKS shape, and bucket-policy enforcement are byte-for-byte compatible so federation recipes already written against the stub keep working unchanged. + +| Method | Path | Auth | Purpose | +|---|---|---|---| +| `GET` | `/.well-known/openid-configuration` | none | Discovery doc the AWS IAM `create-open-id-connect-provider` step reads. | +| `GET` | `/.well-known/jwks.json` | none | JWK Set with the broker's ES256 P-256 public key + `kid`. | +| `POST` | `/v1/mint-oidc-jwt` | bearer | Validates the bearer against the backend's `/session/validate`, then mints a short-lived ES256 JWT carrying `sub=agentkeys:agent:<wallet>`, `aud=sts.amazonaws.com`, `agentkeys_user_wallet=<wallet>`.
| + +### Configuration + +| Env var | Default | Notes | +|---|---|---| +| `BROKER_OIDC_ISSUER` | `https://oidc.agentkeys.dev` | The exact string emitted as `iss` and as the discovery `issuer`. AWS requires this to match the URL `create-open-id-connect-provider --url` was registered with. | +| `BROKER_OIDC_KEYPAIR_PATH` | `~/.agentkeys/broker/oidc-keypair.json` | On first start the broker generates a P-256 keypair and persists it mode 0600. Subsequent restarts reuse the same `kid` so the registered IAM OIDC provider stays valid. | +| `BROKER_OIDC_JWT_TTL_SECONDS` | `300` | Bounded `[60, 3600]`. STS only checks the JWT at the moment of exchange; short TTL limits replay risk if the broker leaks a JWT. | -## Phase 2 — OIDC federation (still blocked) +### Audit log + +Both `mint-aws-creds` and `mint-oidc-jwt` write to the same SQLite audit table at `~/.agentkeys/broker/audit.sqlite`. JWT mints land with `requested_role = "oidc_jwt"` and `sts_session_name = <wallet>` — operators see one ledger for both credential types. + +### Provisioner-scripts AWS-cred wiring + +Operators no longer have to source `scripts/stage6-demo-env.sh`. With `--broker-url` set on the daemon, MCP, or CLI: + +1. Before spawning the scraper subprocess, the provisioner calls `POST /v1/mint-aws-creds` with its session bearer. +2. The broker validates the bearer, runs `sts:AssumeRole` on the operator's daemon key, and returns 1-hour scoped creds. +3. The provisioner injects `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN` (plus `AWS_REGION`/`AWS_DEFAULT_REGION` if set) into the subprocess env. +4. The scraper's existing SES → S3 email path works unchanged. + +The legacy `stage6-demo-env.sh` flow still works when `--broker-url` is unset; the wiring is purely additive. + +## Phase 2 — federation step (still blocked) This is the half that turns the broker into a generalized OIDC Identity Provider so any AWS account (or GCP / Ali Cloud) can trust our JWTs without operator-side IAM-user keys.
-### Why phase 2 is not running yet +### Why the federation step is not running yet -- Needs `oidc.agentkeys.dev` (or equivalent) hosted publicly with a public-CA TLS cert so AWS IAM accepts `create-open-id-connect-provider`. -- The "right" signer is a TEE-derived ES256 key at path `oidc/issuer/v1`, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). -- [`services/oidc-stub/`](../services/oidc-stub/) ships an interim local-file ES256 signer; swap for TEE when §3 closes, or absorb the issuer endpoints into the Rust broker once public hosting is decided. +- Needs the broker (or a `/.well-known/*` reverse proxy) hosted publicly with a public-CA TLS cert so AWS IAM accepts `create-open-id-connect-provider`. +- The "right" signer is a TEE-derived ES256 key at path `oidc/issuer/v1`, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). The current on-disk keypair is the local-dev placeholder; swap to TEE when §3 closes by replacing `crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate` with a TEE oracle call. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical. -### Phase 2 test script — preserved for when both prereqs are in place +### Phase 2 federation test script — preserved for when both prereqs are in place #### Prereqs - Stage 6 AWS setup complete per [`docs/stage6-aws-setup.md`](./stage6-aws-setup.md). -- Phase 1 broker running locally (so the static-IAM `mint-aws-creds` path keeps working as a fallback during the migration). -- `services/oidc-stub/` hosted publicly. Options: CloudFront+S3 + Lambda for `/internal/sign`; ECS Fargate with ALB; or ngrok for dev (`ngrok http 34568`). -- `export OIDC_ISSUER=https://`; verify `curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq .issuer`. +- Phase 1 broker running publicly (so its `/.well-known/openid-configuration` is fetchable over public TLS). 
+- `export OIDC_ISSUER="$BROKER_OIDC_ISSUER"` — the exact `BROKER_OIDC_ISSUER` you started the broker with. +- Verify `curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq .issuer` returns that string. #### 1. Register the OIDC provider in IAM @@ -142,18 +179,14 @@ Replaces the `AllowDaemonRead` statement in [`stage6-aws-setup.md` §4](./stage6 The one test that proves phase 2 works: a JWT claiming wallet A can only touch wallet A's prefix — never B's. ```bash -# Mint a JWT via the stub -WALLET=0x1111111111111111111111111111111111111111 -JWT=$(curl -sf -X POST http://localhost:34568/internal/sign \ - -H 'content-type: application/json' \ - -d "{ - \"iss\": \"$OIDC_ISSUER\", - \"sub\": \"agentkeys:agent:$WALLET\", - \"aud\": \"sts.amazonaws.com\", - \"agentkeys_user_wallet\": \"$WALLET\", - \"exp\": $(($(date +%s) + 300)), - \"iat\": $(date +%s) - }" | jq -r .jwt) +# Mint a JWT via the broker. Bearer must come from `POST /session/create` +# against the backend; the wallet inside the JWT is whatever wallet that +# session is bound to (so this recipe presumes the operator drove the same +# session-create flow phase 1 already documented). +SESSION= +JWT=$(curl -sf -X POST "$BROKER_URL/v1/mint-oidc-jwt" \ + -H "Authorization: Bearer $SESSION" | jq -r .jwt) +WALLET=$(jq -R 'split(".") | .[1] | @base64d | fromjson | .agentkeys_user_wallet' <<<"$JWT" -r) # Exchange for temp creds CREDS=$(aws sts assume-role-with-web-identity \ @@ -173,15 +206,14 @@ aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "0xdeadbeef/" Test (b) is what Stage 6's static-IAM path can't prove. Cloud-enforced, zero app-side trust. The phase 1 broker's `assume-role` path **does** issue scoped creds, but isolation enforcement still relies on the operator's IAM trust policy alone — phase 2 moves enforcement into AWS itself. -#### 5. Swap the stub for a TEE-derived signer +#### 5. 
Swap the on-disk keypair for a TEE-derived signer -When [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes, replace [`services/oidc-stub/src/keys.ts`](../services/oidc-stub/src/keys.ts)'s local-file key loader with a call to the TEE's `derive("oidc/issuer/v1")`. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical. ~50 lines in `keys.ts`. Or, if the issuer endpoints have already been absorbed into the Rust broker by then, the swap happens inside `crates/agentkeys-broker-server/`. +When [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes, replace `crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate` with a call to the TEE's `derive("oidc/issuer/v1")`. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical — only the signing backend changes. ## TODO pickups -- **Phase 2 issuer absorption:** port discovery + JWKS + JWT signing from [`services/oidc-stub/`](../services/oidc-stub/) into the Rust broker as `POST /v1/mint-oidc-jwt` and the `/.well-known/*` surface. Retire the TS stub. -- **Public hosting:** CloudFront+S3 for static discovery + Lambda for sign, or terminate TLS at a reverse proxy in front of the Rust broker. -- **Provisioner-scripts integration:** wire the daemon's `--broker-url` flag into the scraper subprocesses' AWS-cred fetch (replaces the `stage6-demo-env.sh` sourcing pattern in `scripts/`). +- **Public hosting:** terminate TLS at a reverse proxy in front of the Rust broker, or absorb the issuer endpoints behind a CloudFront+ALB pair so `oidc.agentkeys.dev` (or chosen issuer URL) resolves to the broker's `/.well-known/*` surface. +- **TEE signer swap:** see §5 above. - **Promote phase 1 doc:** once the live three-terminal demo passes for a non-operator developer (with no AWS env vars on their machine), promote `docs/operator-runbook.md` from WIP to canonical. 
- **Add the equivalent GCP Workload Identity Federation + Ali Cloud RAM recipes** (Stage 7 target is generalized, not AWS-only). - **Hand off the credential-vault question to Stage 8** — the bucket prefix `s3://agentkeys-vault//` is the reuse point; ciphertext + per-epoch DEK rotation live in [`stage8-wip.md`](./stage8-wip.md), not here. diff --git a/services/oidc-stub/.gitignore b/services/oidc-stub/.gitignore deleted file mode 100644 index 0df331d..0000000 --- a/services/oidc-stub/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -node_modules/ -dist/ -keys/ -*.keypair.json -keypair.json diff --git a/services/oidc-stub/README.md b/services/oidc-stub/README.md deleted file mode 100644 index c763933..0000000 --- a/services/oidc-stub/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# agentkeys-oidc-stub - -> **THIS IS A TEE-INTERIM STUB.** -> Production Stage 6 replaces the signer with a TEE-derived `oidc/issuer/v1` key -> per `wiki/oidc-federation.md` §Architecture (heima-gaps §3). -> **Do NOT deploy this to production without an audit.** - -Minimal OIDC discovery + JWKS service for `oidc.agentkeys.dev`. Used in Stage 5b -scraper testing and Stage 6 AWS IAM federation setup — before the real TEE signer -is wired in. - -## Endpoints - -| Method | Path | Description | -|--------|------|-------------| -| `GET` | `/.well-known/openid-configuration` | OIDC discovery document (AWS IAM compatible) | -| `GET` | `/.well-known/jwks.json` | JWK Set with the ES256 public key | -| `POST` | `/internal/sign` | Dev-only: sign arbitrary claims, returns JWT | - -## Running locally - -```bash -cd services/oidc-stub -npm install -npm start -``` - -Server listens on `http://localhost:34568` by default. Override with env vars: - -```bash -OIDC_STUB_PORT=8080 OIDC_STUB_ISSUER=https://oidc.agentkeys.dev npm start -``` - -Test the endpoints: - -```bash -curl http://localhost:34568/.well-known/openid-configuration | jq . -curl http://localhost:34568/.well-known/jwks.json | jq . 
-curl -X POST http://localhost:34568/internal/sign \ - -H 'content-type: application/json' \ - -d '{"sub":"enclave:test:agent:0xabc","aud":"sts.amazonaws.com"}' | jq . -``` - -## Key persistence - -On first startup a fresh P-256 keypair is generated and cached at -`~/.agentkeys/oidc-stub/keypair.json` (mode 0600). Subsequent restarts reuse this -keypair so the JWKS stays stable for AWS/GCP OIDC provider registrations. - -The `keys/` directory in this repo and all `*.keypair.json` / `keypair.json` files -are `.gitignore`-d — never commit a private key. - -## TLS / HTTPS - -For local dev, plain HTTP on localhost is fine. In staging/production, run this -service behind a reverse proxy (nginx, Caddy, AWS ALB) that terminates TLS with a -public-CA certificate. AWS IAM requires the issuer URL to start with `https://`; -see `wiki/oidc-federation.md` §"Key requirements". - -## Environment variables - -| Variable | Default | Description | -|----------|---------|-------------| -| `OIDC_STUB_PORT` | `34568` | Port to listen on | -| `OIDC_STUB_ISSUER` | `https://oidc.agentkeys.dev` | Issuer URL emitted in discovery doc + JWTs | -| `AGENTKEYS_OIDC_KMS_KEY_ID` | unset | If set, stub errors immediately — KMS path not implemented (reserved for Stage 6 production path) | - -## Running tests - -```bash -npm test -``` - -## Security caveats - -1. **Private key on disk.** The dev keypair lives unencrypted in - `~/.agentkeys/oidc-stub/keypair.json`. Protect your home directory. -2. **`/internal/sign` is unauthenticated.** Any process that can reach the port - can mint arbitrary JWTs. Firewall this endpoint; do not expose it on 0.0.0.0 in - any shared environment. -3. **Not a TEE.** This stub generates the key in userspace. The production - architecture (Stage 6) derives the key inside the TEE enclave so it never - leaves hardware. This stub is solely for dev/test workflows. -4. **KMS stub.** If `AGENTKEYS_OIDC_KMS_KEY_ID` is set the server refuses to - start. 
The KMS path is documented with a TODO in `src/keys.ts` but not - implemented — it is superseded by the TEE path before it would ever be needed. - -## Stage 6 follow-up - -Replace `src/keys.ts` `loadKeypair()` with a call to the TEE `oidc/issuer/v1` -signing oracle. The three HTTP endpoints stay identical; only the signing -backend changes. See `wiki/oidc-federation.md` for the full architecture. diff --git a/services/oidc-stub/package-lock.json b/services/oidc-stub/package-lock.json deleted file mode 100644 index 2f4cec5..0000000 --- a/services/oidc-stub/package-lock.json +++ /dev/null @@ -1,2921 +0,0 @@ -{ - "name": "agentkeys-oidc-stub", - "version": "0.1.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "agentkeys-oidc-stub", - "version": "0.1.0", - "dependencies": { - "express": "^4.21.0", - "jose": "^5.9.0" - }, - "devDependencies": { - "@types/express": "^5.0.0", - "@types/node": "^20.0.0", - "tsx": "^4.19.0", - "typescript": "^5.5.0", - "vitest": "^2.1.0" - } - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", - "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", - "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.27.7", - "resolved": 
"https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", - "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", - "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", - "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", - "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", - "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": 
">=18" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", - "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", - "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", - "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", - "integrity": "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", - "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": 
"MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", - "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", - "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", - "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", - "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", - "integrity": 
"sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", - "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", - "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", - "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", - "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openharmony-arm64": { - "version": "0.27.7", - "resolved": 
"https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", - "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", - "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", - "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", - "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", - "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - 
"node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", - "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", - "dev": true, - "license": "MIT" - }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.2.tgz", - "integrity": "sha512-dnlp69efPPg6Uaw2dVqzWRfAWRnYVb1XJ8CyyhIbZeaq4CA5/mLeZ1IEt9QqQxmbdvagjLIm2ZL8BxXv5lH4Yw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.2.tgz", - "integrity": "sha512-OqZTwDRDchGRHHm/hwLOL7uVPB9aUvI0am/eQuWMNyFHf5PSEQmyEeYYheA0EPPKUO/l0uigCp+iaTjoLjVoHg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.2.tgz", - "integrity": "sha512-UwRE7CGpvSVEQS8gUMBe1uADWjNnVgP3Iusyda1nSRwNDCsRjnGc7w6El6WLQsXmZTbLZx9cecegumcitNfpmA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.2.tgz", - "integrity": "sha512-gjEtURKLCC5VXm1I+2i1u9OhxFsKAQJKTVB8WvDAHF+oZlq0GTVFOlTlO1q3AlCTE/DF32c16ESvfgqR7343/g==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-freebsd-arm64": { - "version": 
"4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.2.tgz", - "integrity": "sha512-Bcl6CYDeAgE70cqZaMojOi/eK63h5Me97ZqAQoh77VPjMysA/4ORQBRGo3rRy45x4MzVlU9uZxs8Uwy7ZaKnBw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.2.tgz", - "integrity": "sha512-LU+TPda3mAE2QB0/Hp5VyeKJivpC6+tlOXd1VMoXV/YFMvk/MNk5iXeBfB4MQGRWyOYVJ01625vjkr0Az98OJQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.2.tgz", - "integrity": "sha512-2QxQrM+KQ7DAW4o22j+XZ6RKdxjLD7BOWTP0Bv0tmjdyhXSsr2Ul1oJDQqh9Zf5qOwTuTc7Ek83mOFaKnodPjg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.2.tgz", - "integrity": "sha512-TbziEu2DVsTEOPif2mKWkMeDMLoYjx95oESa9fkQQK7r/Orta0gnkcDpzwufEcAO2BLBsD7mZkXGFqEdMRRwfw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.2.tgz", - "integrity": "sha512-bO/rVDiDUuM2YfuCUwZ1t1cP+/yqjqz+Xf2VtkdppefuOFS2OSeAfgafaHNkFn0t02hEyXngZkxtGqXcXwO8Rg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - 
"node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.2.tgz", - "integrity": "sha512-hr26p7e93Rl0Za+JwW7EAnwAvKkehh12BU1Llm9Ykiibg4uIr2rbpxG9WCf56GuvidlTG9KiiQT/TXT1yAWxTA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.2.tgz", - "integrity": "sha512-pOjB/uSIyDt+ow3k/RcLvUAOGpysT2phDn7TTUB3n75SlIgZzM6NKAqlErPhoFU+npgY3/n+2HYIQVbF70P9/A==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.2.tgz", - "integrity": "sha512-2/w+q8jszv9Ww1c+6uJT3OwqhdmGP2/4T17cu8WuwyUuuaCDDJ2ojdyYwZzCxx0GcsZBhzi3HmH+J5pZNXnd+Q==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.2.tgz", - "integrity": "sha512-11+aL5vKheYgczxtPVVRhdptAM2H7fcDR5Gw4/bTcteuZBlH4oP9f5s9zYO9aGZvoGeBpqXI/9TZZihZ609wKw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.2.tgz", - "integrity": "sha512-i16fokAGK46IVZuV8LIIwMdtqhin9hfYkCh8pf8iC3QU3LpwL+1FSFGej+O7l3E/AoknL6Dclh2oTdnRMpTzFQ==", - "cpu": [ - "ppc64" - ], - "dev": true, 
- "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.2.tgz", - "integrity": "sha512-49FkKS6RGQoriDSK/6E2GkAsAuU5kETFCh7pG4yD/ylj9rKhTmO3elsnmBvRD4PgJPds5W2PkhC82aVwmUcJ7A==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.2.tgz", - "integrity": "sha512-mjYNkHPfGpUR00DuM1ZZIgs64Hpf4bWcz9Z41+4Q+pgDx73UwWdAYyf6EG/lRFldmdHHzgrYyge5akFUW0D3mQ==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.2.tgz", - "integrity": "sha512-ALyvJz965BQk8E9Al/JDKKDLH2kfKFLTGMlgkAbbYtZuJt9LU8DW3ZoDMCtQpXAltZxwBHevXz5u+gf0yA0YoA==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.2.tgz", - "integrity": "sha512-UQjrkIdWrKI626Du8lCQ6MJp/6V1LAo2bOK9OTu4mSn8GGXIkPXk/Vsp4bLHCd9Z9Iz2OTEaokUE90VweJgIYQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.2.tgz", - "integrity": 
"sha512-bTsRGj6VlSdn/XD4CGyzMnzaBs9bsRxy79eTqTCBsA8TMIEky7qg48aPkvJvFe1HyzQ5oMZdg7AnVlWQSKLTnw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.2.tgz", - "integrity": "sha512-6d4Z3534xitaA1FcMWP7mQPq5zGwBmGbhphh2DwaA1aNIXUu3KTOfwrWpbwI4/Gr0uANo7NTtaykFyO2hPuFLg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ] - }, - "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.2.tgz", - "integrity": "sha512-NetAg5iO2uN7eB8zE5qrZ3CSil+7IJt4WDFLcC75Ymywq1VZVD6qJ6EvNLjZ3rEm6gB7XW5JdT60c6MN35Z85Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.2.tgz", - "integrity": "sha512-NCYhOotpgWZ5kdxCZsv6Iudx0wX8980Q/oW4pNFNihpBKsDbEA1zpkfxJGC0yugsUuyDZ7gL37dbzwhR0VI7pQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.2.tgz", - "integrity": "sha512-RXsaOqXxfoUBQoOgvmmijVxJnW2IGB0eoMO7F8FAjaj0UTywUO/luSqimWBJn04WNgUkeNhh7fs7pESXajWmkg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.60.2", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.2.tgz", - "integrity": "sha512-qdAzEULD+/hzObedtmV6iBpdL5TIbKVztGiK7O3/KYSf+HIzU257+MX1EXJcyIiDbMAqmbwaufcYPvyRryeZtA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.60.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.2.tgz", - "integrity": "sha512-Nd/SgG27WoA9e+/TdK74KnHz852TLa94ovOYySo/yMPuTmpckK/jIF2jSwS3g7ELSKXK13/cVdmg1Z/DaCWKxA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@types/body-parser": { - "version": "1.19.6", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", - "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.38", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", - "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/estree": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/express": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz", - "integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==", - "dev": true, - "license": 
"MIT", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^5.0.0", - "@types/serve-static": "^2" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.1.tgz", - "integrity": "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" - } - }, - "node_modules/@types/http-errors": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", - "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "20.19.39", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.39.tgz", - "integrity": "sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/@types/qs": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.15.0.tgz", - "integrity": "sha512-JawvT8iBVWpzTrz3EGw9BTQFg3BQNmwERdKE22vlTxawwtbyUSlMppvZYKLZzB5zgACXdXxbD3m1bXaMqP/9ow==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/range-parser": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", - "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/send": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", - "integrity": 
"sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-2.2.0.tgz", - "integrity": "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/http-errors": "*", - "@types/node": "*" - } - }, - "node_modules/@vitest/expect": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", - "integrity": "sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/mocker": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.9.tgz", - "integrity": "sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.12" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "node_modules/@vitest/pretty-format": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz", - "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/runner": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.9.tgz", - "integrity": "sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "2.1.9", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/snapshot": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.9.tgz", - "integrity": "sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "magic-string": "^0.30.12", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/spy": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.9.tgz", - "integrity": "sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^3.0.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/utils": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.9.tgz", - "integrity": "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "loupe": "^3.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": 
"sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "license": "MIT", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", - "license": "MIT" - }, - "node_modules/assertion-error": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/body-parser": { - "version": "1.20.4", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz", - "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==", - "license": "MIT", - "dependencies": { - "bytes": "~3.1.2", - "content-type": "~1.0.5", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "~1.2.0", - "http-errors": "~2.0.1", - "iconv-lite": "~0.4.24", - "on-finished": "~2.4.1", - "qs": "~6.14.0", - "raw-body": "~2.5.3", - "type-is": "~1.6.18", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": 
"sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/call-bound": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", - "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "get-intrinsic": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/chai": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", - "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/check-error": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", - "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, - "node_modules/content-disposition": { - "version": "0.5.4", - "resolved": 
"https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie": { - "version": "0.7.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", - "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", - "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==", - "license": "MIT" - }, - "node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": 
"sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", - "license": "MIT", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", - "license": "MIT" - }, - "node_modules/encodeurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", - "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": 
"sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", - "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", - "dev": true, - "license": "MIT" - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/esbuild": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", - "integrity": "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.27.7", - "@esbuild/android-arm": "0.27.7", - "@esbuild/android-arm64": "0.27.7", - "@esbuild/android-x64": "0.27.7", - "@esbuild/darwin-arm64": "0.27.7", - "@esbuild/darwin-x64": "0.27.7", - "@esbuild/freebsd-arm64": "0.27.7", - "@esbuild/freebsd-x64": "0.27.7", - "@esbuild/linux-arm": "0.27.7", - "@esbuild/linux-arm64": "0.27.7", - "@esbuild/linux-ia32": "0.27.7", - "@esbuild/linux-loong64": "0.27.7", - "@esbuild/linux-mips64el": "0.27.7", - "@esbuild/linux-ppc64": "0.27.7", - "@esbuild/linux-riscv64": "0.27.7", - "@esbuild/linux-s390x": "0.27.7", - "@esbuild/linux-x64": "0.27.7", - "@esbuild/netbsd-arm64": "0.27.7", - "@esbuild/netbsd-x64": "0.27.7", - "@esbuild/openbsd-arm64": "0.27.7", - "@esbuild/openbsd-x64": 
"0.27.7", - "@esbuild/openharmony-arm64": "0.27.7", - "@esbuild/sunos-x64": "0.27.7", - "@esbuild/win32-arm64": "0.27.7", - "@esbuild/win32-ia32": "0.27.7", - "@esbuild/win32-x64": "0.27.7" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", - "license": "MIT" - }, - "node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/expect-type": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", - "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/express": { - "version": "4.22.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz", - "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==", - "license": "MIT", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "~1.20.3", - "content-disposition": "~0.5.4", - "content-type": "~1.0.4", - "cookie": "~0.7.1", - "cookie-signature": "~1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~2.0.0", - "escape-html": 
"~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.3.1", - "fresh": "~0.5.2", - "http-errors": "~2.0.0", - "merge-descriptors": "1.0.3", - "methods": "~1.1.2", - "on-finished": "~2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "~0.1.12", - "proxy-addr": "~2.0.7", - "qs": "~6.14.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "~0.19.0", - "serve-static": "~1.16.2", - "setprototypeof": "1.2.0", - "statuses": "~2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/express" - } - }, - "node_modules/finalhandler": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz", - "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "on-finished": "~2.4.1", - "parseurl": "~1.3.3", - "statuses": "~2.0.2", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/get-tsconfig": { - "version": "4.14.0", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.14.0.tgz", - "integrity": "sha512-yTb+8DXzDREzgvYmh6s9vHsSVCHeC0G3PI5bEXNBHtmshPnO+S5O7qgLEOn0I5QvMy6kpZN8K1NKGyilLb93wA==", - "dev": true, - "license": "MIT", - 
"dependencies": { - "resolve-pkg-maps": "^1.0.0" - }, - "funding": { - "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" - } - }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.3.tgz", - "integrity": "sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/http-errors": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", - "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", - "license": "MIT", - "dependencies": { - "depd": "~2.0.0", - "inherits": "~2.0.4", - "setprototypeof": "~1.2.0", - "statuses": "~2.0.2", - "toidentifier": "~1.0.1" - }, - "engines": { - "node": ">= 0.8" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/express" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": 
"sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/jose": { - "version": "5.10.0", - "resolved": "https://registry.npmjs.org/jose/-/jose-5.10.0.tgz", - "integrity": "sha512-s+3Al/p9g32Iq+oqXxkW//7jk2Vig6FF1CFqzVXoTUXt2qz89YWbL+OwS17NFYEvxC35n0FKeGO2LGYSxeM2Gg==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/panva" - } - }, - "node_modules/loupe": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", - "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/magic-string": { - "version": "0.30.21", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", - "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.5" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", 
- "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", - "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "license": "MIT", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": 
">= 0.6" - } - }, - "node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/nanoid": { - "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/object-inspect": { - "version": "1.13.4", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", - "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "license": "MIT", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": 
"sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/path-to-regexp": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz", - "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==", - "license": "MIT" - }, - "node_modules/pathe": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", - "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathval": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", - "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "dev": true, - "license": "ISC" - }, - "node_modules/postcss": { - "version": "8.5.10", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.10.tgz", - "integrity": "sha512-pMMHxBOZKFU6HgAZ4eyGnwXF/EvPGGqUr0MnZ5+99485wwW41kW91A4LOGxSHhgugZmSChL5AlElNdwlNgcnLQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" 
- } - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "license": "MIT", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/qs": { - "version": "6.14.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", - "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.1.0" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.3", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz", - "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==", - "license": "MIT", - "dependencies": { - "bytes": "~3.1.2", - "http-errors": "~2.0.1", - "iconv-lite": "~0.4.24", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/resolve-pkg-maps": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", - "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" - } - }, - "node_modules/rollup": { - "version": "4.60.2", - "resolved": 
"https://registry.npmjs.org/rollup/-/rollup-4.60.2.tgz", - "integrity": "sha512-J9qZyW++QK/09NyN/zeO0dG/1GdGfyp9lV8ajHnRVLfo/uFsbji5mHnDgn/qYdUHyCkM2N+8VyspgZclfAh0eQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.8" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.60.2", - "@rollup/rollup-android-arm64": "4.60.2", - "@rollup/rollup-darwin-arm64": "4.60.2", - "@rollup/rollup-darwin-x64": "4.60.2", - "@rollup/rollup-freebsd-arm64": "4.60.2", - "@rollup/rollup-freebsd-x64": "4.60.2", - "@rollup/rollup-linux-arm-gnueabihf": "4.60.2", - "@rollup/rollup-linux-arm-musleabihf": "4.60.2", - "@rollup/rollup-linux-arm64-gnu": "4.60.2", - "@rollup/rollup-linux-arm64-musl": "4.60.2", - "@rollup/rollup-linux-loong64-gnu": "4.60.2", - "@rollup/rollup-linux-loong64-musl": "4.60.2", - "@rollup/rollup-linux-ppc64-gnu": "4.60.2", - "@rollup/rollup-linux-ppc64-musl": "4.60.2", - "@rollup/rollup-linux-riscv64-gnu": "4.60.2", - "@rollup/rollup-linux-riscv64-musl": "4.60.2", - "@rollup/rollup-linux-s390x-gnu": "4.60.2", - "@rollup/rollup-linux-x64-gnu": "4.60.2", - "@rollup/rollup-linux-x64-musl": "4.60.2", - "@rollup/rollup-openbsd-x64": "4.60.2", - "@rollup/rollup-openharmony-arm64": "4.60.2", - "@rollup/rollup-win32-arm64-msvc": "4.60.2", - "@rollup/rollup-win32-ia32-msvc": "4.60.2", - "@rollup/rollup-win32-x64-gnu": "4.60.2", - "@rollup/rollup-win32-x64-msvc": "4.60.2", - "fsevents": "~2.3.2" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": 
"consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "license": "MIT" - }, - "node_modules/send": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz", - "integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "~0.5.2", - "http-errors": "~2.0.1", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "~2.4.1", - "range-parser": "~1.2.1", - "statuses": "~2.0.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "license": "MIT" - }, - "node_modules/serve-static": { - "version": "1.16.3", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz", - "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==", - "license": "MIT", - "dependencies": { - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "~0.19.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", - "license": "ISC" - }, - "node_modules/side-channel": { - 
"version": "1.1.0", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", - "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3", - "side-channel-list": "^1.0.0", - "side-channel-map": "^1.0.1", - "side-channel-weakmap": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-list": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.1.tgz", - "integrity": "sha512-mjn/0bi/oUURjc5Xl7IaWi/OJJJumuoJFQJfDDyO46+hBWsfaVM65TBHq2eoZBhzl9EchxOijpkbRC8SVBQU0w==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.4" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-map": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", - "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", - "license": "MIT", - "dependencies": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-weakmap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", - "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", - "license": "MIT", - "dependencies": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3", - "side-channel-map": "^1.0.1" - }, - 
"engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true, - "license": "ISC" - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true, - "license": "MIT" - }, - "node_modules/statuses": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", - "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/std-env": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", - "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinybench": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyexec": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", - 
"integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinypool": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", - "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/tinyrainbow": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", - "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", - "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "license": "MIT", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/tsx": { - "version": "4.21.0", - "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", - "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "~0.27.0", - "get-tsconfig": "^4.7.5" - }, - "bin": { - "tsx": "dist/cli.mjs" - }, - "engines": { - "node": ">=18.0.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": 
"https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "license": "MIT", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typescript": { - "version": "5.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", - "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", - "license": "MIT", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vite": { - "version": "5.4.21", - "resolved": 
"https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", - "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.21.3", - "postcss": "^8.4.43", - "rollup": "^4.20.0" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^18.0.0 || >=20.0.0", - "less": "*", - "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", - "terser": "^5.4.0" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - } - } - }, - "node_modules/vite-node": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.9.tgz", - "integrity": "sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.3.7", - "es-module-lexer": "^1.5.4", - "pathe": "^1.1.2", - "vite": "^5.0.0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/vite-node/node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - 
"engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/vite-node/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, - "license": "MIT" - }, - "node_modules/vite/node_modules/@esbuild/aix-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", - "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", - "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", - "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", - "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", - "cpu": [ - 
"x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", - "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", - "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", - "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", - "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", - "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", - "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", - "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-loong64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", - "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-mips64el": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", - "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - 
"optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", - "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-riscv64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", - "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-s390x": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", - "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", - "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/netbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", - "integrity": 
"sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/openbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", - "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/sunos-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", - "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", - "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", - "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - 
"node_modules/vite/node_modules/@esbuild/win32-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", - "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/esbuild": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", - "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", - "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", - "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - "@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" - } - }, - "node_modules/vitest": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.9.tgz", - "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/expect": 
"2.1.9", - "@vitest/mocker": "2.1.9", - "@vitest/pretty-format": "^2.1.9", - "@vitest/runner": "2.1.9", - "@vitest/snapshot": "2.1.9", - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "debug": "^4.3.7", - "expect-type": "^1.1.0", - "magic-string": "^0.30.12", - "pathe": "^1.1.2", - "std-env": "^3.8.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.1", - "tinypool": "^1.0.1", - "tinyrainbow": "^1.2.0", - "vite": "^5.0.0", - "vite-node": "2.1.9", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.1.9", - "@vitest/ui": "2.1.9", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "node_modules/vitest/node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/vitest/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, - "license": "MIT" - }, - "node_modules/why-is-node-running": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", - "dev": true, - "license": "MIT", - "dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - } - } -} diff --git a/services/oidc-stub/package.json b/services/oidc-stub/package.json deleted file mode 100644 index 5aeb127..0000000 --- a/services/oidc-stub/package.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "name": "agentkeys-oidc-stub", - "version": "0.1.0", - "private": true, - "type": "module", - "scripts": { - "start": "tsx src/server.ts", - "test": "vitest run", - "test:watch": "vitest", - "typecheck": "tsc --noEmit" - }, - "dependencies": { - "express": "^4.21.0", - "jose": "^5.9.0" - }, - "devDependencies": { - "@types/express": "^5.0.0", - "@types/node": "^20.0.0", - "tsx": "^4.19.0", - "typescript": "^5.5.0", - "vitest": "^2.1.0" - } -} diff --git a/services/oidc-stub/src/keys.ts b/services/oidc-stub/src/keys.ts deleted file mode 100644 index 801d53f..0000000 --- a/services/oidc-stub/src/keys.ts +++ /dev/null @@ -1,108 +0,0 @@ -import { generateKeyPair, exportJWK, importJWK, type KeyLike, type JWK } from "jose"; -import { readFile, writeFile, mkdir, chmod } from "node:fs/promises"; -import { existsSync } from "node:fs"; -import { homedir } from "node:os"; -import { join } from "node:path"; - -const KEYPAIR_DIR = join(homedir(), ".agentkeys", "oidc-stub"); -const KEYPAIR_PATH = join(KEYPAIR_DIR, "keypair.json"); - -export interface LoadedKeypair { - privateKey: KeyLike; - publicKey: KeyLike; - publicJwk: JWK; - kid: string; -} - -interface PersistedKeypair { - kid: string; - privateJwk: JWK; - publicJwk: JWK; -} - -async function generateAndPersistKeypair(): Promise { - const { privateKey, publicKey } = await generateKeyPair("ES256", { - extractable: true, - }); - - const 
privateJwk = await exportJWK(privateKey); - const publicJwk = await exportJWK(publicKey); - const kid = `v1-${Date.now()}`; - privateJwk.kid = kid; - publicJwk.kid = kid; - publicJwk.alg = "ES256"; - publicJwk.use = "sig"; - - const persisted: PersistedKeypair = { kid, privateJwk, publicJwk }; - - await mkdir(KEYPAIR_DIR, { recursive: true }); - await writeFile(KEYPAIR_PATH, JSON.stringify(persisted, null, 2), { - mode: 0o600, - }); - await chmod(KEYPAIR_PATH, 0o600); - - console.log(`[oidc-stub] Generated new ES256 keypair (kid=${kid}), cached at ${KEYPAIR_PATH}`); - - return { - privateKey, - publicKey, - publicJwk, - kid, - }; -} - -async function loadPersistedKeypair(): Promise { - const raw = await readFile(KEYPAIR_PATH, "utf-8"); - const persisted: PersistedKeypair = JSON.parse(raw); - - const privateKey = (await importJWK(persisted.privateJwk, "ES256")) as KeyLike; - const publicKey = (await importJWK(persisted.publicJwk, "ES256")) as KeyLike; - - console.log(`[oidc-stub] Loaded persisted ES256 keypair (kid=${persisted.kid}) from ${KEYPAIR_PATH}`); - - return { - privateKey, - publicKey, - publicJwk: persisted.publicJwk, - kid: persisted.kid, - }; -} - -/** - * Load the ES256 keypair for this stub instance. - * - * Dev path: generates a fresh P-256 keypair at startup, caches it to - * ~/.agentkeys/oidc-stub/keypair.json (mode 0600) for persistence across restarts. - * - * Prod placeholder (TODO): when AGENTKEYS_OIDC_KMS_KEY_ID is set, delegate signing - * to AWS KMS using the AsymmetricSign API. See the TODO block below. This stub - * intentionally does NOT implement KMS signing — the TEE-derived key path - * (oidc/issuer/v1) described in wiki/oidc-federation.md §Architecture replaces - * both this dev keypair and the KMS placeholder in Stage 6 production. - * - * SECURITY NOTICE: See README.md — this is a TEE-INTERIM STUB only. 
- */ -export async function loadKeypair(): Promise { - const kmsKeyId = process.env["AGENTKEYS_OIDC_KMS_KEY_ID"]; - if (kmsKeyId) { - // TODO: Production Stage 6 — use AWS KMS AsymmetricSign with the key referenced - // by AGENTKEYS_OIDC_KMS_KEY_ID. The KMS key must be an ECC_NIST_P256 key with - // SIGN_VERIFY usage. Signing via KMS: call kms.sign({ KeyId, Message, MessageType, - // SigningAlgorithm: "ECDSA_SHA_256" }) and assemble the JWT manually. Public key - // can be fetched once via kms.getPublicKey({ KeyId }) and cached. - // - // IMPORTANT: Production Stage 6 replaces this entire stub with a TEE-derived - // oidc/issuer/v1 key per wiki/oidc-federation.md §Architecture. Do NOT treat - // KMS as the final architecture — it is only a stepping stone. - throw new Error( - `[oidc-stub] AGENTKEYS_OIDC_KMS_KEY_ID is set (${kmsKeyId}) but KMS signing is not ` + - `implemented in this stub. This path is reserved for the production Stage 6 TEE signer. ` + - `Unset the env var to use the local dev keypair.` - ); - } - - if (existsSync(KEYPAIR_PATH)) { - return loadPersistedKeypair(); - } - return generateAndPersistKeypair(); -} diff --git a/services/oidc-stub/src/server.ts b/services/oidc-stub/src/server.ts deleted file mode 100644 index 86b9bff..0000000 --- a/services/oidc-stub/src/server.ts +++ /dev/null @@ -1,93 +0,0 @@ -import express, { type Request, type Response } from "express"; -import { SignJWT } from "jose"; -import { loadKeypair, type LoadedKeypair } from "./keys.js"; - -const ISSUER = process.env["OIDC_STUB_ISSUER"] ?? "https://oidc.agentkeys.dev"; -const PORT = parseInt(process.env["OIDC_STUB_PORT"] ?? 
"34568", 10); - -export function buildApp(keypair: LoadedKeypair): express.Application { - const app = express(); - app.use(express.json()); - - app.get("/.well-known/openid-configuration", (_req: Request, res: Response) => { - res.json({ - issuer: ISSUER, - jwks_uri: `${ISSUER}/.well-known/jwks.json`, - response_types_supported: ["id_token"], - subject_types_supported: ["public"], - id_token_signing_alg_values_supported: ["ES256"], - scopes_supported: ["openid"], - token_endpoint_auth_methods_supported: ["none"], - claims_supported: [ - "iss", - "sub", - "aud", - "iat", - "exp", - "nbf", - "agentkeys_attested_at", - "agentkeys_enclave_tier", - "agentkeys_child_wallet", - "agentkeys_grant_id", - "agentkeys_operation", - "agentkeys_user_wallet", - ], - }); - }); - - app.get("/.well-known/jwks.json", (_req: Request, res: Response) => { - res.json({ - keys: [keypair.publicJwk], - }); - }); - - app.post("/internal/sign", async (req: Request, res: Response) => { - const claims = req.body as Record; - if (!claims || typeof claims !== "object") { - res.status(400).json({ error: "Request body must be a JSON object of claims" }); - return; - } - - const nowSec = Math.floor(Date.now() / 1000); - const expSec = typeof claims["exp"] === "number" ? claims["exp"] : nowSec + 300; - - const jwtBuilder = new SignJWT({ ...claims }) - .setProtectedHeader({ alg: "ES256", kid: keypair.kid }) - .setIssuedAt(nowSec) - .setExpirationTime(expSec) - .setIssuer((claims["iss"] as string | undefined) ?? 
ISSUER); - - if (claims["sub"] !== undefined) { - jwtBuilder.setSubject(claims["sub"] as string); - } - if (claims["aud"] !== undefined) { - jwtBuilder.setAudience(claims["aud"] as string | string[]); - } - if (claims["nbf"] !== undefined) { - jwtBuilder.setNotBefore(claims["nbf"] as number); - } - - const jwt = await jwtBuilder.sign(keypair.privateKey); - res.json({ jwt }); - }); - - return app; -} - -async function main(): Promise { - const keypair = await loadKeypair(); - const app = buildApp(keypair); - - app.listen(PORT, () => { - console.log(`[oidc-stub] Listening on http://localhost:${PORT}`); - console.log(`[oidc-stub] Discovery: http://localhost:${PORT}/.well-known/openid-configuration`); - console.log(`[oidc-stub] JWKS: http://localhost:${PORT}/.well-known/jwks.json`); - console.log(`[oidc-stub] Sign: POST http://localhost:${PORT}/internal/sign`); - console.log(`[oidc-stub] Issuer: ${ISSUER}`); - }); -} - -main().catch((err) => { - console.error("[oidc-stub] Fatal startup error:", err); - process.exit(1); -}); diff --git a/services/oidc-stub/tests/server.test.ts b/services/oidc-stub/tests/server.test.ts deleted file mode 100644 index e4888bd..0000000 --- a/services/oidc-stub/tests/server.test.ts +++ /dev/null @@ -1,170 +0,0 @@ -import { describe, it, expect, beforeAll } from "vitest"; -import { generateKeyPair, exportJWK, jwtVerify, createRemoteJWKSet, importJWK, type KeyLike, type JWK } from "jose"; -import express from "express"; -import type { Server } from "node:http"; -import { buildApp } from "../src/server.js"; -import type { LoadedKeypair } from "../src/keys.js"; - -let server: Server; -let baseUrl: string; -let keypair: LoadedKeypair; - -beforeAll(async () => { - const { privateKey, publicKey } = await generateKeyPair("ES256", { extractable: true }); - const publicJwk = await exportJWK(publicKey); - publicJwk.kid = "test-v1"; - publicJwk.alg = "ES256"; - publicJwk.use = "sig"; - - keypair = { - privateKey: privateKey as KeyLike, - publicKey: 
publicKey as KeyLike, - publicJwk, - kid: "test-v1", - }; - - const app = buildApp(keypair); - - await new Promise((resolve) => { - server = app.listen(0, () => { - const addr = server.address(); - const port = typeof addr === "object" && addr ? addr.port : 0; - baseUrl = `http://localhost:${port}`; - resolve(); - }); - }); - - return () => { - server.close(); - }; -}); - -describe("GET /.well-known/openid-configuration", () => { - it("returns 200 with valid JSON", async () => { - const response = await fetch(`${baseUrl}/.well-known/openid-configuration`); - expect(response.status).toBe(200); - expect(response.headers.get("content-type")).toMatch(/application\/json/); - }); - - it("contains all required OIDC fields", async () => { - const response = await fetch(`${baseUrl}/.well-known/openid-configuration`); - const doc = (await response.json()) as Record; - - expect(typeof doc["issuer"]).toBe("string"); - expect(typeof doc["jwks_uri"]).toBe("string"); - expect(doc["id_token_signing_alg_values_supported"]).toEqual(["ES256"]); - expect(doc["response_types_supported"]).toContain("id_token"); - expect(doc["subject_types_supported"]).toContain("public"); - }); - - it("jwks_uri points to the jwks endpoint", async () => { - const response = await fetch(`${baseUrl}/.well-known/openid-configuration`); - const doc = (await response.json()) as Record; - - expect(typeof doc["jwks_uri"]).toBe("string"); - expect((doc["jwks_uri"] as string).endsWith("/.well-known/jwks.json")).toBe(true); - }); -}); - -describe("GET /.well-known/jwks.json", () => { - it("returns 200 with valid JSON", async () => { - const response = await fetch(`${baseUrl}/.well-known/jwks.json`); - expect(response.status).toBe(200); - expect(response.headers.get("content-type")).toMatch(/application\/json/); - }); - - it("contains exactly one ES256 JWK", async () => { - const response = await fetch(`${baseUrl}/.well-known/jwks.json`); - const jwks = (await response.json()) as Record; - - 
expect(Array.isArray(jwks["keys"])).toBe(true); - const keys = jwks["keys"] as Record[]; - expect(keys).toHaveLength(1); - - const key = keys[0]; - expect(key["kty"]).toBe("EC"); - expect(key["crv"]).toBe("P-256"); - expect(key["alg"]).toBe("ES256"); - expect(key["use"]).toBe("sig"); - expect(typeof key["x"]).toBe("string"); - expect(typeof key["y"]).toBe("string"); - expect(key["d"]).toBeUndefined(); - }); -}); - -describe("POST /internal/sign", () => { - it("returns 400 when body is missing", async () => { - const response = await fetch(`${baseUrl}/internal/sign`, { - method: "POST", - headers: { "content-type": "application/json" }, - body: "null", - }); - expect(response.status).toBe(400); - }); - - it("produces a JWT that verifies against the JWKS endpoint", async () => { - const claims = { - sub: "enclave:mrenclave123:mrsigner456:agent:0xabc", - aud: "sts.amazonaws.com", - agentkeys_operation: "ses.send", - agentkeys_enclave_tier: "dev", - }; - - const signResponse = await fetch(`${baseUrl}/internal/sign`, { - method: "POST", - headers: { "content-type": "application/json" }, - body: JSON.stringify(claims), - }); - expect(signResponse.status).toBe(200); - - const body = (await signResponse.json()) as { jwt: string }; - expect(typeof body["jwt"]).toBe("string"); - - const jwksUri = new URL(`${baseUrl}/.well-known/jwks.json`); - const remoteJwks = createRemoteJWKSet(jwksUri); - - const { payload } = await jwtVerify(body.jwt, remoteJwks, { - audience: "sts.amazonaws.com", - }); - - expect(payload["sub"]).toBe(claims.sub); - expect(payload["agentkeys_operation"]).toBe("ses.send"); - expect(payload["agentkeys_enclave_tier"]).toBe("dev"); - expect(typeof payload["iat"]).toBe("number"); - expect(typeof payload["exp"]).toBe("number"); - }); - - it("JWT header contains alg=ES256 and kid matching the JWKS key", async () => { - const signResponse = await fetch(`${baseUrl}/internal/sign`, { - method: "POST", - headers: { "content-type": "application/json" }, - body: 
JSON.stringify({ sub: "test-sub", aud: "test-aud" }), - }); - const { jwt } = (await signResponse.json()) as { jwt: string }; - - const headerB64 = jwt.split(".")[0]; - const headerJson = Buffer.from(headerB64, "base64url").toString("utf-8"); - const header = JSON.parse(headerJson) as Record; - - expect(header["alg"]).toBe("ES256"); - expect(header["kid"]).toBe("test-v1"); - }); - - it("JWT verifies against the public key from JWKS by importing directly", async () => { - const signResponse = await fetch(`${baseUrl}/internal/sign`, { - method: "POST", - headers: { "content-type": "application/json" }, - body: JSON.stringify({ sub: "direct-verify-sub", aud: "sts.amazonaws.com" }), - }); - const { jwt } = (await signResponse.json()) as { jwt: string }; - - const jwksResponse = await fetch(`${baseUrl}/.well-known/jwks.json`); - const jwks = (await jwksResponse.json()) as { keys: JWK[] }; - const publicKey = await importJWK(jwks.keys[0]!, "ES256"); - - const { payload } = await jwtVerify(jwt, publicKey, { - audience: "sts.amazonaws.com", - }); - expect(payload["sub"]).toBe("direct-verify-sub"); - }); -}); diff --git a/services/oidc-stub/tsconfig.json b/services/oidc-stub/tsconfig.json deleted file mode 100644 index eba3e0a..0000000 --- a/services/oidc-stub/tsconfig.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "compilerOptions": { - "target": "ES2022", - "module": "NodeNext", - "moduleResolution": "NodeNext", - "strict": true, - "esModuleInterop": true, - "skipLibCheck": true, - "rootDir": ".", - "outDir": "dist", - "resolveJsonModule": true - }, - "include": ["src/**/*", "tests/**/*", "vitest.config.ts"] -} diff --git a/services/oidc-stub/vitest.config.ts b/services/oidc-stub/vitest.config.ts deleted file mode 100644 index 3fc8364..0000000 --- a/services/oidc-stub/vitest.config.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { defineConfig } from "vitest/config"; - -export default defineConfig({ - test: { - include: ["tests/**/*.test.ts", "src/**/*.test.ts"], - testTimeout: 15000, 
- }, -}); From 68c947d082f4b4b5c8442e4830bddb708ed2ae00 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Mon, 27 Apr 2026 21:26:09 +0800 Subject: [PATCH 02/15] docs(stage7): mark phase 2 complete; document audit-destination pluggability MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 2 was being framed as "still blocked" on a TEE-derived signer (heima-gaps §3) and on chain-anchored audit. That framing conflated two concerns: (a) the OIDC issuer architecture, which is complete and shipping, and (b) the audit-destination *backend*, which is a pluggable layer. The audit destination is interchangeable behind a single "append a tamper-evident record" interface: - Federated public chain (Heima, other Substrate parachains) - General-purpose public chain (Ethereum, Solana, Sui, Cosmos) - Permissioned / consortium chain (Hyperledger Fabric, Quorum, Aliyun BaaS) — the relevant choice for jurisdictions like China - Plain backend server (append-only SQLite, Postgres + immutable WAL, S3-with-Object-Lock) — the broker ships in this row today - Sealed log services (CloudTrail with KMS, GCP Cloud Audit Logs) - TEE-attested append-only log (Heima TEE + sealed storage, AWS Nitro + KMS, Azure Confidential Ledger) The Stage 7 broker's ~/.agentkeys/broker/audit.sqlite is a complete v0.1 audit destination on the simple-server side of this table — append-only by construction, sha256-hashed bearer tokens, audit-write-before-credentials invariant. Migrating to a chain or sealed log is a deployment-time backend swap, not a Stage-7 redesign. Changes: - docs/spec/architecture.md — new §11 "Audit destination is pluggable" with backend-class table; renumbers License → §12, Cross-references → §13 (no inbound external refs to those sections). 
- docs/stage7-wip.md — reframe Phase 2 as architecturally complete; add audit-destination-pluggability subsection; rename "federation step (still blocked)" to "Cloud federation deployment" (operational runbook, not architectural prerequisite); restructure TODO pickups as operational follow-ups. - docs/spec/plans/development-stages.md — add Stage 7 phase 2 row to Shipped; collapse Stage 7 Active section to operational follow-ups only. - harness/stage-7-done.sh — new completion gate covering broker tests, provisioner aws_creds tests, MCP broker-env tests, daemon + CLI rebuild, clippy on all touched crates, retired-stub directory check, broken-link guard against the deleted services/oidc-stub path. - crates/agentkeys-daemon/tests/pair_tests.rs — drop unused Session + WalletAddress imports (uncovered by adding daemon to the gate's clippy invocation; pre-existing lint, fixed for cleanliness). Stage 7 phase 1 + phase 2 now pass `bash harness/stage-7-done.sh` end to end. Co-Authored-By: Claude Opus 4.7 (1M context) --- crates/agentkeys-daemon/tests/pair_tests.rs | 2 +- docs/spec/architecture.md | 33 +++++++- docs/spec/plans/development-stages.md | 11 +-- docs/stage7-wip.md | 62 ++++++++++----- harness/stage-7-done.sh | 86 +++++++++++++++++++++ 5 files changed, 165 insertions(+), 29 deletions(-) create mode 100755 harness/stage-7-done.sh diff --git a/crates/agentkeys-daemon/tests/pair_tests.rs b/crates/agentkeys-daemon/tests/pair_tests.rs index 23939cb..4b8e2c0 100644 --- a/crates/agentkeys-daemon/tests/pair_tests.rs +++ b/crates/agentkeys-daemon/tests/pair_tests.rs @@ -9,7 +9,7 @@ use agentkeys_core::backend::CredentialBackend; use agentkeys_mock_server::test_client::InProcessBackend; use agentkeys_types::{ AgentIdentity, AuthRequestType, AuthToken, CanonicalBytes, EncryptedPairPayload, PairCode, - PublicKey, RecoveryMethod, Scope, ServiceName, Session, WalletAddress, + PublicKey, RecoveryMethod, Scope, ServiceName, }; // 
--------------------------------------------------------------------------- diff --git a/docs/spec/architecture.md b/docs/spec/architecture.md index 8d92399..b3d3d11 100644 --- a/docs/spec/architecture.md +++ b/docs/spec/architecture.md @@ -334,11 +334,40 @@ Lifecycle is ephemeral per chat session by design. Recovery flow handles re-atta **Rust: ~80% of lines, 100% of security-critical path.** TypeScript is strictly confined to: browser automation inside the agent sandbox, the npm daemon wrapper, the read-only indexer, and the Web GUI frontend. None of these touch the trust boundary. -## 11. License +## 11. Audit destination is pluggable + +Several earlier docs ([`threat-model-key-custody.md`](threat-model-key-custody.md), [`heima-gaps-vs-desired-architecture.md`](heima-gaps-vs-desired-architecture.md), `wiki/blockchain-tee-architecture.md`) describe audit + anchoring as Heima-pallet operations. That description is one *instance* of the architecture, not a constraint of it. The audit/anchoring layer is a pluggable backend behind a single interface: **append a tamper-evident record of who did what, when, against which agent**. Anything that satisfies that interface satisfies the architecture. + +Concretely, the same trait surface accommodates all of: + +| Backend class | Examples | Where it fits | +|---|---|---| +| **Federated public chain** | Heima parachain (default for v0.1+), other Substrate parachains | Production deployment with shared-validator trust assumptions. | +| **General-purpose public chain** | Ethereum, Solana, Sui, Aptos, Cosmos chains | Operators who already have on-chain identity / accounting on a different chain and want a single audit trail. | +| **Permissioned / consortium chain** | Hyperledger Fabric, R3 Corda, Quorum, ConsenSys Besu (IBFT), Aliyun BaaS | Enterprises in jurisdictions (China, EU regulated finance) where public-chain anchoring is non-starter for compliance reasons. 
| +| **Plain backend server** | Append-only SQLite (this is what the broker ships today), Postgres + immutable WAL, S3-with-Object-Lock, Honeycomb / Datadog audit log | Self-hosted operators who want zero chain dependency. The Stage 7 broker's `~/.agentkeys/broker/audit.sqlite` IS this category — it's a complete audit destination, not a placeholder. | +| **Sealed log services** | AWS CloudTrail with KMS-backed integrity validation, GCP Cloud Audit Logs | Cloud-native operators. | +| **TEE-attested append-only log** | Heima TEE + sealed storage (the original v0.1 target), AWS Nitro + KMS, Azure Confidential Ledger | Operators who want hardware-backed integrity independent of any chain. | + +What this means concretely: + +1. **Stage 7 phase 2 is not gated on Heima.** The broker's SQLite audit log is a fully-functional v0.1 audit destination on the simple-server side of this table. Migration to a chain (Heima or otherwise) is a deployment-time choice, not a Stage-7 prerequisite. +2. **`heima-gaps §3` is one path, not the path.** The TEE-derived ES256 signer is the *highest-assurance* signer for the OIDC issuer; the on-disk keypair shipped today plus the broker SQLite audit log is the *lowest-assurance-but-complete* path. v0.1 ships the lowest-assurance path; v0.2+ swaps to TEE without surface changes. +3. **Jurisdictional swaps are configuration, not redesign.** A China-deployment operator points the audit destination at a permissioned chain; the rest of the system is unchanged. + +What stays load-bearing across every backend: + +- The audit record schema (`requester_token_hash`, `requester_wallet`, `requested_role`, `outcome`, `sts_session_name`, timestamp). +- The promise that audit-write happens *before* credentials are returned to the caller (existing broker invariant — the credential mint with no audit row is the silent-failure mode operators defend against). +- The promise that audit failures are surfaced loudly, never swallowed. 
+ +This pluggability is what lets Stage 7 phase 2 ship as **complete** today, with the broker's local audit log, and lets Stage 8 (off-chain encrypted vault) decouple ciphertext storage from audit-anchoring without re-litigating either layer. + +## 12. License All AgentKeys repositories are dual-licensed under **MIT OR Apache-2.0**, at the user's choice. This applies to `agentkeys-core`, `agentkeys-cli`, `agentkeys-daemon`, `agentkeys-mock-server`, `agentkeys-provisioner`, `provisioner-scripts`, and the `@agentkeys/daemon` npm package. -## 12. Cross-references +## 13. Cross-references - **Session key storage details (kernel hardening):** see `1-step-analysis.md` SS3.3, SS3.3a - **Two-interface daemon design (MCP + CLI):** see `1-step-analysis.md` SS3.4 diff --git a/docs/spec/plans/development-stages.md b/docs/spec/plans/development-stages.md index 7f5f0f8..96df45b 100644 --- a/docs/spec/plans/development-stages.md +++ b/docs/spec/plans/development-stages.md @@ -21,6 +21,7 @@ If you're looking for setup / demo instructions, go to [`../../dev-setup.md`](.. 
| 5a | Provisioner (deterministic) | OpenRouter + OpenAI CDP scrapers; `signupEmailOtp` pattern library; HTML-strip + label-aware OTP extractor; mandatory post-provision verify; `agentkeys provision openrouter` | 59/59 unit + live provision | | 6 (interim, 2026-04) | Hosted email infra | SES domain verification on `bots.litentry.org`; `agentkeys-daemon` IAM user → `agentkeys-agent` assume-role; S3 inbound bucket; `ses-s3` email backend; end-to-end demo from signup → SES receipt → S3 poll → key extraction | `scripts/stage6-demo-run.sh` prints a valid `sk-or-v1-...` key | | 7 phase 1 (2026-04) | Broker server | `agentkeys-broker-server` axum service: bearer-gated `POST /v1/mint-aws-creds`, audit SQLite, supervisor probes; daemon `--broker-url` flag wired up | 22/22 unit + integration | +| 7 phase 2 (2026-04) | OIDC issuer + AWS-cred wiring | OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set; audit destination is the broker's local SQLite per the pluggable-audit-backend framing in [`architecture.md` §11](../architecture.md) | broker integration + clippy clean; cloud federation deployment runbook in [`stage7-wip.md`](../../stage7-wip.md) | ### Non-stage work shipped alongside @@ -62,13 +63,13 @@ Today's Stage 6 still lists "interim" AWS-managed DKIM + static IAM user. To cal ### Stage 7 — Generalized OIDC provider -Expose `oidc.agentkeys.dev` as a conforming OIDC Identity Provider. Any cloud that accepts external OIDC federation (AWS, GCP, Azure, Snowflake, K8s) trusts AgentKeys once and gets per-user-wallet-tagged temp creds via standard federation. Unlocks bring-your-own-domain + per-user cloud-enforced isolation via `PrincipalTag`. Scratch notes: [`../../stage7-wip.md`](../../stage7-wip.md). +Both phases shipped — see Shipped table above. Scratch notes: [`../../stage7-wip.md`](../../stage7-wip.md). 
-**Phase 1 (shipped, PR #60):** broker server (`crates/agentkeys-broker-server/`) — bearer-validated `POST /v1/mint-aws-creds` against the operator's daemon AWS key, SQLite audit, `/healthz` + `/readyz` supervisor probes. +**Operational follow-ups (not architectural blockers):** -**Phase 2 (shipping, this PR):** OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into the Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set, replacing the `stage6-demo-env.sh` sourcing pattern. - -**Still blocked:** public TLS hosting of the issuer URL so `aws iam create-open-id-connect-provider` accepts it; TEE-held ES256 signer at `oidc/issuer/v1` (`heima-gaps §3`). +- Public TLS hosting of `$BROKER_OIDC_ISSUER` so `aws iam create-open-id-connect-provider` can fetch the JWKS. Per-operator deployment task; recipe in [`stage7-wip.md` §"Cloud federation deployment"](../../stage7-wip.md). +- Higher-assurance signer (TEE-derived ES256 at `oidc/issuer/v1`, blocked on `heima-gaps §3`). The on-disk keypair shipped today is a complete v0.1 signer — TEE is hardening, not a Stage-7 prerequisite. +- Audit-destination swap (chain anchoring or sealed log service). The broker's local SQLite is one valid choice in the [pluggable audit-backend layer](../architecture.md#11-audit-destination-is-pluggable) — operators can swap per their threat model and jurisdiction. Stage 7 stops at the isolation primitive. **It does not commit a position on where credential ciphertext lives** — the previously-assumed `pallet-secrets-vault` (on-chain encrypted blob store) is superseded by Stage 8 below, per [`../threat-model-key-custody.md`](../threat-model-key-custody.md). 
diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index c8c16ea..7989523 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -1,13 +1,13 @@ # Stage 7 — WIP notes -> **WIP / scratchpad.** Phase 1 (broker server) shipped in PR [#60](https://github.com/litentry/agentKeys/pull/60). Phase 2 (OIDC issuer absorption + provisioner-scripts AWS-cred wiring) ships in this PR. The remaining federation prerequisites — public TLS hosting + IAM OIDC-provider registration — stay deferred and are documented below for when both prereqs land. +> **Status (2026-04-27).** Phase 1 (broker server) shipped in PR [#60](https://github.com/litentry/agentKeys/pull/60). Phase 2 (OIDC issuer + provisioner-scripts AWS-cred wiring) ships in PR [#61](https://github.com/litentry/agentKeys/pull/61) and is **architecturally complete**: the Rust broker owns the OIDC surface end-to-end, the audit destination is the broker's local SQLite (one valid choice in the [pluggable audit-destination layer](spec/architecture.md#11-audit-destination-is-pluggable)), and the provisioner subprocess is wired through the broker for AWS-cred minting. What's left is operational deployment for cloud-side OIDC federation (public TLS, `aws iam create-open-id-connect-provider`) — out of scope for the architecture but relevant to the cloud-deployment runbook. ## What Stage 7 is Two halves that compose into the canonical "broker, not proxy" architecture: -1. **Phase 1 — Broker server (shipped).** A long-running HTTP service holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. Lets app developers run daemons against operator infrastructure without ever touching AWS keys themselves. -2. **Phase 2 — OIDC issuer (in-progress).** The Rust broker now serves the conforming OIDC discovery + JWKS surface and a bearer-gated `POST /v1/mint-oidc-jwt` endpoint, replacing the standalone TS `services/oidc-stub/` package. 
Provisioner-scripts AWS-cred wiring is also live: `agentkeys provision ` (CLI) and the `agentkeys.provision` MCP tool fetch 1-hour temp creds from the broker and inject them into the scraper subprocess env when `--broker-url` is set. The remaining federation step (`sts:AssumeRoleWithWebIdentity` against a public-TLS-hosted issuer) stays deferred. +1. **Phase 1 — Broker server (shipped, PR #60).** A long-running HTTP service holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. Lets app developers run daemons against operator infrastructure without ever touching AWS keys themselves. +2. **Phase 2 — OIDC issuer + AWS-cred wiring (shipped, PR #61).** The Rust broker now serves the conforming OIDC discovery + JWKS surface and a bearer-gated `POST /v1/mint-oidc-jwt` endpoint, replacing the standalone TS `services/oidc-stub/` package. Provisioner-scripts AWS-cred wiring is live: `agentkeys provision ` (CLI) and the `agentkeys.provision` MCP tool fetch 1-hour temp creds from the broker and inject them into the scraper subprocess env when `--broker-url` is set. The audit destination is the broker's append-only SQLite at `~/.agentkeys/broker/audit.sqlite` — see [§"Audit destination is pluggable" below](#audit-destination-is-pluggable) for why that's a complete v0.1 choice, not a placeholder. Per [`docs/spec/plans/development-stages.md`](./spec/plans/development-stages.md), this is the "Generalized OIDC Provider" stage after Stage 6 (Federated Own Email). @@ -58,14 +58,14 @@ Acceptance: `curl /healthz` → 200, `curl /readyz` → 200, `mint-aws-creds` re **Out of phase 1 (now landing in phase 2):** - Rust-broker OIDC discovery / JWKS / `mint-oidc-jwt` (delivered — see §"Phase 2 — OIDC issuer (Rust broker)" below). -- TS [`services/oidc-stub/`](../services/oidc-stub/) retirement (deleted in this PR). 
+- TS `services/oidc-stub/` retirement (directory deleted in this PR; OIDC surface now lives entirely in the Rust broker). - Provisioner-scripts AWS-cred consumer rewiring (delivered — `agentkeys provision` and `agentkeys.provision` MCP tool now mint creds via the broker when `--broker-url` is set). -**Still deferred:** +**Operational follow-ups (not architectural blockers):** -- `aws iam create-open-id-connect-provider` against a public TLS endpoint + `sts:AssumeRoleWithWebIdentity` exchange — needs §"Phase 2 federation step" below. -- Public hosting of the broker / KMS-sealed config source. -- TEE-derived signer (replaces the on-disk ES256 keypair). +- `aws iam create-open-id-connect-provider` against a public TLS endpoint + `sts:AssumeRoleWithWebIdentity` exchange. The recipe is in §["Cloud federation deployment"](#cloud-federation-deployment) below. This is a deployment task, not a Stage-7 design task — the broker already serves the conforming OIDC surface; what's missing is just routing public TLS traffic to it. +- TEE-derived signer (a *higher-assurance* swap of the on-disk ES256 keypair). The on-disk keypair shipped today is a complete v0.1 signer per the [pluggable audit destination](spec/architecture.md#11-audit-destination-is-pluggable) framing; TEE is the v0.2+ hardening path, not a Stage-7 prerequisite. +- Chain-anchored audit (Heima or otherwise). Phase 2 ships with the broker's local SQLite as the audit destination — also a complete v0.1 choice. Operators who want chain anchoring can swap the audit backend without touching the OIDC issuer code. ## Phase 2 — OIDC issuer (Rust broker) @@ -89,6 +89,23 @@ The Rust broker exposes three new endpoints. They are the same endpoints the TS Both `mint-aws-creds` and `mint-oidc-jwt` write to the same SQLite audit table at `~/.agentkeys/broker/audit.sqlite`. JWT mints land with `requested_role = "oidc_jwt"` and `sts_session_name = ` — operators see one ledger for both credential types. 
+ +#### Why local SQLite is a complete v0.1 audit destination + +Earlier docs ([`threat-model-key-custody.md`](spec/threat-model-key-custody.md), `wiki/blockchain-tee-architecture.md`) describe audit + anchoring as Heima-pallet operations. That description is **one instance** of the architecture, not a constraint of it. The audit/anchoring layer is a pluggable backend behind a single interface: append a tamper-evident record of *who did what, when, against which agent*. + +Per [`architecture.md` §11](spec/architecture.md#11-audit-destination-is-pluggable), the trait surface accommodates: + +- **Federated public chain** — Heima parachain, other Substrate parachains. +- **General-purpose public chain** — Ethereum, Solana, Sui, Cosmos. +- **Permissioned / consortium chain** — Hyperledger Fabric, Quorum, Aliyun BaaS (relevant for jurisdictions like China where public-chain anchoring is non-starter). +- **Plain backend server** — append-only SQLite (what the broker ships today), Postgres + immutable WAL, S3-with-Object-Lock, sealed log services. +- **TEE-attested append-only log** — Heima TEE + sealed storage, AWS Nitro + KMS, Azure Confidential Ledger. + +The Stage 7 broker ships in the "plain backend server" row. SQLite at `~/.agentkeys/broker/audit.sqlite` is append-only by virtue of the application code (only `INSERT`s, never `UPDATE`/`DELETE`), keys are sha256-hashed before write, and the audit-write happens *before* credentials leave the broker — that's the property operators need. Migrating to a chain-anchored destination is a backend swap, not a Stage-7 redesign. + +This is what makes Phase 2 architecturally complete today: the OIDC issuer + audit pair is one self-contained unit; the audit's storage backend is a deployment-time choice. + ### Provisioner-scripts AWS-cred wiring Operators no longer have to source `scripts/stage6-demo-env.sh`. 
With `--broker-url` set on the daemon, MCP, or CLI: @@ -100,20 +117,20 @@ Operators no longer have to source `scripts/stage6-demo-env.sh`. With `--broker- The legacy `stage6-demo-env.sh` flow still works when `--broker-url` is unset; the wiring is purely additive. -## Phase 2 — federation step (still blocked) +## Cloud federation deployment -This is the half that turns the broker into a generalized OIDC Identity Provider so any AWS account (or GCP / Ali Cloud) can trust our JWTs without operator-side IAM-user keys. +This section is the **operational runbook** for taking the (already-shipped) Phase 2 broker and making AWS (or GCP / Ali Cloud) trust its JWTs without operator-side IAM-user keys. It's not a Stage-7 architecture step — Phase 2 ships complete with the local SQLite audit destination above. Each cloud provider's IAM service has its own registration step, and that step needs the broker reachable over public TLS. That's what this section walks through. -### Why the federation step is not running yet +### What's actually needed -- Needs the broker (or a `/.well-known/*` reverse proxy) hosted publicly with a public-CA TLS cert so AWS IAM accepts `create-open-id-connect-provider`. -- The "right" signer is a TEE-derived ES256 key at path `oidc/issuer/v1`, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). The current on-disk keypair is the local-dev placeholder; swap to TEE when §3 closes by replacing `crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate` with a TEE oracle call. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical. +- The broker (or a `/.well-known/*` reverse proxy in front of it) reachable at `$BROKER_OIDC_ISSUER` over public TLS, so AWS IAM can fetch the JWKS during `create-open-id-connect-provider`. Operator picks: nginx + Let's Encrypt, AWS ALB + ACM, Caddy with auto-TLS, CloudFront + S3 for static `/.well-known/*` + Lambda for sign, etc. 
+- Stage 6 AWS setup complete per [`docs/stage6-aws-setup.md`](./stage6-aws-setup.md) (the daemon-IAM-user trust path established there is the fallback while the federated path is being rolled out). +- A higher-assurance signer if the operator's threat model requires it (TEE-derived ES256 at `oidc/issuer/v1`, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md)). The on-disk keypair shipped today is a complete v0.1 signer; TEE is a hardening swap, not a federation prerequisite. When ready, swap by replacing [`crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate`](../crates/agentkeys-broker-server/src/oidc.rs) with a TEE oracle call. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical. -### Phase 2 federation test script — preserved for when both prereqs are in place +### AWS recipe #### Prereqs -- Stage 6 AWS setup complete per [`docs/stage6-aws-setup.md`](./stage6-aws-setup.md). - Phase 1 broker running publicly (so its `/.well-known/openid-configuration` is fetchable over public TLS). - `export OIDC_ISSUER="$BROKER_OIDC_ISSUER"` — the exact `BROKER_OIDC_ISSUER` you started the broker with. - Verify `curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq .issuer` returns that string. @@ -210,10 +227,13 @@ Test (b) is what Stage 6's static-IAM path can't prove. Cloud-enforced, zero app When [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes, replace `crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate` with a call to the TEE's `derive("oidc/issuer/v1")`. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical — only the signing backend changes. -## TODO pickups +## Operational follow-ups (post Phase 2) + +Phase 2 architecture is complete. 
The remaining items are deployment and hardening tasks, scoped per-operator: -- **Public hosting:** terminate TLS at a reverse proxy in front of the Rust broker, or absorb the issuer endpoints behind a CloudFront+ALB pair so `oidc.agentkeys.dev` (or chosen issuer URL) resolves to the broker's `/.well-known/*` surface. -- **TEE signer swap:** see §5 above. -- **Promote phase 1 doc:** once the live three-terminal demo passes for a non-operator developer (with no AWS env vars on their machine), promote `docs/operator-runbook.md` from WIP to canonical. -- **Add the equivalent GCP Workload Identity Federation + Ali Cloud RAM recipes** (Stage 7 target is generalized, not AWS-only). -- **Hand off the credential-vault question to Stage 8** — the bucket prefix `s3://agentkeys-vault//` is the reuse point; ciphertext + per-epoch DEK rotation live in [`stage8-wip.md`](./stage8-wip.md), not here. +- **Public TLS hosting** — terminate TLS at a reverse proxy in front of the Rust broker (nginx + Let's Encrypt, AWS ALB + ACM, Caddy, etc.), or absorb the issuer endpoints behind a CloudFront+ALB pair so `oidc.agentkeys.dev` (or chosen issuer URL) resolves to the broker's `/.well-known/*` surface. Required for AWS `create-open-id-connect-provider` registration. +- **TEE signer swap** — replace the on-disk ES256 keypair with a TEE-derived `oidc/issuer/v1` key when [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes. Hardening, not a Stage-7 prerequisite — see §"Cloud federation deployment" above. +- **Audit-destination swap** — point the audit log at a chain (Heima, Ethereum, Solana, permissioned) or a sealed log service per the [pluggable audit destination](spec/architecture.md#11-audit-destination-is-pluggable) framing. Configuration choice, not a Stage-7 redesign. +- **GCP / Ali Cloud federation recipes** — equivalent of the AWS §"Cloud federation deployment" recipe for GCP Workload Identity Federation and Ali Cloud RAM. 
The OIDC discovery + JWT shape work cross-cloud unchanged; only the IAM-side registration step differs. +- **Promote phase 1 + 2 doc** — once the live three-terminal demo passes for a non-operator developer (with no AWS env vars on their machine), promote [`docs/operator-runbook.md`](./operator-runbook.md) from WIP to canonical. +- **Stage 8 hand-off** — the bucket prefix `s3://agentkeys-vault//` is the reuse point with Stage 8; ciphertext + per-epoch DEK rotation live in [`stage8-wip.md`](./stage8-wip.md), not here. diff --git a/harness/stage-7-done.sh b/harness/stage-7-done.sh new file mode 100755 index 0000000..e4df1fb --- /dev/null +++ b/harness/stage-7-done.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Stage 7 (phase 1 + phase 2) completion gate. +# +# Phase 1 (PR #60): broker server vertical slice — bearer-gated +# POST /v1/mint-aws-creds, SQLite audit, /healthz + /readyz, daemon +# --broker-url flag. +# +# Phase 2 (PR #61): OIDC issuer absorption (discovery + JWKS + +# POST /v1/mint-oidc-jwt) into the Rust broker; provisioner-scripts +# AWS-cred wiring through CLI cmd_provision and MCP provision_tool. +# +# What this covers (offline, hermetic): +# 1. Broker crate compiles + lib + integration tests pass +# 2. Provisioner aws_creds module unit tests pass +# 3. MCP broker-env injection tests pass +# 4. Daemon + CLI rebuild cleanly with the broker_url plumbing +# 5. Clippy on every Stage 7-touched crate, warnings as errors +# 6. The TS oidc-stub directory is gone (issuer surface owned by Rust) +# 7. No raw `services/oidc-stub` references survive in checked-in docs +# +# What this does NOT cover (by design): +# - Live STS / SES / S3 — the broker's StubStsClient covers the audit +# + dispatch logic without an AWS round-trip. +# - Public TLS deployment + `aws iam create-open-id-connect-provider`. +# That's the operational runbook in docs/stage7-wip.md, not a +# Stage-7 architectural prerequisite. +# +# Exit 0 = Stage 7 phases 1 + 2 are intact. 
Non-zero = stage broken. +set -euo pipefail +cd "$(git rev-parse --show-toplevel)" + +GREEN='\033[0;32m' +RED='\033[0;31m' +BOLD='\033[1m' +NC='\033[0m' +banner() { printf "\n${BOLD}=== %s ===${NC}\n" "$1"; } +ok() { printf "${GREEN}✓${NC} %s\n" "$1"; } +fail() { printf "${RED}✗${NC} %s\n" "$1" >&2; exit 1; } + +banner "1/7 Broker — lib + integration tests" +cargo test -p agentkeys-broker-server +ok "broker tests passed" + +banner "2/7 Provisioner — aws_creds module tests" +cargo test -p agentkeys-provisioner aws_creds +ok "provisioner aws_creds tests passed" + +banner "3/7 MCP — broker-env injection + tools/list" +cargo test -p agentkeys-mcp +ok "mcp tests passed" + +banner "4/7 Daemon + CLI rebuild with broker_url plumbing" +cargo build -p agentkeys-daemon -p agentkeys-cli +ok "daemon + cli build clean" + +banner "5/7 Clippy (Stage 7 crates, --no-deps -D warnings)" +cargo clippy --no-deps \ + -p agentkeys-broker-server \ + -p agentkeys-provisioner \ + -p agentkeys-mcp \ + -p agentkeys-cli \ + -p agentkeys-daemon \ + --all-targets -- -D warnings +ok "clippy clean" + +banner "6/7 TS oidc-stub directory retired" +if [ -e services/oidc-stub ]; then + fail "services/oidc-stub still on disk — Phase 2 expects it deleted (issuer absorbed into Rust broker)" +fi +ok "services/oidc-stub gone" + +banner "7/7 No broken markdown links to the retired services/oidc-stub" +# Narrative mentions ("services/oidc-stub retired") are fine — they describe +# what was deleted. What we're guarding against here is broken markdown +# links: `](../services/oidc-stub/...)` would 404 in the rendered docs. +# docs/archived/* is the historical scratchpad and is allowed to keep +# anything for context. 
+LEAKS=$(grep -rln "](.*/services/oidc-stub" docs wiki 2>/dev/null \ + | grep -v "^docs/archived/" || true) +if [ -n "$LEAKS" ]; then + printf "%s\n" "$LEAKS" >&2 + fail "broken markdown links to the deleted services/oidc-stub directory in non-archived docs (above)" +fi +ok "no broken links" + +printf "\n${GREEN}${BOLD}STAGE 7 (phase 1 + phase 2) PASSED${NC}\n" From de1d534e84d360e8846617a9c3f300545ab4017c Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Mon, 27 Apr 2026 21:33:21 +0800 Subject: [PATCH 03/15] docs(stage7): add operator end-to-end test + remote deployment guide MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two new sections in docs/stage7-wip.md, slotted between the Phase 2 issuer description and the Cloud federation runbook: 1. **Operator end-to-end test (Phase 2)** — a four-terminal walk-through that exercises every Phase 2 surface offline (broker `--skip-startup-check` so no AWS round-trip is required): - mock backend on :8090 - broker on :8091 with BROKER_OIDC_ISSUER set - healthz / discovery / JWKS smoke checks - session-create → mint-oidc-jwt round-trip with claim decode - mint-aws-creds round-trip (live-AWS path) - CLI `agentkeys provision` with AGENTKEYS_BROKER_URL set - audit log inspection via sqlite3 - acceptance criteria - negative checks for the failure modes operators triage 2. **Remote deployment** — production deployment guide for putting both the backend and the broker on real infrastructure: - Topology diagram (developer laptop → reverse proxy → broker → backend) - Caveats on the in-memory mock-server (state loss on restart, no HA, no listener-side TLS); two pragmatic v0.1 options laid out. - Step 1: provision a host (AWS / DO / Hetzner / Linode examples) with DNS, public-CA TLS cert, firewall rules. - Step 2: build + install binaries to /usr/local/bin. - Step 3: persist operator config in /etc/agentkeys/broker.env mode 0600. 
- Step 4: systemd units for both backend and broker, with hardening directives (NoNewPrivileges, ProtectSystem, ReadWritePaths, dedicated agentkeys user, broker bound to 127.0.0.1 only). - Step 5: nginx + Let's Encrypt config terminating TLS on broker.example.dev → 127.0.0.1:8091. - Step 6: client-side smoke test from a laptop with no AWS env vars. - Step 7: cross-link to the existing Cloud federation deployment recipe. - Operations: rotate, observe, harden — pointers to operator-runbook.md §5 and §6, and a hard rule against exposing :8091 directly. Cross-links: - Architecture pluggable-audit framing referenced from the audit log subsection. - BROKER_OIDC_ISSUER caveat (must equal proxy server_name) called out twice — once in step 3, once in step 6. - Direct pointer to warn_if_non_loopback_without_tls in the broker's main.rs as the source of truth for why broker bind=0.0.0.0 is unsafe. Stage 7 done-gate (`bash harness/stage-7-done.sh`) still passes. Co-Authored-By: Claude Opus 4.7 (1M context) --- docs/stage7-wip.md | 335 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 335 insertions(+) diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index 7989523..1e3a913 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -117,6 +117,341 @@ Operators no longer have to source `scripts/stage6-demo-env.sh`. With `--broker- The legacy `stage6-demo-env.sh` flow still works when `--broker-url` is unset; the wiring is purely additive. +## Operator end-to-end test (Phase 2) + +A four-terminal walk-through that exercises everything Phase 2 ships, with no AWS round-trip required (the broker's `--skip-startup-check` lets you stand it up offline). Run it once after a fresh build to confirm your operator setup is wired correctly. Times below are wall-clock expectations on a recent laptop. + +### Prereqs + +- A release build: `cargo build --release -p agentkeys-mock-server -p agentkeys-broker-server -p agentkeys-cli` (≈ 90 s cold). +- `jq` and `curl` on `$PATH`. 
+- For the AWS-side check (step 4b), `DAEMON_ACCESS_KEY_ID` + `DAEMON_SECRET_ACCESS_KEY` + `ACCOUNT_ID` from your operator setup; for offline-only, skip step 4b and use `--skip-startup-check`.
+
+### Walk-through
+
+```bash
+# Terminal A — backend (mock-server, in-memory SQLite)
+./target/release/agentkeys-mock-server --port 8090
+# expect: "Mock server running on port 8090"
+# CAVEAT: this server keeps state in-memory — it works for the E2E test
+# but is NOT a long-running production backend. See the "Remote
+# deployment" section below for the production backend story.
+
+# Terminal B — broker. For the offline path (no live AWS round-trip),
+# pass --skip-startup-check; for the live path source your daemon creds
+# first per docs/operator-runbook.md §3.1.
+export BROKER_BACKEND_URL=http://127.0.0.1:8090
+export BROKER_OIDC_ISSUER=http://localhost:8091 # http for dev only; production must be https
+export DAEMON_ACCESS_KEY_ID=AKIA-offline-stub
+export DAEMON_SECRET_ACCESS_KEY=offline-stub-secret
+export ACCOUNT_ID=000000000000
+./target/release/agentkeys-broker-server --port 8091 --skip-startup-check
+# expect: "OIDC signer ready" with kid=v1-<fingerprint>, then "broker listening on 0.0.0.0:8091"
+
+# Terminal C — checks
+# 1. Healthz
+curl -sf http://127.0.0.1:8091/healthz # → "ok"
+# 2. Discovery doc (the surface AWS would consume after registration)
+curl -sf http://127.0.0.1:8091/.well-known/openid-configuration | jq .
+# 3. JWKS (the public-key Set the issuer publishes)
+curl -sf http://127.0.0.1:8091/.well-known/jwks.json | jq '.keys[0] | {kty, crv, alg, kid}'
+
+# 4. Mint a session against the backend, then mint an OIDC JWT and an
+# AWS-creds response from the broker.
+SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \
+  -H 'content-type: application/json' \
+  -d '{"auth_token":"phase2-e2e"}' | jq -r .session)
+
+# 4a. 
JWT mint
+JWT=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \
+  -H "Authorization: Bearer $SESSION" | jq -r .jwt)
+echo "$JWT" | awk -F. '{print $2}' | base64 --decode 2>/dev/null | jq .
+# expect: claims with iss, sub=agentkeys:agent:<agent-id>, aud=sts.amazonaws.com,
+# agentkeys_user_wallet, iat, exp.
+
+# 4b. AWS-creds mint (requires real AWS daemon creds; skip on the
+# offline path).
+CREDS=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-aws-creds \
+  -H "Authorization: Bearer $SESSION")
+echo "$CREDS" | jq '{access_key_id, expiration, wallet}'
+
+# 5. Provisioner-scripts wiring (CLI side). With AGENTKEYS_BROKER_URL
+# set, `agentkeys provision` fetches AWS creds via the broker before
+# spawning the scraper subprocess — no stage6-demo-env.sh sourcing.
+export AGENTKEYS_BROKER_URL=http://127.0.0.1:8091
+./target/release/agentkeys init --mock-token phase2-e2e # session in OS keyring
+./target/release/agentkeys provision openrouter --force # full live signup; takes minutes
+# alternatively: confirm just the broker hop without doing the live signup
+./target/release/agentkeys --broker-url http://127.0.0.1:8091 \
+  provision openrouter --help # should not error on the env-fetch path
+
+# 6. Audit log inspection
+sqlite3 ~/.agentkeys/broker/audit.sqlite \
+  "SELECT outcome, requested_role, requester_wallet, occurred_at FROM mint_audit ORDER BY id DESC LIMIT 10;"
+# expect: a row per mint, with requested_role IN ('arn:aws:iam::*:role/agentkeys-agent', 'oidc_jwt')
+```
+
+### Acceptance
+
+- `/healthz` and `/readyz` both return `200`.
+- `/.well-known/openid-configuration` returns a body where `issuer` matches `BROKER_OIDC_ISSUER`.
+- `/.well-known/jwks.json` returns a JWK Set with `alg=ES256`, `crv=P-256`, a stable `kid`.
+- `mint-oidc-jwt` returns a JWT whose claims (decoded) include `agentkeys_user_wallet` matching the session's wallet, `aud=sts.amazonaws.com`, and a future `exp`.
+- The audit DB has a fresh row per mint with `outcome=ok` (or `auth_failed` for the negative checks below). + +### Negative checks (verify the failure modes) + +```bash +# Missing bearer → 401 +curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt +# expect: 401, with one auth_failed row in the audit DB. + +# Bogus bearer → 401 +curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \ + -H 'Authorization: Bearer never-minted' +# expect: 401 + auth_failed audit row. + +# Backend down (kill terminal A first) → 502 +curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \ + -H "Authorization: Bearer $SESSION" +# expect: 502, with a backend_error audit row (NOT auth_failed — the +# distinction is what an oncall operator chases when triaging). +``` + +If any of these don't match, capture the broker's stderr (Terminal B) and the audit row, then file an issue — the broker exposes one ledger so triage shouldn't require log digging. + +## Remote deployment + +This section is for operators who want their broker reachable by daemons running on developer laptops, CI, or cloud sandboxes — and who eventually want AWS / GCP / etc. to OIDC-federate against it. Phase 2 architecture is complete on a single host (see the operator E2E above); these instructions take that single-host setup and put it on real infrastructure. 
+ +### Topology + +``` +┌── developer laptop / CI / cloud sandbox ──┐ +│ agentkeys-daemon (or `agentkeys` CLI) │ +│ --broker-url https://broker.example.dev │ +└───────────────────┬───────────────────────┘ + │ HTTPS (bearer) + ▼ +┌── operator-managed host(s) ─────────────────────────────┐ +│ │ +│ reverse proxy (TLS terminator) │ +│ nginx + Let's Encrypt / AWS ALB + ACM / │ +│ Caddy / CloudFront in front of broker │ +│ │ │ +│ ▼ │ +│ agentkeys-broker-server :8091 ──────────┐ │ +│ (BROKER_BACKEND_URL=http://backend:8090) │ │ +│ │ │ +│ agentkeys-mock-server (or Heima-backed │ HTTP │ +│ successor) :8090 ◄──────────┘ │ +│ │ +│ ~/.agentkeys/broker/audit.sqlite │ +│ ~/.agentkeys/broker/oidc-keypair.json (mode 0600) │ +└──────────────────────────────────────────────────────────┘ +``` + +The two server processes are deployed together. The mock backend (or its production successor) is **not** exposed publicly — only the broker is. The broker reaches the backend over the operator's private network. + +### Backend server: production caveats + +`agentkeys-mock-server` exists for v0 operators who don't yet have Heima integration. It's deliberately simple — Axum + **in-memory** SQLite — which means: + +- **State is lost on restart.** Every running session, identity link, and audit row vanishes when the process exits. For development this is fine; for a backend that other developers' daemons depend on, it's not. +- **No HA.** Single-process, single-node. +- **No TLS at the listener.** Always front it with a reverse proxy (or co-locate with the broker on the same private network and don't expose it externally). + +For v0.1 operators, two pragmatic options: + +1. 
**Single-host deployment with persistent state (recommended for self-hosted teams).** Keep the mock-server but add a small wrapper: front it with `systemd` (or Docker `restart: unless-stopped`), and mount the SQLite file on persistent storage — `docs/operator-runbook.md` will track the exact patches needed in the next iteration. Until that lands, treat session loss on restart as part of the operator runbook (have developers re-`init` after a backend restart). +2. **Skip the mock and wait for Heima.** If your timeline allows, hold this deployment until the chain-backed backend lands and use the real Heima session-management path. Stage 7 phase 2 isn't gated on this — the broker's interface is the same regardless of which backend implements `/session/create` + `/session/validate`. + +### Step 1 — Provision the host + +Pick whatever fits your stack. Two examples that satisfy the requirements (TLS-terminating reverse proxy + ≥ 1 vCPU / 1 GiB RAM + persistent disk): + +- **AWS:** `t4g.small` EC2 + Elastic IP + Route 53 A record + ALB with ACM cert. Or skip the ALB and run nginx directly on the instance. +- **DigitalOcean / Hetzner / Linode:** any 1 GiB droplet + a managed DNS A record + nginx + Let's Encrypt via certbot. + +Either way you need: + +- A DNS name resolving to the host (e.g. `broker.example.dev`). +- A public-CA TLS certificate covering that name (Let's Encrypt is free; ACM is free for ALB use). +- Firewall: inbound `:443` from anywhere, inbound `:22` from your admin IP, **everything else closed**. The broker's `:8091` and the backend's `:8090` are reached only via localhost or the private network. 
+ +### Step 2 — Install the binaries + +The repo doesn't yet ship a `cargo dist` release; build from source on the target arch and copy the resulting binaries: + +```bash +git clone https://github.com/litentry/agentKeys.git +cd agentKeys +cargo build --release \ + -p agentkeys-mock-server \ + -p agentkeys-broker-server + +sudo install -m 0755 \ + target/release/agentkeys-mock-server \ + target/release/agentkeys-broker-server \ + /usr/local/bin/ +``` + +### Step 3 — Persisted operator config + +Persist the broker's required env vars in a 0600-mode file (`~/.zshenv` for zsh ops, `/etc/agentkeys/broker.env` for systemd): + +```bash +sudo install -d -m 0700 /etc/agentkeys +sudo tee /etc/agentkeys/broker.env >/dev/null <<'EOF' +DAEMON_ACCESS_KEY_ID=AKIA... +DAEMON_SECRET_ACCESS_KEY=... +ACCOUNT_ID=429071895007 +REGION=us-east-1 +BROKER_BACKEND_URL=http://127.0.0.1:8090 +BROKER_OIDC_ISSUER=https://broker.example.dev +EOF +sudo chmod 600 /etc/agentkeys/broker.env +``` + +`BROKER_OIDC_ISSUER` **must** match the public URL the reverse proxy serves — AWS rejects `create-open-id-connect-provider` if the registered URL doesn't equal the `iss` claim emitted by the broker. + +### Step 4 — systemd units + +```ini +# /etc/systemd/system/agentkeys-backend.service +[Unit] +Description=AgentKeys mock backend (session management) +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/agentkeys-mock-server --port 8090 +Restart=on-failure +RestartSec=5s +User=agentkeys +Group=agentkeys +# Listens on all interfaces; only the local broker should reach it. +# Use a host firewall (ufw / nftables) to drop :8090 from anywhere +# but 127.0.0.1 + the broker's IP. 
+NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +PrivateTmp=true + +[Install] +WantedBy=multi-user.target +``` + +```ini +# /etc/systemd/system/agentkeys-broker.service +[Unit] +Description=AgentKeys broker (Stage 7) +After=network-online.target agentkeys-backend.service +Wants=network-online.target +Requires=agentkeys-backend.service + +[Service] +Type=simple +EnvironmentFile=/etc/agentkeys/broker.env +ExecStart=/usr/local/bin/agentkeys-broker-server --port 8091 --bind 127.0.0.1 +Restart=on-failure +RestartSec=5s +User=agentkeys +Group=agentkeys +# Persist audit + keypair under /var/lib/agentkeys (operator must +# pre-create this dir mode 0700, owned by the agentkeys user). +Environment=HOME=/var/lib/agentkeys +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +ReadWritePaths=/var/lib/agentkeys +PrivateTmp=true + +[Install] +WantedBy=multi-user.target +``` + +```bash +sudo useradd --system --home /var/lib/agentkeys --shell /usr/sbin/nologin agentkeys +sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys +sudo systemctl daemon-reload +sudo systemctl enable --now agentkeys-backend agentkeys-broker +sudo systemctl status agentkeys-backend agentkeys-broker +``` + +The broker binds to `127.0.0.1:8091` so only the local reverse proxy can reach it. **Never** bind the broker to `0.0.0.0` without TLS — bearer tokens and minted credentials would traverse the network in cleartext (the broker logs a warning on startup if you do, see [`crates/agentkeys-broker-server/src/main.rs::warn_if_non_loopback_without_tls`](../crates/agentkeys-broker-server/src/main.rs)). 
+ +### Step 5 — Reverse proxy + TLS + +Minimal nginx site for `broker.example.dev`: + +```nginx +# /etc/nginx/sites-available/agentkeys-broker +server { + listen 80; + server_name broker.example.dev; + location /.well-known/acme-challenge/ { root /var/www/certbot; } + location / { return 301 https://$host$request_uri; } +} + +server { + listen 443 ssl http2; + server_name broker.example.dev; + + ssl_certificate /etc/letsencrypt/live/broker.example.dev/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/broker.example.dev/privkey.pem; + ssl_protocols TLSv1.2 TLSv1.3; + + # AWS IAM only fetches the well-known + JWKS during create-open-id-connect-provider; + # the rest of the broker is bearer-gated. Keep the proxy thin: no auth, + # no caching of /v1/*, just TLS termination. + location / { + proxy_pass http://127.0.0.1:8091; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_read_timeout 30s; + } +} +``` + +```bash +sudo ln -s /etc/nginx/sites-available/agentkeys-broker /etc/nginx/sites-enabled/ +sudo certbot --nginx -d broker.example.dev --agree-tos -m ops@example.dev +sudo nginx -t && sudo systemctl reload nginx +``` + +### Step 6 — Smoke test from a client machine + +From a laptop that has nothing AWS-shaped configured: + +```bash +curl -sf https://broker.example.dev/healthz # → "ok" +curl -sf https://broker.example.dev/.well-known/openid-configuration | \ + jq '.issuer == "https://broker.example.dev"' # → true +curl -sf https://broker.example.dev/.well-known/jwks.json | jq '.keys[0].kid' + +# End-to-end JWT mint (use a session bearer the operator has provisioned) +SESSION= +curl -sf -X POST https://broker.example.dev/v1/mint-oidc-jwt \ + -H "Authorization: Bearer $SESSION" | jq '.expiration' +``` + +If the discovery `issuer` field doesn't equal the URL you're hitting, your `BROKER_OIDC_ISSUER` env var disagrees with the reverse-proxy `server_name` — 
fix this before running the AWS federation step, or AWS will reject every JWT minted against the misregistered issuer — the `iss` claim must equal the URL registered via `create-open-id-connect-provider`.
From 7bef63326c6b91dbf0760d3cc2b11f13be4d3556 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 11:36:50 +0800 Subject: [PATCH 04/15] feat(stage7): broker reads AWS creds via SDK default chain (profiles); add broker host bootstrap script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The broker no longer requires DAEMON_ACCESS_KEY_ID / DAEMON_SECRET_ACCESS_KEY in the process environment. When both are set the broker still uses them (legacy path for existing deployments); when either is unset the broker delegates credential resolution to the AWS SDK's default provider chain — named profiles in ~/.aws/credentials (AWS_PROFILE / awsp), EC2 instance profile via IMDS, or any other link in the chain. Picked path is logged at startup so misconfiguration is visible immediately. Code: - crates/agentkeys-broker-server/src/sts.rs: - new AwsStsClient::with_default_chain(region) — no credentials_provider override, SDK default chain handles it. - existing from_keys(...) kept for the static-keys legacy path. - crates/agentkeys-broker-server/src/config.rs: - daemon_access_key_id / daemon_secret_access_key now Option. - rejects setting only one of the pair (XOR-safe at startup). - crates/agentkeys-broker-server/src/main.rs: - dispatches on (Some, Some) → from_keys, otherwise → with_default_chain. - logs the chosen path; the startup-failure message lists all three credential sources (AWS_PROFILE, instance profile, static keys). - tests/mint_flow.rs + tests/oidc_flow.rs: pass Some(...) for the daemon keys in BrokerConfig literals. Docs: - docs/operator-runbook.md §3.1: rewritten as "AWS credentials" — leads with named profiles + awsp, then EC2 instance profile, then legacy static keys. §3.2 lists the remaining (non-AWS-secret) env vars; §3.3 + §3.4 renumbered for consistency. §5 rotation procedure split per credential path. 
- docs/stage7-wip.md operator-E2E + remote-deploy: - E2E walk-through: `awsp agentkeys-daemon` instead of DAEMON_* exports. - Remote deploy §3 rewritten with three credential paths (instance profile / named profile / legacy static), each with copy-paste commands. systemd unit no longer needs EnvironmentFile by default; AWS_PROFILE or instance-profile is preferred. - docs/dev-setup.md §1: new "Other setup scripts at a glance" table pointing at scripts/setup-dev-env.sh and scripts/setup-broker-host.sh. §5.1/§5.2/§5.4: profile-based broker boot. §8 troubleshooting: ExpiredToken note now references ~/.aws/credentials reload. - agentkeys-secrets.env.example: stripped DAEMON_* (now profile-managed), with a leading note explaining the move and a commented-out legacy block at the bottom for operators who can't use profiles. New automation: - scripts/setup-broker-host.sh: idempotent bootstrap for a fresh Linux broker host. Builds binaries, creates the agentkeys system user, drops both systemd units, optional --with-nginx + --with-certbot. Three credential modes via --cred-mode {instance-profile,profile,static}; default is instance-profile (zero secrets on disk). Prints remaining manual steps (DNS A record, IAM role attach, certbot run, client-side smoke test) on completion. - scripts/setup-dev-env.sh: final-message pointer to setup-broker-host.sh so operators can find it. Stage 7 done-gate (`bash harness/stage-7-done.sh`) still passes. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .claude/scheduled_tasks.lock | 1 + agentkeys-secrets.env.example | 33 +- crates/agentkeys-broker-server/src/config.rs | 38 +- crates/agentkeys-broker-server/src/main.rs | 22 +- crates/agentkeys-broker-server/src/sts.rs | 23 ++ .../tests/mint_flow.rs | 4 +- .../tests/oidc_flow.rs | 4 +- docs/dev-setup.md | 37 +- docs/operator-runbook.md | 104 +++-- docs/stage7-wip.md | 118 +++++- scripts/setup-broker-host.sh | 388 ++++++++++++++++++ scripts/setup-dev-env.sh | 3 + 12 files changed, 671 insertions(+), 104 deletions(-) create mode 100644 .claude/scheduled_tasks.lock create mode 100755 scripts/setup-broker-host.sh diff --git a/.claude/scheduled_tasks.lock b/.claude/scheduled_tasks.lock new file mode 100644 index 0000000..f0311eb --- /dev/null +++ b/.claude/scheduled_tasks.lock @@ -0,0 +1 @@ +{"sessionId":"b3498f25-3c90-4378-b647-255d411464dc","pid":36360,"acquiredAt":1777347347804} \ No newline at end of file diff --git a/agentkeys-secrets.env.example b/agentkeys-secrets.env.example index 8e89c8f..35ffd66 100644 --- a/agentkeys-secrets.env.example +++ b/agentkeys-secrets.env.example @@ -1,7 +1,7 @@ # agentkeys-secrets.env.example # -# Template for local developer secrets. DO NOT commit the real file — that's -# gitignored as `agentkeys-secrets.env`. Two ways to use: +# Template for local developer non-AWS knobs. DO NOT commit the real file — +# that's gitignored as `agentkeys-secrets.env`. Two ways to use: # # 1. Source it manually per shell: # cp agentkeys-secrets.env.example agentkeys-secrets.env @@ -12,21 +12,11 @@ # tool, cron jobs) pick it up too: # echo "[ -f $PWD/agentkeys-secrets.env ] && source $PWD/agentkeys-secrets.env" >> ~/.zshenv # -# After filling, run: `source scripts/stage6-demo-env.sh` to mint 1 h STS -# temp creds from DAEMON_* and export them as AWS_*. 
- -# ─── Long-lived IAM users (rotate quarterly) ────────────────────────────────── - -# Daemon user — only permission is `sts:AssumeRole` into agentkeys-agent. -# Compromise blast radius = can assume the role; rotate via `aws iam -# update-access-key --status Inactive` + create new key. -export DAEMON_ACCESS_KEY_ID=AKIA...REPLACE_ME -export DAEMON_SECRET_ACCESS_KEY=REPLACE_ME - -# Admin user — used for infra changes (SES config, IAM policies). NOT used by -# the scraper/recorder runtime. If you don't do admin work, leave blank. -export ADMIN_AWS_ACCESS_KEY_ID=AKIA...REPLACE_ME_OR_BLANK -export ADMIN_AWS_ACCESS_KEY_SECRET=REPLACE_ME_OR_BLANK +# AWS CREDENTIALS LIVE ELSEWHERE. +# As of Stage 7 phase 2, AWS credentials are managed via named profiles in +# ~/.aws/credentials (mode 0600), not env vars. The broker reads credentials +# through the AWS SDK's default chain — `awsp ` (sets AWS_PROFILE) +# or an EC2 instance profile via IMDS. See docs/operator-runbook.md §3.1. # ─── Non-secret infrastructure knobs ────────────────────────────────────────── @@ -62,3 +52,12 @@ export AGENTKEYS_SIGNUP_PASSWORD=REPLACE_ME_WITH_STRONG_PASSWORD # Pricing: ~$1 per 1000 hCaptcha solves. # Sign up: https://capsolver.com (paste the CAP-... token) export CAPSOLVER_API_KEY=CAP-REPLACE_ME + +# ─── Legacy: static AWS keys (only if you cannot use named profiles) ────────── +# +# The broker's old credential path. Both must be set together; setting only +# one is rejected at startup. Prefer `~/.aws/credentials` + `awsp` instead. +# Leave commented out for the recommended path. 
+# +# export DAEMON_ACCESS_KEY_ID=AKIA...REPLACE_ME +# export DAEMON_SECRET_ACCESS_KEY=REPLACE_ME diff --git a/crates/agentkeys-broker-server/src/config.rs b/crates/agentkeys-broker-server/src/config.rs index 433bd8e..3b431cf 100644 --- a/crates/agentkeys-broker-server/src/config.rs +++ b/crates/agentkeys-broker-server/src/config.rs @@ -2,8 +2,14 @@ use std::path::PathBuf; #[derive(Debug, Clone)] pub struct BrokerConfig { - pub daemon_access_key_id: String, - pub daemon_secret_access_key: String, + /// Optional. When *both* `daemon_access_key_id` and + /// `daemon_secret_access_key` are set, the broker uses static IAM-user + /// keys (legacy path). When either is unset, the broker falls back to + /// the AWS SDK's default credential chain — picking up `AWS_PROFILE` + /// from `~/.aws/credentials`, an EC2 instance profile via IMDS, etc. + /// The chain path is preferred for new deployments. + pub daemon_access_key_id: Option, + pub daemon_secret_access_key: Option, pub agent_role_arn: String, pub backend_url: String, pub audit_db_path: PathBuf, @@ -31,25 +37,27 @@ pub struct BrokerConfig { impl BrokerConfig { pub fn from_env() -> anyhow::Result { - // DAEMON_ACCESS_KEY_ID / DAEMON_SECRET_ACCESS_KEY are the same vars - // scripts/stage6-demo-env.sh reads — operator persists them once in - // ~/.zshenv and both the legacy demo script and the broker pick them - // up. BROKER_DAEMON_* names are accepted as a fallback for callers - // that prefer the explicit prefix. + // DAEMON_ACCESS_KEY_ID / DAEMON_SECRET_ACCESS_KEY are now optional. + // When both are present, the broker uses them directly (legacy path + // matching scripts/stage6-demo-env.sh). When either is missing, the + // broker delegates credential resolution to the AWS SDK's default + // chain — `AWS_PROFILE` (from `awsp` or your shell), `~/.aws/` + // shared files, or EC2 IMDS instance profile. The chain path is the + // recommended one for new deployments. 
let daemon_access_key_id = first_env(&[ "DAEMON_ACCESS_KEY_ID", "BROKER_DAEMON_ACCESS_KEY_ID", - ]) - .ok_or_else(|| { - anyhow::anyhow!("missing required env var: DAEMON_ACCESS_KEY_ID (or BROKER_DAEMON_ACCESS_KEY_ID)") - })?; + ]); let daemon_secret_access_key = first_env(&[ "DAEMON_SECRET_ACCESS_KEY", "BROKER_DAEMON_SECRET_ACCESS_KEY", - ]) - .ok_or_else(|| { - anyhow::anyhow!("missing required env var: DAEMON_SECRET_ACCESS_KEY (or BROKER_DAEMON_SECRET_ACCESS_KEY)") - })?; + ]); + if daemon_access_key_id.is_some() != daemon_secret_access_key.is_some() { + anyhow::bail!( + "DAEMON_ACCESS_KEY_ID and DAEMON_SECRET_ACCESS_KEY must be set together \ + (or both unset to use the AWS SDK default credential chain via AWS_PROFILE)." + ); + } // BROKER_AGENT_ROLE_ARN can be derived from ACCOUNT_ID for the // canonical Stage 6 role name. Operator can still override. let agent_role_arn = std::env::var("BROKER_AGENT_ROLE_ARN").or_else(|_| { diff --git a/crates/agentkeys-broker-server/src/main.rs b/crates/agentkeys-broker-server/src/main.rs index 4d6eaaf..abf057b 100644 --- a/crates/agentkeys-broker-server/src/main.rs +++ b/crates/agentkeys-broker-server/src/main.rs @@ -42,12 +42,20 @@ async fn main() -> anyhow::Result<()> { warn_if_non_loopback_without_tls(&args.bind); let audit = AuditLog::open(&config.audit_db_path)?; - let sts = AwsStsClient::from_keys( - &config.daemon_access_key_id, - &config.daemon_secret_access_key, - &config.aws_region, - ) - .await; + let sts = match (&config.daemon_access_key_id, &config.daemon_secret_access_key) { + (Some(akid), Some(secret)) => { + tracing::info!( + "AWS credentials: static IAM-user keys (DAEMON_ACCESS_KEY_ID env)" + ); + AwsStsClient::from_keys(akid, secret, &config.aws_region).await + } + _ => { + tracing::info!( + "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" + ); + AwsStsClient::with_default_chain(&config.aws_region).await + } + }; if !args.skip_startup_check { match sts.caller_identity_ok().await { @@ 
-55,7 +63,7 @@ async fn main() -> anyhow::Result<()> { Err(e) => { tracing::error!(error = %e, "startup STS check failed — refusing to bind"); anyhow::bail!( - "startup STS check failed: {}. Verify BROKER_DAEMON_ACCESS_KEY_ID / BROKER_DAEMON_SECRET_ACCESS_KEY / BROKER_AWS_REGION, or pass --skip-startup-check for offline dev.", + "startup STS check failed: {}. Either set AWS_PROFILE (or attach an EC2 instance profile) so the SDK's default chain can resolve credentials, or set DAEMON_ACCESS_KEY_ID + DAEMON_SECRET_ACCESS_KEY for the legacy static-keys path. Verify BROKER_AWS_REGION too. Pass --skip-startup-check for offline dev.", e ); } diff --git a/crates/agentkeys-broker-server/src/sts.rs b/crates/agentkeys-broker-server/src/sts.rs index 012a887..fc38353 100644 --- a/crates/agentkeys-broker-server/src/sts.rs +++ b/crates/agentkeys-broker-server/src/sts.rs @@ -27,6 +27,13 @@ pub struct AwsStsClient { } impl AwsStsClient { + /// Construct a client backed by *static* IAM-user keys. + /// + /// Legacy / explicit-config path. New deployments should prefer + /// [`Self::with_default_chain`] so the AWS SDK can pick up credentials + /// from a named profile (`~/.aws/credentials` + `AWS_PROFILE`), an EC2 + /// instance profile (IMDS), or another link in the default provider + /// chain — no long-lived keys in the broker's process environment. pub async fn from_keys( access_key_id: &str, secret_access_key: &str, @@ -46,6 +53,22 @@ impl AwsStsClient { .await; Self { client: aws_sdk_sts::Client::new(&config) } } + + /// Construct a client using the AWS SDK's default credential provider + /// chain. Honors, in order: env vars (`AWS_ACCESS_KEY_ID` etc.), shared + /// credentials file (`~/.aws/credentials` + `AWS_PROFILE`), assume-role + /// chains in `~/.aws/config`, and (on EC2) IMDS instance profile. 
+ /// + /// This is the recommended path for both local-dev (operators run + /// `awsp agentkeys-daemon` to set `AWS_PROFILE`, then start the broker) + /// and EC2 deployments (attach an instance profile, no env vars at all). + pub async fn with_default_chain(region: &str) -> Self { + let config = aws_config::defaults(aws_config::BehaviorVersion::latest()) + .region(aws_config::Region::new(region.to_string())) + .load() + .await; + Self { client: aws_sdk_sts::Client::new(&config) } + } } #[async_trait] diff --git a/crates/agentkeys-broker-server/tests/mint_flow.rs b/crates/agentkeys-broker-server/tests/mint_flow.rs index fe52f46..8c225b0 100644 --- a/crates/agentkeys-broker-server/tests/mint_flow.rs +++ b/crates/agentkeys-broker-server/tests/mint_flow.rs @@ -53,8 +53,8 @@ async fn spawn_broker_with_sts( OidcKeypair::generate_and_persist(&tmp.path().join("oidc-keypair.json")).unwrap(); let config = BrokerConfig { - daemon_access_key_id: "AKIA-fake".into(), - daemon_secret_access_key: "fake-secret".into(), + daemon_access_key_id: Some("AKIA-fake".into()), + daemon_secret_access_key: Some("fake-secret".into()), agent_role_arn: STUB_ROLE_ARN.into(), backend_url, audit_db_path: PathBuf::from(":memory:"), diff --git a/crates/agentkeys-broker-server/tests/oidc_flow.rs b/crates/agentkeys-broker-server/tests/oidc_flow.rs index b47c03f..f2bd8fa 100644 --- a/crates/agentkeys-broker-server/tests/oidc_flow.rs +++ b/crates/agentkeys-broker-server/tests/oidc_flow.rs @@ -52,8 +52,8 @@ async fn spawn_broker(backend_url: String) -> (String, Arc) { let sts: Arc = Arc::new(StubStsClient::ok(stub_creds())); let config = BrokerConfig { - daemon_access_key_id: "AKIA-fake".into(), - daemon_secret_access_key: "fake-secret".into(), + daemon_access_key_id: Some("AKIA-fake".into()), + daemon_secret_access_key: Some("fake-secret".into()), agent_role_arn: STUB_ROLE_ARN.into(), backend_url, audit_db_path: PathBuf::from(":memory:"), diff --git a/docs/dev-setup.md b/docs/dev-setup.md index 
17fe1f9..3b8fa15 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -32,6 +32,13 @@ Two things the script intentionally does **not** do: 1. **Install Google Chrome.** The CDP scrapers attach to real Chrome at `localhost:9222`; install it from . 2. **Touch AWS infra.** That's the one-time operator setup in §5.2. +### Other setup scripts at a glance + +| Script | Audience | What it does | +|---|---|---| +| [`scripts/setup-dev-env.sh`](../scripts/setup-dev-env.sh) | Anyone — fresh dev machine | Installs every prerequisite above, builds workspace, runs smoke tests. (The one you just ran.) | +| [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) | Operator — fresh broker host | Provisions a Linux host into a running broker: builds binaries, creates the `agentkeys` system user, drops systemd units, optional nginx + Let's Encrypt. Idempotent. See [`stage7-wip.md` "Remote deployment"](./stage7-wip.md) for the manual long-form walk-through. | + ### Manual matrix (if you'd rather pick tools yourself) | Tool | Why | Install | @@ -138,35 +145,36 @@ Run through [`stage6-aws-setup.md`](./stage6-aws-setup.md) through §7 once per - S3 bucket `agentkeys-mail-` with receipt rule writing inbound to `inbound/` - Route 53 records: three DKIM CNAMEs, MX, SPF, DMARC -Persist the daemon user's long-lived creds in `~/.zshenv` (mode 0600) so every shell on this host inherits them. The broker process picks them up at startup; nothing else on the host should be reading from these env vars. +Manage the daemon user's long-lived AWS keys via a **named profile** in `~/.aws/credentials` (mode 0600). The broker uses the AWS SDK's default credential chain — `AWS_PROFILE` (set by `awsp` or your shell), the shared credentials file, or an EC2 instance profile via IMDS. **No long-lived AWS keys live in env vars.** See [`operator-runbook.md` §3.1](./operator-runbook.md) for the full credential story. 
### 5.2 Run the broker server -The broker holds your AWS daemon credentials and brokers scoped temp credentials to authenticated daemons. Same binary local + hosted; only the configuration source differs. +The broker holds your AWS daemon credentials (via the SDK default chain) and brokers scoped temp credentials to authenticated daemons. Same binary local + hosted; only the credential source differs. **Local development shape:** ```bash -# DAEMON_ACCESS_KEY_ID, DAEMON_SECRET_ACCESS_KEY, ACCOUNT_ID, and REGION -# are already in your shell because they're persisted in ~/.zshenv (mode -# 0600). The broker derives BROKER_AGENT_ROLE_ARN from ACCOUNT_ID -# automatically and falls back BROKER_AWS_REGION → REGION. -# The only per-run var the broker requires is BROKER_BACKEND_URL: -export BROKER_BACKEND_URL="http://127.0.0.1:8090" # mock backend for v0.1 dev loop - -# Run. +# Activate the daemon profile so the AWS SDK can resolve credentials. +awsp agentkeys-daemon # or: export AWS_PROFILE=agentkeys-daemon + +# Non-secret config: BROKER_BACKEND_URL is required; the rest derive +# from ACCOUNT_ID + REGION already in your shell. +export BROKER_BACKEND_URL="http://127.0.0.1:8090" # mock backend for v0.1 dev loop + cargo run --release -p agentkeys-broker-server -- --port 8091 -# → broker listening on 0.0.0.0:8091 +# → "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" +# → "broker listening on 0.0.0.0:8091" ``` The broker: 1. Validates incoming bearer tokens against `BROKER_BACKEND_URL` (the mock server in dev; the real chain backend in v0.2+). -2. Calls `sts:assume-role` on `BROKER_AGENT_ROLE_ARN` using its env-var-loaded daemon key. +2. Calls `sts:assume-role` on `BROKER_AGENT_ROLE_ARN` using whatever credentials the SDK default chain returned. 3. Returns 1-hour temp creds to the caller. 4. Logs every mint to `BROKER_AUDIT_DB_PATH` (SQLite, one row per mint). 
For runbook detail (start / supervise / rotate / monitor / migrate to hosted), see [`docs/operator-runbook.md`](./operator-runbook.md). +For the automated remote-host bootstrap, see [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh). ### 5.3 Hand off bearer tokens to your developers @@ -180,7 +188,8 @@ If you're running everything on one box (typical solo dev), you'll want three te # Terminal A — mock backend cargo run --release -p agentkeys-mock-server -- --port 8090 -# Terminal B — broker. DAEMON_* + ACCOUNT_ID already in env via ~/.zshenv. +# Terminal B — broker. AWS credentials come from the active profile. +awsp agentkeys-daemon export BROKER_BACKEND_URL=http://127.0.0.1:8090 cargo run --release -p agentkeys-broker-server -- --port 8091 @@ -226,7 +235,7 @@ The stage-done script is the authoritative evaluator — never self-grade. If it | Symptom | Likely cause | Fix | |---|---|---| | `Cannot find package 'tsx'` | Running a scraper from repo root instead of `provisioner-scripts/` | Use `scripts/stage6-demo-run.sh`, or `cd provisioner-scripts` first | -| `ExpiredToken` from broker | Broker's daemon AWS key was rotated; broker process holds the old one | Restart the broker process — it re-reads `BROKER_DAEMON_*` from env on start | +| `ExpiredToken` from broker | Broker's daemon AWS key was rotated; broker process holds the old one | Restart the broker process — the SDK re-reads `~/.aws/credentials` (or IMDS / env vars) on start | | `401 Unauthorized` from broker | Bearer token expired (30-day TTL), or token issued against a different backend | Re-run `agentkeys init` against the broker's `BROKER_BACKEND_URL` | | Scraper hangs at `waiting for Turnstile` for >2 min | Turnstile showing a visible checkbox | Click it in the Chrome window from §5.4 | | Turnstile repeatedly fails even after checkbox | Chromium profile fingerprint flagged | `rm -rf /tmp/agentkeys-chrome-profile` and restart Chrome | diff --git a/docs/operator-runbook.md 
b/docs/operator-runbook.md index 63eeca4..b4defbd 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -35,45 +35,82 @@ The remaining federation step (`aws iam create-open-id-connect-provider --url $B ## 3. Start the broker -### 3.1 Required configuration +### 3.1 AWS credentials -The broker reads its configuration from environment variables only — no config file in v0.1. +The broker resolves AWS credentials through the AWS SDK's default provider chain — **named profiles in `~/.aws/credentials`** (recommended for local dev), **EC2 instance profile via IMDS** (recommended for cloud deployments), or static IAM-user keys in env vars (legacy fallback). + +#### Recommended: named profiles + `awsp` + +Profiles live in `~/.aws/credentials` and `~/.aws/config` (mode `0600`). One profile per role; switch with `awsp ` or `export AWS_PROFILE=`. Example layout: + +``` +~/.aws/credentials # mode 0600 +[agentkeys-admin] # admin operations +aws_access_key_id = AKIA... +aws_secret_access_key = ... + +[agentkeys-broker] # EC2 Instance Connect to broker host +aws_access_key_id = AKIA... +aws_secret_access_key = ... + +[agentkeys-daemon] # what the broker process assumes from +aws_access_key_id = AKIA... +aws_secret_access_key = ... +``` + +``` +~/.aws/config # mode 0600 +[profile agentkeys-admin] +region = us-east-1 +output = json + +[profile agentkeys-broker] +region = us-east-1 + +[profile agentkeys-daemon] +region = us-east-1 +``` + +Run the broker with the daemon profile active: + +```bash +awsp agentkeys-daemon # sets AWS_PROFILE=agentkeys-daemon +agentkeys-broker-server --port 8091 +# → "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" +``` + +The broker logs which credential path it picked at startup, so misconfiguration is visible in the first second of the log. + +#### Recommended: EC2 instance profile + +When the broker runs on EC2, attach an instance profile granting `sts:AssumeRole` on `agentkeys-agent`. 
The SDK picks credentials from IMDS automatically — no env vars, no shared files, no rotation step. This is the path `scripts/setup-broker-host.sh` sets up. + +#### Legacy fallback: static IAM-user keys in env + +Set both `DAEMON_ACCESS_KEY_ID` *and* `DAEMON_SECRET_ACCESS_KEY` (or the `BROKER_DAEMON_*` aliases). The broker logs `AWS credentials: static IAM-user keys (DAEMON_ACCESS_KEY_ID env)` when it picks this path. Setting only one of the pair is rejected at startup. Prefer profiles or instance-profile. + +### 3.2 Other configuration | Variable | Required | Description | |---|---|---| -| `DAEMON_ACCESS_KEY_ID` | yes | Long-lived `agentkeys-daemon` IAM user access key. Same var `scripts/stage6-demo-env.sh` reads. (Fallback: `BROKER_DAEMON_ACCESS_KEY_ID`.) | -| `DAEMON_SECRET_ACCESS_KEY` | yes | Long-lived `agentkeys-daemon` IAM user secret. (Fallback: `BROKER_DAEMON_SECRET_ACCESS_KEY`.) | -| `BROKER_AGENT_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of the `agentkeys-agent` role. If unset, derived from `ACCOUNT_ID` as `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-agent`. | | `BROKER_BACKEND_URL` | yes | URL of the AgentKeys backend that issues session tokens (mock-server in dev, chain in v0.2+). | +| `BROKER_AGENT_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of the `agentkeys-agent` role. If unset, derived from `ACCOUNT_ID` as `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-agent`. | +| `BROKER_AWS_REGION` | no | AWS region for the STS call. Falls back to `REGION` (the rest-of-agentKeys convention) before defaulting to `us-east-1`. The active profile's `region` setting is used by the SDK independently for credential lookup. | | `BROKER_AUDIT_DB_PATH` | no | SQLite path for the audit log. Default: `$HOME/.agentkeys/broker/audit.sqlite`. | -| `BROKER_AWS_REGION` | no | AWS region for the STS call. Falls back to `REGION` (the rest-of-agentKeys convention) before defaulting to `us-east-1`. | | `BROKER_SESSION_DURATION_SECONDS` | no | TTL for minted credentials. Default: `3600` (1 h). 
Min: `900`, max: `43200`. | | `BROKER_BACKEND_TIMEOUT_SECONDS` | no | HTTP timeout for backend `/session/validate` calls. Default: `10`. | | `BROKER_SHUTDOWN_GRACE_SECONDS` | no | Hard cap on graceful-shutdown drain. Default: `30`. | | `BROKER_OIDC_ISSUER` | no | Public URL the broker advertises in the OIDC discovery doc and JWT `iss` claim. Must match the URL used at `aws iam create-open-id-connect-provider` time. Default: `https://oidc.agentkeys.dev`. | | `BROKER_OIDC_KEYPAIR_PATH` | no | Path to the persisted ES256 keypair (mode 0600). Generated on first start, reused on subsequent restarts so the registered IAM OIDC provider stays valid. Default: `$HOME/.agentkeys/broker/oidc-keypair.json`. | | `BROKER_OIDC_JWT_TTL_SECONDS` | no | TTL (seconds) for minted OIDC JWTs. Default: `300`. Bounded `[60, 3600]`. | +| `DAEMON_ACCESS_KEY_ID` / `DAEMON_SECRET_ACCESS_KEY` | no (legacy) | Static IAM-user keys. Only used when no profile / instance profile / SDK default is available. Both must be set together. | -Persist `DAEMON_ACCESS_KEY_ID` and `DAEMON_SECRET_ACCESS_KEY` in `~/.zshenv` (or the equivalent per-shell startup file for non-zsh shells) with file mode 0600 so the operator's shell has them on every login. The names match `scripts/stage6-demo-env.sh` so one persisted set of keys feeds both the legacy demo flow and the broker: - -```bash -chmod 600 ~/.zshenv -# inside ~/.zshenv: -export REGION=us-east-1 -export ACCOUNT_ID=429071895007 -export DAEMON_ACCESS_KEY_ID=AKIA... -export DAEMON_SECRET_ACCESS_KEY=... -``` - -`~/.zshenv` is sourced by every zsh invocation (login, interactive, script), so the broker process inherits the keys regardless of how it was started. The 0600 mode keeps the file readable only by the operator. +`ACCOUNT_ID` is read indirectly to derive `BROKER_AGENT_ROLE_ARN`. Persist non-secret values (region, account ID, role ARN, OIDC issuer URL) wherever your shell prefers; the broker no longer needs secrets in its environment. 
-The broker also accepts `BROKER_DAEMON_ACCESS_KEY_ID` / `BROKER_DAEMON_SECRET_ACCESS_KEY` as fallbacks if you prefer an explicit prefix. The unprefixed `DAEMON_*` names take precedence so the legacy and new flows stay aligned. - -If the host is shared or untrusted, prefer a secret manager that injects the values into the launch environment (systemd `LoadCredential=`, launchd `EnvironmentVariables` plist, or whatever your supervisor supports) rather than a per-user dotfile. - -### 3.2 Run +### 3.3 Run ```bash +awsp agentkeys-daemon # or attach instance profile cargo run --release -p agentkeys-broker-server -- --port 8091 # → broker listening on 0.0.0.0:8091 ``` @@ -81,10 +118,13 @@ cargo run --release -p agentkeys-broker-server -- --port 8091 Or from the built binary: ```bash +awsp agentkeys-daemon ./target/release/agentkeys-broker-server --port 8091 ``` -### 3.3 Verify it came up +The first second of the log shows which credential path the broker picked: `AWS credentials: SDK default chain ...` or `AWS credentials: static IAM-user keys ...`. Always check this before declaring the broker healthy in a new environment. + +### 3.4 Verify it came up ```bash curl -sf http://127.0.0.1:8091/healthz # → 200 ok @@ -105,15 +145,25 @@ Logs go to stderr in `tracing-subscriber` JSON format when `RUST_LOG=info` is se ## 5. Rotate the daemon AWS key -Long-lived keys age out. Rotation procedure: +Long-lived keys age out. Rotation procedure depends on the credential path: + +### Named profile (recommended) 1. In IAM, **create** a second access key on the `agentkeys-daemon` user — both old and new keys are now valid. -2. Update `~/.zshenv` (or your supervisor's environment-injection mechanism) with the new key. -3. Restart the broker — it picks up the new `DAEMON_*` from env. +2. Update the `agentkeys-daemon` profile in `~/.aws/credentials` with the new key. +3. Restart the broker — the SDK re-reads the shared file on each `aws_config::defaults().load()` (i.e., on process restart). 
4. Verify with `curl /readyz` — should return 200. 5. In IAM, **deactivate** (not delete) the old access key. Wait 24 h. 6. If nothing broke, delete the old key. If something broke, reactivate and roll back. +### EC2 instance profile + +Rotation is automatic — IMDS-vended credentials refresh on a schedule managed by AWS. No operator step. + +### Legacy static-keys env-var path + +Same as the profile flow but step 2 updates the `DAEMON_*` env vars in your supervisor config. + **Cadence recommendation:** rotate every 90 days minimum, immediately on any operator-laptop compromise. ## 6. Audit diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index 1e3a913..df238db 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -35,10 +35,11 @@ The credential broker that lets app developers run daemons without holding any A # Terminal A — mock backend cargo run --release -p agentkeys-mock-server -- --port 8090 -# Terminal B — broker. Operator has DAEMON_ACCESS_KEY_ID, -# DAEMON_SECRET_ACCESS_KEY, ACCOUNT_ID, and REGION already in their shell -# environment (persisted in ~/.zshenv with mode 0600 — zsh sources it for -# every shell). The broker derives BROKER_AGENT_ROLE_ARN from ACCOUNT_ID. +# Terminal B — broker. AWS credentials come from the operator's +# ~/.aws/credentials profile (e.g. agentkeys-daemon) via `awsp` or +# AWS_PROFILE. ACCOUNT_ID + REGION live in the operator's shell. The +# broker derives BROKER_AGENT_ROLE_ARN from ACCOUNT_ID. +awsp agentkeys-daemon export BROKER_BACKEND_URL=http://127.0.0.1:8090 cargo run --release -p agentkeys-broker-server -- --port 8091 @@ -125,7 +126,7 @@ A four-terminal walk-through that exercises everything Phase 2 ships, with no AW - A release build: `cargo build --release -p agentkeys-mock-server -p agentkeys-broker-server -p agentkeys-cli` (≈ 90 s cold). - `jq` and `curl` on `$PATH`. 
-- For the AWS-side check (step 6), `DAEMON_ACCESS_KEY_ID` + `DAEMON_SECRET_ACCESS_KEY` + `ACCOUNT_ID` from your operator setup; for offline-only, skip step 6 and use `--skip-startup-check`. +- For the AWS-side check (step 4b + 6), `awsp agentkeys-daemon` (or another profile with `sts:AssumeRole` on `agentkeys-agent`) plus `ACCOUNT_ID` from your operator setup. For offline-only, skip those steps and use `--skip-startup-check`. ### Walk-through @@ -137,16 +138,17 @@ A four-terminal walk-through that exercises everything Phase 2 ships, with no AW # but is NOT a long-running production backend. See the "Remote # deployment" section below for the production backend story. -# Terminal B — broker. For the offline path (no live AWS round-trip), -# pass --skip-startup-check; for the live path source your daemon creds -# first per docs/operator-runbook.md §3.1. +# Terminal B — broker. Two ways to pass AWS credentials: +# • Offline path (no AWS round-trip): --skip-startup-check, no creds needed. +# • Live path: awsp agentkeys-daemon (SDK default chain) +# See docs/operator-runbook.md §3.1 for the full credential story. export BROKER_BACKEND_URL=http://127.0.0.1:8090 export BROKER_OIDC_ISSUER=http://localhost:8091 # http for dev only; production must be https -export DAEMON_ACCESS_KEY_ID=AKIA-offline-stub -export DAEMON_SECRET_ACCESS_KEY=offline-stub-secret -export ACCOUNT_ID=000000000000 +export ACCOUNT_ID=000000000000 # offline path tolerates a stub ./target/release/agentkeys-broker-server --port 8091 --skip-startup-check -# expect: "OIDC signer ready" with kid=v1-, then "broker listening on 0.0.0.0:8091" +# expect: "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" +# "OIDC signer ready" with kid=v1- +# "broker listening on 0.0.0.0:8091" # Terminal C — checks # 1. 
Healthz
@@ -296,21 +298,86 @@ sudo install -m 0755 \
   /usr/local/bin/
 ```
 
-### Step 3 — Persisted operator config
+### Step 3 — AWS credentials + non-secret config
 
-Persist the broker's required env vars in a 0600-mode file (`~/.zshenv` for zsh ops, `/etc/agentkeys/broker.env` for systemd):
+The broker resolves AWS credentials through the SDK default chain. Pick one of three paths, in order of preference:
+
+#### 3a. EC2 instance profile (recommended on AWS)
+
+If the broker host is an EC2 instance, attach an IAM **instance profile** with `sts:AssumeRole` permission on `agentkeys-agent`. The SDK pulls credentials from IMDS automatically — **no secrets land on the host's filesystem, no env vars, no rotation runbook**.
+
+```bash
+# One-time, from your admin workstation:
+ROLE_NAME=agentkeys-broker-host
+INSTANCE_PROFILE=$ROLE_NAME
+
+# Trust policy: only this EC2 role may assume.
+aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document "$(jq -n '{
+  Version: "2012-10-17",
+  Statement: [{Effect:"Allow", Principal:{Service:"ec2.amazonaws.com"}, Action:"sts:AssumeRole"}]
+}')"
+
+# Inline policy: the only thing the broker host can do is sts:AssumeRole on agentkeys-agent.
+aws iam put-role-policy --role-name $ROLE_NAME --policy-name BrokerAssumeAgent \
+  --policy-document "$(jq -n --arg account "$ACCOUNT_ID" '{
+    Version: "2012-10-17",
+    Statement: [{Effect:"Allow", Action:"sts:AssumeRole",
+                 Resource:"arn:aws:iam::\($account):role/agentkeys-agent"}]
+  }')"
+
+aws iam create-instance-profile --instance-profile-name $INSTANCE_PROFILE
+aws iam add-role-to-instance-profile --instance-profile-name $INSTANCE_PROFILE --role-name $ROLE_NAME
+aws ec2 associate-iam-instance-profile \
+  --instance-id <instance-id> \
+  --iam-instance-profile Name=$INSTANCE_PROFILE
+```
+
+Verify from the host: `aws sts get-caller-identity` should print the assumed role ARN.
+
+#### 3b. Named profile in `~/.aws/credentials` (non-EC2 hosts)
+
+Hosts outside AWS (DigitalOcean, Hetzner, etc.) can't use IMDS. Drop the operator user's profile into `~/.aws/credentials` for the `agentkeys` system user:
+
+```bash
+sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys/.aws
+sudo -u agentkeys tee /var/lib/agentkeys/.aws/credentials >/dev/null <<'EOF'
+[agentkeys-daemon]
+aws_access_key_id = AKIA...
+aws_secret_access_key = ...
+EOF
+sudo chmod 600 /var/lib/agentkeys/.aws/credentials
+
+sudo -u agentkeys tee /var/lib/agentkeys/.aws/config >/dev/null <<'EOF'
+[profile agentkeys-daemon]
+region = us-east-1
+EOF
+sudo chmod 600 /var/lib/agentkeys/.aws/config
+```
+
+The systemd unit below sets `Environment=HOME=/var/lib/agentkeys` so the SDK finds these files; the unit also sets `AWS_PROFILE=agentkeys-daemon` so it picks the right profile.
+
+#### 3c. Legacy static-keys env file (only if 3a/3b are not options)
 
 ```bash
 sudo install -d -m 0700 /etc/agentkeys
 sudo tee /etc/agentkeys/broker.env >/dev/null <<'EOF'
 DAEMON_ACCESS_KEY_ID=AKIA...
 DAEMON_SECRET_ACCESS_KEY=...
+EOF
+sudo chmod 600 /etc/agentkeys/broker.env
+```
+
+Only the systemd unit's `EnvironmentFile=` references this; nothing else on the host should read it.
+
+#### Non-secret config (all three paths)
+
+These values are not secrets and live in the systemd unit directly (Step 4):
+
+```
 ACCOUNT_ID=429071895007
 REGION=us-east-1
 BROKER_BACKEND_URL=http://127.0.0.1:8090
 BROKER_OIDC_ISSUER=https://broker.example.dev
-EOF
-sudo chmod 600 /etc/agentkeys/broker.env
 ```
 
 `BROKER_OIDC_ISSUER` **must** match the public URL the reverse proxy serves — AWS rejects `create-open-id-connect-provider` if the registered URL doesn't equal the `iss` claim emitted by the broker.
@@ -353,15 +420,26 @@ Requires=agentkeys-backend.service
 
 [Service]
 Type=simple
-EnvironmentFile=/etc/agentkeys/broker.env
+# Non-secret config goes inline; AWS credentials come from the SDK's
+# default chain (IMDS for 3a, ~/.aws/* for 3b, EnvironmentFile for 3c).
+Environment=HOME=/var/lib/agentkeys +Environment=ACCOUNT_ID=429071895007 +Environment=REGION=us-east-1 +Environment=BROKER_BACKEND_URL=http://127.0.0.1:8090 +Environment=BROKER_OIDC_ISSUER=https://broker.example.dev +# Uncomment ONE of the next two lines depending on the credential path: +# 3a (EC2 instance profile): nothing — IMDS handles it. +# 3b (named profile): +#Environment=AWS_PROFILE=agentkeys-daemon +# 3c (legacy static keys): +#EnvironmentFile=/etc/agentkeys/broker.env ExecStart=/usr/local/bin/agentkeys-broker-server --port 8091 --bind 127.0.0.1 Restart=on-failure RestartSec=5s User=agentkeys Group=agentkeys -# Persist audit + keypair under /var/lib/agentkeys (operator must -# pre-create this dir mode 0700, owned by the agentkeys user). -Environment=HOME=/var/lib/agentkeys +# Persist audit + keypair (and ~/.aws if 3b) under /var/lib/agentkeys — +# operator must pre-create this dir mode 0700, owned by the agentkeys user. NoNewPrivileges=true ProtectSystem=strict ProtectHome=true diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh new file mode 100755 index 0000000..3810c9c --- /dev/null +++ b/scripts/setup-broker-host.sh @@ -0,0 +1,388 @@ +#!/usr/bin/env bash +# AgentKeys broker-host bootstrap. +# +# Provisions a fresh Linux host into a running broker. Automates the manual +# steps in docs/stage7-wip.md "Remote deployment" §1-7. Idempotent — safe +# to re-run after partial failures. +# +# Usage: +# bash scripts/setup-broker-host.sh \ +# --issuer-url https://broker.example.dev \ +# --account-id 429071895007 \ +# [--region us-east-1] \ +# [--cred-mode instance-profile|profile|static] \ +# [--profile-name agentkeys-daemon] \ +# [--with-nginx] \ +# [--with-certbot] +# +# Order of operations: +# 1. Pre-flight checks (Linux, root via sudo, Rust toolchain, repo checkout) +# 2. Build agentkeys-mock-server + agentkeys-broker-server (release) +# 3. Install binaries to /usr/local/bin +# 4. 
Create agentkeys system user + /var/lib/agentkeys (mode 0700) +# 5. Drop systemd units for backend + broker +# 6. (Optional) install nginx with site config templating $ISSUER_URL host +# 7. (Optional) install certbot +# 8. Enable + start units +# 9. Print remaining manual steps (DNS A record, certbot run, IAM role +# attach for instance-profile mode, populate ~/.aws/credentials for +# profile mode, populate /etc/agentkeys/broker.env for static mode) +# +# Out of scope (operator does these by hand): +# - DNS A record for $ISSUER_URL host +# - AWS-side IAM role/policy creation +# - Cert issuance (certbot --nginx prompts interactively) +# - Firewall rules + +set -euo pipefail + +REPO_ROOT="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)" + +# ─── Defaults ───────────────────────────────────────────────────────────────── +ISSUER_URL="" +ACCOUNT_ID="" +REGION="us-east-1" +CRED_MODE="instance-profile" +PROFILE_NAME="agentkeys-daemon" +WITH_NGINX=false +WITH_CERTBOT=false + +# ─── CLI parse ──────────────────────────────────────────────────────────────── +while (( $# > 0 )); do + case "$1" in + --issuer-url) ISSUER_URL="$2"; shift 2 ;; + --account-id) ACCOUNT_ID="$2"; shift 2 ;; + --region) REGION="$2"; shift 2 ;; + --cred-mode) CRED_MODE="$2"; shift 2 ;; + --profile-name) PROFILE_NAME="$2"; shift 2 ;; + --with-nginx) WITH_NGINX=true; shift ;; + --with-certbot) WITH_CERTBOT=true; shift ;; + -h|--help) + sed -n '2,/^set -euo/p' "$0" | sed 's/^# \?//' + exit 0 + ;; + *) echo "unknown flag: $1" >&2; exit 2 ;; + esac +done + +# ─── Helpers ────────────────────────────────────────────────────────────────── +log() { printf '\033[1;36m==>\033[0m %s\n' "$*"; } +warn() { printf '\033[1;33m!!\033[0m %s\n' "$*" >&2; } +die() { printf '\033[1;31mxx\033[0m %s\n' "$*" >&2; exit 1; } +have() { command -v "$1" >/dev/null 2>&1; } + +# ─── Pre-flight ─────────────────────────────────────────────────────────────── +log "Pre-flight" +[[ "$(uname -s)" == "Linux" ]] || die "broker host 
setup is Linux-only (got $(uname -s)). Run scripts/setup-dev-env.sh on a developer machine instead." +[[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.example.dev)" +[[ -n "$ACCOUNT_ID" ]] || die "--account-id is required" +case "$CRED_MODE" in + instance-profile|profile|static) ;; + *) die "--cred-mode must be one of: instance-profile, profile, static (got $CRED_MODE)";; +esac +have sudo || die "sudo not found — run as a user with sudo access" +[[ -d "$REPO_ROOT/crates/agentkeys-broker-server" ]] || \ + die "expected agentkeys checkout at $REPO_ROOT — run from inside a clone" + +ISSUER_HOST="${ISSUER_URL#https://}" +ISSUER_HOST="${ISSUER_HOST#http://}" +ISSUER_HOST="${ISSUER_HOST%%/*}" +log "issuer URL : $ISSUER_URL (host: $ISSUER_HOST)" +log "account ID : $ACCOUNT_ID" +log "region : $REGION" +log "cred mode : $CRED_MODE" +[[ "$CRED_MODE" == "profile" ]] && log "profile : $PROFILE_NAME" + +# ─── Detect package manager ─────────────────────────────────────────────────── +if have apt-get; then + PM=apt + PM_INSTALL=(sudo apt-get install -y) + PM_UPDATE=(sudo apt-get update -y) +elif have dnf; then + PM=dnf + PM_INSTALL=(sudo dnf install -y) + PM_UPDATE=(:) +else + die "no supported package manager (need apt or dnf)" +fi +log "package manager: $PM" + +# ─── 1. Build prereqs ───────────────────────────────────────────────────────── +log "Ensuring base build tools" +"${PM_UPDATE[@]}" +case "$PM" in + apt) "${PM_INSTALL[@]}" curl build-essential pkg-config libssl-dev ca-certificates ;; + dnf) "${PM_INSTALL[@]}" curl gcc gcc-c++ make pkgconf-pkg-config openssl-devel ca-certificates ;; +esac + +if ! have rustup; then + log "Installing rustup + stable toolchain" + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal + # shellcheck disable=SC1091 + source "$HOME/.cargo/env" +fi +log "Rust: $(rustc --version)" + +# ─── 2. 
Build binaries ──────────────────────────────────────────────────────── +log "Building agentkeys-mock-server + agentkeys-broker-server (release)" +( cd "$REPO_ROOT" && cargo build --release \ + -p agentkeys-mock-server \ + -p agentkeys-broker-server ) + +# ─── 3. Install binaries ────────────────────────────────────────────────────── +log "Installing binaries to /usr/local/bin" +sudo install -m 0755 \ + "$REPO_ROOT/target/release/agentkeys-mock-server" \ + "$REPO_ROOT/target/release/agentkeys-broker-server" \ + /usr/local/bin/ + +# ─── 4. System user + state dir ─────────────────────────────────────────────── +if ! id -u agentkeys >/dev/null 2>&1; then + log "Creating agentkeys system user" + sudo useradd --system --home /var/lib/agentkeys --shell /usr/sbin/nologin agentkeys +fi +sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys + +if [[ "$CRED_MODE" == "profile" ]]; then + sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys/.aws + if [[ ! -f /var/lib/agentkeys/.aws/credentials ]]; then + log "Creating placeholder /var/lib/agentkeys/.aws/credentials" + sudo -u agentkeys tee /var/lib/agentkeys/.aws/credentials >/dev/null </dev/null </dev/null <<'EOF' +# Static IAM-user keys — legacy path, only if instance-profile and +# named-profile aren't options. Both must be set together. +DAEMON_ACCESS_KEY_ID=REPLACE_WITH_DAEMON_AKID +DAEMON_SECRET_ACCESS_KEY=REPLACE_WITH_DAEMON_SECRET +EOF + sudo chmod 600 /etc/agentkeys/broker.env + fi +fi + +# ─── 5. 
systemd units ───────────────────────────────────────────────────────── +log "Writing systemd units" + +sudo tee /etc/systemd/system/agentkeys-backend.service >/dev/null <<'EOF' +[Unit] +Description=AgentKeys mock backend (session management) +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/agentkeys-mock-server --port 8090 +Restart=on-failure +RestartSec=5s +User=agentkeys +Group=agentkeys +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +PrivateTmp=true + +[Install] +WantedBy=multi-user.target +EOF + +# Build the broker unit with the right credential-source line. +case "$CRED_MODE" in + instance-profile) + CRED_LINE="# Credentials come from the EC2 instance profile via IMDS — no env." + ;; + profile) + CRED_LINE="Environment=AWS_PROFILE=$PROFILE_NAME" + ;; + static) + CRED_LINE="EnvironmentFile=/etc/agentkeys/broker.env" + ;; +esac + +sudo tee /etc/systemd/system/agentkeys-broker.service >/dev/null </dev/null < + 2. Open port 443 on the host firewall (and 80 only for ACME challenges). + Drop all ingress to :8090 and :8091 except 127.0.0.1. + +EOF + +if [[ "$WITH_NGINX" == "true" ]]; then + cat < + sudo nginx -t && sudo systemctl reload nginx + +EOF +fi + +cat < Date: Tue, 28 Apr 2026 11:37:26 +0800 Subject: [PATCH 05/15] chore: gitignore .claude/ runtime state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Last commit accidentally checked in `.claude/scheduled_tasks.lock` via `git add -A`. That directory is Claude Code's per-workspace runtime (lock files, scheduled-task index, settings.local.json) — never repo content. Adding `.claude/` to .gitignore and untracking the lock file. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .claude/scheduled_tasks.lock | 1 - .gitignore | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) delete mode 100644 .claude/scheduled_tasks.lock diff --git a/.claude/scheduled_tasks.lock b/.claude/scheduled_tasks.lock deleted file mode 100644 index f0311eb..0000000 --- a/.claude/scheduled_tasks.lock +++ /dev/null @@ -1 +0,0 @@ -{"sessionId":"b3498f25-3c90-4378-b647-255d411464dc","pid":36360,"acquiredAt":1777347347804} \ No newline at end of file diff --git a/.gitignore b/.gitignore index bccabfb..227c3d7 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,10 @@ .gstack/ AWSCLIV2.pkg +# Claude Code per-workspace runtime state (lock files, scheduled-task index, +# personal settings.local.json). Never check these in. +.claude/ + # Local developer secrets — template is checked in as .env.example. agentkeys-secrets.env From 2bf1970fb0208f3dd698cbd0b1faf88cee4b2edc Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 12:02:59 +0800 Subject: [PATCH 06/15] feat(setup): make setup-broker-host.sh interactive with explanations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The script previously required all decisions up front via CLI flags. Now, when run on a TTY with no flags (or with --interactive explicitly), it walks the operator through each decision with an explanation block before the prompt — what the choice does, why it matters, and when to skip it — plus a final summary + confirmation before any destructive work. CI / non-TTY paths still work via --non-interactive + the existing flags. Behavioral changes: - Auto-detects TTY via `[[ -t 0 ]]`; can be overridden with --interactive / --non-interactive. - New --without-nginx and --without-certbot flags so non-interactive callers can be explicit instead of relying on the implicit default. - New --yes/-y to skip the final "Proceed?" prompt. 
- Required flags (--issuer-url, --account-id) now prompt interactively if missing; non-interactive mode still dies with a helpful redirect to the interactive walk-through. - Cred-mode is now interactive too: a numbered menu with explanations of instance-profile / profile / static and when each is appropriate. - Profile-name only prompted when cred-mode=profile. - Certbot prompt defaults to "yes" when nginx was chosen, "no" when not (because certbot --nginx has nothing to talk to without nginx). Tested: - bash -n syntax check - --help renders the header block - --non-interactive with missing --issuer-url → dies with redirect - --non-interactive with --cred-mode bogus → dies with valid-values list - --non-interactive with valid inputs → reaches summary block, then proceeds to package-manager detection - harness/stage-7-done.sh still passes Co-Authored-By: Claude Opus 4.7 (1M context) --- scripts/setup-broker-host.sh | 318 +++++++++++++++++++++++++++++++---- 1 file changed, 284 insertions(+), 34 deletions(-) diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 3810c9c..0f55791 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -5,26 +5,33 @@ # steps in docs/stage7-wip.md "Remote deployment" §1-7. Idempotent — safe # to re-run after partial failures. # +# Run with no flags on a TTY for an interactive walk-through that explains +# each decision before it's made. Pass flags / --non-interactive for CI. +# # Usage: -# bash scripts/setup-broker-host.sh \ +# bash scripts/setup-broker-host.sh # interactive +# bash scripts/setup-broker-host.sh --non-interactive \ # CI # --issuer-url https://broker.example.dev \ # --account-id 429071895007 \ # [--region us-east-1] \ # [--cred-mode instance-profile|profile|static] \ # [--profile-name agentkeys-daemon] \ -# [--with-nginx] \ -# [--with-certbot] +# [--with-nginx | --without-nginx] \ +# [--with-certbot | --without-certbot] \ +# [--yes] # # Order of operations: -# 1. 
Pre-flight checks (Linux, root via sudo, Rust toolchain, repo checkout) -# 2. Build agentkeys-mock-server + agentkeys-broker-server (release) -# 3. Install binaries to /usr/local/bin -# 4. Create agentkeys system user + /var/lib/agentkeys (mode 0700) -# 5. Drop systemd units for backend + broker -# 6. (Optional) install nginx with site config templating $ISSUER_URL host -# 7. (Optional) install certbot -# 8. Enable + start units -# 9. Print remaining manual steps (DNS A record, certbot run, IAM role +# 1. Pre-flight checks (Linux, sudo, repo checkout) +# 2. Interactive prompts (skipped in --non-interactive mode) +# 3. Final summary + confirmation (skipped with --yes) +# 4. Build agentkeys-mock-server + agentkeys-broker-server (release) +# 5. Install binaries to /usr/local/bin +# 6. Create agentkeys system user + /var/lib/agentkeys (mode 0700) +# 7. Drop systemd units for backend + broker +# 8. (Optional) install nginx with site config templating $ISSUER_URL host +# 9. (Optional) install certbot +# 10. Enable + start units +# 11. Print remaining manual steps (DNS A record, certbot run, IAM role # attach for instance-profile mode, populate ~/.aws/credentials for # profile mode, populate /etc/agentkeys/broker.env for static mode) # @@ -42,21 +49,34 @@ REPO_ROOT="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)" ISSUER_URL="" ACCOUNT_ID="" REGION="us-east-1" -CRED_MODE="instance-profile" +CRED_MODE="" # set by interactive prompt or --cred-mode PROFILE_NAME="agentkeys-daemon" -WITH_NGINX=false -WITH_CERTBOT=false +WITH_NGINX="auto" # auto | yes | no +WITH_CERTBOT="auto" # auto | yes | no +ASSUME_YES=false + +# Interactive when stdin is a TTY and the operator hasn't opted out. 
+if [[ -t 0 ]]; then + INTERACTIVE=true +else + INTERACTIVE=false +fi # ─── CLI parse ──────────────────────────────────────────────────────────────── while (( $# > 0 )); do case "$1" in - --issuer-url) ISSUER_URL="$2"; shift 2 ;; - --account-id) ACCOUNT_ID="$2"; shift 2 ;; - --region) REGION="$2"; shift 2 ;; - --cred-mode) CRED_MODE="$2"; shift 2 ;; - --profile-name) PROFILE_NAME="$2"; shift 2 ;; - --with-nginx) WITH_NGINX=true; shift ;; - --with-certbot) WITH_CERTBOT=true; shift ;; + --issuer-url) ISSUER_URL="$2"; shift 2 ;; + --account-id) ACCOUNT_ID="$2"; shift 2 ;; + --region) REGION="$2"; shift 2 ;; + --cred-mode) CRED_MODE="$2"; shift 2 ;; + --profile-name) PROFILE_NAME="$2"; shift 2 ;; + --with-nginx) WITH_NGINX="yes"; shift ;; + --without-nginx) WITH_NGINX="no"; shift ;; + --with-certbot) WITH_CERTBOT="yes"; shift ;; + --without-certbot) WITH_CERTBOT="no"; shift ;; + --non-interactive) INTERACTIVE=false; shift ;; + --interactive) INTERACTIVE=true; shift ;; + --yes|-y) ASSUME_YES=true; shift ;; -h|--help) sed -n '2,/^set -euo/p' "$0" | sed 's/^# \?//' exit 0 @@ -66,27 +86,257 @@ while (( $# > 0 )); do done # ─── Helpers ────────────────────────────────────────────────────────────────── -log() { printf '\033[1;36m==>\033[0m %s\n' "$*"; } -warn() { printf '\033[1;33m!!\033[0m %s\n' "$*" >&2; } -die() { printf '\033[1;31mxx\033[0m %s\n' "$*" >&2; exit 1; } -have() { command -v "$1" >/dev/null 2>&1; } +log() { printf '\033[1;36m==>\033[0m %s\n' "$*"; } +warn() { printf '\033[1;33m!!\033[0m %s\n' "$*" >&2; } +die() { printf '\033[1;31mxx\033[0m %s\n' "$*" >&2; exit 1; } +have() { command -v "$1" >/dev/null 2>&1; } + +# Print an explanation block before a prompt. Stays out of the way in +# non-interactive mode so CI logs don't fill up with help text the +# operator can't act on. 
+explain() { + $INTERACTIVE || return 0 + printf '\n\033[1;34m── %s ──\033[0m\n' "$1" + shift + for line in "$@"; do + printf ' %s\n' "$line" + done + printf '\n' +} + +# Read a value with a default; non-empty input wins, empty input keeps the default. +# Args: var-name prompt-label default +prompt_default() { + local __var="$1" __label="$2" __default="$3" __answer + read -r -p "$__label [$__default]: " __answer || true + printf -v "$__var" '%s' "${__answer:-$__default}" +} + +# Read a required value. Re-asks until non-empty. +prompt_required() { + local __var="$1" __label="$2" __answer + while :; do + read -r -p "$__label: " __answer || true + if [[ -n "$__answer" ]]; then + printf -v "$__var" '%s' "$__answer" + return + fi + warn "value required" + done +} + +# Yes/no prompt with a default. Default-on-empty. +# Args: var-name prompt-label default(yes|no) +prompt_yn() { + local __var="$1" __label="$2" __default="$3" __hint __answer + case "$__default" in + yes) __hint="[Y/n]" ;; + no) __hint="[y/N]" ;; + *) __hint="[y/n]" ;; + esac + while :; do + read -r -p "$__label $__hint: " __answer || true + __answer="${__answer:-$__default}" + case "${__answer,,}" in + y|yes) printf -v "$__var" '%s' "yes"; return ;; + n|no) printf -v "$__var" '%s' "no"; return ;; + esac + done +} + +# Numbered choice prompt with a default index. +# Args: var-name prompt-label default-index choice1 choice2 ... 
+prompt_choice() { + local __var="$1" __label="$2" __default="$3"; shift 3 + local __choices=("$@") __i __pick + while :; do + printf '%s (default %s):\n' "$__label" "$__default" + for __i in "${!__choices[@]}"; do + printf ' %d) %s\n' "$(( __i + 1 ))" "${__choices[__i]}" + done + read -r -p "Choice [$__default]: " __pick || true + __pick="${__pick:-$__default}" + if [[ "$__pick" =~ ^[1-9][0-9]*$ ]] && (( __pick >= 1 && __pick <= ${#__choices[@]} )); then + printf -v "$__var" '%s' "${__choices[$(( __pick - 1 ))]}" + return + fi + warn "pick a number between 1 and ${#__choices[@]}" + done +} # ─── Pre-flight ─────────────────────────────────────────────────────────────── log "Pre-flight" [[ "$(uname -s)" == "Linux" ]] || die "broker host setup is Linux-only (got $(uname -s)). Run scripts/setup-dev-env.sh on a developer machine instead." -[[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.example.dev)" -[[ -n "$ACCOUNT_ID" ]] || die "--account-id is required" +have sudo || die "sudo not found — run as a user with sudo access" +[[ -d "$REPO_ROOT/crates/agentkeys-broker-server" ]] || \ + die "expected agentkeys checkout at $REPO_ROOT — run from inside a clone" + +# ─── Interactive walk-through ───────────────────────────────────────────────── +if $INTERACTIVE; then + cat <<'EOF' + +================================================================================ + AgentKeys broker host bootstrap — interactive +================================================================================ +This script walks through the steps in docs/stage7-wip.md "Remote deployment" +on this host. It will install packages, create a system user, drop systemd +units, and (optionally) configure nginx + certbot. Re-runs are safe; existing +files won't be overwritten without your input. + +You'll be asked about each optional step before it happens. Pass --help for +the non-interactive flag set. 
+EOF + + if [[ -z "$ISSUER_URL" ]]; then + explain "Public OIDC issuer URL" \ + "The HTTPS URL the outside world (AWS / GCP / clients) will use to" \ + "reach this broker. AWS IAM fetches /.well-known/openid-configuration" \ + "and /.well-known/jwks.json from this URL during" \ + "create-open-id-connect-provider, so it MUST:" \ + " • be reachable over public TLS (Let's Encrypt is fine)" \ + " • exactly match BROKER_OIDC_ISSUER (this script writes that env var)" \ + " • exactly match the --url you pass to AWS later" \ + "" \ + "Example: https://broker.example.dev" + prompt_required ISSUER_URL "Issuer URL" + fi + + if [[ -z "$ACCOUNT_ID" ]]; then + explain "AWS account ID" \ + "12-digit account ID for the AWS account that holds your" \ + "agentkeys-daemon IAM user (or role) and the agentkeys-agent role." \ + "Used to derive BROKER_AGENT_ROLE_ARN if not overridden." + prompt_required ACCOUNT_ID "Account ID" + fi + + explain "AWS region" \ + "Region the broker calls STS in. Use the region your agentkeys-agent" \ + "role and the operator's S3 bucket already live in." + prompt_default REGION "Region" "$REGION" + + if [[ -z "$CRED_MODE" ]]; then + explain "How does the broker get its AWS credentials?" \ + "Three credential paths, ordered by preference:" \ + "" \ + " 1) instance-profile (default, recommended for EC2)" \ + " Broker runs on EC2; SDK pulls creds from the instance profile" \ + " via IMDS. ZERO secrets on disk. You attach the role to the" \ + " instance manually after this script finishes." \ + "" \ + " 2) profile (recommended for non-EC2 hosts)" \ + " Creates ~/.aws/credentials under the agentkeys system user." \ + " You fill in the access key + secret by hand. AWS_PROFILE is" \ + " set in the systemd unit so the SDK picks it up." \ + "" \ + " 3) static (legacy, only if neither of the above work)" \ + " Drops DAEMON_ACCESS_KEY_ID + DAEMON_SECRET_ACCESS_KEY into" \ + " /etc/agentkeys/broker.env. systemd EnvironmentFile= reads it." 
+    prompt_choice CRED_MODE "Credential mode" 1 \
+      "instance-profile" \
+      "profile" \
+      "static"
+  fi
+
+  if [[ "$CRED_MODE" == "profile" ]]; then
+    explain "Named-profile name" \
+      "The profile-name section that goes into ~/.aws/credentials and" \
+      "~/.aws/config under the agentkeys user, and into AWS_PROFILE= in" \
+      "the broker's systemd unit. Match this to the profile you use" \
+      "elsewhere if you want awsp / shared tooling to keep working."
+    prompt_default PROFILE_NAME "Profile name" "$PROFILE_NAME"
+  fi
+
+  if [[ "$WITH_NGINX" == "auto" ]]; then
+    ISSUER_HOST_FOR_PROMPT="${ISSUER_URL#https://}"
+    ISSUER_HOST_FOR_PROMPT="${ISSUER_HOST_FOR_PROMPT#http://}"
+    ISSUER_HOST_FOR_PROMPT="${ISSUER_HOST_FOR_PROMPT%%/*}"
+    explain "Install + configure nginx?" \
+      "If yes:" \
+      "  • installs nginx via the system package manager" \
+      "  • drops a site config at /etc/nginx/sites-available/agentkeys-broker" \
+      "  • the site routes $ISSUER_HOST_FOR_PROMPT → 127.0.0.1:8091 and" \
+      "    redirects :80 → :443" \
+      "  • the cert paths point at /etc/letsencrypt/live/$ISSUER_HOST_FOR_PROMPT/" \
+      "    (you run certbot separately to actually issue the cert)" \
+      "" \
+      "Skip if you're using AWS ALB+ACM, Cloudflare tunnel, Caddy, or an" \
+      "existing nginx instance you'll edit yourself. The broker stays bound" \
+      "to 127.0.0.1:8091 either way — it's the operator's job to put a" \
+      "TLS-terminating proxy in front of it."
+    prompt_yn WITH_NGINX "Install nginx now?" "yes"
+  fi
+
+  if [[ "$WITH_CERTBOT" == "auto" ]]; then
+    explain "Install certbot for Let's Encrypt cert issuance?" \
+      "This script INSTALLS the certbot package. It does NOT issue a cert." \
+      "Cert issuance requires:" \
+      "  • DNS A record for the issuer host already pointing at this host" \
+      "  • port 80 reachable from the public internet" \
+      "  • you running 'sudo certbot --nginx -d <issuer-host>' interactively" \
+      "" \
+      "Skip if you're using AWS ACM, Cloudflare-managed TLS, or a different" \
+      "ACME client."
+ if [[ "$WITH_NGINX" == "yes" ]]; then + prompt_yn WITH_CERTBOT "Install certbot now?" "yes" + else + # Without nginx, certbot has nothing to talk to via the --nginx plugin. + # Default-no but still ask in case the operator plans to run certonly. + prompt_yn WITH_CERTBOT "Install certbot now?" "no" + fi + fi +fi + +# ─── Validate non-interactive inputs ───────────────────────────────────────── +[[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.example.dev). Drop --non-interactive for an interactive walk-through." +[[ -n "$ACCOUNT_ID" ]] || die "--account-id is required. Drop --non-interactive for an interactive walk-through." +[[ -n "$CRED_MODE" ]] || CRED_MODE="instance-profile" case "$CRED_MODE" in instance-profile|profile|static) ;; *) die "--cred-mode must be one of: instance-profile, profile, static (got $CRED_MODE)";; esac -have sudo || die "sudo not found — run as a user with sudo access" -[[ -d "$REPO_ROOT/crates/agentkeys-broker-server" ]] || \ - die "expected agentkeys checkout at $REPO_ROOT — run from inside a clone" +# Resolve auto → no for the non-interactive path (preserves prior default). +[[ "$WITH_NGINX" == "auto" ]] && WITH_NGINX="no" +[[ "$WITH_CERTBOT" == "auto" ]] && WITH_CERTBOT="no" ISSUER_HOST="${ISSUER_URL#https://}" ISSUER_HOST="${ISSUER_HOST#http://}" ISSUER_HOST="${ISSUER_HOST%%/*}" + +# ─── Summary + confirmation ────────────────────────────────────────────────── +cat < From e5c08cf1f3b34be87744e27ab6b6bd683d065607 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 12:53:54 +0800 Subject: [PATCH 07/15] =?UTF-8?q?refactor(stage6+7):=20rename=20agentkeys-?= =?UTF-8?q?agent=20IAM=20role=20=E2=86=92=20agentkeys-data-role?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The role's old name overloaded "agent" with the project name (AgentKeys), the AI agent the credentials are minted *for*, and the IAM identity the broker assumes *into*. 
Three different things sharing one word — confusing during operator setup, and easy to mis-script. Renaming to `agentkeys-data-role` makes the role's job (data-plane access: S3 + SES) explicit and unambiguous. Code: - BrokerConfig.agent_role_arn → data_role_arn - BROKER_DATA_ROLE_ARN env var (primary); BROKER_AGENT_ROLE_ARN still accepted as a fallback for unmigrated deployments — startup error message lists both names. - ACCOUNT_ID-derived default ARN now points at agentkeys-data-role. - handlers/mint.rs + tests/{mint_flow,oidc_flow}.rs updated to match. Scripts: - scripts/setup-broker-host.sh: prompts, hand-off text, and CLI flag references all use the new name. - scripts/stage6-demo-env.sh: --role-arn target updated. Docs (every non-archived reference updated): - docs/stage6-aws-setup.md (the canonical "create the role" runbook; added a top-of-§3 note explaining the rename + back-compat env var). - docs/stage7-wip.md (E2E walk-through, remote deploy, federation recipe) - docs/operator-runbook.md (env-var table, audit-DB schema comment) - docs/dev-setup.md - docs/spec/ses-email-architecture.md (mermaid diagrams + bucket policies) - docs/spec/plans/development-stages.md - wiki/tag-based-access.md (cryptographic-isolation walk-through) - wiki/email-system.md Verified: - cargo test -p agentkeys-broker-server → all green - harness/stage-7-done.sh → STAGE 7 (phase 1 + phase 2) PASSED Migration for existing deployments: - Old AWS deployments with the legacy role name keep working unchanged via the BROKER_AGENT_ROLE_ARN fallback. - New deployments should follow the renamed instructions in stage6-aws- setup.md §3b and use BROKER_DATA_ROLE_ARN. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- crates/agentkeys-broker-server/src/config.rs | 26 +++++++++------ .../src/handlers/mint.rs | 6 ++-- .../tests/mint_flow.rs | 4 +-- .../tests/oidc_flow.rs | 4 +-- docs/dev-setup.md | 4 +-- docs/operator-runbook.md | 10 +++--- docs/spec/plans/development-stages.md | 2 +- docs/spec/ses-email-architecture.md | 12 +++---- docs/stage6-aws-setup.md | 32 ++++++++++--------- docs/stage7-wip.md | 20 ++++++------ scripts/setup-broker-host.sh | 8 ++--- scripts/stage6-demo-env.sh | 2 +- wiki/email-system.md | 8 ++--- wiki/tag-based-access.md | 12 +++---- 14 files changed, 79 insertions(+), 71 deletions(-) diff --git a/crates/agentkeys-broker-server/src/config.rs b/crates/agentkeys-broker-server/src/config.rs index 3b431cf..2754fb6 100644 --- a/crates/agentkeys-broker-server/src/config.rs +++ b/crates/agentkeys-broker-server/src/config.rs @@ -10,7 +10,7 @@ pub struct BrokerConfig { /// The chain path is preferred for new deployments. pub daemon_access_key_id: Option, pub daemon_secret_access_key: Option, - pub agent_role_arn: String, + pub data_role_arn: String, pub backend_url: String, pub audit_db_path: PathBuf, pub aws_region: String, @@ -58,15 +58,21 @@ impl BrokerConfig { (or both unset to use the AWS SDK default credential chain via AWS_PROFILE)." ); } - // BROKER_AGENT_ROLE_ARN can be derived from ACCOUNT_ID for the + // BROKER_DATA_ROLE_ARN can be derived from ACCOUNT_ID for the // canonical Stage 6 role name. Operator can still override. 
- let agent_role_arn = std::env::var("BROKER_AGENT_ROLE_ARN").or_else(|_| { - std::env::var("ACCOUNT_ID") - .map(|account_id| format!("arn:aws:iam::{}:role/agentkeys-agent", account_id)) - }) - .map_err(|_| anyhow::anyhow!( - "missing required env var: set BROKER_AGENT_ROLE_ARN explicitly, or set ACCOUNT_ID and the broker will derive arn:aws:iam::$ACCOUNT_ID:role/agentkeys-agent" - ))?; + // BROKER_AGENT_ROLE_ARN is accepted as a fallback for callers + // that haven't migrated yet (renamed 2026-04-28: agentkeys-agent + // → agentkeys-data-role to disambiguate from the project's + // "agent" terminology). + let data_role_arn = std::env::var("BROKER_DATA_ROLE_ARN") + .or_else(|_| std::env::var("BROKER_AGENT_ROLE_ARN")) + .or_else(|_| { + std::env::var("ACCOUNT_ID") + .map(|account_id| format!("arn:aws:iam::{}:role/agentkeys-data-role", account_id)) + }) + .map_err(|_| anyhow::anyhow!( + "missing required env var: set BROKER_DATA_ROLE_ARN explicitly (legacy: BROKER_AGENT_ROLE_ARN), or set ACCOUNT_ID and the broker will derive arn:aws:iam::$ACCOUNT_ID:role/agentkeys-data-role" + ))?; let backend_url = required_env("BROKER_BACKEND_URL")?; let audit_db_path = std::env::var("BROKER_AUDIT_DB_PATH") .ok() @@ -142,7 +148,7 @@ impl BrokerConfig { Ok(Self { daemon_access_key_id, daemon_secret_access_key, - agent_role_arn, + data_role_arn, backend_url, audit_db_path, aws_region, diff --git a/crates/agentkeys-broker-server/src/handlers/mint.rs b/crates/agentkeys-broker-server/src/handlers/mint.rs index 92dc8d0..e2af5ee 100644 --- a/crates/agentkeys-broker-server/src/handlers/mint.rs +++ b/crates/agentkeys-broker-server/src/handlers/mint.rs @@ -59,7 +59,7 @@ pub async fn mint_aws_creds( match state .sts .assume_role( - &state.config.agent_role_arn, + &state.config.data_role_arn, &session_name, state.config.session_duration_seconds, ) @@ -73,7 +73,7 @@ pub async fn mint_aws_creds( MintRecord { requester_token: token, requester_wallet: &session.wallet, - requested_role: 
&state.config.agent_role_arn, + requested_role: &state.config.data_role_arn, session_duration_seconds: state.config.session_duration_seconds, sts_session_name: &session_name, outcome: MintOutcome::Ok, @@ -120,7 +120,7 @@ fn record_outcome( MintRecord { requester_token: token, requester_wallet: wallet, - requested_role: &state.config.agent_role_arn, + requested_role: &state.config.data_role_arn, session_duration_seconds: state.config.session_duration_seconds, sts_session_name: session_name, outcome, diff --git a/crates/agentkeys-broker-server/tests/mint_flow.rs b/crates/agentkeys-broker-server/tests/mint_flow.rs index 8c225b0..be3201f 100644 --- a/crates/agentkeys-broker-server/tests/mint_flow.rs +++ b/crates/agentkeys-broker-server/tests/mint_flow.rs @@ -16,7 +16,7 @@ use agentkeys_broker_server::sts::{AssumedCredentials, StsClient, StubStsClient} use serde_json::Value; use tempfile::TempDir; -const STUB_ROLE_ARN: &str = "arn:aws:iam::000000000000:role/agentkeys-agent"; +const STUB_ROLE_ARN: &str = "arn:aws:iam::000000000000:role/agentkeys-data-role"; fn stub_creds() -> AssumedCredentials { AssumedCredentials { @@ -55,7 +55,7 @@ async fn spawn_broker_with_sts( let config = BrokerConfig { daemon_access_key_id: Some("AKIA-fake".into()), daemon_secret_access_key: Some("fake-secret".into()), - agent_role_arn: STUB_ROLE_ARN.into(), + data_role_arn: STUB_ROLE_ARN.into(), backend_url, audit_db_path: PathBuf::from(":memory:"), aws_region: "us-east-1".into(), diff --git a/crates/agentkeys-broker-server/tests/oidc_flow.rs b/crates/agentkeys-broker-server/tests/oidc_flow.rs index f2bd8fa..25ff1b4 100644 --- a/crates/agentkeys-broker-server/tests/oidc_flow.rs +++ b/crates/agentkeys-broker-server/tests/oidc_flow.rs @@ -19,7 +19,7 @@ use jsonwebtoken::{decode, decode_header, Algorithm, DecodingKey, Validation}; use serde_json::Value; use tempfile::TempDir; -const STUB_ROLE_ARN: &str = "arn:aws:iam::000000000000:role/agentkeys-agent"; +const STUB_ROLE_ARN: &str = 
"arn:aws:iam::000000000000:role/agentkeys-data-role"; const TEST_ISSUER: &str = "https://oidc.test.invalid"; fn stub_creds() -> AssumedCredentials { @@ -54,7 +54,7 @@ async fn spawn_broker(backend_url: String) -> (String, Arc) { let config = BrokerConfig { daemon_access_key_id: Some("AKIA-fake".into()), daemon_secret_access_key: Some("fake-secret".into()), - agent_role_arn: STUB_ROLE_ARN.into(), + data_role_arn: STUB_ROLE_ARN.into(), backend_url, audit_db_path: PathBuf::from(":memory:"), aws_region: "us-east-1".into(), diff --git a/docs/dev-setup.md b/docs/dev-setup.md index 3b8fa15..aea85b4 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -141,7 +141,7 @@ Run through [`stage6-aws-setup.md`](./stage6-aws-setup.md) through §7 once per - SES domain identity verified on `bots.litentry.org` (or your substitute via `AGENTKEYS_EMAIL_DOMAIN`) - `agentkeys-daemon` IAM user with `sts:AssumeRole` only -- `agentkeys-agent` role with SES + S3 permissions +- `agentkeys-data-role` role with SES + S3 permissions - S3 bucket `agentkeys-mail-` with receipt rule writing inbound to `inbound/` - Route 53 records: three DKIM CNAMEs, MX, SPF, DMARC @@ -169,7 +169,7 @@ cargo run --release -p agentkeys-broker-server -- --port 8091 The broker: 1. Validates incoming bearer tokens against `BROKER_BACKEND_URL` (the mock server in dev; the real chain backend in v0.2+). -2. Calls `sts:assume-role` on `BROKER_AGENT_ROLE_ARN` using whatever credentials the SDK default chain returned. +2. Calls `sts:assume-role` on `BROKER_DATA_ROLE_ARN` using whatever credentials the SDK default chain returned. 3. Returns 1-hour temp creds to the caller. 4. Logs every mint to `BROKER_AUDIT_DB_PATH` (SQLite, one row per mint). 
diff --git a/docs/operator-runbook.md b/docs/operator-runbook.md index b4defbd..3b165ce 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -83,7 +83,7 @@ The broker logs which credential path it picked at startup, so misconfiguration #### Recommended: EC2 instance profile -When the broker runs on EC2, attach an instance profile granting `sts:AssumeRole` on `agentkeys-agent`. The SDK picks credentials from IMDS automatically — no env vars, no shared files, no rotation step. This is the path `scripts/setup-broker-host.sh` sets up. +When the broker runs on EC2, attach an instance profile granting `sts:AssumeRole` on `agentkeys-data-role`. The SDK picks credentials from IMDS automatically — no env vars, no shared files, no rotation step. This is the path `scripts/setup-broker-host.sh` sets up. #### Legacy fallback: static IAM-user keys in env @@ -94,7 +94,7 @@ Set both `DAEMON_ACCESS_KEY_ID` *and* `DAEMON_SECRET_ACCESS_KEY` (or the `BROKER | Variable | Required | Description | |---|---|---| | `BROKER_BACKEND_URL` | yes | URL of the AgentKeys backend that issues session tokens (mock-server in dev, chain in v0.2+). | -| `BROKER_AGENT_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of the `agentkeys-agent` role. If unset, derived from `ACCOUNT_ID` as `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-agent`. | +| `BROKER_DATA_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of the `agentkeys-data-role` IAM role the broker assumes-into. If unset, derived from `ACCOUNT_ID` as `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-data-role`. The legacy `BROKER_AGENT_ROLE_ARN` is still accepted as a fallback for pre-2026-04-28 deployments. | | `BROKER_AWS_REGION` | no | AWS region for the STS call. Falls back to `REGION` (the rest-of-agentKeys convention) before defaulting to `us-east-1`. The active profile's `region` setting is used by the SDK independently for credential lookup. | | `BROKER_AUDIT_DB_PATH` | no | SQLite path for the audit log. Default: `$HOME/.agentkeys/broker/audit.sqlite`. 
| | `BROKER_SESSION_DURATION_SECONDS` | no | TTL for minted credentials. Default: `3600` (1 h). Min: `900`, max: `43200`. | @@ -105,7 +105,7 @@ Set both `DAEMON_ACCESS_KEY_ID` *and* `DAEMON_SECRET_ACCESS_KEY` (or the `BROKER | `BROKER_OIDC_JWT_TTL_SECONDS` | no | TTL (seconds) for minted OIDC JWTs. Default: `300`. Bounded `[60, 3600]`. | | `DAEMON_ACCESS_KEY_ID` / `DAEMON_SECRET_ACCESS_KEY` | no (legacy) | Static IAM-user keys. Only used when no profile / instance profile / SDK default is available. Both must be set together. | -`ACCOUNT_ID` is read indirectly to derive `BROKER_AGENT_ROLE_ARN`. Persist non-secret values (region, account ID, role ARN, OIDC issuer URL) wherever your shell prefers; the broker no longer needs secrets in its environment. +`ACCOUNT_ID` is read indirectly to derive `BROKER_DATA_ROLE_ARN`. Persist non-secret values (region, account ID, role ARN, OIDC issuer URL) wherever your shell prefers; the broker no longer needs secrets in its environment. ### 3.3 Run @@ -176,7 +176,7 @@ CREATE TABLE mint_log ( minted_at INTEGER NOT NULL, -- unix seconds requester_token TEXT NOT NULL, -- bearer token (hashed; see §6.1) requester_wallet TEXT NOT NULL, -- wallet the token resolved to - requested_role TEXT NOT NULL, -- BROKER_AGENT_ROLE_ARN at mint time + requested_role TEXT NOT NULL, -- BROKER_DATA_ROLE_ARN at mint time session_duration_seconds INTEGER NOT NULL, sts_session_name TEXT NOT NULL, -- value passed to AssumeRole; visible in CloudTrail outcome TEXT NOT NULL, -- "ok" | "auth_failed" | "sts_error" @@ -224,7 +224,7 @@ Operator-side, the same binary runs. 
Configuration source changes from env vars | Broker `/readyz` returns 503 with `backend_unreachable` | `BROKER_BACKEND_URL` wrong, mock-server not running | Check the URL; restart mock-server | | Broker `/readyz` returns 503 with `sts_error` | Daemon AWS key invalid, expired, or missing `sts:AssumeRole` permission | Verify with `aws sts get-caller-identity` using the same env vars | | `POST /v1/mint-aws-creds` returns 401 | Bearer token expired or issued against a different backend | Caller re-runs `agentkeys init` against `BROKER_BACKEND_URL` | -| `POST /v1/mint-aws-creds` returns 502 with `sts_error` | IAM trust policy on `agentkeys-agent` doesn't allow the daemon user | Check the role's trust policy in IAM | +| `POST /v1/mint-aws-creds` returns 502 with `sts_error` | IAM trust policy on `agentkeys-data-role` doesn't allow the daemon user | Check the role's trust policy in IAM | | Audit DB grows unbounded | No retention policy in v0.1 | Run a periodic `DELETE FROM mint_log WHERE minted_at < ?` from cron, or `sqlite3 .. VACUUM` | ## 9. What's NOT in scope for v0.1 diff --git a/docs/spec/plans/development-stages.md b/docs/spec/plans/development-stages.md index 96df45b..379486f 100644 --- a/docs/spec/plans/development-stages.md +++ b/docs/spec/plans/development-stages.md @@ -19,7 +19,7 @@ If you're looking for setup / demo instructions, go to [`../../dev-setup.md`](.. 
| 3 | Daemon + MCP | `agentkeys-daemon` + `agentkeys-mcp` — Unix-socket JSON-RPC, `memfd_secret`, scope enforcement, 4 MCP tools | 13/13 unit | | 4 | Pair / Approve / Recover | OTP-gated auth requests; 2-terminal pair flow; alias / email / ENS recovery via identity-link table | 15/11 unit + 2-terminal E2E | | 5a | Provisioner (deterministic) | OpenRouter + OpenAI CDP scrapers; `signupEmailOtp` pattern library; HTML-strip + label-aware OTP extractor; mandatory post-provision verify; `agentkeys provision openrouter` | 59/59 unit + live provision | -| 6 (interim, 2026-04) | Hosted email infra | SES domain verification on `bots.litentry.org`; `agentkeys-daemon` IAM user → `agentkeys-agent` assume-role; S3 inbound bucket; `ses-s3` email backend; end-to-end demo from signup → SES receipt → S3 poll → key extraction | `scripts/stage6-demo-run.sh` prints a valid `sk-or-v1-...` key | +| 6 (interim, 2026-04) | Hosted email infra | SES domain verification on `bots.litentry.org`; `agentkeys-daemon` IAM user → `agentkeys-data-role` assume-role; S3 inbound bucket; `ses-s3` email backend; end-to-end demo from signup → SES receipt → S3 poll → key extraction | `scripts/stage6-demo-run.sh` prints a valid `sk-or-v1-...` key | | 7 phase 1 (2026-04) | Broker server | `agentkeys-broker-server` axum service: bearer-gated `POST /v1/mint-aws-creds`, audit SQLite, supervisor probes; daemon `--broker-url` flag wired up | 22/22 unit + integration | | 7 phase 2 (2026-04) | OIDC issuer + AWS-cred wiring | OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set; audit destination is the broker's local SQLite per the pluggable-audit-backend framing in [`architecture.md` §11](../architecture.md) | broker integration + clippy clean; cloud federation deployment runbook in [`stage7-wip.md`](../../stage7-wip.md) | diff --git 
a/docs/spec/ses-email-architecture.md b/docs/spec/ses-email-architecture.md index 22ab8e7..0a7f989 100644 --- a/docs/spec/ses-email-architecture.md +++ b/docs/spec/ses-email-architecture.md @@ -124,7 +124,7 @@ graph TB end subgraph IAM[" IAM "] User["Singleton user agentkeys-daemon
inline: sts:AssumeRole only"] - Role["Singleton role agentkeys-agent
inline: s3:Get/List + ses:SendRawEmail"] + Role["Singleton role agentkeys-data-role
inline: s3:Get/List + ses:SendRawEmail"] end subgraph APP[" Daemon "] Daemon[provisioner-scripts ses-s3 backend] @@ -173,7 +173,7 @@ IAM user "agentkeys-daemon" ↓ sts:AssumeRole → temp creds (1h, auto-refreshed) ↓ -IAM role "agentkeys-agent" +IAM role "agentkeys-data-role" ├─ trust policy: trusts agentkeys-daemon (the user above) ├─ inline policy: │ ├─ s3:ListBucket on agentkeys-mail-${ACCOUNT_ID} @@ -306,7 +306,7 @@ Stage 6 hosts every user's inbox in one AWS account, one S3 bucket, one IAM role { "Sid": "AllowListOwnPrefix", "Effect": "Allow", - "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-data-role" }, "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::agentkeys-mail", "Condition": { "StringLike": { "s3:prefix": "${aws:PrincipalTag/agentkeys_user_wallet}/*" } } @@ -314,14 +314,14 @@ Stage 6 hosts every user's inbox in one AWS account, one S3 bucket, one IAM role { "Sid": "AllowCrudOwnPrefix", "Effect": "Allow", - "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-data-role" }, "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"], "Resource": "arn:aws:s3:::agentkeys-mail/${aws:PrincipalTag/agentkeys_user_wallet}/*" }, { "Sid": "DenyEverythingElse", "Effect": "Deny", - "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam:::role/agentkeys-data-role" }, "NotAction": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:ListBucket"], "Resource": "*" } @@ -427,7 +427,7 @@ AgentMail is a **good reference for the SES underpinnings** — but structurally |---|---| | 1 | Register `agentkeys-email.io`. SES domain verification. DNS: MX, DKIM (AWS_SES managed), SPF, DMARC. Request SES production access. | | 2 | S3 bucket `agentkeys-mail` with per-user-prefix structure + `aws:PrincipalTag/agentkeys_user_wallet` bucket policy + lifecycle rules. 
SES receipt rule with `S3Action` writing raw MIME directly to the bucket (no Lambda). | -| 3 | IAM OIDC provider `oidc.agentkeys.dev` registered in our AWS account. IAM role `agentkeys-agent` with trust policy pinned to TEE enclave + requiring non-empty `agentkeys_user_wallet` claim. Role permissions for `s3:GetObject`/`s3:ListBucket` (per prefix) and `ses:SendRawEmail` (with `ses:FromAddress` condition). | +| 3 | IAM OIDC provider `oidc.agentkeys.dev` registered in our AWS account. IAM role `agentkeys-data-role` with trust policy pinned to TEE enclave + requiring non-empty `agentkeys_user_wallet` claim. Role permissions for `s3:GetObject`/`s3:ListBucket` (per prefix) and `ses:SendRawEmail` (with `ses:FromAddress` condition). | | 4 | TEE-side ES256 OIDC-issuer key derivation at `oidc/issuer/v1` + JWT minter. Thin HTTPS proxy at `oidc.agentkeys.dev` serving static discovery doc + JWKS (Let's Encrypt). | | 5 | `SesEmailAuthority` Rust impl: implements `mint_read_creds(inbox) -> STS response` and `mint_send_creds(inbox) -> STS response` via `sts:AssumeRoleWithWebIdentity`. Emits `CredsMinted` audit extrinsic per call. | | 6 | Daemon MCP tools: `email.list` (S3 list), `email.get` (S3 get + MIME parse locally), `email.send` (assemble MIME + SES SendRawEmail). Each unwraps into `mint` + direct AWS call. | diff --git a/docs/stage6-aws-setup.md b/docs/stage6-aws-setup.md index e7d8f21..ef6004e 100644 --- a/docs/stage6-aws-setup.md +++ b/docs/stage6-aws-setup.md @@ -1,7 +1,7 @@ # Stage 6 AWS Setup Runbook **Audience:** the operator setting up Stage 6's hosted-email infra on real AWS for the first time. Default path is a subdomain on an existing parent (`bots.litentry.org` on AWS account `429071895007`); the wiki-canonical standalone `@agentkeys-email.io` path is the post-interim option. 
-**Outcome:** an AWS account with SES domain verified, `agentkeys-daemon` IAM user + `agentkeys-agent` role (static-IAM-user trust), S3 bucket + bucket policy, SES receipt rule writing inbound to S3. Once done, the Stage 6 code (mock-server + CLI + provisioner-scripts adapters) can talk to real AWS, and the Stage 5b live demo unblocks. The OIDC-federated variant (TEE-signed JWT → PrincipalTag isolation) is Stage 7 work; test preserved in [`stage7-wip.md`](./stage7-wip.md). +**Outcome:** an AWS account with SES domain verified, `agentkeys-daemon` IAM user + `agentkeys-data-role` role (static-IAM-user trust), S3 bucket + bucket policy, SES receipt rule writing inbound to S3. Once done, the Stage 6 code (mock-server + CLI + provisioner-scripts adapters) can talk to real AWS, and the Stage 5b live demo unblocks. The OIDC-federated variant (TEE-signed JWT → PrincipalTag isolation) is Stage 7 work; test preserved in [`stage7-wip.md`](./stage7-wip.md). **Status:** interim build. TEE-held BYODKIM and TEE-signed OIDC JWTs are deferred until [`heima-gaps-vs-desired-architecture.md`](./spec/heima-gaps-vs-desired-architecture.md) §3 + §4 close. AWS-managed DKIM is used as the Stage 6 interim; replace it with TEE-BYODKIM later. ## 0. Preconditions @@ -130,9 +130,11 @@ aws sesv2 get-email-identity --region "$REGION" --email-identity "$DOMAIN" \ > > **Swap to TEE-BYODKIM happens when [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md) closes.** Until then, the Stage 6 interim accepts the AWS-custody tradeoff. Do NOT upgrade to "BYODKIM with file-stored key" — that path is strictly worse than AWS-managed (lower availability, similar trust surface). -## 3. IAM: daemon user + `agentkeys-agent` role +## 3. IAM: daemon user + `agentkeys-data-role` -This Stage 6 runbook uses **static IAM-user trust** as the interim: create a dedicated IAM user `agentkeys-daemon`, create the `agentkeys-agent` role that trusts only that user, and attach the S3/SES inline permissions. 
The user's access keys get injected into the daemon's env at runtime; the daemon calls `sts:AssumeRole` to get temp creds before touching S3 or SES. +> **Note (2026-04-28):** This role was renamed from `agentkeys-agent` → `agentkeys-data-role` to disambiguate from the project's "agent" terminology (the AI agent the credentials are minted *for* is a separate concept from the IAM role the broker assumes *into*). The broker still accepts the legacy `BROKER_AGENT_ROLE_ARN` env var for unmigrated deployments; new deployments should use `BROKER_DATA_ROLE_ARN` and the new role name throughout. + +This Stage 6 runbook uses **static IAM-user trust** as the interim: create a dedicated IAM user `agentkeys-daemon`, create the `agentkeys-data-role` role that trusts only that user, and attach the S3/SES inline permissions. The user's access keys get injected into the daemon's env at runtime; the daemon calls `sts:AssumeRole` to get temp creds before touching S3 or SES. For the full OIDC-federated variant (where a TEE-minted JWT is exchanged at STS for temp creds tagged with `agentkeys_user_wallet`), see [`stage7-wip.md`](./stage7-wip.md). That path delivers cryptographic per-user isolation via PrincipalTag but requires `oidc.agentkeys.dev` hosted publicly with a Let's Encrypt cert — deferred because (a) the hosting adds a Stage 7 dependency and (b) the "right" signer for that path is a TEE-derived ES256 key, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). 
@@ -167,18 +169,18 @@ aws iam put-user-policy \ Statement: [{ Effect: "Allow", Action: "sts:AssumeRole", - Resource: "arn:aws:iam::\($acct):role/agentkeys-agent" + Resource: "arn:aws:iam::\($acct):role/agentkeys-data-role" }] }')" ``` > **Why `jq --arg` instead of `cat > file.json < DKIM_STATUS= BUCKET_ARN=arn:aws:s3:::agentkeys-mail-429071895007 -ROLE_ARN=arn:aws:iam::429071895007:role/agentkeys-agent +ROLE_ARN=arn:aws:iam::429071895007:role/agentkeys-data-role DAEMON_USER_ARN=arn:aws:iam::429071895007:user/agentkeys-daemon DAEMON_ACCESS_KEY_ID= DAEMON_SECRET_ACCESS_KEY= # share via 1Password, NOT in chat @@ -447,8 +449,8 @@ I'll then wire `AGENTKEYS_EMAIL_BACKEND=ses-s3` in provisioner-scripts to read f aws ses set-active-receipt-rule-set --rule-set-name "" --region "$REGION" # Drop the role -aws iam delete-role-policy --role-name agentkeys-agent --policy-name agentkeys-agent-inline -aws iam delete-role --role-name agentkeys-agent +aws iam delete-role-policy --role-name agentkeys-data-role --policy-name agentkeys-data-role-inline +aws iam delete-role --role-name agentkeys-data-role # Drop the daemon user (list + delete access keys first — can't delete a user with keys) for KEY in $(aws iam list-access-keys --user-name agentkeys-daemon --query 'AccessKeyMetadata[*].AccessKeyId' --output text); do diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index df238db..615bdc2 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -38,7 +38,7 @@ cargo run --release -p agentkeys-mock-server -- --port 8090 # Terminal B — broker. AWS credentials come from the operator's # ~/.aws/credentials profile (e.g. agentkeys-daemon) via `awsp` or # AWS_PROFILE. ACCOUNT_ID + REGION live in the operator's shell. The -# broker derives BROKER_AGENT_ROLE_ARN from ACCOUNT_ID. +# broker derives BROKER_DATA_ROLE_ARN from ACCOUNT_ID. 
awsp agentkeys-daemon export BROKER_BACKEND_URL=http://127.0.0.1:8090 cargo run --release -p agentkeys-broker-server -- --port 8091 @@ -51,7 +51,7 @@ SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \ CREDS=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-aws-creds \ -H "Authorization: Bearer $SESSION") echo "$CREDS" | jq '{access_key_id, expiration, wallet}' -# → real 1h temp creds, scoped to the assumed agentkeys-agent role +# → real 1h temp creds, scoped to the assumed agentkeys-data-role role ``` Acceptance: `curl /healthz` → 200, `curl /readyz` → 200, `mint-aws-creds` returns creds, audit row appears in `~/.agentkeys/broker/audit.sqlite`. @@ -126,7 +126,7 @@ A four-terminal walk-through that exercises everything Phase 2 ships, with no AW - A release build: `cargo build --release -p agentkeys-mock-server -p agentkeys-broker-server -p agentkeys-cli` (≈ 90 s cold). - `jq` and `curl` on `$PATH`. -- For the AWS-side check (step 4b + 6), `awsp agentkeys-daemon` (or another profile with `sts:AssumeRole` on `agentkeys-agent`) plus `ACCOUNT_ID` from your operator setup. For offline-only, skip those steps and use `--skip-startup-check`. +- For the AWS-side check (step 4b + 6), `awsp agentkeys-daemon` (or another profile with `sts:AssumeRole` on `agentkeys-data-role`) plus `ACCOUNT_ID` from your operator setup. For offline-only, skip those steps and use `--skip-startup-check`. ### Walk-through @@ -190,7 +190,7 @@ export AGENTKEYS_BROKER_URL=http://127.0.0.1:8091 # 6. Audit log inspection sqlite3 ~/.agentkeys/broker/audit.sqlite \ "SELECT outcome, requested_role, requester_wallet, occurred_at FROM mint_audit ORDER BY id DESC LIMIT 10;" -# expect: a row per mint, with requested_role IN ('arn:aws:iam::*:role/agentkeys-agent', 'oidc_jwt') +# expect: a row per mint, with requested_role IN ('arn:aws:iam::*:role/agentkeys-data-role', 'oidc_jwt') ``` ### Acceptance @@ -304,7 +304,7 @@ The broker resolves AWS credentials through the SDK default chain. 
Pick one of t #### 3a. EC2 instance profile (recommended on AWS) -If the broker host is an EC2 instance, attach an IAM **instance profile** with `sts:AssumeRole` permission on `agentkeys-agent`. The SDK pulls credentials from IMDS automatically — **no secrets land on the host's filesystem, no env vars, no rotation runbook**. +If the broker host is an EC2 instance, attach an IAM **instance profile** with `sts:AssumeRole` permission on `agentkeys-data-role`. The SDK pulls credentials from IMDS automatically — **no secrets land on the host's filesystem, no env vars, no rotation runbook**. ```bash # One-time, from your admin workstation: @@ -317,12 +317,12 @@ aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document "$(jq - Statement: [{Effect:"Allow", Principal:{Service:"ec2.amazonaws.com"}, Action:"sts:AssumeRole"}] }')" -# Inline policy: the only thing the broker host can do is sts:AssumeRole on agentkeys-agent. +# Inline policy: the only thing the broker host can do is sts:AssumeRole on agentkeys-data-role. aws iam put-role-policy --role-name $ROLE_NAME --policy-name BrokerAssumeAgent \ --policy-document "$(jq -n --arg account "$ACCOUNT_ID" '{ Version: "2012-10-17", Statement: [{Effect:"Allow", Action:"sts:AssumeRole", - Resource:"arn:aws:iam::\($account):role/agentkeys-agent"}] + Resource:"arn:aws:iam::\($account):role/agentkeys-data-role"}] }')" aws iam create-instance-profile --instance-profile-name $INSTANCE_PROFILE @@ -566,7 +566,7 @@ Replaces [`stage6-aws-setup.md` §3b](./stage6-aws-setup.md) (static IAM user). 
OIDC_ISSUER_HOST="$(echo "$OIDC_ISSUER" | sed 's|https://||')" aws iam update-assume-role-policy \ - --role-name agentkeys-agent \ + --role-name agentkeys-data-role \ --policy-document "$(jq -n \ --arg provider "$OIDC_PROVIDER_ARN" \ --arg aud_key "${OIDC_ISSUER_HOST}:aud" \ @@ -592,7 +592,7 @@ Replaces the `AllowDaemonRead` statement in [`stage6-aws-setup.md` §4](./stage6 { "Sid": "AllowDaemonReadOwnPrefix", "Effect": "Allow", - "Principal": {"AWS": "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-agent"}, + "Principal": {"AWS": "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-data-role"}, "Action": ["s3:GetObject", "s3:ListBucket"], "Resource": [ "arn:aws:s3:::$BUCKET", @@ -620,7 +620,7 @@ WALLET=$(jq -R 'split(".") | .[1] | @base64d | fromjson | .agentkeys_user_wallet # Exchange for temp creds CREDS=$(aws sts assume-role-with-web-identity \ - --role-arn "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-agent" \ + --role-arn "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-data-role" \ --role-session-name "stage7-wip-$(date +%s)" \ --web-identity-token "$JWT") export AWS_ACCESS_KEY_ID=$(echo "$CREDS" | jq -r .Credentials.AccessKeyId) diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 0f55791..1e0f450 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -204,13 +204,13 @@ EOF if [[ -z "$ACCOUNT_ID" ]]; then explain "AWS account ID" \ "12-digit account ID for the AWS account that holds your" \ - "agentkeys-daemon IAM user (or role) and the agentkeys-agent role." \ - "Used to derive BROKER_AGENT_ROLE_ARN if not overridden." + "agentkeys-daemon IAM user (or role) and the agentkeys-data-role role." \ + "Used to derive BROKER_DATA_ROLE_ARN if not overridden." prompt_required ACCOUNT_ID "Account ID" fi explain "AWS region" \ - "Region the broker calls STS in. Use the region your agentkeys-agent" \ + "Region the broker calls STS in. Use the region your agentkeys-data-role" \ "role and the operator's S3 bucket already live in." 
prompt_default REGION "Region" "$REGION" @@ -578,7 +578,7 @@ case "$CRED_MODE" in cat <&1) if ! echo "$CREDS" | jq -e .Credentials >/dev/null 2>&1; then diff --git a/wiki/email-system.md b/wiki/email-system.md index c42160c..b38d595 100644 --- a/wiki/email-system.md +++ b/wiki/email-system.md @@ -184,7 +184,7 @@ graph TB end subgraph IAM[" IAM "] User["Singleton user 'agentkeys-daemon'
+ inline policy: only sts:AssumeRole"] - Role["Singleton role 'agentkeys-agent'
+ inline policy: s3:Get/List + ses:SendRawEmail"] + Role["Singleton role 'agentkeys-data-role'
+ inline policy: s3:Get/List + ses:SendRawEmail"] end subgraph APP[" Our code "] Daemon[Daemon process] @@ -206,7 +206,7 @@ graph TB | Singleton — one per AWS account regardless of user count | Per-user — logical, no AWS resource per user | |---|---| | 1 IAM user `agentkeys-daemon` | N throwaway addresses `bot-@` (DB / on-chain) | -| 1 IAM role `agentkeys-agent` | N S3 objects under `inbound/.eml` (lifecycle-capped) | +| 1 IAM role `agentkeys-data-role` | N S3 objects under `inbound/.eml` (lifecycle-capped) | | 1 S3 bucket | (no other AWS resources scale per user) | | 1 SES domain identity | | | 1 SES wildcard receipt rule on `*@` | | @@ -229,7 +229,7 @@ operator's long-lived AWS access keys (stored in 1Password) ↓ injected to daemon as AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY env IAM user (agentkeys-daemon) ↓ sts:AssumeRole — only action this user can perform -IAM role (agentkeys-agent) +IAM role (agentkeys-data-role) ↓ returns 1h temp creds (auto-refreshed) S3 GetObject + ses:SendRawEmail API calls ``` @@ -240,7 +240,7 @@ Compromise of the long-lived access keys is bounded to "attacker can assume the | | Stage 6 interim (shipped today) | Stage 7 target | |---|---|---| -| Bucket policy | `agentkeys-agent` reads whole bucket | `agentkeys-agent` only reads prefix matching `${aws:PrincipalTag/agentkeys_user_wallet}` | +| Bucket policy | `agentkeys-data-role` reads whole bucket | `agentkeys-data-role` only reads prefix matching `${aws:PrincipalTag/agentkeys_user_wallet}` | | Per-user separation | App-side — daemon filters by `To:` header | Cloud-side — bucket policy denies cross-prefix reads | | Failure mode if our app has a bug | User A could read user B's mail | `AccessDenied` from S3 | | Auth flow | Long-lived IAM user → `sts:AssumeRole` | OIDC JWT (with `agentkeys_user_wallet` claim) → `sts:AssumeRoleWithWebIdentity` | diff --git a/wiki/tag-based-access.md b/wiki/tag-based-access.md index 4b30db7..821b7ff 100644 --- a/wiki/tag-based-access.md +++ 
b/wiki/tag-based-access.md @@ -45,7 +45,7 @@ TEE Authority (mint step): AWS STS (exchange step): POST sts:AssumeRoleWithWebIdentity WebIdentityToken = <JWT> - RoleArn = arn:aws:iam::<ACCOUNT_ID>:role/agentkeys-agent + RoleArn = arn:aws:iam::<ACCOUNT_ID>:role/agentkeys-data-role → validates JWT via our JWKS → maps JWT claim agentkeys_user_wallet → session tag (PrincipalTag) → returns temp creds (AccessKey, SecretKey, SessionToken) @@ -154,7 +154,7 @@ During `AssumeRoleWithWebIdentity`, AWS maps principal tags declared in the OIDC { "Sid": "AllowListOwnPrefix", "Effect": "Allow", - "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-data-role" }, "Action": "s3:ListBucket", "Resource": "arn:aws:s3:::agentkeys-mail", "Condition": { @@ -168,14 +168,14 @@ During `AssumeRoleWithWebIdentity`, AWS maps principal tags declared in the OIDC { "Sid": "AllowCrudOwnPrefix", "Effect": "Allow", - "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-data-role" }, "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"], "Resource": "arn:aws:s3:::agentkeys-mail/${aws:PrincipalTag/agentkeys_user_wallet}/*" }, { "Sid": "DenyEverythingElse", "Effect": "Deny", - "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-agent" }, + "Principal": { "AWS": "arn:aws:iam::123456789012:role/agentkeys-data-role" }, "NotAction": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:ListBucket"], "Resource": "*" } @@ -183,7 +183,7 @@ During `AssumeRoleWithWebIdentity`, AWS maps principal tags declared in the OIDC } ``` -Every user assumes the **same role** — `agentkeys-agent`. But each session carries a different PrincipalTag derived from their JWT claim, and the bucket policy expands `${aws:PrincipalTag/agentkeys_user_wallet}` per session. User A with tag `0xABC` sees only `agentkeys-mail/0xABC/*`.
User B with tag `0xBEEF` sees only `agentkeys-mail/0xBEEF/*`. Cryptographic separation, zero code on our side. +Every user assumes the **same role** — `agentkeys-data-role`. But each session carries a different PrincipalTag derived from their JWT claim, and the bucket policy expands `${aws:PrincipalTag/agentkeys_user_wallet}` per session. User A with tag `0xABC` sees only `agentkeys-mail/0xABC/*`. User B with tag `0xBEEF` sees only `agentkeys-mail/0xBEEF/*`. Cryptographic separation, zero code on our side. --- @@ -258,7 +258,7 @@ Tag-based access control is the **technical mechanism that lets rule #4 (broker- - [ ] Include `agentkeys_user_wallet` in the TEE's JWT claim-set (parallel with existing `sub`) - [ ] Update OIDC discovery doc to list the claim in `claims_supported` - [ ] Register the OIDC provider in each AWS account we operate -- [ ] Create the `agentkeys-agent` role with trust policy requiring the claim + pinned to enclave mrenclave +- [ ] Create the `agentkeys-data-role` role with trust policy requiring the claim + pinned to enclave mrenclave - [ ] Apply the shared-bucket policy using `${aws:PrincipalTag/agentkeys_user_wallet}` - [ ] Integration test: mint two JWTs for two different wallets; verify each can access only its prefix; verify `agentkeys_user_wallet=""` is denied - [ ] Chain-audit extrinsic at mint time includes the claim values (redacted appropriately) From 456e12a2d70e10e32e06f5729ac3861d1209771d Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 13:06:51 +0800 Subject: [PATCH 08/15] docs: use broker.litentry.org as canonical broker hostname example MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Aligns the docs with bots.litentry.org (already canonical for the SES data-plane domain) so a litentry-following operator doesn't have to mentally substitute placeholders. 
broker.litentry.org is a control-plane hostname — distinct from bots.litentry.org which is the data-plane (email recipient) domain. Files: docs/stage7-wip.md, docs/operator-runbook.md, docs/dev-setup.md, scripts/setup-broker-host.sh. ops@ contact also updated to ops@litentry.org. Co-Authored-By: Claude Opus 4.7 (1M context) --- docs/dev-setup.md | 2 +- docs/operator-runbook.md | 2 +- docs/stage7-wip.md | 30 +++++++++++++++--------------- scripts/setup-broker-host.sh | 6 +++--- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/dev-setup.md b/docs/dev-setup.md index aea85b4..b23e71a 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -92,7 +92,7 @@ You're building an agent that needs OpenAI / OpenRouter / X / etc. credentials b ### 4.1 What you need from the operator -- `AGENTKEYS_BROKER_URL` — e.g. `http://broker.local:8091` or `https://broker.example.dev`. +- `AGENTKEYS_BROKER_URL` — e.g. `http://broker.local:8091` or `https://broker.litentry.org`. - `AGENTKEYS_BEARER_TOKEN` — short-lived; the operator hands these out per-developer. That's it. No AWS keys, no `aws sts assume-role`, no `stage6-demo-env.sh` sourcing. diff --git a/docs/operator-runbook.md b/docs/operator-runbook.md index 3b165ce..7ba01cb 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -212,7 +212,7 @@ When `broker.agentkeys.dev` (or your hosted equivalent) is live, the migration f ```diff -export AGENTKEYS_BROKER_URL=http://broker.local:8091 -+export AGENTKEYS_BROKER_URL=https://broker.example.dev ++export AGENTKEYS_BROKER_URL=https://broker.litentry.org ``` Operator-side, the same binary runs. Configuration source changes from env vars to KMS-sealed config (interface design only in v0.1; full implementation is the Stage 7 phase 2 hosted-deploy work). 
diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index 615bdc2..197e5d2 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -231,7 +231,7 @@ This section is for operators who want their broker reachable by daemons running ``` ┌── developer laptop / CI / cloud sandbox ──┐ │ agentkeys-daemon (or `agentkeys` CLI) │ -│ --broker-url https://broker.example.dev │ +│ --broker-url https://broker.litentry.org │ └───────────────────┬───────────────────────┘ │ HTTPS (bearer) ▼ @@ -277,7 +277,7 @@ Pick whatever fits your stack. Two examples that satisfy the requirements (TLS-t Either way you need: -- A DNS name resolving to the host (e.g. `broker.example.dev`). +- A DNS name resolving to the host (e.g. `broker.litentry.org`). - A public-CA TLS certificate covering that name (Let's Encrypt is free; ACM is free for ALB use). - Firewall: inbound `:443` from anywhere, inbound `:22` from your admin IP, **everything else closed**. The broker's `:8091` and the backend's `:8090` are reached only via localhost or the private network. @@ -377,7 +377,7 @@ These values are not secrets and live in the systemd unit directly (Step 4): ACCOUNT_ID=429071895007 REGION=us-east-1 BROKER_BACKEND_URL=http://127.0.0.1:8090 -BROKER_OIDC_ISSUER=https://broker.example.dev +BROKER_OIDC_ISSUER=https://broker.litentry.org ``` `BROKER_OIDC_ISSUER` **must** match the public URL the reverse proxy serves — AWS rejects `create-open-id-connect-provider` if the registered URL doesn't equal the `iss` claim emitted by the broker. @@ -426,7 +426,7 @@ Environment=HOME=/var/lib/agentkeys Environment=ACCOUNT_ID=429071895007 Environment=REGION=us-east-1 Environment=BROKER_BACKEND_URL=http://127.0.0.1:8090 -Environment=BROKER_OIDC_ISSUER=https://broker.example.dev +Environment=BROKER_OIDC_ISSUER=https://broker.litentry.org # Uncomment ONE of the next two lines depending on the credential path: # 3a (EC2 instance profile): nothing — IMDS handles it. 
# 3b (named profile): @@ -462,23 +462,23 @@ The broker binds to `127.0.0.1:8091` so only the local reverse proxy can reach i ### Step 5 — Reverse proxy + TLS -Minimal nginx site for `broker.example.dev`: +Minimal nginx site for `broker.litentry.org`: ```nginx # /etc/nginx/sites-available/agentkeys-broker server { listen 80; - server_name broker.example.dev; + server_name broker.litentry.org; location /.well-known/acme-challenge/ { root /var/www/certbot; } location / { return 301 https://$host$request_uri; } } server { listen 443 ssl http2; - server_name broker.example.dev; + server_name broker.litentry.org; - ssl_certificate /etc/letsencrypt/live/broker.example.dev/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/broker.example.dev/privkey.pem; + ssl_certificate /etc/letsencrypt/live/broker.litentry.org/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/broker.litentry.org/privkey.pem; ssl_protocols TLSv1.2 TLSv1.3; # AWS IAM only fetches the well-known + JWKS during create-open-id-connect-provider; @@ -497,7 +497,7 @@ server { ```bash sudo ln -s /etc/nginx/sites-available/agentkeys-broker /etc/nginx/sites-enabled/ -sudo certbot --nginx -d broker.example.dev --agree-tos -m ops@example.dev +sudo certbot --nginx -d broker.litentry.org --agree-tos -m ops@litentry.org sudo nginx -t && sudo systemctl reload nginx ``` @@ -506,14 +506,14 @@ sudo nginx -t && sudo systemctl reload nginx From a laptop that has nothing AWS-shaped configured: ```bash -curl -sf https://broker.example.dev/healthz # → "ok" -curl -sf https://broker.example.dev/.well-known/openid-configuration | \ - jq '.issuer == "https://broker.example.dev"' # → true -curl -sf https://broker.example.dev/.well-known/jwks.json | jq '.keys[0].kid' +curl -sf https://broker.litentry.org/healthz # → "ok" +curl -sf https://broker.litentry.org/.well-known/openid-configuration | \ + jq '.issuer == "https://broker.litentry.org"' # → true +curl -sf https://broker.litentry.org/.well-known/jwks.json | jq 
'.keys[0].kid' # End-to-end JWT mint (use a session bearer the operator has provisioned) SESSION= -curl -sf -X POST https://broker.example.dev/v1/mint-oidc-jwt \ +curl -sf -X POST https://broker.litentry.org/v1/mint-oidc-jwt \ -H "Authorization: Bearer $SESSION" | jq '.expiration' ``` diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 1e0f450..1b7e1c2 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -11,7 +11,7 @@ # Usage: # bash scripts/setup-broker-host.sh # interactive # bash scripts/setup-broker-host.sh --non-interactive \ # CI -# --issuer-url https://broker.example.dev \ +# --issuer-url https://broker.litentry.org \ # --account-id 429071895007 \ # [--region us-east-1] \ # [--cred-mode instance-profile|profile|static] \ @@ -197,7 +197,7 @@ EOF " • exactly match BROKER_OIDC_ISSUER (this script writes that env var)" \ " • exactly match the --url you pass to AWS later" \ "" \ - "Example: https://broker.example.dev" + "Example: https://broker.litentry.org" prompt_required ISSUER_URL "Issuer URL" fi @@ -287,7 +287,7 @@ EOF fi # ─── Validate non-interactive inputs ───────────────────────────────────────── -[[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.example.dev). Drop --non-interactive for an interactive walk-through." +[[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.litentry.org). Drop --non-interactive for an interactive walk-through." [[ -n "$ACCOUNT_ID" ]] || die "--account-id is required. Drop --non-interactive for an interactive walk-through." 
[[ -n "$CRED_MODE" ]] || CRED_MODE="instance-profile" case "$CRED_MODE" in From 4592e487df88b1d1fbd89d0a0bf011d90c154303 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 13:50:17 +0800 Subject: [PATCH 09/15] docs(stage7): add Route 53 DNS-wiring step + setup-broker-host.sh callout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Step 1b walks through allocating an Elastic IP and upserting the broker.litentry.org A record in Route 53 — the prerequisite that lets certbot's HTTP-01 challenge resolve the host in Step 5. The "Automated path" callout above Step 1 points operators at scripts/setup-broker-host.sh as the bundled automation for Steps 2-5, making the manual walk-through the reference rather than the default. --- docs/stage7-wip.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index 197e5d2..d40b50f 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -268,6 +268,8 @@ For v0.1 operators, two pragmatic options: 1. **Single-host deployment with persistent state (recommended for self-hosted teams).** Keep the mock-server but add a small wrapper: front it with `systemd` (or Docker `restart: unless-stopped`), and mount the SQLite file on persistent storage — `docs/operator-runbook.md` will track the exact patches needed in the next iteration. Until that lands, treat session loss on restart as part of the operator runbook (have developers re-`init` after a backend restart). 2. **Skip the mock and wait for Heima.** If your timeline allows, hold this deployment until the chain-backed backend lands and use the real Heima session-management path. Stage 7 phase 2 isn't gated on this — the broker's interface is the same regardless of which backend implements `/session/create` + `/session/validate`. 
+> **Automated path:** [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) bundles Steps 2–5 (binary install, `agentkeys` system user, systemd units, nginx site, certbot issuance) into a single interactive run-on-the-host script. It's idempotent, supports the three credential modes from Step 3, and prompts before each optional step. Steps 1 (provision the host) and 1b (wire DNS) are still manual prerequisites. After running the script, jump to Step 6 for the smoke test. + ### Step 1 — Provision the host Pick whatever fits your stack. Two examples that satisfy the requirements (TLS-terminating reverse proxy + ≥ 1 vCPU / 1 GiB RAM + persistent disk): @@ -281,6 +283,41 @@ Either way you need: - A public-CA TLS certificate covering that name (Let's Encrypt is free; ACM is free for ALB use). - Firewall: inbound `:443` from anywhere, inbound `:22` from your admin IP, **everything else closed**. The broker's `:8091` and the backend's `:8090` are reached only via localhost or the private network. +### Step 1b — Wire DNS to the broker host + +The broker hostname must resolve to the host's public IP **before** certbot runs in Step 5 (Let's Encrypt's HTTP-01 challenge resolves the name and hits port 80). Allocate an Elastic IP (so the address survives stop/start) and add an `A` record. If your DNS lives in AWS Route 53: + +```bash +# 1. Allocate + attach an Elastic IP (run with the right --region for the EC2 instance) +EIP_ALLOC=$(aws ec2 allocate-address --domain vpc --region us-east-1 --query AllocationId --output text) +aws ec2 associate-address --region us-east-1 \ + --instance-id <instance-id> --allocation-id "$EIP_ALLOC" +EIP=$(aws ec2 describe-addresses --region us-east-1 \ + --allocation-ids "$EIP_ALLOC" --query 'Addresses[0].PublicIp' --output text) + +# 2. Upsert the A record in Route 53 (Route 53 is global; no --region needed) +HZ=$(aws route53 list-hosted-zones-by-name --dns-name litentry.org.
\ + --query 'HostedZones[0].Id' --output text | sed 's|/hostedzone/||') +aws route53 change-resource-record-sets --hosted-zone-id "$HZ" \ + --change-batch "$(jq -n --arg ip "$EIP" '{ + Changes: [{ + Action: "UPSERT", + ResourceRecordSet: { + Name: "broker.litentry.org.", + Type: "A", + TTL: 300, + ResourceRecords: [{ Value: $ip }] + } + }] + }')" + +# 3. Verify (use DoH if your local resolver is hijacked by a router/proxy) +curl -s 'https://cloudflare-dns.com/dns-query?name=broker.litentry.org&type=A' \ + -H 'accept: application/dns-json' | jq '.Answer' +``` + +For non-AWS DNS providers, create an equivalent A record (`broker.litentry.org` → EIP) in their console. The IAM user running these commands needs `ec2:AllocateAddress` / `ec2:AssociateAddress` / `ec2:DescribeAddresses` and `route53:ChangeResourceRecordSets` / `route53:ListHostedZonesByName` — `agentkeys-admin` is IAM-only by default, so attach a temporary inline policy or use a more privileged user for this one-off. + ### Step 2 — Install the binaries The repo doesn't yet ship a `cargo dist` release; build from source on the target arch and copy the resulting binaries: From fae247807a39e126d28ede7de6ec73c600b8803a Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 14:06:40 +0800 Subject: [PATCH 10/15] fix(setup): two-phase nginx config to break certbot chicken-and-egg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit setup-broker-host.sh wrote a full :80+:443 nginx site up front, but the :443 block referenced LE cert files that don't exist yet. nginx then refused to start, and `certbot --nginx` aborted at its preflight `nginx -t` check — operators saw: cannot load certificate "/etc/letsencrypt/live//fullchain.pem": BIO_new_file() failed ... No such file or directory Switch to a two-phase config: • Phase A (no cert): :80-only with the ACME challenge location. • Phase B (cert exists): adds the :443 ssl block with proxy_pass. 
The script detects the cert file at /etc/letsencrypt/live/$ISSUER_HOST/ and writes the right config; re-running after `certbot certonly --webroot` flips A → B automatically. The post-run summary now points at `certbot certonly --webroot` (which works while nginx is up on :80) instead of the broken `--nginx` flow. --- scripts/setup-broker-host.sh | 85 ++++++++++++++++++++++++++++++------ 1 file changed, 72 insertions(+), 13 deletions(-) diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 1b7e1c2..3ef9b3f 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -497,13 +497,21 @@ WantedBy=multi-user.target EOF # ─── 6. nginx (optional) ────────────────────────────────────────────────────── -if [[ "$WITH_NGINX" == "yes" ]]; then - if ! have nginx; then - log "Installing nginx" - "${PM_INSTALL[@]}" nginx - fi - log "Writing nginx site for $ISSUER_HOST" - sudo tee /etc/nginx/sites-available/agentkeys-broker >/dev/null </dev/null </dev/null < --non-interactive +# then re-run scripts/setup-broker-host.sh to flip on the :443 block. +server { + listen 80; + server_name $ISSUER_HOST; + location /.well-known/acme-challenge/ { root /var/www/certbot; } + location / { + return 503 "TLS cert not yet issued — see setup-broker-host.sh\n"; + default_type text/plain; + } +} +EOF + fi +} + +if [[ "$WITH_NGINX" == "yes" ]]; then + if ! have nginx; then + log "Installing nginx" + "${PM_INSTALL[@]}" nginx + fi + sudo install -d -m 0755 /var/www/certbot + write_nginx_site if [[ -d /etc/nginx/sites-enabled ]]; then sudo ln -sf /etc/nginx/sites-available/agentkeys-broker /etc/nginx/sites-enabled/ + sudo rm -f /etc/nginx/sites-enabled/default + fi + if sudo nginx -t; then + sudo systemctl reload nginx 2>/dev/null || sudo systemctl restart nginx + else + warn "nginx -t failed — leaving service in current state. Inspect /etc/nginx/sites-available/agentkeys-broker." fi - sudo install -d -m 0755 /var/www/certbot fi # ─── 7. 
certbot (optional) ──────────────────────────────────────────────────── @@ -617,12 +657,31 @@ cat < - sudo nginx -t && sudo systemctl reload nginx + if sudo test -f "/etc/letsencrypt/live/$ISSUER_HOST/fullchain.pem"; then + cat < --non-interactive + 4. Re-run this script to flip on the :443 block: + bash scripts/setup-broker-host.sh + 5. Verify renewal: + sudo certbot renew --dry-run + + Note: do NOT use \`certbot --nginx\` for the first issuance — its preflight + \`nginx -t\` will fail because the :443 ssl block doesn't exist until step 4. + +EOF + fi fi cat < Date: Tue, 28 Apr 2026 14:14:53 +0800 Subject: [PATCH 11/15] fix(setup): validate ISSUER_URL has scheme + strip trailing slash MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A scheme-less issuer URL like `broker.litentry.org` was silently accepted and propagated into BROKER_OIDC_ISSUER. The broker then emitted JWTs with `iss: "broker.litentry.org"` (no `https://`), which AWS rejects at AssumeRoleWithWebIdentity time and which causes the documented smoke test `jq '.issuer == "https://broker.litentry.org"'` to print false. Validate up front: • require https:// (or http:// with a warning — AWS won't accept it, but local dev might). • strip a trailing slash so BROKER_OIDC_ISSUER matches the JWT iss claim byte-for-byte. Operators hitting the bad config in the wild: edit /etc/systemd/system/agentkeys-broker.service so Environment=BROKER_OIDC_ISSUER=https://, daemon-reload, restart. If you've already registered the OIDC provider on AWS with the wrong URL, delete and recreate it. 
--- scripts/setup-broker-host.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 3ef9b3f..37ea9bc 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -286,8 +286,16 @@ EOF fi fi -# ─── Validate non-interactive inputs ───────────────────────────────────────── +# ─── Validate inputs ───────────────────────────────────────────────────────── [[ -n "$ISSUER_URL" ]] || die "--issuer-url is required (e.g. https://broker.litentry.org). Drop --non-interactive for an interactive walk-through." +case "$ISSUER_URL" in + https://*) ;; + http://*) warn "issuer URL uses http:// — AWS IAM requires TLS; create-open-id-connect-provider will reject this. Continuing anyway."; ;; + *) die "--issuer-url must start with https:// (got '$ISSUER_URL'). The bare hostname is not a valid OIDC issuer; AWS validates the iss claim byte-for-byte."; ;; +esac +# Strip trailing slash — BROKER_OIDC_ISSUER must match the JWT iss claim +# byte-for-byte, and AWS rejects mismatches at AssumeRoleWithWebIdentity time. +ISSUER_URL="${ISSUER_URL%/}" [[ -n "$ACCOUNT_ID" ]] || die "--account-id is required. Drop --non-interactive for an interactive walk-through." [[ -n "$CRED_MODE" ]] || CRED_MODE="instance-profile" case "$CRED_MODE" in From 44a41e5c80b48fd29621b7497d2118f49d82c2a9 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 14:42:23 +0800 Subject: [PATCH 12/15] =?UTF-8?q?docs(stage7):=20AWS=20recipe=20=E2=80=94?= =?UTF-8?q?=20pre-check=20stale=20OIDC=20provider,=20verify=20after=20regi?= =?UTF-8?q?ster?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two additions to the AWS federation recipe: 1. Strengthen the issuer prereq check to compare byte-for-byte (catches the scheme-less / trailing-slash bugs operators have hit), with the exact systemd-unit fix inline. 2. New "0. 
Check for stale provider state" subsection: list providers first, identify the three states (empty / matching / stale), and delete-and-recreate flow for the stale-URL case. 3. Step 1 now ends with `aws iam get-open-id-connect-provider` so operators can confirm AWS actually fetched the JWKS, plus a note on the LE intermediate-CA thumbprint persistence. --- docs/stage7-wip.md | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index d40b50f..0ac239d 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -583,7 +583,34 @@ This section is the **operational runbook** for taking the (already-shipped) Pha - Phase 1 broker running publicly (so its `/.well-known/openid-configuration` is fetchable over public TLS). - `export OIDC_ISSUER="$BROKER_OIDC_ISSUER"` — the exact `BROKER_OIDC_ISSUER` you started the broker with. -- Verify `curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq .issuer` returns that string. +- Verify the discovery doc's `iss` claim matches **byte-for-byte** (must be `https://…`, no trailing slash, no scheme-less hostname). AWS rejects the `AssumeRoleWithWebIdentity` call later if these disagree: + ```bash + curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq -e ".issuer == \"$OIDC_ISSUER\"" + # → true + ``` + If this prints `false`, fix the broker's `BROKER_OIDC_ISSUER` env var on the host before continuing — see [Operator runbook §"Fix scheme-less issuer URL"](./operator-runbook.md) or sed the systemd unit: + ```bash + sudo sed -i \ + "s|^Environment=BROKER_OIDC_ISSUER=.*|Environment=BROKER_OIDC_ISSUER=$OIDC_ISSUER|" \ + /etc/systemd/system/agentkeys-broker.service + sudo systemctl daemon-reload && sudo systemctl restart agentkeys-broker + ``` + +#### 0. 
Check for stale provider state + +Before registering, confirm there isn't a previous registration with a wrong URL still on the account (a common artifact of fixing the issuer mid-bring-up): + +```bash +aws iam list-open-id-connect-providers +``` + +- Empty list (`"OpenIDConnectProviderList": []`) → fresh slate, proceed to step 1. +- A provider whose ARN ends in your current `OIDC_ISSUER` host → already registered, skip step 1, proceed to step 2 (verify with `aws iam get-open-id-connect-provider --open-id-connect-provider-arn <arn>` that the URL matches). +- A provider whose ARN ends in a **different** host (or a stale variant of yours) → delete it before registering the correct one: + ```bash + aws iam delete-open-id-connect-provider \ + --open-id-connect-provider-arn arn:aws:iam::${ACCOUNT_ID}:oidc-provider/<stale-host> + ``` #### 1. Register the OIDC provider in IAM @@ -593,8 +620,15 @@ aws iam create-open-id-connect-provider \ --client-id-list sts.amazonaws.com \ --thumbprint-list '<thumbprint>' export OIDC_PROVIDER_ARN="arn:aws:iam::${ACCOUNT_ID}:oidc-provider/$(echo $OIDC_ISSUER | sed 's|https://||')" + +# Verify it stuck and AWS could fetch the JWKS: +aws iam get-open-id-connect-provider \ + --open-id-connect-provider-arn "$OIDC_PROVIDER_ARN" \ + --query '{Url: Url, ClientIDList: ClientIDList, ThumbprintList: ThumbprintList}' ``` +The IAM user running this needs `iam:CreateOpenIDConnectProvider` and `iam:GetOpenIDConnectProvider` (the standard `agentkeys-admin` IAM-admin scope covers both). AWS auto-derives the cert thumbprint from the Let's Encrypt chain at registration time — if certbot rotates the cert later, the thumbprint stays valid because LE uses the same intermediate CA. + #### 2. Replace the role's trust policy with the federated variant Replaces [`stage6-aws-setup.md` §3b](./stage6-aws-setup.md) (static IAM user).
Principal becomes the OIDC provider; the `sts:TagSession` + `aws:RequestTag/agentkeys_user_wallet` condition is what wires cloud-enforced per-user isolation in §3 below. From 92355a8e095904ba7b0305e98220265f86c192d5 Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 15:12:19 +0800 Subject: [PATCH 13/15] docs: consolidate cloud setup; trim stage7 + operator-runbook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Stage 6 AWS runbook and the AWS-side half of the Stage 7 doc re-tangled themselves over time — every cross-link was "see also" rather than "the source is here". Operators ended up reading both, then the operator runbook, then both again to figure out which command to run. Restructure into three focused docs, all referenced by stage: • docs/cloud-setup.md (NEW, 548 lines) — every cloud-account resource in one file, split internally by concern (identities → DNS → inbound mail → IAM → OIDC federation → EC2 host → cleanup). Stage 6 vs Stage 7 vs federated-deployment is a *mode* of the same machinery, not three separate runbooks. Tencent Cloud SimpleDM + COS slots in at §2.2 with a 1:1 IAM→CAM mapping table — no new file when we add it. • docs/stage7-wip.md (-469 lines) — Phase 1 / Phase 2 bookkeeping dropped; Stage 7 is just "the broker that issues OIDC JWTs and AWS creds". AWS commands no longer embedded inline; the doc points at cloud-setup.md for provisioning. Smoke test now shows how to mint a session bearer end-to-end (the previous version left SESSION= as a dangling placeholder). • docs/operator-runbook.md (-86 lines) — concise. WIP/scratchpad header gone; Phase 1/Phase 2 framing gone; threat-model section points at the spec doc instead of duplicating it; rotation paths fold into one §5 table. • docs/stage6-aws-setup.md deleted; all referrers (dev-setup, stage8-wip, ses-email-architecture, development-stages, setup-dev-env.sh, setup-broker-host.sh) point at cloud-setup.md. 
Net: 813 insertions, 1264 deletions across 10 files. Stage 7 gate still passes (STAGE 7 phase 1 + phase 2 PASSED). --- docs/cloud-setup.md | 548 +++++++++++++++++++ docs/dev-setup.md | 10 +- docs/operator-runbook.md | 270 ++++------ docs/spec/plans/development-stages.md | 4 +- docs/spec/ses-email-architecture.md | 4 +- docs/stage6-aws-setup.md | 472 ---------------- docs/stage7-wip.md | 747 +++++--------------------- docs/stage8-wip.md | 2 +- scripts/setup-broker-host.sh | 18 +- scripts/setup-dev-env.sh | 2 +- 10 files changed, 813 insertions(+), 1264 deletions(-) create mode 100644 docs/cloud-setup.md delete mode 100644 docs/stage6-aws-setup.md diff --git a/docs/cloud-setup.md b/docs/cloud-setup.md new file mode 100644 index 0000000..f589363 --- /dev/null +++ b/docs/cloud-setup.md @@ -0,0 +1,548 @@ +# Cloud setup — AgentKeys + +**Audience:** the operator provisioning the cloud account that hosts AgentKeys infrastructure. +**Scope:** one file, every cloud-side resource. Read top-down once per account, then jump back to the section you're touching. 
+ +The runbook is split by concern, not by stage: + +| § | Concern | When you do this | +|---|---------|------------------| +| [§0 Identities](#0-identities--mental-model) | The four IAM principals and what each one is for | Read first | +| [§1 Domain + DNS](#1-domain--dns) | Email subdomain (Stage 6) + broker subdomain (Stage 7) | Once per account | +| [§2 Inbound mail](#2-inbound-mail-backend) | SES + S3 receipt rule (Stage 6) | Once per account | +| [§3 IAM users + role](#3-iam-identities) | `agentkeys-{admin,broker,daemon}` + `agentkeys-data-role` | Once per account | +| [§4 OIDC federation](#4-oidc-federation-stage-7) | Register the broker as an OIDC provider, swap to PrincipalTag-scoped trust | After §1–§3 + a publicly-reachable broker | +| [§5 EC2 broker host](#5-ec2-broker-host-optional) | EIP, A record, security group | Only if you're hosting the broker on AWS | +| [§6 Cleanup](#6-cleanup) | Tear-down recipe | When you want to delete it all | + +**Cloud-portability:** §1 (DNS) and §2 (inbound mail) are the cloud-replaceable layers — Tencent Cloud SimpleDM + COS would slot in here unchanged at the §3+ boundary. See [§2.2](#22-future-tencent-cloud-simpledm--cos). + +--- + +## 0. Identities — mental model + +| Identity | Type | Holds | Purpose | +|---|---|---|---| +| `agentkeys-admin` | IAM user | Long-lived access key | One-shot provisioning. Runs every command in this doc. IAM-admin scope. | +| `agentkeys-broker` | IAM user | Long-lived access key | Operator's SSH-into-EC2 path via EC2 Instance Connect. No data-plane access. | +| `agentkeys-daemon` | IAM user | Long-lived access key | The **broker process** uses this at runtime. Only permission: `sts:AssumeRole` on `agentkeys-data-role`. | +| `agentkeys-data-role` | IAM role | (assumed) | The actual S3/SES permissions live here. `agentkeys-daemon` (Stage 6) or the OIDC provider (Stage 7) is allowed to assume it. | +| `agentkeys-broker-host` | IAM role | (assumed by EC2) | Optional. 
If the broker runs on EC2, attach this as the instance profile so the daemon never sees a static key. | + +Why "data role" and not "agent role": the project word "agent" already means three things (the AI agent, the AgentKeys product, an IAM role). The role holds **data-plane** permissions, so `agentkeys-data-role` it is. (Renamed from `agentkeys-agent` 2026-04-28; the broker still accepts the legacy `BROKER_AGENT_ROLE_ARN` env var.) + +**Prereqs for everything below:** + +```bash +# AWS CLI v2 + a working agentkeys-admin profile +awsp agentkeys-admin # set AWS_PROFILE +aws sts get-caller-identity # → agentkeys-admin + +# Shell vars used throughout the runbook +export REGION=us-east-1 # SES inbound: us-east-1, us-west-2, eu-west-1 +export DOMAIN=bots.litentry.org # Stage 6 email subdomain +export BROKER_HOST=broker.litentry.org # Stage 7 broker public hostname +export PARENT_ZONE_ID=Z09723983CFJOHAE3VC65 # existing litentry.org Route 53 zone +export ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +export BUCKET=agentkeys-mail-${ACCOUNT_ID} # global-unique by account-id suffix +echo "REGION=$REGION DOMAIN=$DOMAIN BROKER_HOST=$BROKER_HOST ACCOUNT_ID=$ACCOUNT_ID BUCKET=$BUCKET" +``` + +> **Why `jq -n --arg` and not `cat > file.json < **DKIM key custody:** in this interim setup, AWS SES holds the private DKIM key. We never see it. Trust surface: AWS-internal compromise could forge mail signed as us — bounded blast radius (reputation, not user-data custody). Migration target is TEE-held BYODKIM when [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md) closes; do **not** intermediate-step to "BYODKIM with file-stored key" (strictly worse than AWS-managed). + +#### Create the S3 bucket for inbound mail + +The bucket policy in [§3.5](#35-s3-bucket-policy) wires SES write + role read; we'll come back to it after the IAM identities exist. 
+ +```bash +aws s3api create-bucket \ + --region "$REGION" --bucket "$BUCKET" \ + $([ "$REGION" != "us-east-1" ] && echo "--create-bucket-configuration LocationConstraint=$REGION") + +aws s3api put-public-access-block --bucket "$BUCKET" \ + --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true + +# 30-day TTL on inbound objects (throwaway-inbox model) +aws s3api put-bucket-lifecycle-configuration --bucket "$BUCKET" \ + --lifecycle-configuration "$(jq -n '{ + Rules: [{ID:"inbound-30d-ttl", Status:"Enabled", Filter:{Prefix:"inbound/"}, Expiration:{Days:30}}] + }')" +``` + +#### Create the SES receipt rule + +```bash +aws ses create-receipt-rule-set --rule-set-name agentkeys --region "$REGION" 2>/dev/null || true +aws ses create-receipt-rule --region "$REGION" --rule-set-name agentkeys \ + --rule "$(jq -n --arg domain "$DOMAIN" --arg bucket "$BUCKET" '{ + Name: "agentkeys-inbound", Enabled: true, ScanEnabled: true, TlsPolicy: "Optional", + Recipients: [$domain], + Actions: [{S3Action: {BucketName: $bucket, ObjectKeyPrefix: "inbound/"}}] + }')" +aws ses set-active-receipt-rule-set --rule-set-name agentkeys --region "$REGION" +``` + +Inbound MIME lands at `s3://$BUCKET/inbound/`. The first object you'll see is `inbound/AMAZON_SES_SETUP_NOTIFICATION` — AWS's "I successfully wrote to your bucket" marker. Real test mail follows. + +#### Spam handling (read-time filter) + +The SES scanners stamp `X-SES-Spam-Verdict` / `X-SES-Virus-Verdict` headers. The provisioner-scripts `ses-s3` adapter drops messages where either is `FAIL`. No write-time Lambda; trivial receipt rule. + +#### Sandbox vs production sending + +Inbound is unaffected by SES sandbox status. You only need to request production access when the agent **sends** mail to arbitrary addresses (replies, notifications). Console → Support → "Service limit increase" → "SES Sending Limits" → "Request Production Access". 
+
+### 2.2 Future: Tencent Cloud SimpleDM + COS
+
+For deployments serving China-region traffic, the analogous backend is:
+
+| Layer | AWS (current) | Tencent Cloud (future) |
+|---|---|---|
+| Email service | SES (SendRawEmail / receipt rules) | SimpleDM (`SendEmail` + receive-rule policies) |
+| Object store | S3 + bucket policy | COS + bucket-policy / CAM role |
+| Identity service | IAM users + roles + STS AssumeRole | CAM users + roles + STS AssumeRole |
+| OIDC federation | `iam:CreateOpenIDConnectProvider` | CAM `CreateOIDCConfig` |
+
+The provisioner-scripts `email-backends/` interface already abstracts the inbound contract (object key + raw MIME). A Tencent backend slots in as `tencent-simpledm-cos`, with the same upstream API as `ses-s3`. Identity layout in §3 stays unchanged structurally — replace `iam` with `cam` calls. **No work in this runbook depends on AWS specifically except the AWS CLI invocations** — the IAM model maps 1:1 onto CAM.
+
+---
+
+## 3. IAM identities
+
+### 3.1 `agentkeys-daemon` IAM user (broker runtime)
+
+```bash
+aws iam create-user --user-name agentkeys-daemon
+aws iam create-access-key --user-name agentkeys-daemon
+# → save AccessKeyId + SecretAccessKey to your secret manager. NOT to git.
+
+aws iam put-user-policy --user-name agentkeys-daemon \
+  --policy-name agentkeys-daemon-assume-role \
+  --policy-document "$(jq -n --arg acct "$ACCOUNT_ID" '{
+    Version: "2012-10-17",
+    Statement: [{
+      Effect: "Allow", Action: "sts:AssumeRole",
+      Resource: "arn:aws:iam::\($acct):role/agentkeys-data-role"
+    }]
+  }')"
+```
+
+The daemon user can do exactly one thing: assume `agentkeys-data-role`. Any S3/SES action goes through the role's permissions, never the user's.
+
+### 3.2 `agentkeys-data-role`
+
+The role's trust policy starts with the **static-IAM-user** variant (Stage 6). [§4.3](#43-replace-the-roles-trust-policy-federated-variant) swaps it for the OIDC-federated variant once the broker is publicly reachable.
+ +```bash +aws iam create-role --role-name agentkeys-data-role \ + --assume-role-policy-document "$(jq -n --arg acct "$ACCOUNT_ID" '{ + Version: "2012-10-17", + Statement: [{ + Effect: "Allow", + Principal: {AWS: "arn:aws:iam::\($acct):user/agentkeys-daemon"}, + Action: "sts:AssumeRole" + }] + }')" + +aws iam put-role-policy --role-name agentkeys-data-role \ + --policy-name agentkeys-data-role-inline \ + --policy-document "$(jq -n \ + --arg bucket "$BUCKET" --arg region "$REGION" \ + --arg acct "$ACCOUNT_ID" --arg domain "$DOMAIN" \ + '{ + Version: "2012-10-17", + Statement: [ + {Effect:"Allow", Action:"s3:ListBucket", Resource:"arn:aws:s3:::\($bucket)"}, + {Effect:"Allow", Action:"s3:GetObject", Resource:"arn:aws:s3:::\($bucket)/*"}, + {Effect:"Allow", Action:"ses:SendRawEmail", Resource:"arn:aws:ses:\($region):\($acct):identity/\($domain)"} + ] + }')" + +export ROLE_ARN=$(aws iam get-role --role-name agentkeys-data-role --query 'Role.Arn' --output text) +echo "ROLE_ARN=$ROLE_ARN" +``` + +### 3.3 `agentkeys-admin`, `agentkeys-broker` (already provisioned) + +If you've come this far, `agentkeys-admin` exists (you're using it now). `agentkeys-broker` is whatever IAM user you SSH into the broker EC2 with via EC2 Instance Connect — its perms are out of scope here (`ec2-instance-connect:SendSSHPublicKey` on the host's instance ID is sufficient). + +### 3.4 `agentkeys-broker-host` instance profile (optional, EC2-only) + +If the broker runs on EC2, attach this so the daemon never holds a static key. The host's runtime credentials come from IMDS. 
+
+```bash
+ROLE_NAME=agentkeys-broker-host
+
+aws iam create-role --role-name $ROLE_NAME \
+  --assume-role-policy-document "$(jq -n '{
+    Version: "2012-10-17",
+    Statement: [{Effect:"Allow", Principal:{Service:"ec2.amazonaws.com"}, Action:"sts:AssumeRole"}]
+  }')"
+
+aws iam put-role-policy --role-name $ROLE_NAME --policy-name BrokerAssumeData \
+  --policy-document "$(jq -n --arg acct "$ACCOUNT_ID" '{
+    Version: "2012-10-17",
+    Statement: [{Effect:"Allow", Action:"sts:AssumeRole",
+      Resource:"arn:aws:iam::\($acct):role/agentkeys-data-role"}]
+  }')"
+
+aws iam create-instance-profile --instance-profile-name $ROLE_NAME
+aws iam add-role-to-instance-profile --instance-profile-name $ROLE_NAME --role-name $ROLE_NAME
+aws ec2 associate-iam-instance-profile --region "$REGION" \
+  --instance-id <instance-id> \
+  --iam-instance-profile Name=$ROLE_NAME
+```
+
+### 3.5 S3 bucket policy
+
+Now that `agentkeys-data-role` exists, attach the bucket policy. The static-IAM-user variant: SES writes inbound, role reads everything.
+
+```bash
+aws s3api put-bucket-policy --bucket "$BUCKET" \
+  --policy "$(jq -n --arg bucket "$BUCKET" --arg acct "$ACCOUNT_ID" '{
+    Version: "2012-10-17",
+    Statement: [
+      {
+        Sid: "AllowSESWriteInbound", Effect: "Allow",
+        Principal: {Service: "ses.amazonaws.com"},
+        Action: "s3:PutObject",
+        Resource: "arn:aws:s3:::\($bucket)/*",
+        Condition: {StringEquals: {"aws:Referer": $acct}}
+      },
+      {
+        Sid: "AllowDaemonRead", Effect: "Allow",
+        Principal: {AWS: "arn:aws:iam::\($acct):role/agentkeys-data-role"},
+        Action: ["s3:GetObject", "s3:ListBucket"],
+        Resource: ["arn:aws:s3:::\($bucket)", "arn:aws:s3:::\($bucket)/*"]
+      }
+    ]
+  }')"
+```
+
+The federated variant (PrincipalTag-scoped) lands in [§4.4](#44-upgrade-bucket-policy-to-principaltag-scoped).
+
+---
+
+## 4. OIDC federation (Stage 7)
+
+Replaces the `agentkeys-daemon → AssumeRole` path in §3.2 with `OIDC-broker-JWT → AssumeRoleWithWebIdentity`.
The benefit: per-user isolation enforced **inside AWS** (via PrincipalTag on the assumed session), not just by the daemon's app code. + +### 4.1 Prereqs + +- §1–§3 done. +- Broker reachable at `https://$BROKER_HOST` over public TLS (see [§5](#5-ec2-broker-host-optional) for the EC2 wiring + `scripts/setup-broker-host.sh` for the host bootstrap). +- The broker's discovery doc agrees with `$BROKER_HOST` byte-for-byte: + ```bash + export OIDC_ISSUER="https://$BROKER_HOST" + curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq -e ".issuer == \"$OIDC_ISSUER\"" + # → true + ``` + If `false`, fix the broker's `BROKER_OIDC_ISSUER` env var before continuing — AWS validates the registered URL against the JWT `iss` claim byte-for-byte (no scheme, trailing slash, or hostname-only forms allowed): + ```bash + sudo sed -i \ + "s|^Environment=BROKER_OIDC_ISSUER=.*|Environment=BROKER_OIDC_ISSUER=$OIDC_ISSUER|" \ + /etc/systemd/system/agentkeys-broker.service + sudo systemctl daemon-reload && sudo systemctl restart agentkeys-broker + ``` + +### 4.2 Register the OIDC provider + +Pre-check for stale state from earlier bring-ups: + +```bash +aws iam list-open-id-connect-providers +``` + +- Empty list → fresh slate; proceed. +- ARN ends in `$BROKER_HOST` → already registered; skip the create, jump to the trust-policy update. 
+- ARN ends in a different host → delete, then register the correct one:
+  ```bash
+  aws iam delete-open-id-connect-provider \
+    --open-id-connect-provider-arn arn:aws:iam::${ACCOUNT_ID}:oidc-provider/<wrong-host>
+  ```
+
+Register:
+
+```bash
+aws iam create-open-id-connect-provider \
+  --url "$OIDC_ISSUER" \
+  --client-id-list sts.amazonaws.com \
+  --thumbprint-list '<any-40-hex-chars>'
+export OIDC_PROVIDER_ARN="arn:aws:iam::${ACCOUNT_ID}:oidc-provider/$BROKER_HOST"
+
+aws iam get-open-id-connect-provider \
+  --open-id-connect-provider-arn "$OIDC_PROVIDER_ARN" \
+  --query '{Url: Url, ClientIDList: ClientIDList}'
+# → {"Url": "https://broker.litentry.org", "ClientIDList": ["sts.amazonaws.com"]}
+```
+
+AWS auto-derives the cert thumbprint from the Let's Encrypt chain. The thumbprint stays valid across cert renewals because LE uses a stable intermediate CA.
+
+### 4.3 Replace the role's trust policy (federated variant)
+
+Principal flips from `agentkeys-daemon` to the OIDC provider; the `sts:TagSession` + `aws:RequestTag/agentkeys_user_wallet` condition is what cloud-enforces per-user isolation in [§4.4](#44-upgrade-bucket-policy-to-principaltag-scoped).
+
+```bash
+aws iam update-assume-role-policy --role-name agentkeys-data-role \
+  --policy-document "$(jq -n \
+    --arg provider "$OIDC_PROVIDER_ARN" \
+    --arg aud_key "${BROKER_HOST}:aud" \
+    '{
+    Version: "2012-10-17",
+    Statement: [{
+      Effect: "Allow",
+      Principal: {Federated: $provider},
+      Action: ["sts:AssumeRoleWithWebIdentity", "sts:TagSession"],
+      Condition: {
+        StringEquals: {($aud_key): "sts.amazonaws.com"},
+        StringNotEquals: {"aws:RequestTag/agentkeys_user_wallet": ""}
+      }
+    }]
+  }')"
+```
+
+### 4.4 Upgrade bucket policy to PrincipalTag-scoped
+
+Replaces `AllowDaemonRead` from §3.5. The cloud now enforces "the assumed session can only touch the prefix matching its PrincipalTag" — even if app code has a bug.
+ +```bash +aws s3api put-bucket-policy --bucket "$BUCKET" \ + --policy "$(jq -n --arg bucket "$BUCKET" --arg acct "$ACCOUNT_ID" '{ + Version: "2012-10-17", + Statement: [ + { + Sid: "AllowSESWriteInbound", Effect: "Allow", + Principal: {Service: "ses.amazonaws.com"}, + Action: "s3:PutObject", + Resource: "arn:aws:s3:::\($bucket)/*", + Condition: {StringEquals: {"aws:Referer": $acct}} + }, + { + Sid: "AllowDaemonReadOwnPrefix", Effect: "Allow", + Principal: {AWS: "arn:aws:iam::\($acct):role/agentkeys-data-role"}, + Action: ["s3:GetObject", "s3:ListBucket"], + Resource: [ + "arn:aws:s3:::\($bucket)", + "arn:aws:s3:::\($bucket)/${aws:PrincipalTag/agentkeys_user_wallet}/*" + ], + Condition: { + StringEquals: {"s3:prefix": "${aws:PrincipalTag/agentkeys_user_wallet}/"} + } + } + ] + }')" +``` + +### 4.5 End-to-end proof + +Mint a JWT, assume the role with it, prove that wallet A can read its own prefix but **not** wallet B's: + +```bash +# 1. Mint a session bearer against the backend (mock-server in dev, chain in v0.2+) +SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \ + -H 'content-type: application/json' \ + -d '{"auth_token":"federation-proof"}' | jq -r .session) + +# 2. Mint an OIDC JWT via the broker (bearer → JWT) +JWT=$(curl -sf -X POST "$OIDC_ISSUER/v1/mint-oidc-jwt" \ + -H "Authorization: Bearer $SESSION" | jq -r .jwt) +WALLET=$(jq -R 'split(".") | .[1] | @base64d | fromjson | .agentkeys_user_wallet' <<<"$JWT" -r) + +# 3. Exchange JWT for AWS temp creds +CREDS=$(aws sts assume-role-with-web-identity \ + --role-arn "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-data-role" \ + --role-session-name "fed-proof-$(date +%s)" \ + --web-identity-token "$JWT") +export AWS_ACCESS_KEY_ID=$(echo "$CREDS" | jq -r .Credentials.AccessKeyId) +export AWS_SECRET_ACCESS_KEY=$(echo "$CREDS" | jq -r .Credentials.SecretAccessKey) +export AWS_SESSION_TOKEN=$(echo "$CREDS" | jq -r .Credentials.SessionToken) + +# 4a. 
Own prefix — should succeed (empty list is fine, no AccessDenied)
+aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "$WALLET/"
+
+# 4b. KEY MOMENT — someone else's prefix MUST AccessDenied
+aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "0xdeadbeef/"
+# → AccessDenied
+```
+
+Step 4b is the property the static-IAM path (§3) cannot prove: cloud-enforced isolation, zero app-side trust required.
+
+### 4.6 (Future) TEE-derived signer swap
+
+The on-disk ES256 keypair shipped today is a complete v0.1 signer. When [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes, swap [`crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate`](../crates/agentkeys-broker-server/src/oidc.rs) for a TEE oracle call. JWKS, JWT shape, STS exchange, and bucket policy stay identical — only the signing backend changes.
+
+---
+
+## 5. EC2 broker host (optional)
+
+If the broker runs on EC2 (the recommended path for AWS-native deployments), wire DNS + EIP + security group before running [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) on the box.
+
+### 5.1 Allocate + attach an Elastic IP
+
+```bash
+EIP_ALLOC=$(aws ec2 allocate-address --domain vpc --region "$REGION" --query AllocationId --output text)
+aws ec2 associate-address --region "$REGION" \
+  --instance-id <instance-id> --allocation-id "$EIP_ALLOC"
+EIP=$(aws ec2 describe-addresses --region "$REGION" \
+  --allocation-ids "$EIP_ALLOC" --query 'Addresses[0].PublicIp' --output text)
+echo "EIP=$EIP"
+```
+
+### 5.2 Wire the A record
+
+```bash
+aws route53 change-resource-record-sets --hosted-zone-id "$PARENT_ZONE_ID" \
+  --change-batch "$(jq -n --arg name "$BROKER_HOST." 
--arg ip "$EIP" '{
+    Changes: [{
+      Action: "UPSERT",
+      ResourceRecordSet: {Name: $name, Type: "A", TTL: 300, ResourceRecords: [{Value: $ip}]}
+    }]
+  }')"
+
+# Verify (use DoH if your local resolver hijacks port 53)
+curl -s "https://cloudflare-dns.com/dns-query?name=$BROKER_HOST&type=A" \
+  -H 'accept: application/dns-json' | jq '.Answer[0].data'
+```
+
+### 5.3 Open security-group ports 80 + 443
+
+Let's Encrypt's HTTP-01 challenge needs port 80 open from anywhere; the broker serves on 443 afterward. SSH (22) should be admin-IP-only.
+
+```bash
+INSTANCE_ID=<instance-id>
+SG=$(aws ec2 describe-instances --region "$REGION" --instance-ids "$INSTANCE_ID" \
+  --query 'Reservations[0].Instances[0].SecurityGroups[0].GroupId' --output text)
+
+aws ec2 authorize-security-group-ingress --region "$REGION" --group-id "$SG" \
+  --protocol tcp --port 443 --cidr 0.0.0.0/0
+aws ec2 authorize-security-group-ingress --region "$REGION" --group-id "$SG" \
+  --protocol tcp --port 80 --cidr 0.0.0.0/0
+```
+
+### 5.4 Bootstrap the host
+
+SSH in as `agentkeys-broker` (via EC2 Instance Connect: `aws ec2-instance-connect ssh --instance-id $INSTANCE_ID`) and run:
+
+```bash
+git clone https://github.com/litentry/agentKeys.git
+cd agentKeys
+sudo bash scripts/setup-broker-host.sh
+# Interactive walk-through; pick instance-profile credential mode
+# (assuming §3.4 attached agentkeys-broker-host).
+```
+
+The script writes systemd units, an HTTP-only nginx config, then prints the certbot command. After cert issuance, re-run the script — it detects the cert file and flips on the `:443` ssl block.
+
+---
+
+## 6. 
Cleanup + +```bash +# OIDC federation (if §4 ran) +aws iam delete-open-id-connect-provider \ + --open-id-connect-provider-arn "$OIDC_PROVIDER_ARN" 2>/dev/null + +# IAM +aws iam delete-role-policy --role-name agentkeys-data-role --policy-name agentkeys-data-role-inline +aws iam delete-role --role-name agentkeys-data-role +for KEY in $(aws iam list-access-keys --user-name agentkeys-daemon --query 'AccessKeyMetadata[*].AccessKeyId' --output text); do + aws iam delete-access-key --user-name agentkeys-daemon --access-key-id "$KEY" +done +aws iam delete-user-policy --user-name agentkeys-daemon --policy-name agentkeys-daemon-assume-role +aws iam delete-user --user-name agentkeys-daemon + +# Optional: the broker-host instance profile +aws iam remove-role-from-instance-profile --instance-profile-name agentkeys-broker-host --role-name agentkeys-broker-host 2>/dev/null +aws iam delete-instance-profile --instance-profile-name agentkeys-broker-host 2>/dev/null +aws iam delete-role-policy --role-name agentkeys-broker-host --policy-name BrokerAssumeData 2>/dev/null +aws iam delete-role --role-name agentkeys-broker-host 2>/dev/null + +# SES + S3 +aws ses set-active-receipt-rule-set --rule-set-name "" --region "$REGION" +aws sesv2 delete-email-identity --region "$REGION" --email-identity "$DOMAIN" +aws s3 rm "s3://$BUCKET" --recursive +aws s3api delete-bucket --bucket "$BUCKET" + +# DNS records on the parent zone are NOT auto-deleted — you'll need to +# remove the DKIM CNAMEs, MX, SPF, DMARC, and broker A record by hand +# if you want a clean zone. +``` + +--- + +## Follow-ups tracked elsewhere + +- **TEE-BYODKIM** — replace AWS-managed DKIM. Depends on [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md). +- **TEE-derived OIDC signer** — replace on-disk ES256. Depends on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). +- **Per-address S3 prefix routing** — currently all inbound lands in `inbound/`; per-`/
/` prefix routing wants either a SES Lambda or subdomain receipt rules. +- **GCP / Tencent recipes** — equivalent of §4 against GCP Workload Identity Federation and Tencent CAM. JWT/JWKS shape works cross-cloud unchanged; only the registration step differs. diff --git a/docs/dev-setup.md b/docs/dev-setup.md index b23e71a..0aef101 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -137,7 +137,7 @@ You operate the AgentKeys infrastructure for a team. You hold the long-lived `ag ### 5.1 One-time: AWS setup -Run through [`stage6-aws-setup.md`](./stage6-aws-setup.md) through §7 once per AWS account. Afterwards you'll have: +Run through [`cloud-setup.md`](./cloud-setup.md) §1–§3 once per AWS account. Afterwards you'll have: - SES domain identity verified on `bots.litentry.org` (or your substitute via `AGENTKEYS_EMAIL_DOMAIN`) - `agentkeys-daemon` IAM user with `sts:AssumeRole` only @@ -145,7 +145,7 @@ Run through [`stage6-aws-setup.md`](./stage6-aws-setup.md) through §7 once per - S3 bucket `agentkeys-mail-` with receipt rule writing inbound to `inbound/` - Route 53 records: three DKIM CNAMEs, MX, SPF, DMARC -Manage the daemon user's long-lived AWS keys via a **named profile** in `~/.aws/credentials` (mode 0600). The broker uses the AWS SDK's default credential chain — `AWS_PROFILE` (set by `awsp` or your shell), the shared credentials file, or an EC2 instance profile via IMDS. **No long-lived AWS keys live in env vars.** See [`operator-runbook.md` §3.1](./operator-runbook.md) for the full credential story. +Manage the daemon user's long-lived AWS keys via a **named profile** in `~/.aws/credentials` (mode 0600). The broker uses the AWS SDK's default credential chain — `AWS_PROFILE` (set by `awsp` or your shell), the shared credentials file, or an EC2 instance profile via IMDS. **No long-lived AWS keys live in env vars.** See [`operator-runbook.md` §2](./operator-runbook.md#2-aws-credentials) for the full credential story. 
### 5.2 Run the broker server @@ -242,7 +242,7 @@ The stage-done script is the authoritative evaluator — never self-grade. If it | Mock server won't bind port 8090 | Stale process | `lsof -i :8090`, kill, restart | | Broker won't bind port 8091 | Stale process | `lsof -i :8091`, kill, restart | | `agentkeys init` double-prompts on macOS | Known keyring-rs update path | Filed under Stage 9 "idempotent init" item | -| `bot-@bots.litentry.org` email never arrives | DNS / MX / SES receipt-rule misconfigured, or bucket missing write perm | `aws s3 ls s3://$BUCKET/inbound/ --recursive` — if empty >60s after signup, re-verify §2–§5 of `stage6-aws-setup.md` | +| `bot-@bots.litentry.org` email never arrives | DNS / MX / SES receipt-rule misconfigured, or bucket missing write perm | `aws s3 ls s3://$BUCKET/inbound/ --recursive` — if empty >60s after signup, re-verify [`cloud-setup.md` §1–§2](./cloud-setup.md#1-domain--dns) | | `MalformedPolicyDocument: ... failed legacy parsing` during operator setup | Heredoc-generated JSON lost a `$VAR:r` / `$VAR:h` to a zsh modifier | Use the `jq -n --arg … '{…}'` pattern — never heredoc JSON into AWS calls | ## 9. When a provider changes their flow @@ -254,8 +254,8 @@ The longer-term plan (Stage 5b) is to detect drift automatically from telemetry ## 10. 
Further reading - [`spec/plans/development-stages.md`](./spec/plans/development-stages.md) — Shipped / Active / Planned roadmap -- [`stage6-aws-setup.md`](./stage6-aws-setup.md) — one-time AWS infra (operator role) -- [`stage7-wip.md`](./stage7-wip.md) — broker server + OIDC-federated future +- [`cloud-setup.md`](./cloud-setup.md) — one-time AWS infra (DNS, SES, S3, IAM, OIDC federation) +- [`stage7-wip.md`](./stage7-wip.md) — broker server design + acceptance test - [`operator-runbook.md`](./operator-runbook.md) — start, supervise, rotate, monitor the broker - [`spec/credential-backend-interface.md`](./spec/credential-backend-interface.md) — 15-method trait contract - [`spec/ses-email-architecture.md`](./spec/ses-email-architecture.md) — Stage 6 email pipeline deep-dive diff --git a/docs/operator-runbook.md b/docs/operator-runbook.md index 7ba01cb..ffefa96 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -1,244 +1,184 @@ -# Operator Runbook — AgentKeys Broker Server +# Operator runbook — AgentKeys broker -**Audience:** the person running `agentkeys-broker-server` for a team. If you're an app developer trying to use a broker someone else runs, see [`dev-setup.md` §4](./dev-setup.md). If you're an end user of an agent, see [`dev-setup.md` §6](./dev-setup.md). +**Audience:** the person running `agentkeys-broker-server` for a team. App developers using a broker someone else runs read [`dev-setup.md` §4](./dev-setup.md). End users of an agent read [`dev-setup.md` §6](./dev-setup.md). -**Scope:** start, supervise, rotate keys, monitor audit, and migrate from local to hosted. v0.1 deliberately avoids TEE / KMS / hosted-only paths — those land later. +**What the broker is.** A long-running HTTP service that holds the operator's `agentkeys-daemon` AWS access key (or assumes a role via instance profile) and mints two kinds of short-lived credentials to authenticated daemons: -> **WIP / scratchpad.** This runbook ships alongside the v0.1 broker. 
Stage 7 phase 1 (broker mint-aws-creds + audit) and phase 2 (OIDC issuer surface + provisioner-scripts AWS-cred wiring) are both live. The `sts:AssumeRoleWithWebIdentity` federation step is still deferred — it needs public TLS hosting of the issuer URL, see [`stage7-wip.md`](./stage7-wip.md). Stage 8 (off-chain vault) sections are forward-looking. +| Endpoint | Output | +|---|---| +| `POST /v1/mint-aws-creds` | 1 h scoped AWS temp creds via `sts:AssumeRole`. | +| `POST /v1/mint-oidc-jwt` | Short-lived ES256 JWT for `sts:AssumeRoleWithWebIdentity`. | +| `GET /.well-known/openid-configuration` | OIDC discovery doc. | +| `GET /.well-known/jwks.json` | JWK Set with the broker's public key + `kid`. | +| `GET /healthz`, `/readyz` | Supervisor probes. | -## 1. What the broker is +Both `mint-*` endpoints write a row to `~/.agentkeys/broker/audit.sqlite` before credentials leave the process. -`agentkeys-broker-server` is the long-running HTTP service that holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. It is the boundary that lets app developers run daemons against your infrastructure **without holding any AWS credentials themselves**. +**Threat model.** Defends against developer-laptop compromise (devs hold only short-lived bearers; the long-lived AWS key never leaves the broker host). Does **not** defend against broker-process compromise — that's the v0.2+ TEE story; see [`spec/threat-model-key-custody.md`](./spec/threat-model-key-custody.md). -User-facing endpoints: +For v0.1: run on a host you trust, rotate the daemon key on a schedule (§3), watch the audit log (§4). -- `POST /v1/mint-aws-creds` — bearer-token in, temp AWS creds out (phase 1). -- `POST /v1/mint-oidc-jwt` — bearer-token in, short-lived ES256 JWT out (phase 2). Suitable for `sts:AssumeRoleWithWebIdentity` once the issuer URL is publicly hosted. -- `GET /.well-known/openid-configuration` — OIDC discovery doc. 
-- `GET /.well-known/jwks.json` — JWK Set with the broker's ES256 P-256 public key + `kid`. +--- -Operator-side: `/healthz`, `/readyz` health checks, and an audit log written to local SQLite. Both `mint-aws-creds` and `mint-oidc-jwt` write to the same audit table — `requested_role = "oidc_jwt"` distinguishes JWT mints in the ledger. +## 1. Setup pointers -The remaining federation step (`aws iam create-open-id-connect-provider --url $BROKER_OIDC_ISSUER` + `sts:AssumeRoleWithWebIdentity`) is the public-hosting recipe in [`stage7-wip.md` §"Phase 2 — federation step"](./stage7-wip.md). +| Task | Where | +|---|---| +| AWS account provisioning (IAM, SES, S3, OIDC federation) | [`cloud-setup.md`](./cloud-setup.md) | +| Broker-host bootstrap (binaries, systemd, nginx, certbot) | [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) + [`stage7-wip.md` §"Remote deployment"](./stage7-wip.md#remote-deployment) | +| Stage 7 design + acceptance test | [`stage7-wip.md`](./stage7-wip.md) | +| Three-role mental model (operator vs developer vs end-user) | [`dev-setup.md`](./dev-setup.md) | -## 2. Threat model — what the broker is and isn't defending against +--- -**Defends against:** developer laptops being lost, stolen, or compromised. Without the broker, every developer holds the same long-lived daemon AWS key — one compromise burns everyone. With the broker, only the broker process holds the long-lived key; developer machines hold only short-lived bearer tokens. +## 2. AWS credentials -**Does NOT defend against:** broker process compromise. If an attacker gets RCE on the broker, they get the long-lived AWS key and can mint arbitrary scoped credentials. The v0.1 broker runs on commodity hardware in plaintext; TEE-backed hosting is the v0.2+ evolution. See [`spec/threat-model-key-custody.md`](./spec/threat-model-key-custody.md) for the broader position. +The broker resolves AWS credentials through the SDK default provider chain. 
Pick **one** path: -**Operator implications for v0.1:** +### 2.1 EC2 instance profile (recommended on AWS) -- Run the broker on a host you trust. Don't co-tenant with untrusted workloads. -- Rotate the daemon AWS key on a schedule (§5). -- Watch the audit log (§6) — anomalous mint patterns are your earliest signal. +The host's instance profile (`agentkeys-broker-host`, see [`cloud-setup.md` §3.4](./cloud-setup.md#34-agentkeys-broker-host-instance-profile-optional-ec2-only)) carries `sts:AssumeRole` on `agentkeys-data-role`. The SDK pulls credentials from IMDS automatically — no env vars, no shared files, no rotation runbook. Verify with `aws sts get-caller-identity` from the host. -## 3. Start the broker +### 2.2 Named profile (non-EC2 hosts) -### 3.1 AWS credentials - -The broker resolves AWS credentials through the AWS SDK's default provider chain — **named profiles in `~/.aws/credentials`** (recommended for local dev), **EC2 instance profile via IMDS** (recommended for cloud deployments), or static IAM-user keys in env vars (legacy fallback). - -#### Recommended: named profiles + `awsp` - -Profiles live in `~/.aws/credentials` and `~/.aws/config` (mode `0600`). One profile per role; switch with `awsp ` or `export AWS_PROFILE=`. Example layout: +Drop the daemon user's keys into `~/.aws/credentials` for the system user the broker runs as. The systemd unit sets `AWS_PROFILE=agentkeys-daemon`: ``` -~/.aws/credentials # mode 0600 -[agentkeys-admin] # admin operations -aws_access_key_id = AKIA... -aws_secret_access_key = ... - -[agentkeys-broker] # EC2 Instance Connect to broker host +~/.aws/credentials # mode 0600 +[agentkeys-daemon] aws_access_key_id = AKIA... aws_secret_access_key = ... -[agentkeys-daemon] # what the broker process assumes from -aws_access_key_id = AKIA... -aws_secret_access_key = ... 
-``` - -``` -~/.aws/config # mode 0600 -[profile agentkeys-admin] -region = us-east-1 -output = json - -[profile agentkeys-broker] -region = us-east-1 - +~/.aws/config # mode 0600 [profile agentkeys-daemon] region = us-east-1 ``` -Run the broker with the daemon profile active: +For local dev: `awsp agentkeys-daemon` (or `export AWS_PROFILE=agentkeys-daemon`) before `cargo run`. -```bash -awsp agentkeys-daemon # sets AWS_PROFILE=agentkeys-daemon -agentkeys-broker-server --port 8091 -# → "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" -``` +### 2.3 Static keys in env (legacy) -The broker logs which credential path it picked at startup, so misconfiguration is visible in the first second of the log. +Set `DAEMON_ACCESS_KEY_ID` *and* `DAEMON_SECRET_ACCESS_KEY` (both required together; setting only one is rejected at startup). Prefer 2.1 or 2.2. -#### Recommended: EC2 instance profile +The broker logs which path it picked at startup: `AWS credentials: SDK default chain ...` or `AWS credentials: static IAM-user keys ...`. Always check this in the first second of the log. -When the broker runs on EC2, attach an instance profile granting `sts:AssumeRole` on `agentkeys-data-role`. The SDK picks credentials from IMDS automatically — no env vars, no shared files, no rotation step. This is the path `scripts/setup-broker-host.sh` sets up. +--- -#### Legacy fallback: static IAM-user keys in env +## 3. Configuration -Set both `DAEMON_ACCESS_KEY_ID` *and* `DAEMON_SECRET_ACCESS_KEY` (or the `BROKER_DAEMON_*` aliases). The broker logs `AWS credentials: static IAM-user keys (DAEMON_ACCESS_KEY_ID env)` when it picks this path. Setting only one of the pair is rejected at startup. Prefer profiles or instance-profile. +| Env var | Required | Notes | +|---|---|---| +| `BROKER_BACKEND_URL` | yes | Backend that issues / validates session bearers (mock-server in dev, chain in v0.2+). | +| `BROKER_DATA_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of `agentkeys-data-role`. 
Falls back to `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-data-role`. Legacy `BROKER_AGENT_ROLE_ARN` accepted for unmigrated deployments. | +| `BROKER_OIDC_ISSUER` | for production | Public URL emitted as `iss`. **Must** match the `aws iam create-open-id-connect-provider --url` value byte-for-byte. Default: `https://oidc.agentkeys.dev`. | +| `BROKER_AWS_REGION` | no | STS region. Falls back to `REGION`, then `us-east-1`. | +| `BROKER_AUDIT_DB_PATH` | no | Default: `$HOME/.agentkeys/broker/audit.sqlite`. | +| `BROKER_OIDC_KEYPAIR_PATH` | no | Default: `$HOME/.agentkeys/broker/oidc-keypair.json` (mode 0600). | +| `BROKER_OIDC_JWT_TTL_SECONDS` | no | Default `300`. Bounded `[60, 3600]`. | +| `BROKER_SESSION_DURATION_SECONDS` | no | TTL for AWS-cred mints. Default `3600`. Bounded `[900, 43200]`. | +| `BROKER_BACKEND_TIMEOUT_SECONDS` | no | HTTP timeout to backend. Default `10`. | +| `BROKER_SHUTDOWN_GRACE_SECONDS` | no | Graceful drain cap. Default `30`. | +| `DAEMON_ACCESS_KEY_ID` / `DAEMON_SECRET_ACCESS_KEY` | legacy | Static IAM keys (§2.3). Both required if used. | -### 3.2 Other configuration +--- -| Variable | Required | Description | -|---|---|---| -| `BROKER_BACKEND_URL` | yes | URL of the AgentKeys backend that issues session tokens (mock-server in dev, chain in v0.2+). | -| `BROKER_DATA_ROLE_ARN` | yes (or `ACCOUNT_ID`) | ARN of the `agentkeys-data-role` IAM role the broker assumes-into. If unset, derived from `ACCOUNT_ID` as `arn:aws:iam::$ACCOUNT_ID:role/agentkeys-data-role`. The legacy `BROKER_AGENT_ROLE_ARN` is still accepted as a fallback for pre-2026-04-28 deployments. | -| `BROKER_AWS_REGION` | no | AWS region for the STS call. Falls back to `REGION` (the rest-of-agentKeys convention) before defaulting to `us-east-1`. The active profile's `region` setting is used by the SDK independently for credential lookup. | -| `BROKER_AUDIT_DB_PATH` | no | SQLite path for the audit log. Default: `$HOME/.agentkeys/broker/audit.sqlite`. 
| -| `BROKER_SESSION_DURATION_SECONDS` | no | TTL for minted credentials. Default: `3600` (1 h). Min: `900`, max: `43200`. | -| `BROKER_BACKEND_TIMEOUT_SECONDS` | no | HTTP timeout for backend `/session/validate` calls. Default: `10`. | -| `BROKER_SHUTDOWN_GRACE_SECONDS` | no | Hard cap on graceful-shutdown drain. Default: `30`. | -| `BROKER_OIDC_ISSUER` | no | Public URL the broker advertises in the OIDC discovery doc and JWT `iss` claim. Must match the URL used at `aws iam create-open-id-connect-provider` time. Default: `https://oidc.agentkeys.dev`. | -| `BROKER_OIDC_KEYPAIR_PATH` | no | Path to the persisted ES256 keypair (mode 0600). Generated on first start, reused on subsequent restarts so the registered IAM OIDC provider stays valid. Default: `$HOME/.agentkeys/broker/oidc-keypair.json`. | -| `BROKER_OIDC_JWT_TTL_SECONDS` | no | TTL (seconds) for minted OIDC JWTs. Default: `300`. Bounded `[60, 3600]`. | -| `DAEMON_ACCESS_KEY_ID` / `DAEMON_SECRET_ACCESS_KEY` | no (legacy) | Static IAM-user keys. Only used when no profile / instance profile / SDK default is available. Both must be set together. | - -`ACCOUNT_ID` is read indirectly to derive `BROKER_DATA_ROLE_ARN`. Persist non-secret values (region, account ID, role ARN, OIDC issuer URL) wherever your shell prefers; the broker no longer needs secrets in its environment. - -### 3.3 Run +## 4. Run + supervise -```bash -awsp agentkeys-daemon # or attach instance profile -cargo run --release -p agentkeys-broker-server -- --port 8091 -# → broker listening on 0.0.0.0:8091 -``` +For production, use [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) — writes systemd units for both the broker and mock backend, with `Restart=on-failure` and a dedicated `agentkeys` system user. Logs to journald (`journalctl -u agentkeys-broker -f`). 
-Or from the built binary: +For local dev: ```bash -awsp agentkeys-daemon -./target/release/agentkeys-broker-server --port 8091 +awsp agentkeys-daemon # or attach instance profile +cargo run --release -p agentkeys-broker-server -- --port 8091 ``` -The first second of the log shows which credential path the broker picked: `AWS credentials: SDK default chain ...` or `AWS credentials: static IAM-user keys ...`. Always check this before declaring the broker healthy in a new environment. - -### 3.4 Verify it came up +Verify it came up: ```bash -curl -sf http://127.0.0.1:8091/healthz # → 200 ok -curl -sf http://127.0.0.1:8091/readyz # → 200 ok if backend + STS reachable, 503 otherwise +curl -sf http://127.0.0.1:8091/healthz # → "ok" +curl -sf http://127.0.0.1:8091/readyz # → 200 if backend + STS reachable, 503 otherwise ``` -`/readyz` checks: the configured `BROKER_BACKEND_URL` is reachable, and the broker's daemon credentials can call `sts:GetCallerIdentity`. Use this as your supervisor health probe. - -## 4. Supervise - -The broker is a stateless HTTP service (audit DB aside). Restart it freely — there's no in-memory session state to preserve. Recommended supervision: +`/readyz` checks that `BROKER_BACKEND_URL` is reachable and that the broker's daemon credentials can call `sts:GetCallerIdentity`. Use this as your supervisor probe. -- **systemd** (Linux operator host): unit file with `Restart=on-failure`, `EnvironmentFile=` pointing at a 0600 file (or `LoadCredential=`). -- **launchd** (macOS dev box): plist with `KeepAlive` + `ThrottleInterval`. -- **PM2 / supervisord** are also fine — anything that respawns on crash. - -Logs go to stderr in `tracing-subscriber` JSON format when `RUST_LOG=info` is set. Aggregate them with whatever you already use (journald, CloudWatch, Loki). +--- ## 5. Rotate the daemon AWS key -Long-lived keys age out. Rotation procedure depends on the credential path: - -### Named profile (recommended) - -1. 
In IAM, **create** a second access key on the `agentkeys-daemon` user — both old and new keys are now valid. -2. Update the `agentkeys-daemon` profile in `~/.aws/credentials` with the new key. -3. Restart the broker — the SDK re-reads the shared file on each `aws_config::defaults().load()` (i.e., on process restart). -4. Verify with `curl /readyz` — should return 200. -5. In IAM, **deactivate** (not delete) the old access key. Wait 24 h. -6. If nothing broke, delete the old key. If something broke, reactivate and roll back. - -### EC2 instance profile - -Rotation is automatic — IMDS-vended credentials refresh on a schedule managed by AWS. No operator step. +| Path | Procedure | +|---|---| +| **Instance profile (§2.1)** | Automatic. IMDS-vended credentials refresh on AWS's schedule. No operator step. | +| **Named profile (§2.2)** | (1) IAM `create-access-key` for `agentkeys-daemon` — both keys now valid. (2) Update `~/.aws/credentials`. (3) `sudo systemctl restart agentkeys-broker`. (4) `curl /readyz` → 200. (5) IAM `update-access-key --status Inactive` on the old key. (6) Wait 24 h. (7) Delete old key (or reactivate + roll back). | +| **Static keys (§2.3)** | Same as named-profile but step 2 updates the `DAEMON_*` env vars in your supervisor config / `EnvironmentFile=`. | -### Legacy static-keys env-var path +**Cadence:** rotate every 90 days minimum; immediately on any operator-laptop compromise. -Same as the profile flow but step 2 updates the `DAEMON_*` env vars in your supervisor config. - -**Cadence recommendation:** rotate every 90 days minimum, immediately on any operator-laptop compromise. +--- ## 6. Audit -Every credential mint is logged to `BROKER_AUDIT_DB_PATH` (default `~/.agentkeys/broker/audit.sqlite`). 
Schema: +Schema (`~/.agentkeys/broker/audit.sqlite`): ```sql CREATE TABLE mint_log ( id INTEGER PRIMARY KEY AUTOINCREMENT, - minted_at INTEGER NOT NULL, -- unix seconds - requester_token TEXT NOT NULL, -- bearer token (hashed; see §6.1) - requester_wallet TEXT NOT NULL, -- wallet the token resolved to - requested_role TEXT NOT NULL, -- BROKER_DATA_ROLE_ARN at mint time + minted_at INTEGER NOT NULL, -- unix seconds + requester_token TEXT NOT NULL, -- sha256(bearer); never the raw token + requester_wallet TEXT NOT NULL, + requested_role TEXT NOT NULL, -- ARN, or "oidc_jwt" for JWT mints session_duration_seconds INTEGER NOT NULL, - sts_session_name TEXT NOT NULL, -- value passed to AssumeRole; visible in CloudTrail - outcome TEXT NOT NULL, -- "ok" | "auth_failed" | "sts_error" - outcome_detail TEXT -- nullable; error message on failure + sts_session_name TEXT NOT NULL, -- visible in CloudTrail + outcome TEXT NOT NULL, -- "ok" | "auth_failed" | "sts_error" | "backend_error" + outcome_detail TEXT ); - -CREATE INDEX idx_mint_log_minted_at ON mint_log(minted_at); -CREATE INDEX idx_mint_log_wallet ON mint_log(requester_wallet); ``` Inspect: ```bash sqlite3 ~/.agentkeys/broker/audit.sqlite \ - "SELECT minted_at, requester_wallet, outcome FROM mint_log ORDER BY id DESC LIMIT 20" + "SELECT minted_at, requester_wallet, requested_role, outcome \ + FROM mint_log ORDER BY id DESC LIMIT 20" ``` -**(later)** Stage 8 will mirror this audit data on-chain via a `BlobWritten` extrinsic per [`stage8-wip.md`](./stage8-wip.md). Until then, the SQLite file is the only audit surface — back it up. - -### 6.1 Why the bearer token is hashed in the audit log - -Storing the raw bearer token in the audit DB would mean a read of the audit DB compromises every active session. The audit log records `sha256(token)` so a leaked audit DB cannot be replayed against the backend. The `requester_wallet` column is the join key for the human-meaningful "who minted this" question. 
- -### 6.2 What anomalies look like +**Anomaly signals:** -- Same `requester_wallet` minting at >10× normal rate → token compromised, possibly replay-attempted from elsewhere. -- `outcome="auth_failed"` clusters → someone is fishing for valid tokens. -- `outcome="sts_error"` clusters → the operator's IAM trust policy or daemon key is misconfigured. +- One `requester_wallet` minting at >10× the normal rate → token compromised. +- `outcome="auth_failed"` clusters → someone fishing for valid bearers. +- `outcome="sts_error"` clusters → IAM trust policy or daemon key misconfigured. -## 7. Migrate from local to hosted **(later)** +Bearer tokens are stored as `sha256(token)` so a leaked audit DB cannot be replayed against the backend; `requester_wallet` is the join key for "who minted this". -When `broker.agentkeys.dev` (or your hosted equivalent) is live, the migration for app developers is one env var: +--- -```diff --export AGENTKEYS_BROKER_URL=http://broker.local:8091 -+export AGENTKEYS_BROKER_URL=https://broker.litentry.org -``` - -Operator-side, the same binary runs. Configuration source changes from env vars to KMS-sealed config (interface design only in v0.1; full implementation is the Stage 7 phase 2 hosted-deploy work). - -## 8. Common failure modes +## 7. 
Common failure modes

| Symptom | Likely cause | Fix |
|---|---|---|
-| Broker `/readyz` returns 503 with `backend_unreachable` | `BROKER_BACKEND_URL` wrong, mock-server not running | Check the URL; restart mock-server |
-| Broker `/readyz` returns 503 with `sts_error` | Daemon AWS key invalid, expired, or missing `sts:AssumeRole` permission | Verify with `aws sts get-caller-identity` using the same env vars |
-| `POST /v1/mint-aws-creds` returns 401 | Bearer token expired or issued against a different backend | Caller re-runs `agentkeys init` against `BROKER_BACKEND_URL` |
-| `POST /v1/mint-aws-creds` returns 502 with `sts_error` | IAM trust policy on `agentkeys-data-role` doesn't allow the daemon user | Check the role's trust policy in IAM |
-| Audit DB grows unbounded | No retention policy in v0.1 | Run a periodic `DELETE FROM mint_log WHERE minted_at < ?` from cron, or `sqlite3 .. VACUUM` |
+| `/readyz` returns 503 with `backend_unreachable` | `BROKER_BACKEND_URL` wrong / mock-server down | Check the URL; restart the backend. |
+| `/readyz` returns 503 with `sts_error` | Daemon key invalid, expired, or missing `sts:AssumeRole` permission | `aws sts get-caller-identity` with the same env / profile. |
+| `mint-aws-creds` returns 401 | Bearer expired or issued against a different backend | Caller re-runs `agentkeys init` against `BROKER_BACKEND_URL`. |
+| `mint-aws-creds` returns 502 with `sts_error` | Trust policy on `agentkeys-data-role` doesn't allow the daemon user | Check the role's trust policy; see [`cloud-setup.md` §3.2](./cloud-setup.md#32-agentkeys-data-role). |
+| `mint-oidc-jwt` returns 502 / discovery doc `iss` ≠ requested URL | `BROKER_OIDC_ISSUER` mismatch | Fix `BROKER_OIDC_ISSUER` in the systemd unit and restart the broker; see [`stage7-wip.md`](./stage7-wip.md). 
| +| AWS rejects `AssumeRoleWithWebIdentity` | `BROKER_OIDC_ISSUER` and `aws iam create-open-id-connect-provider --url` disagree byte-for-byte | Re-register the OIDC provider per [`cloud-setup.md` §4.2](./cloud-setup.md#42-register-the-oidc-provider). | +| Audit DB grows unbounded | No retention policy in v0.1 | Cron `DELETE FROM mint_log WHERE minted_at < ?` + `VACUUM`. | -## 9. What's NOT in scope for v0.1 +--- + +## 8. Out of scope for v0.1 - TEE / enclave-backed broker. Plaintext on commodity hardware. -- KMS-sealed configuration source. Env vars only. -- Secret-manager integration as a config source (Vault, AWS Secrets Manager, GCP Secret Manager). Operator persists the daemon AWS keys in `~/.zshenv` (or supervisor-managed env) themselves. -- Multi-tenant operator support. One broker process serves one operator's `agentkeys-daemon` key. -- `sts:AssumeRoleWithWebIdentity` exchange against the broker's issuer. The broker now serves a conforming OIDC discovery + JWKS surface and a bearer-gated `mint-oidc-jwt` endpoint, but the AWS-side `create-open-id-connect-provider` registration requires the issuer URL to be reachable over public TLS — that hosting step is the remaining blocker (Stage 7 phase 2 federation step). +- KMS-sealed configuration. Env vars only. +- Vault / Secrets Manager / GCP Secret Manager integration. Operator persists the daemon key themselves. +- Multi-tenant broker. One process serves one operator's `agentkeys-daemon` key. - Automatic key rotation. Rotate manually per §5. -## 10. Further reading +--- + +## 9. Further reading -- [`dev-setup.md`](./dev-setup.md) — the three-role guide. Read §3 first if you're not sure which role you are. -- [`stage6-aws-setup.md`](./stage6-aws-setup.md) — one-time IAM + SES + S3 setup that produces the daemon key the broker holds. -- [`stage7-wip.md`](./stage7-wip.md) — full Stage 7 design, including the OIDC-federation half deferred to phase 2. 
-- [`spec/threat-model-key-custody.md`](./spec/threat-model-key-custody.md) — broader security position the broker is one component of. +- [`cloud-setup.md`](./cloud-setup.md) — one-time AWS provisioning (DNS, SES, S3, IAM, OIDC federation). +- [`stage7-wip.md`](./stage7-wip.md) — Stage 7 design + acceptance test. +- [`dev-setup.md`](./dev-setup.md) — three-role guide for app developers and end users. +- [`spec/threat-model-key-custody.md`](./spec/threat-model-key-custody.md) — the broader security position the broker is one component of. diff --git a/docs/spec/plans/development-stages.md b/docs/spec/plans/development-stages.md index 379486f..6c4d8be 100644 --- a/docs/spec/plans/development-stages.md +++ b/docs/spec/plans/development-stages.md @@ -21,7 +21,7 @@ If you're looking for setup / demo instructions, go to [`../../dev-setup.md`](.. | 5a | Provisioner (deterministic) | OpenRouter + OpenAI CDP scrapers; `signupEmailOtp` pattern library; HTML-strip + label-aware OTP extractor; mandatory post-provision verify; `agentkeys provision openrouter` | 59/59 unit + live provision | | 6 (interim, 2026-04) | Hosted email infra | SES domain verification on `bots.litentry.org`; `agentkeys-daemon` IAM user → `agentkeys-data-role` assume-role; S3 inbound bucket; `ses-s3` email backend; end-to-end demo from signup → SES receipt → S3 poll → key extraction | `scripts/stage6-demo-run.sh` prints a valid `sk-or-v1-...` key | | 7 phase 1 (2026-04) | Broker server | `agentkeys-broker-server` axum service: bearer-gated `POST /v1/mint-aws-creds`, audit SQLite, supervisor probes; daemon `--broker-url` flag wired up | 22/22 unit + integration | -| 7 phase 2 (2026-04) | OIDC issuer + AWS-cred wiring | OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set; audit destination is the broker's local SQLite per the 
pluggable-audit-backend framing in [`architecture.md` §11](../architecture.md) | broker integration + clippy clean; cloud federation deployment runbook in [`stage7-wip.md`](../../stage7-wip.md) | +| 7 phase 2 (2026-04) | OIDC issuer + AWS-cred wiring | OIDC discovery + JWKS + bearer-gated `POST /v1/mint-oidc-jwt` absorbed into Rust broker (TS `services/oidc-stub/` retired); CLI/MCP `provision` paths fetch AWS temp creds via the broker when `--broker-url` is set; audit destination is the broker's local SQLite per the pluggable-audit-backend framing in [`architecture.md` §11](../architecture.md) | broker integration + clippy clean; cloud federation deployment runbook in [`cloud-setup.md` §4](../../cloud-setup.md) | ### Non-stage work shipped alongside @@ -67,7 +67,7 @@ Both phases shipped — see Shipped table above. Scratch notes: [`../../stage7-w **Operational follow-ups (not architectural blockers):** -- Public TLS hosting of `$BROKER_OIDC_ISSUER` so `aws iam create-open-id-connect-provider` can fetch the JWKS. Per-operator deployment task; recipe in [`stage7-wip.md` §"Cloud federation deployment"](../../stage7-wip.md). +- Public TLS hosting of `$BROKER_OIDC_ISSUER` so `aws iam create-open-id-connect-provider` can fetch the JWKS. Per-operator deployment task; recipe in [`cloud-setup.md` §4 "OIDC federation"](../../cloud-setup.md). - Higher-assurance signer (TEE-derived ES256 at `oidc/issuer/v1`, blocked on `heima-gaps §3`). The on-disk keypair shipped today is a complete v0.1 signer — TEE is hardening, not a Stage-7 prerequisite. - Audit-destination swap (chain anchoring or sealed log service). The broker's local SQLite is one valid choice in the [pluggable audit-backend layer](../architecture.md#11-audit-destination-is-pluggable) — operators can swap per their threat model and jurisdiction. 
diff --git a/docs/spec/ses-email-architecture.md b/docs/spec/ses-email-architecture.md index 0a7f989..baef751 100644 --- a/docs/spec/ses-email-architecture.md +++ b/docs/spec/ses-email-architecture.md @@ -194,13 +194,13 @@ The split exists so the long-lived secret (user access key) only does ONE thing | Auth flow | `sts:AssumeRole` from IAM user (static keys) | `sts:AssumeRoleWithWebIdentity` from OIDC JWT | | AWS resource count | Same singletons | Same singletons (no new IAM per user) | | Failure mode if app has a bug | User A could read user B's mail | AccessDenied from cloud — bug caught at the boundary | -| Where to read more | This spec + [`docs/stage6-aws-setup.md`](../stage6-aws-setup.md) | [`docs/stage7-wip.md`](../stage7-wip.md), §10.4 PrincipalTag pattern below | +| Where to read more | This spec + [`docs/cloud-setup.md`](../cloud-setup.md) | [`docs/cloud-setup.md` §4](../cloud-setup.md#4-oidc-federation-stage-7) + §10.4 PrincipalTag pattern below | The migration from Stage 6 to Stage 7 is mostly a trust-policy rewrite + a `Resource`/`Condition` swap on the bucket policy (see §10.4). No new IAM resources, no per-user provisioning. Singleton stays singleton. ### What this spec does NOT cover (intentionally) -- **Operator setup specifics** (account ID, hosted zone ID, exact ARNs) live in [`docs/stage6-aws-setup.md`](../stage6-aws-setup.md), the operator-facing runbook. Reference that for the actual AWS CLI calls. +- **Operator setup specifics** (account ID, hosted zone ID, exact ARNs) live in [`docs/cloud-setup.md`](../cloud-setup.md), the operator-facing runbook. Reference that for the actual AWS CLI calls. - **PrincipalTag enforcement details** are in §10.4 below + [`wiki/tag-based-access.md`](../../wiki/tag-based-access.md). - **OIDC issuer key derivation + JWKS** are in §10.5 + [`wiki/oidc-federation.md`](../../wiki/oidc-federation.md). 
diff --git a/docs/stage6-aws-setup.md b/docs/stage6-aws-setup.md deleted file mode 100644 index ef6004e..0000000 --- a/docs/stage6-aws-setup.md +++ /dev/null @@ -1,472 +0,0 @@ -# Stage 6 AWS Setup Runbook - -**Audience:** the operator setting up Stage 6's hosted-email infra on real AWS for the first time. Default path is a subdomain on an existing parent (`bots.litentry.org` on AWS account `429071895007`); the wiki-canonical standalone `@agentkeys-email.io` path is the post-interim option. -**Outcome:** an AWS account with SES domain verified, `agentkeys-daemon` IAM user + `agentkeys-data-role` role (static-IAM-user trust), S3 bucket + bucket policy, SES receipt rule writing inbound to S3. Once done, the Stage 6 code (mock-server + CLI + provisioner-scripts adapters) can talk to real AWS, and the Stage 5b live demo unblocks. The OIDC-federated variant (TEE-signed JWT → PrincipalTag isolation) is Stage 7 work; test preserved in [`stage7-wip.md`](./stage7-wip.md). -**Status:** interim build. TEE-held BYODKIM and TEE-signed OIDC JWTs are deferred until [`heima-gaps-vs-desired-architecture.md`](./spec/heima-gaps-vs-desired-architecture.md) §3 + §4 close. AWS-managed DKIM is used as the Stage 6 interim; replace it with TEE-BYODKIM later. - -## 0. Preconditions - -- AWS account with **IAM admin** or equivalent (roles, OIDC providers, IAM policies, S3 buckets, SES identities, Route 53 hosted zones). -- `aws` CLI v2 installed and authenticated. `aws sts get-caller-identity` must return your identity. -- A **parent domain** already hosted in Route 53. This runbook uses a subdomain carved out of the parent. We default to `bots.litentry.org` on account `429071895007` (hosted zone `Z09723983CFJOHAE3VC65`). - -### Domain decision — subdomain on litentry.org vs standalone agentkeys-email.io - -Two viable shapes: - -| Path | Domain | Hosted zone | Cost | Use when | -|---|---|---|---|---| -| **A. 
Subdomain on existing parent** (this runbook's default) | `bots.litentry.org` — email addresses look like `bot-ab12cd@bots.litentry.org` | Reuses `litentry.org` zone (`Z09723983CFJOHAE3VC65`) — just add records, no delegation | $0 — parent already registered | Stage 6 interim / internal testing; parent domain's reputation bootstraps deliverability | -| **B. Standalone canonical domain** | `agentkeys-email.io` — matches the wiki's published hosted-default | New Route 53 hosted zone on fresh registration | ~$15/yr + fresh-domain reputation build | Production-facing v0.1+; external users will see and trust the name | - -Stage 6 goes with Path A because (1) it's what the user already has set up, (2) it's free, (3) inheriting `litentry.org`'s reputation is better for initial deliverability than a brand-new `.io`. The Stage 6 code is domain-agnostic — reads `AGENTKEYS_EMAIL_DOMAIN` — so swapping to `agentkeys-email.io` later is a one-env-var change. - -Set these once at the top of your shell for the rest of the runbook: - -```bash -export REGION=us-east-1 # SES inbound regions: us-east-1, us-west-2, eu-west-1 -export DOMAIN=bots.litentry.org # the subdomain we'll run SES under -export PARENT_ZONE_ID=Z09723983CFJOHAE3VC65 # existing litentry.org Route 53 hosted zone -export ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) -export BUCKET=agentkeys-mail-${ACCOUNT_ID} # bucket names are globally unique; account-id suffix avoids collisions -``` - -Verify all four resolved correctly before proceeding: - -```bash -echo "REGION=$REGION DOMAIN=$DOMAIN PARENT_ZONE_ID=$PARENT_ZONE_ID ACCOUNT_ID=$ACCOUNT_ID BUCKET=$BUCKET" -# Expected: REGION=us-east-1 DOMAIN=bots.litentry.org PARENT_ZONE_ID=Z09723983CFJOHAE3VC65 ACCOUNT_ID=429071895007 BUCKET=agentkeys-mail-429071895007 -``` - -## 1. DNS prep on the existing litentry.org hosted zone - -No domain registration needed — we just publish records for the `bots` subdomain inside the existing litentry.org zone. 
Later sections generate DKIM tokens and an MX record; you'll UPSERT them against `$PARENT_ZONE_ID`. - -Confirm the parent zone is reachable before we start: - -```bash -aws route53 get-hosted-zone --id "$PARENT_ZONE_ID" \ - --query 'HostedZone.{name: Name, private: Config.PrivateZone}' -# Expected: {"name": "litentry.org.", "private": false} -``` - -If that fails, your aws creds don't have Route 53 permissions on this zone — fix before continuing. - -### Note: no subdomain NS delegation required - -Because `bots.litentry.org` lives *inside* the same hosted zone as `litentry.org`, every DNS change below is an UPSERT on the parent zone. You do NOT need to create a separate child hosted zone for `bots.litentry.org`. (That's only needed if someone *else* is going to manage `bots.litentry.org` records.) - -### Nothing-else-breaks check - -This runbook adds records scoped to `bots.litentry.org` and `*.bots.litentry.org`. It does NOT touch the apex `litentry.org` MX, SPF, DMARC, or any records for other subdomains. If you have existing inbound mail on `litentry.org`, it is unaffected. - -## 2. SES domain identity + DKIM (AWS-managed interim) - -Verify the domain in SES, which also generates AWS-managed DKIM keys we'll publish as CNAMEs. - -```bash -aws sesv2 create-email-identity \ - --region "$REGION" \ - --email-identity "$DOMAIN" \ - --dkim-signing-attributes NextSigningKeyLength=RSA_2048_BIT -``` - -Get the three DKIM CNAME tokens AWS generated: - -```bash -aws sesv2 get-email-identity \ - --region "$REGION" \ - --email-identity "$DOMAIN" \ - --query 'DkimAttributes.Tokens' --output text -# → three strings like: -``` - -Publish the DKIM CNAMEs + SPF + DMARC + MX records in Route 53. `jq --arg` interpolates the env vars outside shell parsing, so zsh modifiers never bite; the JSON is validated by jq on construction; no file lands on disk. 
- -```bash -read -r T1 T2 T3 <<<"$(aws sesv2 get-email-identity --region "$REGION" \ - --email-identity "$DOMAIN" --query 'DkimAttributes.Tokens' --output text)" -echo "DKIM tokens: $T1 $T2 $T3" - -aws route53 change-resource-record-sets \ - --hosted-zone-id "$PARENT_ZONE_ID" \ - --change-batch "$(jq -n \ - --arg domain "$DOMAIN" \ - --arg region "$REGION" \ - --arg t1 "$T1" --arg t2 "$T2" --arg t3 "$T3" \ - '{ - Comment: "Stage 6 email infra for \($domain)", - Changes: [ - {Action:"UPSERT", ResourceRecordSet:{Name:"\($t1)._domainkey.\($domain)", Type:"CNAME", TTL:300, ResourceRecords:[{Value:"\($t1).dkim.amazonses.com"}]}}, - {Action:"UPSERT", ResourceRecordSet:{Name:"\($t2)._domainkey.\($domain)", Type:"CNAME", TTL:300, ResourceRecords:[{Value:"\($t2).dkim.amazonses.com"}]}}, - {Action:"UPSERT", ResourceRecordSet:{Name:"\($t3)._domainkey.\($domain)", Type:"CNAME", TTL:300, ResourceRecords:[{Value:"\($t3).dkim.amazonses.com"}]}}, - {Action:"UPSERT", ResourceRecordSet:{Name:$domain, Type:"MX", TTL:300, ResourceRecords:[{Value:"10 inbound-smtp.\($region).amazonaws.com"}]}}, - {Action:"UPSERT", ResourceRecordSet:{Name:$domain, Type:"TXT", TTL:300, ResourceRecords:[{Value:"\"v=spf1 include:amazonses.com -all\""}]}}, - {Action:"UPSERT", ResourceRecordSet:{Name:"_dmarc.\($domain)", Type:"TXT", TTL:300, ResourceRecords:[{Value:"\"v=DMARC1; p=quarantine; rua=mailto:dmarc@\($domain)\""}]}} - ] - }')" -``` - -> **Note on the DMARC `rua` address:** the DMARC aggregate-report mailbox `dmarc@$DOMAIN` must exist once the receipt rule in §6 is live. Until then, DMARC reports that come in get swallowed by SES. That's fine for Stage 6 interim. For a production posture, add a dedicated `dmarc@` inbox or point the `rua` at a mailbox you already monitor. 
- -Wait ~5 minutes for propagation, then confirm verification: - -```bash -aws sesv2 get-email-identity --region "$REGION" --email-identity "$DOMAIN" \ - --query '{verified: VerifiedForSendingStatus, dkim: DkimAttributes.Status}' -# → {"verified": true, "dkim": "SUCCESS"} -``` - -> **Interim DKIM key custody — explicit.** In this Stage 6 setup, **AWS SES itself holds the private DKIM key.** We never generate, see, or store it. The three CNAME records you published point `._domainkey.$DOMAIN` at `.dkim.amazonses.com`, where AWS publishes the matching public key. SES signs every outbound message with the private key sitting inside its DKIM signing service; we just call `ses:SendRawEmail` and trust AWS to sign correctly. -> -> **What we're trusting AWS with:** DKIM signing authority for `$DOMAIN`. An AWS-internal compromise or an account takeover could forge mail that passes DKIM as us. Bounded blast radius: the signed mail cannot touch anything in the TEE, forge session tokens, or access user data — it's a reputation risk (spam or phishing claiming to be us), not a key-custody-of-user-data risk. -> -> **Migration spectrum (target = TEE-BYODKIM):** -> | Option | Who holds the private key | Rule #2 | Complexity | -> |---|---|---|---| -> | AWS-managed DKIM (this interim) | AWS SES — opaque service | ❌ | trivial | -> | BYODKIM, key in AWS KMS + Lambda signer | AWS KMS HSM | ⚠ partial | medium (adds outbound Lambda) | -> | BYODKIM, key in enclave (`dkim//v1`) | TEE-sealed, derived from master seed | ✅ | high — blocked on [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md) | -> -> **Swap to TEE-BYODKIM happens when [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md) closes.** Until then, the Stage 6 interim accepts the AWS-custody tradeoff. Do NOT upgrade to "BYODKIM with file-stored key" — that path is strictly worse than AWS-managed (lower availability, similar trust surface). - -## 3. 
IAM: daemon user + `agentkeys-data-role` - -> **Note (2026-04-28):** This role was renamed from `agentkeys-agent` → `agentkeys-data-role` to disambiguate from the project's "agent" terminology (the AI agent the credentials are minted *for* is a separate concept from the IAM role the broker assumes *into*). The broker still accepts the legacy `BROKER_AGENT_ROLE_ARN` env var for unmigrated deployments; new deployments should use `BROKER_DATA_ROLE_ARN` and the new role name throughout. - -This Stage 6 runbook uses **static IAM-user trust** as the interim: create a dedicated IAM user `agentkeys-daemon`, create the `agentkeys-data-role` role that trusts only that user, and attach the S3/SES inline permissions. The user's access keys get injected into the daemon's env at runtime; the daemon calls `sts:AssumeRole` to get temp creds before touching S3 or SES. - -For the full OIDC-federated variant (where a TEE-minted JWT is exchanged at STS for temp creds tagged with `agentkeys_user_wallet`), see [`stage7-wip.md`](./stage7-wip.md). That path delivers cryptographic per-user isolation via PrincipalTag but requires `oidc.agentkeys.dev` hosted publicly with a Let's Encrypt cert — deferred because (a) the hosting adds a Stage 7 dependency and (b) the "right" signer for that path is a TEE-derived ES256 key, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md). - -### 3a. Create the daemon IAM user - -> **Env-var sanity check (run this once before §3 and §4).** Every `jq -n --arg` call below reads `$ACCOUNT_ID`, `$BUCKET`, `$REGION`, `$DOMAIN` from the current shell. A fresh shell tab will have none of them set. -> -> ```bash -> : "${ACCOUNT_ID:?re-run §0 env setup}" -> : "${REGION:?re-run §0 env setup}" -> : "${DOMAIN:?re-run §0 env setup}" -> : "${BUCKET:?re-run §0 env setup}" -> echo "OK: ACCOUNT_ID=$ACCOUNT_ID REGION=$REGION DOMAIN=$DOMAIN BUCKET=$BUCKET" -> ``` - -```bash -aws iam create-user --user-name agentkeys-daemon - -# Generate an access key. 
Save AccessKeyId + SecretAccessKey IMMEDIATELY — -# the secret is only shown on creation. Inject into daemon env as -# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY later. -aws iam create-access-key --user-name agentkeys-daemon -# → save both values to 1Password / your secret manager. NOT to git. - -# User's only permission: sts:AssumeRole on the role we're about to create. -# All real S3/SES access comes from the role. -aws iam put-user-policy \ - --user-name agentkeys-daemon \ - --policy-name agentkeys-daemon-assume-role \ - --policy-document "$(jq -n --arg acct "$ACCOUNT_ID" '{ - Version: "2012-10-17", - Statement: [{ - Effect: "Allow", - Action: "sts:AssumeRole", - Resource: "arn:aws:iam::\($acct):role/agentkeys-data-role" - }] - }')" -``` - -> **Why `jq --arg` instead of `cat > file.json < **Per-user isolation note.** With the static-IAM-user path, per-user isolation lives *app-side* in the daemon — the daemon knows which wallet it's acting as and scopes its own S3 keys accordingly. The cloud does NOT enforce isolation; an app bug could let one wallet read another's prefix. The OIDC-federated path in [`stage7-wip.md`](./stage7-wip.md) enforces isolation at the bucket-policy layer via `${aws:PrincipalTag/agentkeys_user_wallet}` — recommended for production. See also [`wiki/tag-based-access.md`](../wiki/tag-based-access.md). - -## 4. S3 bucket for inbound mail - -Now that `agentkeys-data-role` exists, we can apply the full bucket policy in one shot — no split. 
- -```bash -aws s3api create-bucket \ - --region "$REGION" \ - --bucket "$BUCKET" \ - $([ "$REGION" != "us-east-1" ] && echo "--create-bucket-configuration LocationConstraint=$REGION") - -aws s3api put-public-access-block \ - --bucket "$BUCKET" \ - --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true -``` - -Bucket policy — SES writes inbound, `agentkeys-data-role` role reads: - -```bash -aws s3api put-bucket-policy --bucket "$BUCKET" \ - --policy "$(jq -n \ - --arg bucket "$BUCKET" \ - --arg acct "$ACCOUNT_ID" \ - '{ - Version: "2012-10-17", - Statement: [ - { - Sid: "AllowSESWriteInbound", - Effect: "Allow", - Principal: {Service: "ses.amazonaws.com"}, - Action: "s3:PutObject", - Resource: "arn:aws:s3:::\($bucket)/*", - Condition: {StringEquals: {"aws:Referer": $acct}} - }, - { - Sid: "AllowDaemonRead", - Effect: "Allow", - Principal: {AWS: "arn:aws:iam::\($acct):role/agentkeys-data-role"}, - Action: ["s3:GetObject", "s3:ListBucket"], - Resource: ["arn:aws:s3:::\($bucket)", "arn:aws:s3:::\($bucket)/*"] - } - ] - }')" -``` - -Verify both statements present: - -```bash -aws s3api get-bucket-policy --bucket "$BUCKET" --query 'Policy' --output text | jq '.Statement | length' -# → 2 -``` - -> **What's different from the OIDC path.** Here `AllowDaemonRead` gives the role read-access to the whole bucket — the daemon is trusted to self-scope via the `s3:prefix` / object-key conventions its own code applies. The OIDC path instead puts a `${aws:PrincipalTag/agentkeys_user_wallet}/*` condition here and mints one PrincipalTag per session. If you later migrate to OIDC, this statement's `Resource` + `Condition` are the two things that change. - -## 5. 
SES receipt rule for inbound - -Create a rule set and rule that writes all inbound to our S3 bucket: - -```bash -# Rule set is an account-wide resource; create once -aws ses create-receipt-rule-set --rule-set-name agentkeys --region "$REGION" - -# Rule: match *@$DOMAIN, write to S3 -aws ses create-receipt-rule \ - --region "$REGION" \ - --rule-set-name agentkeys \ - --rule "$(jq -n --arg domain "$DOMAIN" --arg bucket "$BUCKET" '{ - Name: "agentkeys-inbound", - Enabled: true, - ScanEnabled: true, - TlsPolicy: "Optional", - Recipients: [$domain], - Actions: [{ - S3Action: { - BucketName: $bucket, - ObjectKeyPrefix: "inbound/" - } - }] - }')" - -aws ses set-active-receipt-rule-set --rule-set-name agentkeys --region "$REGION" -``` - -Note: this writes raw MIME to `s3://agentkeys-mail/inbound/`. The Stage 6 mock mirrors this shape; the ses-s3 adapter in provisioner-scripts reads from this path. - -> **Follow-up:** the object-key prefix should eventually become `s3://agentkeys-mail/<user-wallet>/<address>/` so per-user bucket-policy conditions bite.
That requires a Lambda between SES and S3 to route by address (Stage 6 post-MVP) or SES's new subdomain routing. For now, all inbound lands in `inbound/` and the daemon filters by `To:` header. - -## 6. Test: send yourself a test message - -> **Heads-up: you'll likely see one S3 object already** named `inbound/AMAZON_SES_SETUP_NOTIFICATION`. AWS writes that *once* when the receipt rule first activates — it's their "I successfully tested write access to your bucket" marker, NOT your test mail. Confirms SES → S3 plumbing works; ignore it from here on. - -Send a message to `test@$DOMAIN` from ANY outside mailbox (your Gmail on your phone works; the macOS `/usr/bin/mail` command usually does NOT — no MTA configured by default, so the message sits queued locally and never reaches SES). - -Then verify it landed in S3 within ~30 s. The `LATEST` query below auto-filters out the AWS setup-notification marker so it only picks up real inbound mail: - -```bash -# Any object at all? (You'll likely see AMAZON_SES_SETUP_NOTIFICATION -# plus your test mail, if it arrived.) -aws s3 ls "s3://$BUCKET/inbound/" --recursive - -# Grab the most-recent REAL inbound object (excluding the SES setup -# marker) and dump the first 400 bytes of raw MIME: -LATEST=$(aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "inbound/" \ - --query 'sort_by(Contents,&LastModified)[?Key!=`inbound/AMAZON_SES_SETUP_NOTIFICATION`] | [-1].Key' \ - --output text) -[ "$LATEST" = "None" ] && { echo "no inbound mail yet — see troubleshooting below"; } \ - || { echo "latest key: $LATEST"; aws s3 cp "s3://$BUCKET/$LATEST" - | head -c 400; } -``` - -If you see your `Subject:` + body in the output, the inbound pipeline is live. Skip to §7. - -> **Alternative sender — SES self-loop** (avoids needing to switch to your Gmail).
Sends from one address on your verified domain to another — same domain, but goes out through the public internet and back via SES inbound, which exercises the full path. Only works if you're out of SES sandbox mode OR the recipient address is verified: -> -> ```bash -> aws ses send-email --region "$REGION" \ -> --from "noreply@$DOMAIN" \ -> --destination "ToAddresses=test@$DOMAIN" \ -> --message "Subject={Data=stage6-setup-test},Body={Text={Data=hello stage 6}}" -> # If sandbox-mode error: aws sesv2 create-email-identity --region "$REGION" --email-identity "test@$DOMAIN" -> # then click the verification link AWS emails to test@$DOMAIN (which you can read via S3 once the receipt rule fires). -> ``` - -### Troubleshooting — nothing landed in S3 - -```bash -# (a) Is the receipt rule set active? Should show "agentkeys". -aws ses describe-active-receipt-rule-set --region "$REGION" \ - --query 'Metadata.Name' - -# (b) Does $DOMAIN's MX record resolve to SES inbound? Should show -# "10 inbound-smtp.us-east-1.amazonaws.com." (or your region's). -dig MX "$DOMAIN" +short - -# (c) Is SES in the right identity state? -aws sesv2 get-email-identity --region "$REGION" --email-identity "$DOMAIN" \ - --query '{verified: VerifiedForSendingStatus, dkim: DkimAttributes.Status}' - -# (d) Did the sender get a bounce? If you sent from Gmail, check Gmail's -# outbox / the inbox for a delivery failure notification. -``` - -Most common cause when all four checks pass: you sent from a sender that failed silently. Retry from a distinct outside mailbox you can monitor. - -## 7. Operational notes — inbound spam & lifecycle - -The wildcard receipt rule from §5 accepts mail to **any** address under `$DOMAIN`, including addresses we never minted. SES's built-in scanners stamp `X-SES-Spam-Verdict` and `X-SES-Virus-Verdict` headers but do not drop mail; storage grows unboundedly without intervention. Three hardening items in priority order for the throwaway-inbox use case. 
- -### 7.1 S3 lifecycle policy — auto-expire `inbound/*` after 30 days (do this now) - -Single CLI call. Prevents the bucket from growing forever as bot inboxes accumulate verification emails + any spam that slips through. Throwaway addresses are intended to receive one or two messages then be discarded, so 30 days is generous. - -```bash -aws s3api put-bucket-lifecycle-configuration --bucket "$BUCKET" \ - --lifecycle-configuration "$(jq -n '{ - Rules: [{ - ID: "inbound-30d-ttl", - Status: "Enabled", - Filter: {Prefix: "inbound/"}, - Expiration: {Days: 30} - }] - }')" - -# Verify -aws s3api get-bucket-lifecycle-configuration --bucket "$BUCKET" \ - --query 'Rules[0].{id: ID, prefix: Filter.Prefix, days: Expiration.Days}' -# → {"id": "inbound-30d-ttl", "prefix": "inbound/", "days": 30} -``` - -Tune `Days` if you want shorter / longer retention. AWS deletes objects in batches once daily, so the actual delete latency is up to 48 h. - -### 7.2 Spam handling — read-time, not write-time (Stage 6 interim) - -The architecturally clean spot to drop spam is at READ time in our daemon: when it downloads an `.eml` from S3, parse the `X-SES-Spam-Verdict` header; if `FAIL`, skip and don't pass the body to the scraper. This keeps the SES receipt rule trivial (one S3Action), avoids a per-message Lambda invocation cost, and pushes the policy decision to the place that knows what's "real" mail (the bot expects an OpenRouter verification — anything else is spam regardless of SES's verdict). - -Pseudo-code for the daemon's filter: - -```rust -// in provisioner-scripts/src/lib/email-backends/ses-s3.ts equivalent -fn is_spam(eml: &str) -> bool { - eml.lines().any(|l| l.starts_with("X-SES-Spam-Verdict: FAIL") - || l.starts_with("X-SES-Virus-Verdict: FAIL")) -} -``` - -Add a write-time Lambda ONLY if S3 cost or daemon poll-bandwidth becomes a problem at scale. For Stage 6 demo, read-time filter is sufficient. 
- -### 7.3 SES sandbox vs production — only matters for OUTBOUND - -Fresh AWS accounts ship with SES in **sandbox mode**, which restricts outbound to verified recipient addresses (cap of 200/day). **Inbound is unaffected** — the wildcard receipt rule + S3 write works regardless of sandbox status, which is why your test from Gmail landed despite (likely) being in sandbox. - -You only need to request production access when the agent itself starts SENDING mail to arbitrary user addresses (replies, notifications). Request via AWS Console → Support → Create case → "Service limit increase" → "SES Sending Limits" → "Request Production Access". Review usually ≤24 h; provide a one-line use case ("transactional verification mail for AI agent inboxes"). - -For Stage 6 demo (Gmail-style verification email INBOUND), no action needed. - -### What we're NOT mitigating in Stage 6 (deferred) - -- **Address enumeration** — an attacker scanning `bot-aaaaaa@`, `bot-aaaaab@`... gets the same "accepted" response from SES. Mitigation requires a per-address allowlist (Lambda lookup against our chain) before S3Action. Tracked as a Stage 6 post-MVP item. -- **Per-recipient inbound rate limit** — none enforced. A bot inbox can be flooded with tens of thousands of messages. Mitigation: same Lambda pattern. -- **Sender allow/deny lists** — SES does not have native domain allowlists; would need a Lambda. For verification emails, the sender domain is whoever the agent signs up at (OpenRouter, etc.) — too dynamic for a static allowlist anyway. - -## 8. 
Hand-back to Claude / the Stage 6 code - -When the above completes, share these values back so I can wire them into the Stage 6 code (via env vars, NOT committed to git): - -``` -ACCOUNT_ID=429071895007 -REGION=us-east-1 -DOMAIN=bots.litentry.org -PARENT_ZONE_ID=Z09723983CFJOHAE3VC65 -SES_VERIFIED= -DKIM_STATUS= -BUCKET_ARN=arn:aws:s3:::agentkeys-mail-429071895007 -ROLE_ARN=arn:aws:iam::429071895007:role/agentkeys-data-role -DAEMON_USER_ARN=arn:aws:iam::429071895007:user/agentkeys-daemon -DAEMON_ACCESS_KEY_ID= -DAEMON_SECRET_ACCESS_KEY= # share via 1Password, NOT in chat -``` - -I'll then wire `AGENTKEYS_EMAIL_BACKEND=ses-s3` in provisioner-scripts to read from `$BUCKET_ARN` using the `agentkeys-daemon` user's access key to assume `$ROLE_ARN` at runtime. - -## Follow-ups tracked elsewhere - -- **TEE-BYODKIM**: replace AWS-managed DKIM with TEE-held Ed25519. Depends on [`heima-gaps §4`](./spec/heima-gaps-vs-desired-architecture.md). Track via [issue #50](https://github.com/litentry/agentKeys/issues/50). -- **TEE-signed OIDC JWT**: replace `agentkeys-oidc-stub` / static-IAM trust with TEE-derive(`oidc/issuer/v1`) + sts:AssumeRoleWithWebIdentity. Depends on heima-gaps §3. -- **Per-address S3 prefix**: currently all inbound lands in `s3://$BUCKET/inbound/`; Stage 6 post-MVP should route to `s3://$BUCKET/<user-wallet>/<address>/`
either via SES Lambda or subdomain routing. -- **Throwaway inbox lifecycle**: currently addresses are unbounded; Stage 6 post-MVP should add TTL + audit-logged revocation. - -## Cleanup (if you want to tear down) - -```bash -# Disable the active rule set (keeps SES inbound from hitting this bucket) -aws ses set-active-receipt-rule-set --rule-set-name "" --region "$REGION" - -# Drop the role -aws iam delete-role-policy --role-name agentkeys-data-role --policy-name agentkeys-data-role-inline -aws iam delete-role --role-name agentkeys-data-role - -# Drop the daemon user (list + delete access keys first — can't delete a user with keys) -for KEY in $(aws iam list-access-keys --user-name agentkeys-daemon --query 'AccessKeyMetadata[*].AccessKeyId' --output text); do - aws iam delete-access-key --user-name agentkeys-daemon --access-key-id "$KEY" -done -aws iam delete-user-policy --user-name agentkeys-daemon --policy-name agentkeys-daemon-assume-role -aws iam delete-user --user-name agentkeys-daemon - -# Drop the bucket (contents first) -aws s3 rm "s3://$BUCKET" --recursive -aws s3api delete-bucket --bucket "$BUCKET" - -# Delete SES domain identity -aws sesv2 delete-email-identity --region "$REGION" --email-identity "$DOMAIN" - -# Domain / hosted zone stays — you're using the existing litentry.org zone. -# Only the Stage 6 records we UPSERTed need cleanup; leave DNS alone unless -# you want to revert SPF/DMARC/MX/DKIM records on bots.litentry.org. -``` diff --git a/docs/stage7-wip.md b/docs/stage7-wip.md index 0ac239d..3e6e226 100644 --- a/docs/stage7-wip.md +++ b/docs/stage7-wip.md @@ -1,132 +1,66 @@ -# Stage 7 — WIP notes +# Stage 7 — Generalized OIDC Provider -> **Status (2026-04-27).** Phase 1 (broker server) shipped in PR [#60](https://github.com/litentry/agentKeys/pull/60).
Phase 2 (OIDC issuer + provisioner-scripts AWS-cred wiring) ships in PR [#61](https://github.com/litentry/agentKeys/pull/61) and is **architecturally complete**: the Rust broker owns the OIDC surface end-to-end, the audit destination is the broker's local SQLite (one valid choice in the [pluggable audit-destination layer](spec/architecture.md#11-audit-destination-is-pluggable)), and the provisioner subprocess is wired through the broker for AWS-cred minting. What's left is operational deployment for cloud-side OIDC federation (public TLS, `aws iam create-open-id-connect-provider`) — out of scope for the architecture but relevant to the cloud-deployment runbook. +> **Status (2026-04-28).** Architecturally complete. The Rust broker owns the OIDC surface end-to-end (discovery + JWKS + bearer-gated `mint-oidc-jwt`); the provisioner-scripts AWS-cred path is wired through the broker; the audit destination is the broker's local SQLite per [`architecture.md` §11](spec/architecture.md#11-audit-destination-is-pluggable). The remaining work is operational: deploy the broker on a public hostname so AWS / GCP / Tencent IAM can fetch the JWKS during OIDC-provider registration. That deployment recipe is split between this doc (broker bring-up) and [`cloud-setup.md`](./cloud-setup.md) (cloud account provisioning). -## What Stage 7 is +## What Stage 7 delivers -Two halves that compose into the canonical "broker, not proxy" architecture: +A long-running broker that issues two kinds of short-lived credentials to authenticated daemons, so app-developer machines never hold long-lived AWS keys: -1. **Phase 1 — Broker server (shipped, PR #60).** A long-running HTTP service holds the operator's long-lived `agentkeys-daemon` AWS access key and brokers 1-hour scoped credentials to authenticated daemons. Lets app developers run daemons against operator infrastructure without ever touching AWS keys themselves. -2. 
**Phase 2 — OIDC issuer + AWS-cred wiring (shipped, PR #61).** The Rust broker now serves the conforming OIDC discovery + JWKS surface and a bearer-gated `POST /v1/mint-oidc-jwt` endpoint, replacing the standalone TS `services/oidc-stub/` package. Provisioner-scripts AWS-cred wiring is live: `agentkeys provision ` (CLI) and the `agentkeys.provision` MCP tool fetch 1-hour temp creds from the broker and inject them into the scraper subprocess env when `--broker-url` is set. The audit destination is the broker's append-only SQLite at `~/.agentkeys/broker/audit.sqlite` — see [§"Audit destination is pluggable" below](#audit-destination-is-pluggable) for why that's a complete v0.1 choice, not a placeholder. - -Per [`docs/spec/plans/development-stages.md`](./spec/plans/development-stages.md), this is the "Generalized OIDC Provider" stage after Stage 6 (Federated Own Email). - -> **Scope boundary (added 2026-04-26).** Stage 7 ships the per-user isolation primitive — JWT claim → PrincipalTag → resource-policy gate. **It does not commit a position on where credential ciphertext lives.** The previously-assumed `pallet-secrets-vault` (on-chain encrypted blob store) is superseded by [`stage8-wip.md`](./stage8-wip.md), which moves ciphertext off-chain into the same PrincipalTag-gated S3 prefixes. See [`docs/spec/threat-model-key-custody.md`](./spec/threat-model-key-custody.md) for the architectural rationale. - -## Phase 1 — Broker server (shipped, PR #60) - -The credential broker that lets app developers run daemons without holding any AWS keys. Static-IAM trust path; OIDC federation deferred to phase 2. - -**Code:** - -- [`crates/agentkeys-broker-server/`](../crates/agentkeys-broker-server/) — axum HTTP service. - - `POST /v1/mint-aws-creds` — bearer-token in (validated via the backend's `/session/validate`), 1-hour scoped AWS creds out (`sts:assume-role` on the operator's daemon key). 
- - `GET /healthz`, `GET /readyz` — operator supervisor probes; `readyz` checks backend reachability + `sts:GetCallerIdentity`. - - SQLite audit log on every mint (sha256-hashed bearer tokens, wallet, outcome, sts session name) at `$HOME/.agentkeys/broker/audit.sqlite` by default. - - Trait-abstracted `StsClient` with `AwsStsClient` (production) and `StubStsClient` (gated by `test-stub` feature) — testable without live AWS. -- [`crates/agentkeys-mock-server/`](../crates/agentkeys-mock-server/) gains `GET /session/validate` so the broker validates bearer tokens through the existing session backend rather than duplicating session state. -- [`crates/agentkeys-daemon/`](../crates/agentkeys-daemon/) gains `--broker-url` / `AGENTKEYS_BROKER_URL` flag (consumer wiring of temp creds into provisioner-scripts lands in phase 2). - -**Operator setup + test:** see [`docs/operator-runbook.md`](./operator-runbook.md) for start / supervise / rotate / audit, and [`docs/dev-setup.md` §5](./dev-setup.md) for the three-terminal solo-dev loop. - -**End-to-end proof for phase 1** (run from inside the workspace): - -```bash -# Terminal A — mock backend -cargo run --release -p agentkeys-mock-server -- --port 8090 - -# Terminal B — broker. AWS credentials come from the operator's -# ~/.aws/credentials profile (e.g. agentkeys-daemon) via `awsp` or -# AWS_PROFILE. ACCOUNT_ID + REGION live in the operator's shell. The -# broker derives BROKER_DATA_ROLE_ARN from ACCOUNT_ID. 
-awsp agentkeys-daemon -export BROKER_BACKEND_URL=http://127.0.0.1:8090 -cargo run --release -p agentkeys-broker-server -- --port 8091 - -# Terminal C — proof: mint a session, then mint AWS creds via the broker -SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \ - -H 'content-type: application/json' \ - -d '{"auth_token":"phase1-demo"}' | jq -r .session) - -CREDS=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-aws-creds \ - -H "Authorization: Bearer $SESSION") -echo "$CREDS" | jq '{access_key_id, expiration, wallet}' -# → real 1h temp creds, scoped to the assumed agentkeys-data-role role -``` - -Acceptance: `curl /healthz` → 200, `curl /readyz` → 200, `mint-aws-creds` returns creds, audit row appears in `~/.agentkeys/broker/audit.sqlite`. - -**Out of phase 1 (now landing in phase 2):** +| Endpoint | Auth | Output | Used for | +|---|---|---|---| +| `POST /v1/mint-aws-creds` | bearer | 1 h scoped AWS temp creds (via `sts:AssumeRole` on the operator's daemon key) | Direct cred path — operator-trusted, app-side isolation. | +| `POST /v1/mint-oidc-jwt` | bearer | Short-lived ES256 JWT | Federated path — `sts:AssumeRoleWithWebIdentity` → cloud-enforced PrincipalTag isolation. | +| `GET /.well-known/openid-configuration` | none | OIDC discovery doc | Consumed by `aws iam create-open-id-connect-provider` at registration time. | +| `GET /.well-known/jwks.json` | none | JWK Set with the broker's ES256 P-256 public key + `kid` | Same — AWS pulls the public key once, caches it. | -- Rust-broker OIDC discovery / JWKS / `mint-oidc-jwt` (delivered — see §"Phase 2 — OIDC issuer (Rust broker)" below). -- TS `services/oidc-stub/` retirement (directory deleted in this PR; OIDC surface now lives entirely in the Rust broker). -- Provisioner-scripts AWS-cred consumer rewiring (delivered — `agentkeys provision` and `agentkeys.provision` MCP tool now mint creds via the broker when `--broker-url` is set). 
+Both `mint-*` endpoints write a row to the broker's append-only SQLite audit DB before credentials leave the process. JWT mints land with `requested_role = "oidc_jwt"`; AWS-cred mints land with the assumed role ARN. -**Operational follow-ups (not architectural blockers):** +> **Scope boundary.** Stage 7 ships the **per-user isolation primitive** — JWT claim → PrincipalTag → resource-policy gate. It does **not** commit a position on where credential ciphertext lives; that's Stage 8 ([`stage8-wip.md`](./stage8-wip.md)). -- `aws iam create-open-id-connect-provider` against a public TLS endpoint + `sts:AssumeRoleWithWebIdentity` exchange. The recipe is in §["Cloud federation deployment"](#cloud-federation-deployment) below. This is a deployment task, not a Stage-7 design task — the broker already serves the conforming OIDC surface; what's missing is just routing public TLS traffic to it. -- TEE-derived signer (a *higher-assurance* swap of the on-disk ES256 keypair). The on-disk keypair shipped today is a complete v0.1 signer per the [pluggable audit destination](spec/architecture.md#11-audit-destination-is-pluggable) framing; TEE is the v0.2+ hardening path, not a Stage-7 prerequisite. -- Chain-anchored audit (Heima or otherwise). Phase 2 ships with the broker's local SQLite as the audit destination — also a complete v0.1 choice. Operators who want chain anchoring can swap the audit backend without touching the OIDC issuer code. +## Code -## Phase 2 — OIDC issuer (Rust broker) +| Crate | What it owns | +|---|---| +| [`crates/agentkeys-broker-server/`](../crates/agentkeys-broker-server/) | Axum HTTP service. ES256 keypair gen/persist (mode 0600), JWT signing, audit DB, STS client (trait-abstracted with a `test-stub` feature for offline tests). | +| [`crates/agentkeys-mock-server/`](../crates/agentkeys-mock-server/) | Backend stub. Issues session bearers via `POST /session/create`; the broker validates against `GET /session/validate`. 
In-memory SQLite — fine for dev, not a long-running production backend. | +| [`crates/agentkeys-cli/`](../crates/agentkeys-cli/) + [`crates/agentkeys-mcp/`](../crates/agentkeys-mcp/) + [`crates/agentkeys-daemon/`](../crates/agentkeys-daemon/) | `--broker-url` / `AGENTKEYS_BROKER_URL` everywhere; `provision` subcommands fetch AWS creds via the broker before spawning scrapers. | -The Rust broker exposes three new endpoints. They are the same endpoints the TS oidc-stub used to serve; the schemas, JWT shape, JWKS shape, and bucket-policy enforcement are byte-for-byte compatible so federation recipes already written against the stub keep working unchanged. +## Configuration -| Method | Path | Auth | Purpose | -|---|---|---|---| -| `GET` | `/.well-known/openid-configuration` | none | Discovery doc the AWS IAM `create-open-id-connect-provider` step reads. | -| `GET` | `/.well-known/jwks.json` | none | JWK Set with the broker's ES256 P-256 public key + `kid`. | -| `POST` | `/v1/mint-oidc-jwt` | bearer | Validates the bearer against the backend's `/session/validate`, then mints a short-lived ES256 JWT carrying `sub=agentkeys:agent:`, `aud=sts.amazonaws.com`, `agentkeys_user_wallet=`. | - -### Configuration +The broker reads AWS credentials from the SDK default chain (instance profile → named profile → static keys, in that order). See [`operator-runbook.md` §2](./operator-runbook.md#2-aws-credentials) for the full credential story. | Env var | Default | Notes | |---|---|---| -| `BROKER_OIDC_ISSUER` | `https://oidc.agentkeys.dev` | The exact string emitted as `iss` and as the discovery `issuer`. AWS requires this to match the URL `create-open-id-connect-provider --url` was registered with. | -| `BROKER_OIDC_KEYPAIR_PATH` | `~/.agentkeys/broker/oidc-keypair.json` | On first start the broker generates a P-256 keypair and persists it mode 0600. Subsequent restarts reuse the same `kid` so the registered IAM OIDC provider stays valid. 
| -| `BROKER_OIDC_JWT_TTL_SECONDS` | `300` | Bounded `[60, 3600]`. STS only checks the JWT at the moment of exchange; short TTL limits replay risk if the broker leaks a JWT. | - -### Audit log - -Both `mint-aws-creds` and `mint-oidc-jwt` write to the same SQLite audit table at `~/.agentkeys/broker/audit.sqlite`. JWT mints land with `requested_role = "oidc_jwt"` and `sts_session_name = ` — operators see one ledger for both credential types. - - -#### Why local SQLite is a complete v0.1 audit destination - -Earlier docs ([`threat-model-key-custody.md`](spec/threat-model-key-custody.md), `wiki/blockchain-tee-architecture.md`) describe audit + anchoring as Heima-pallet operations. That description is **one instance** of the architecture, not a constraint of it. The audit/anchoring layer is a pluggable backend behind a single interface: append a tamper-evident record of *who did what, when, against which agent*. - -Per [`architecture.md` §11](spec/architecture.md#11-audit-destination-is-pluggable), the trait surface accommodates: - -- **Federated public chain** — Heima parachain, other Substrate parachains. -- **General-purpose public chain** — Ethereum, Solana, Sui, Cosmos. -- **Permissioned / consortium chain** — Hyperledger Fabric, Quorum, Aliyun BaaS (relevant for jurisdictions like China where public-chain anchoring is non-starter). -- **Plain backend server** — append-only SQLite (what the broker ships today), Postgres + immutable WAL, S3-with-Object-Lock, sealed log services. -- **TEE-attested append-only log** — Heima TEE + sealed storage, AWS Nitro + KMS, Azure Confidential Ledger. +| `BROKER_BACKEND_URL` | (required) | URL of the session-management backend (mock-server in dev, chain in v0.2+). | +| `BROKER_DATA_ROLE_ARN` | derived from `ACCOUNT_ID` | ARN of `agentkeys-data-role`. Legacy `BROKER_AGENT_ROLE_ARN` accepted for unmigrated deployments. | +| `BROKER_OIDC_ISSUER` | `https://oidc.agentkeys.dev` | Public URL emitted as `iss`. 
**Must** match the URL registered with `aws iam create-open-id-connect-provider` byte-for-byte. | +| `BROKER_OIDC_KEYPAIR_PATH` | `~/.agentkeys/broker/oidc-keypair.json` | ES256 keypair (mode 0600), generated on first start, reused thereafter so the registered IAM provider stays valid. | +| `BROKER_OIDC_JWT_TTL_SECONDS` | `300` | Bounded `[60, 3600]`. Short TTL limits replay window. | +| `BROKER_AUDIT_DB_PATH` | `~/.agentkeys/broker/audit.sqlite` | Audit destination. | -The Stage 7 broker ships in the "plain backend server" row. SQLite at `~/.agentkeys/broker/audit.sqlite` is append-only by virtue of the application code (only `INSERT`s, never `UPDATE`/`DELETE`), keys are sha256-hashed before write, and the audit-write happens *before* credentials leave the broker — that's the property operators need. Migrating to a chain-anchored destination is a backend swap, not a Stage-7 redesign. +## Audit destination is pluggable -This is what makes Phase 2 architecturally complete today: the OIDC issuer + audit pair is one self-contained unit; the audit's storage backend is a deployment-time choice. +Earlier docs describe audit + anchoring as a Heima-pallet operation. That's **one** instance of the architecture, not a constraint of it. The audit layer is a pluggable backend behind a single interface: append a tamper-evident record of *who did what, when, against which agent*. 
Per [`architecture.md` §11](spec/architecture.md#11-audit-destination-is-pluggable): -### Provisioner-scripts AWS-cred wiring +| Class | Examples | +|---|---| +| Federated public chain | Heima parachain, other Substrate parachains | +| General-purpose public chain | Ethereum, Solana, Sui, Cosmos | +| Permissioned / consortium chain | Hyperledger Fabric, Quorum, Aliyun BaaS (China) | +| Plain backend server | append-only SQLite (broker default), Postgres + immutable WAL, S3-with-Object-Lock | +| TEE-attested append-only log | Heima TEE + sealed storage, AWS Nitro + KMS, Azure Confidential Ledger | -Operators no longer have to source `scripts/stage6-demo-env.sh`. With `--broker-url` set on the daemon, MCP, or CLI: +Stage 7 ships in the "plain backend server" row. Migrating to a chain-anchored destination is a backend swap, not a redesign. -1. Before spawning the scraper subprocess, the provisioner calls `POST /v1/mint-aws-creds` with its session bearer. -2. The broker validates the bearer, runs `sts:AssumeRole` on the operator's daemon key, and returns 1-hour scoped creds. -3. The provisioner injects `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN` (plus `AWS_REGION`/`AWS_DEFAULT_REGION` if set) into the subprocess env. -4. The scraper's existing SES → S3 email path works unchanged. +## Operator end-to-end test -The legacy `stage6-demo-env.sh` flow still works when `--broker-url` is unset; the wiring is purely additive. - -## Operator end-to-end test (Phase 2) - -A four-terminal walk-through that exercises everything Phase 2 ships, with no AWS round-trip required (the broker's `--skip-startup-check` lets you stand it up offline). Run it once after a fresh build to confirm your operator setup is wired correctly. Times below are wall-clock expectations on a recent laptop. +A four-terminal walk-through that exercises everything Stage 7 ships, with no AWS round-trip required (`--skip-startup-check` lets the broker stand up offline). 
Run it once after a fresh build to confirm operator wiring. ### Prereqs -- A release build: `cargo build --release -p agentkeys-mock-server -p agentkeys-broker-server -p agentkeys-cli` (≈ 90 s cold). +- Release build: `cargo build --release -p agentkeys-mock-server -p agentkeys-broker-server -p agentkeys-cli` (≈ 90 s cold). - `jq` and `curl` on `$PATH`. -- For the AWS-side check (step 4b + 6), `awsp agentkeys-daemon` (or another profile with `sts:AssumeRole` on `agentkeys-data-role`) plus `ACCOUNT_ID` from your operator setup. For offline-only, skip those steps and use `--skip-startup-check`. +- For the AWS-cred path (step 4b): `awsp agentkeys-daemon` (or another profile with `sts:AssumeRole` on `agentkeys-data-role`) plus `ACCOUNT_ID` from your operator setup. Skip step 4b on the offline path. ### Walk-through @@ -134,590 +68,187 @@ A four-terminal walk-through that exercises everything Phase 2 ships, with no AW # Terminal A — backend (mock-server, in-memory SQLite) ./target/release/agentkeys-mock-server --port 8090 # expect: "Mock server running on port 8090" -# CAVEAT: this server keeps state in-memory — it works for the E2E test -# but is NOT a long-running production backend. See the "Remote -# deployment" section below for the production backend story. - -# Terminal B — broker. Two ways to pass AWS credentials: -# • Offline path (no AWS round-trip): --skip-startup-check, no creds needed. -# • Live path: awsp agentkeys-daemon (SDK default chain) -# See docs/operator-runbook.md §3.1 for the full credential story. 
+ +# Terminal B — broker export BROKER_BACKEND_URL=http://127.0.0.1:8090 -export BROKER_OIDC_ISSUER=http://localhost:8091 # http for dev only; production must be https +export BROKER_OIDC_ISSUER=http://localhost:8091 # http for dev only; production is https export ACCOUNT_ID=000000000000 # offline path tolerates a stub ./target/release/agentkeys-broker-server --port 8091 --skip-startup-check -# expect: "AWS credentials: SDK default chain (AWS_PROFILE / ~/.aws / IMDS)" +# expect: "AWS credentials: SDK default chain ..." # "OIDC signer ready" with kid=v1- # "broker listening on 0.0.0.0:8091" # Terminal C — checks -# 1. Healthz -curl -sf http://127.0.0.1:8091/healthz # → "ok" -# 2. Discovery doc (the surface AWS would consume after registration) +curl -sf http://127.0.0.1:8091/healthz # → "ok" curl -sf http://127.0.0.1:8091/.well-known/openid-configuration | jq . -# 3. JWKS (the public-key Set the issuer publishes) curl -sf http://127.0.0.1:8091/.well-known/jwks.json | jq '.keys[0] | {kty, crv, alg, kid}' -# 4. Mint a session against the backend, then mint an OIDC JWT and an -# AWS-creds response from the broker. +# 1. Mint a session bearer against the backend. +# `auth_token` is the developer-facing handle; the mock-server resolves +# it to a wallet on first use. In production this comes from the chain. SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \ -H 'content-type: application/json' \ -d '{"auth_token":"phase2-e2e"}' | jq -r .session) +echo "SESSION=$SESSION" -# 4a. JWT mint +# 2a. Mint an OIDC JWT (decode the claims to verify shape) JWT=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \ -H "Authorization: Bearer $SESSION" | jq -r .jwt) echo "$JWT" | awk -F. '{print $2}' | base64 --decode 2>/dev/null | jq . # expect: claims with iss, sub=agentkeys:agent:, aud=sts.amazonaws.com, -# agentkeys_user_wallet, iat, exp. +# agentkeys_user_wallet, iat, exp. -# 4b. AWS-creds mint (requires real AWS daemon creds; skip on the -# offline path). 
+# 2b. AWS-creds mint (LIVE path — needs real daemon creds; skip offline) CREDS=$(curl -sf -X POST http://127.0.0.1:8091/v1/mint-aws-creds \ -H "Authorization: Bearer $SESSION") echo "$CREDS" | jq '{access_key_id, expiration, wallet}' -# 5. Provisioner-scripts wiring (CLI side). With AGENTKEYS_BROKER_URL -# set, `agentkeys provision` fetches AWS creds via the broker before -# spawning the scraper subprocess — no stage6-demo-env.sh sourcing. +# 3. Provisioner-scripts wiring (CLI side). With AGENTKEYS_BROKER_URL set, +# `agentkeys provision` fetches AWS creds via the broker before spawning +# the scraper subprocess. export AGENTKEYS_BROKER_URL=http://127.0.0.1:8091 -./target/release/agentkeys init --mock-token phase2-e2e # session in OS keyring -./target/release/agentkeys provision openrouter --force # full live signup; takes minutes -# alternatively: confirm just the broker hop without doing the live signup -./target/release/agentkeys --broker-url http://127.0.0.1:8091 \ - provision openrouter --help # should not error on the env-fetch path +./target/release/agentkeys init --mock-token phase2-e2e +./target/release/agentkeys provision openrouter --help # exercises broker fetch path -# 6. Audit log inspection +# 4. Audit log sqlite3 ~/.agentkeys/broker/audit.sqlite \ "SELECT outcome, requested_role, requester_wallet, occurred_at FROM mint_audit ORDER BY id DESC LIMIT 10;" -# expect: a row per mint, with requested_role IN ('arn:aws:iam::*:role/agentkeys-data-role', 'oidc_jwt') +# expect: rows with requested_role IN ('arn:aws:iam::*:role/agentkeys-data-role', 'oidc_jwt') ``` ### Acceptance -- `/healthz` and `/readyz` both return `200`. -- `/.well-known/openid-configuration` returns a body where `issuer` matches `BROKER_OIDC_ISSUER`. +- `/healthz` and `/readyz` return `200`. +- `/.well-known/openid-configuration` `.issuer` matches `BROKER_OIDC_ISSUER` byte-for-byte. - `/.well-known/jwks.json` returns a JWK Set with `alg=ES256`, `crv=P-256`, a stable `kid`. 
-- `mint-oidc-jwt` returns a JWT whose claims (decoded) include `agentkeys_user_wallet` matching the session's wallet, `aud=sts.amazonaws.com`, and a future `exp`. -- The audit DB has a fresh row per mint with `outcome=ok` (or `auth_failed` for the negative checks below). +- `mint-oidc-jwt` returns a JWT whose claims include `agentkeys_user_wallet`, `aud=sts.amazonaws.com`, future `exp`. +- The audit DB has one row per mint with `outcome=ok`. -### Negative checks (verify the failure modes) +### Negative checks ```bash -# Missing bearer → 401 +# Missing bearer → 401 + auth_failed audit row curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt -# expect: 401, with one auth_failed row in the audit DB. -# Bogus bearer → 401 +# Bogus bearer → 401 + auth_failed audit row curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \ -H 'Authorization: Bearer never-minted' -# expect: 401 + auth_failed audit row. -# Backend down (kill terminal A first) → 502 +# Backend down (kill terminal A first) → 502 + backend_error audit row curl -sf -o /dev/null -w "%{http_code}\n" -X POST http://127.0.0.1:8091/v1/mint-oidc-jwt \ -H "Authorization: Bearer $SESSION" -# expect: 502, with a backend_error audit row (NOT auth_failed — the -# distinction is what an oncall operator chases when triaging). ``` -If any of these don't match, capture the broker's stderr (Terminal B) and the audit row, then file an issue — the broker exposes one ledger so triage shouldn't require log digging. +The `backend_error` vs `auth_failed` distinction is what oncall chases — keep them disambiguated in the audit DB. ## Remote deployment -This section is for operators who want their broker reachable by daemons running on developer laptops, CI, or cloud sandboxes — and who eventually want AWS / GCP / etc. to OIDC-federate against it. 
Phase 2 architecture is complete on a single host (see the operator E2E above); these instructions take that single-host setup and put it on real infrastructure. +For the broker to be reachable by daemons on developer laptops / CI / cloud sandboxes — and for AWS to OIDC-federate against it — it needs a public HTTPS hostname. The split: + +- **Cloud-account provisioning** (DNS, EIP, SES/S3, IAM, OIDC federation): [`cloud-setup.md`](./cloud-setup.md). +- **Broker-host bootstrap** (binaries, systemd, nginx, certbot): this section + [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh). ### Topology ``` ┌── developer laptop / CI / cloud sandbox ──┐ │ agentkeys-daemon (or `agentkeys` CLI) │ -│ --broker-url https://broker.litentry.org │ +│ --broker-url https://broker.litentry.org │ └───────────────────┬───────────────────────┘ │ HTTPS (bearer) ▼ -┌── operator-managed host(s) ─────────────────────────────┐ -│ │ -│ reverse proxy (TLS terminator) │ -│ nginx + Let's Encrypt / AWS ALB + ACM / │ -│ Caddy / CloudFront in front of broker │ -│ │ │ -│ ▼ │ -│ agentkeys-broker-server :8091 ──────────┐ │ -│ (BROKER_BACKEND_URL=http://backend:8090) │ │ -│ │ │ -│ agentkeys-mock-server (or Heima-backed │ HTTP │ -│ successor) :8090 ◄──────────┘ │ -│ │ -│ ~/.agentkeys/broker/audit.sqlite │ -│ ~/.agentkeys/broker/oidc-keypair.json (mode 0600) │ -└──────────────────────────────────────────────────────────┘ -``` - -The two server processes are deployed together. The mock backend (or its production successor) is **not** exposed publicly — only the broker is. The broker reaches the backend over the operator's private network. - -### Backend server: production caveats - -`agentkeys-mock-server` exists for v0 operators who don't yet have Heima integration. It's deliberately simple — Axum + **in-memory** SQLite — which means: - -- **State is lost on restart.** Every running session, identity link, and audit row vanishes when the process exits. 
For development this is fine; for a backend that other developers' daemons depend on, it's not. -- **No HA.** Single-process, single-node. -- **No TLS at the listener.** Always front it with a reverse proxy (or co-locate with the broker on the same private network and don't expose it externally). - -For v0.1 operators, two pragmatic options: - -1. **Single-host deployment with persistent state (recommended for self-hosted teams).** Keep the mock-server but add a small wrapper: front it with `systemd` (or Docker `restart: unless-stopped`), and mount the SQLite file on persistent storage — `docs/operator-runbook.md` will track the exact patches needed in the next iteration. Until that lands, treat session loss on restart as part of the operator runbook (have developers re-`init` after a backend restart). -2. **Skip the mock and wait for Heima.** If your timeline allows, hold this deployment until the chain-backed backend lands and use the real Heima session-management path. Stage 7 phase 2 isn't gated on this — the broker's interface is the same regardless of which backend implements `/session/create` + `/session/validate`. - -> **Automated path:** [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh) bundles Steps 2–5 (binary install, `agentkeys` system user, systemd units, nginx site, certbot issuance) into a single interactive run-on-the-host script. It's idempotent, supports the three credential modes from Step 3, and prompts before each optional step. Steps 1 (provision the host) and 1b (wire DNS) are still manual prerequisites. After running the script, jump to Step 6 for the smoke test. - -### Step 1 — Provision the host - -Pick whatever fits your stack. Two examples that satisfy the requirements (TLS-terminating reverse proxy + ≥ 1 vCPU / 1 GiB RAM + persistent disk): - -- **AWS:** `t4g.small` EC2 + Elastic IP + Route 53 A record + ALB with ACM cert. Or skip the ALB and run nginx directly on the instance. 
-- **DigitalOcean / Hetzner / Linode:** any 1 GiB droplet + a managed DNS A record + nginx + Let's Encrypt via certbot. - -Either way you need: - -- A DNS name resolving to the host (e.g. `broker.litentry.org`). -- A public-CA TLS certificate covering that name (Let's Encrypt is free; ACM is free for ALB use). -- Firewall: inbound `:443` from anywhere, inbound `:22` from your admin IP, **everything else closed**. The broker's `:8091` and the backend's `:8090` are reached only via localhost or the private network. - -### Step 1b — Wire DNS to the broker host - -The broker hostname must resolve to the host's public IP **before** certbot runs in Step 5 (Let's Encrypt's HTTP-01 challenge resolves the name and hits port 80). Allocate an Elastic IP (so the address survives stop/start) and add an `A` record. If your DNS lives in AWS Route 53: - -```bash -# 1. Allocate + attach an Elastic IP (run with the right --region for the EC2 instance) -EIP_ALLOC=$(aws ec2 allocate-address --domain vpc --region us-east-1 --query AllocationId --output text) -aws ec2 associate-address --region us-east-1 \ - --instance-id --allocation-id "$EIP_ALLOC" -EIP=$(aws ec2 describe-addresses --region us-east-1 \ - --allocation-ids "$EIP_ALLOC" --query 'Addresses[0].PublicIp' --output text) - -# 2. Upsert the A record in Route 53 (Route 53 is global; no --region needed) -HZ=$(aws route53 list-hosted-zones-by-name --dns-name litentry.org. \ - --query 'HostedZones[0].Id' --output text | sed 's|/hostedzone/||') -aws route53 change-resource-record-sets --hosted-zone-id "$HZ" \ - --change-batch "$(jq -n --arg ip "$EIP" '{ - Changes: [{ - Action: "UPSERT", - ResourceRecordSet: { - Name: "broker.litentry.org.", - Type: "A", - TTL: 300, - ResourceRecords: [{ Value: $ip }] - } - }] - }')" - -# 3. 
Verify (use DoH if your local resolver is hijacked by a router/proxy) -curl -s 'https://cloudflare-dns.com/dns-query?name=broker.litentry.org&type=A' \ - -H 'accept: application/dns-json' | jq '.Answer' -``` - -For non-AWS DNS providers, create an equivalent A record (`broker.litentry.org` → EIP) in their console. The IAM user running these commands needs `ec2:AllocateAddress` / `ec2:AssociateAddress` / `ec2:DescribeAddresses` and `route53:ChangeResourceRecordSets` / `route53:ListHostedZonesByName` — `agentkeys-admin` is IAM-only by default, so attach a temporary inline policy or use a more privileged user for this one-off. - -### Step 2 — Install the binaries - -The repo doesn't yet ship a `cargo dist` release; build from source on the target arch and copy the resulting binaries: - -```bash -git clone https://github.com/litentry/agentKeys.git -cd agentKeys -cargo build --release \ - -p agentkeys-mock-server \ - -p agentkeys-broker-server - -sudo install -m 0755 \ - target/release/agentkeys-mock-server \ - target/release/agentkeys-broker-server \ - /usr/local/bin/ -``` - -### Step 3 — AWS credentials + non-secret config - -The broker resolves AWS credentials through the SDK default chain. Pick one of three paths, in order of preference: - -#### 3a. EC2 instance profile (recommended on AWS) - -If the broker host is an EC2 instance, attach an IAM **instance profile** with `sts:AssumeRole` permission on `agentkeys-data-role`. The SDK pulls credentials from IMDS automatically — **no secrets land on the host's filesystem, no env vars, no rotation runbook**. - -```bash -# One-time, from your admin workstation: -ROLE_NAME=agentkeys-broker-host -INSTANCE_PROFILE=$ROLE_NAME - -# Trust policy: only this EC2 role may assume. 
-aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document "$(jq -n '{ - Version: "2012-10-17", - Statement: [{Effect:"Allow", Principal:{Service:"ec2.amazonaws.com"}, Action:"sts:AssumeRole"}] -}')" - -# Inline policy: the only thing the broker host can do is sts:AssumeRole on agentkeys-data-role. -aws iam put-role-policy --role-name $ROLE_NAME --policy-name BrokerAssumeAgent \ - --policy-document "$(jq -n --arg account "$ACCOUNT_ID" '{ - Version: "2012-10-17", - Statement: [{Effect:"Allow", Action:"sts:AssumeRole", - Resource:"arn:aws:iam::\($account):role/agentkeys-data-role"}] - }')" - -aws iam create-instance-profile --instance-profile-name $INSTANCE_PROFILE -aws iam add-role-to-instance-profile --instance-profile-name $INSTANCE_PROFILE --role-name $ROLE_NAME -aws ec2 associate-iam-instance-profile \ - --instance-id \ - --iam-instance-profile Name=$INSTANCE_PROFILE +┌── operator-managed host ────────────────────────────────┐ +│ reverse proxy (nginx + Let's Encrypt) │ +│ :80 ACME challenge + 301 → :443 │ +│ :443 ssl + proxy_pass to broker │ +│ │ │ +│ ▼ │ +│ agentkeys-broker-server 127.0.0.1:8091 │ +│ (BROKER_BACKEND_URL=http://127.0.0.1:8090) │ +│ │ +│ agentkeys-mock-server (or Heima-backed successor) │ +│ 127.0.0.1:8090 │ +│ │ +│ /var/lib/agentkeys/.agentkeys/broker/audit.sqlite │ +│ /var/lib/agentkeys/.agentkeys/broker/oidc-keypair.json │ +└─────────────────────────────────────────────────────────┘ ``` -Verify from the host: `aws sts get-caller-identity` should print the assumed role ARN. +The broker binds to `127.0.0.1:8091`. Only the local reverse proxy reaches it. **Never** bind the broker to `0.0.0.0` without TLS in front — bearer tokens and minted credentials would traverse the network in cleartext (the broker logs a warning on startup if you do). -#### 3b. Named profile in `~/.aws/credentials` (non-EC2 hosts) - -Hosts outside AWS (DigitalOcean, Hetzner, etc.) can't use IMDS. 
Drop the operator user's profile into `~/.aws/credentials` for the `agentkeys` system user: - -```bash -sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys/.aws -sudo -u agentkeys tee /var/lib/agentkeys/.aws/credentials >/dev/null <<'EOF' -[agentkeys-daemon] -aws_access_key_id = AKIA... -aws_secret_access_key = ... -EOF -sudo chmod 600 /var/lib/agentkeys/.aws/credentials - -sudo -u agentkeys tee /var/lib/agentkeys/.aws/config >/dev/null <<'EOF' -[profile agentkeys-daemon] -region = us-east-1 -EOF -sudo chmod 600 /var/lib/agentkeys/.aws/config -``` +### Backend caveats -The systemd unit below sets `Environment=HOME=/var/lib/agentkeys` so the SDK finds these files; the unit also sets `AWS_PROFILE=agentkeys-daemon` so it picks the right profile. +`agentkeys-mock-server` keeps state in-memory: -#### 3c. Legacy static-keys env file (only if 3a/3b are not options) +- **State is lost on restart.** Sessions, identity links, audit rows vanish. Fine for dev; for a backend that other developers' daemons depend on, supervise it (systemd `Restart=on-failure`) and have developers re-`init` after restarts. +- **No HA.** Single process, single node. +- **No TLS at the listener.** Always co-locate behind the broker's loopback or front it with the same reverse proxy. -```bash -sudo install -d -m 0700 /etc/agentkeys -sudo tee /etc/agentkeys/broker.env >/dev/null <<'EOF' -DAEMON_ACCESS_KEY_ID=AKIA... -DAEMON_SECRET_ACCESS_KEY=... -EOF -sudo chmod 600 /etc/agentkeys/broker.env -``` - -Only the systemd unit's `EnvironmentFile=` references this; nothing else on the host should read it. 
- -#### Non-secret config (all three paths) - -These values are not secrets and live in the systemd unit directly (Step 4): - -``` -ACCOUNT_ID=429071895007 -REGION=us-east-1 -BROKER_BACKEND_URL=http://127.0.0.1:8090 -BROKER_OIDC_ISSUER=https://broker.litentry.org -``` +For a production-grade backend, hold the deployment until Heima session management lands — Stage 7 is not gated on this; the broker's interface is identical regardless of which backend implements `/session/{create,validate}`. -`BROKER_OIDC_ISSUER` **must** match the public URL the reverse proxy serves — AWS rejects `create-open-id-connect-provider` if the registered URL doesn't equal the `iss` claim emitted by the broker. - -### Step 4 — systemd units - -```ini -# /etc/systemd/system/agentkeys-backend.service -[Unit] -Description=AgentKeys mock backend (session management) -After=network-online.target -Wants=network-online.target - -[Service] -Type=simple -ExecStart=/usr/local/bin/agentkeys-mock-server --port 8090 -Restart=on-failure -RestartSec=5s -User=agentkeys -Group=agentkeys -# Listens on all interfaces; only the local broker should reach it. -# Use a host firewall (ufw / nftables) to drop :8090 from anywhere -# but 127.0.0.1 + the broker's IP. -NoNewPrivileges=true -ProtectSystem=strict -ProtectHome=true -PrivateTmp=true - -[Install] -WantedBy=multi-user.target -``` +### Deployment -```ini -# /etc/systemd/system/agentkeys-broker.service -[Unit] -Description=AgentKeys broker (Stage 7) -After=network-online.target agentkeys-backend.service -Wants=network-online.target -Requires=agentkeys-backend.service - -[Service] -Type=simple -# Non-secret config goes inline; AWS credentials come from the SDK's -# default chain (IMDS for 3a, ~/.aws/* for 3b, EnvironmentFile for 3c). 
-Environment=HOME=/var/lib/agentkeys -Environment=ACCOUNT_ID=429071895007 -Environment=REGION=us-east-1 -Environment=BROKER_BACKEND_URL=http://127.0.0.1:8090 -Environment=BROKER_OIDC_ISSUER=https://broker.litentry.org -# Uncomment ONE of the next two lines depending on the credential path: -# 3a (EC2 instance profile): nothing — IMDS handles it. -# 3b (named profile): -#Environment=AWS_PROFILE=agentkeys-daemon -# 3c (legacy static keys): -#EnvironmentFile=/etc/agentkeys/broker.env -ExecStart=/usr/local/bin/agentkeys-broker-server --port 8091 --bind 127.0.0.1 -Restart=on-failure -RestartSec=5s -User=agentkeys -Group=agentkeys -# Persist audit + keypair (and ~/.aws if 3b) under /var/lib/agentkeys — -# operator must pre-create this dir mode 0700, owned by the agentkeys user. -NoNewPrivileges=true -ProtectSystem=strict -ProtectHome=true -ReadWritePaths=/var/lib/agentkeys -PrivateTmp=true - -[Install] -WantedBy=multi-user.target -``` +The fully manual long-form walk-through (host provisioning, build, systemd units, nginx, certbot) is bundled into [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh): ```bash -sudo useradd --system --home /var/lib/agentkeys --shell /usr/sbin/nologin agentkeys -sudo install -d -m 0700 -o agentkeys -g agentkeys /var/lib/agentkeys -sudo systemctl daemon-reload -sudo systemctl enable --now agentkeys-backend agentkeys-broker -sudo systemctl status agentkeys-backend agentkeys-broker +# On the host, as agentkey-broker (or any sudoer): +git clone https://github.com/litentry/agentKeys.git && cd agentKeys +sudo bash scripts/setup-broker-host.sh +# Interactive walk-through: +# • prompts for issuer URL (must be https://, no trailing slash) +# • prompts for credential mode (instance-profile / profile / static) +# • writes systemd units + HTTP-only nginx config +# • prints the certbot command to run next +# After certbot succeeds, re-run the script to flip on the :443 ssl block. 
``` -The broker binds to `127.0.0.1:8091` so only the local reverse proxy can reach it. **Never** bind the broker to `0.0.0.0` without TLS — bearer tokens and minted credentials would traverse the network in cleartext (the broker logs a warning on startup if you do, see [`crates/agentkeys-broker-server/src/main.rs::warn_if_non_loopback_without_tls`](../crates/agentkeys-broker-server/src/main.rs)). - -### Step 5 — Reverse proxy + TLS - -Minimal nginx site for `broker.litentry.org`: - -```nginx -# /etc/nginx/sites-available/agentkeys-broker -server { - listen 80; - server_name broker.litentry.org; - location /.well-known/acme-challenge/ { root /var/www/certbot; } - location / { return 301 https://$host$request_uri; } -} - -server { - listen 443 ssl http2; - server_name broker.litentry.org; - - ssl_certificate /etc/letsencrypt/live/broker.litentry.org/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/broker.litentry.org/privkey.pem; - ssl_protocols TLSv1.2 TLSv1.3; - - # AWS IAM only fetches the well-known + JWKS during create-open-id-connect-provider; - # the rest of the broker is bearer-gated. Keep the proxy thin: no auth, - # no caching of /v1/*, just TLS termination. - location / { - proxy_pass http://127.0.0.1:8091; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-For $remote_addr; - proxy_read_timeout 30s; - } -} -``` +The script is idempotent. Re-run after any operator-side change (cred-mode swap, issuer-URL fix, cert renewal). What's still manual: -```bash -sudo ln -s /etc/nginx/sites-available/agentkeys-broker /etc/nginx/sites-enabled/ -sudo certbot --nginx -d broker.litentry.org --agree-tos -m ops@litentry.org -sudo nginx -t && sudo systemctl reload nginx -``` +- **Cloud-side IAM, SES, S3, OIDC federation** → [`cloud-setup.md`](./cloud-setup.md). +- **DNS A record + EIP** → [`cloud-setup.md` §5](./cloud-setup.md#5-ec2-broker-host-optional). 
+- **Initial cert issuance** → `sudo certbot certonly --webroot -w /var/www/certbot -d ` (the `--nginx` plugin chickens-and-eggs on the empty cert path; webroot doesn't). -### Step 6 — Smoke test from a client machine +### Smoke test (after deployment) -From a laptop that has nothing AWS-shaped configured: +From any machine with no AWS-shaped configuration: ```bash -curl -sf https://broker.litentry.org/healthz # → "ok" +# 1. Discovery + JWKS reachable +curl -sf https://broker.litentry.org/healthz # → "ok" curl -sf https://broker.litentry.org/.well-known/openid-configuration | \ - jq '.issuer == "https://broker.litentry.org"' # → true + jq -e '.issuer == "https://broker.litentry.org"' # → true curl -sf https://broker.litentry.org/.well-known/jwks.json | jq '.keys[0].kid' -# End-to-end JWT mint (use a session bearer the operator has provisioned) -SESSION= +# 2. Mint a session bearer against the backend. +# The backend is NOT public — SSH-tunnel to its loopback: +# ssh -i ~/.ssh/agentkey-broker.pem -L 8090:127.0.0.1:8090 \ +# agentkey-broker@ +# then in another terminal on your laptop: +SESSION=$(curl -sf -X POST http://127.0.0.1:8090/session/create \ + -H 'content-type: application/json' \ + -d '{"auth_token":"smoke"}' | jq -r .session) + +# 3. End-to-end JWT mint curl -sf -X POST https://broker.litentry.org/v1/mint-oidc-jwt \ -H "Authorization: Bearer $SESSION" | jq '.expiration' -``` - -If the discovery `issuer` field doesn't equal the URL you're hitting, your `BROKER_OIDC_ISSUER` env var disagrees with the reverse-proxy `server_name` — fix this before running the AWS federation step or `create-open-id-connect-provider` will reject every JWT. - -### Step 7 — Wire AWS federation - -Once the smoke test above passes, follow [§"Cloud federation deployment"](#cloud-federation-deployment) below to register the OIDC provider with AWS IAM and verify the cloud-enforced isolation property. 
- -### Operations: rotate, observe, harden - -- **Rotate the daemon AWS key.** See [`operator-runbook.md` §5](./operator-runbook.md). The broker picks up the new key on the next `systemctl restart agentkeys-broker`; in-flight requests drain per `BROKER_SHUTDOWN_GRACE_SECONDS`. -- **Watch the audit log.** `sqlite3 /var/lib/agentkeys/.agentkeys/broker/audit.sqlite` per [`operator-runbook.md` §6](./operator-runbook.md). Anomalous mint spikes or `auth_failed` clusters are your earliest signal. -- **Watch the Let's Encrypt cert.** Certbot's renewal timer ships with the package; verify with `sudo systemctl list-timers | grep certbot`. AWS doesn't pin the cert, but `aws iam create-open-id-connect-provider` does record a thumbprint at registration time — if you swap the issuer to a different CA later, AWS will need the thumbprint refreshed. -- **Don't enable broker `:8091` ingress.** The host firewall must drop `:8091` from anywhere except `127.0.0.1`. The reverse proxy is the only legitimate caller. -## Cloud federation deployment - -This section is the **operational runbook** for taking the (already-shipped) Phase 2 broker and making AWS (or GCP / Ali Cloud) trust its JWTs without operator-side IAM-user keys. It's not a Stage-7 architecture step — Phase 2 ships complete with the local SQLite audit destination above. Each cloud provider's IAM service has its own registration step, and that step needs the broker reachable over public TLS. That's what this section walks through. - -### What's actually needed - -- The broker (or a `/.well-known/*` reverse proxy in front of it) reachable at `$BROKER_OIDC_ISSUER` over public TLS, so AWS IAM can fetch the JWKS during `create-open-id-connect-provider`. Operator picks: nginx + Let's Encrypt, AWS ALB + ACM, Caddy with auto-TLS, CloudFront + S3 for static `/.well-known/*` + Lambda for sign, etc. 
-- Stage 6 AWS setup complete per [`docs/stage6-aws-setup.md`](./stage6-aws-setup.md) (the daemon-IAM-user trust path established there is the fallback while the federated path is being rolled out). -- A higher-assurance signer if the operator's threat model requires it (TEE-derived ES256 at `oidc/issuer/v1`, blocked on [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md)). The on-disk keypair shipped today is a complete v0.1 signer; TEE is a hardening swap, not a federation prerequisite. When ready, swap by replacing [`crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate`](../crates/agentkeys-broker-server/src/oidc.rs) with a TEE oracle call. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical. - -### AWS recipe - -#### Prereqs - -- Phase 1 broker running publicly (so its `/.well-known/openid-configuration` is fetchable over public TLS). -- `export OIDC_ISSUER="$BROKER_OIDC_ISSUER"` — the exact `BROKER_OIDC_ISSUER` you started the broker with. -- Verify the discovery doc's `iss` claim matches **byte-for-byte** (must be `https://…`, no trailing slash, no scheme-less hostname). AWS rejects the `AssumeRoleWithWebIdentity` call later if these disagree: - ```bash - curl -sf "$OIDC_ISSUER/.well-known/openid-configuration" | jq -e ".issuer == \"$OIDC_ISSUER\"" - # → true - ``` - If this prints `false`, fix the broker's `BROKER_OIDC_ISSUER` env var on the host before continuing — see [Operator runbook §"Fix scheme-less issuer URL"](./operator-runbook.md) or sed the systemd unit: - ```bash - sudo sed -i \ - "s|^Environment=BROKER_OIDC_ISSUER=.*|Environment=BROKER_OIDC_ISSUER=$OIDC_ISSUER|" \ - /etc/systemd/system/agentkeys-broker.service - sudo systemctl daemon-reload && sudo systemctl restart agentkeys-broker - ``` - -#### 0. 
Check for stale provider state - -Before registering, confirm there isn't a previous registration with a wrong URL still on the account (a common artifact of fixing the issuer mid-bring-up): - -```bash -aws iam list-open-id-connect-providers -``` - -- Empty list (`"OpenIDConnectProviderList": []`) → fresh slate, proceed to step 1. -- A provider whose ARN ends in your current `OIDC_ISSUER` host → already registered, skip step 1, proceed to step 2 (verify with `aws iam get-open-id-connect-provider --open-id-connect-provider-arn ` that the URL matches). -- A provider whose ARN ends in a **different** host (or a stale variant of yours) → delete it before registering the correct one: - ```bash - aws iam delete-open-id-connect-provider \ - --open-id-connect-provider-arn arn:aws:iam::${ACCOUNT_ID}:oidc-provider/ - ``` - -#### 1. Register the OIDC provider in IAM - -```bash -aws iam create-open-id-connect-provider \ - --url "$OIDC_ISSUER" \ - --client-id-list sts.amazonaws.com \ - --thumbprint-list '' -export OIDC_PROVIDER_ARN="arn:aws:iam::${ACCOUNT_ID}:oidc-provider/$(echo $OIDC_ISSUER | sed 's|https://||')" - -# Verify it stuck and AWS could fetch the JWKS: -aws iam get-open-id-connect-provider \ - --open-id-connect-provider-arn "$OIDC_PROVIDER_ARN" \ - --query '{Url: Url, ClientIDList: ClientIDList, ThumbprintList: ThumbprintList}' +# 4. End-to-end AWS-creds mint (skip if the broker is in offline mode) +curl -sf -X POST https://broker.litentry.org/v1/mint-aws-creds \ + -H "Authorization: Bearer $SESSION" | jq '{access_key_id, expiration, wallet}' ``` -The IAM user running this needs `iam:CreateOpenIDConnectProvider` and `iam:GetOpenIDConnectProvider` (the standard `agentkeys-admin` IAM-admin scope covers both). AWS auto-derives the cert thumbprint from the Let's Encrypt chain at registration time — if certbot rotates the cert later, the thumbprint stays valid because LE uses the same intermediate CA. - -#### 2. 
Replace the role's trust policy with the federated variant - -Replaces [`stage6-aws-setup.md` §3b](./stage6-aws-setup.md) (static IAM user). Principal becomes the OIDC provider; the `sts:TagSession` + `aws:RequestTag/agentkeys_user_wallet` condition is what wires cloud-enforced per-user isolation in §3 below. - -```bash -OIDC_ISSUER_HOST="$(echo "$OIDC_ISSUER" | sed 's|https://||')" - -aws iam update-assume-role-policy \ - --role-name agentkeys-data-role \ - --policy-document "$(jq -n \ - --arg provider "$OIDC_PROVIDER_ARN" \ - --arg aud_key "${OIDC_ISSUER_HOST}:aud" \ - '{ - Version: "2012-10-17", - Statement: [{ - Effect: "Allow", - Principal: {Federated: $provider}, - Action: ["sts:AssumeRoleWithWebIdentity", "sts:TagSession"], - Condition: { - StringEquals: {($aud_key): "sts.amazonaws.com"}, - StringNotEquals: {"aws:RequestTag/agentkeys_user_wallet": ""} - } - }] - }')" -``` - -#### 3. Upgrade bucket policy to PrincipalTag-scoped - -Replaces the `AllowDaemonRead` statement in [`stage6-aws-setup.md` §4](./stage6-aws-setup.md). Cloud now enforces "the assumed session can only touch the prefix matching its PrincipalTag": - -```json -{ - "Sid": "AllowDaemonReadOwnPrefix", - "Effect": "Allow", - "Principal": {"AWS": "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-data-role"}, - "Action": ["s3:GetObject", "s3:ListBucket"], - "Resource": [ - "arn:aws:s3:::$BUCKET", - "arn:aws:s3:::$BUCKET/${aws:PrincipalTag/agentkeys_user_wallet}/*" - ], - "Condition": { - "StringEquals": {"s3:prefix": "${aws:PrincipalTag/agentkeys_user_wallet}/"} - } -} -``` - -#### 4. End-to-end proof - -The one test that proves phase 2 works: a JWT claiming wallet A can only touch wallet A's prefix — never B's. - -```bash -# Mint a JWT via the broker. Bearer must come from `POST /session/create` -# against the backend; the wallet inside the JWT is whatever wallet that -# session is bound to (so this recipe presumes the operator drove the same -# session-create flow phase 1 already documented). 
-SESSION= -JWT=$(curl -sf -X POST "$BROKER_URL/v1/mint-oidc-jwt" \ - -H "Authorization: Bearer $SESSION" | jq -r .jwt) -WALLET=$(jq -R 'split(".") | .[1] | @base64d | fromjson | .agentkeys_user_wallet' <<<"$JWT" -r) - -# Exchange for temp creds -CREDS=$(aws sts assume-role-with-web-identity \ - --role-arn "arn:aws:iam::${ACCOUNT_ID}:role/agentkeys-data-role" \ - --role-session-name "stage7-wip-$(date +%s)" \ - --web-identity-token "$JWT") -export AWS_ACCESS_KEY_ID=$(echo "$CREDS" | jq -r .Credentials.AccessKeyId) -export AWS_SECRET_ACCESS_KEY=$(echo "$CREDS" | jq -r .Credentials.SecretAccessKey) -export AWS_SESSION_TOKEN=$(echo "$CREDS" | jq -r .Credentials.SessionToken) - -# (a) own prefix — should succeed (empty is fine, no AccessDenied) -aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "$WALLET/" - -# (b) someone else's prefix — THIS IS THE KEY MOMENT — should AccessDenied -aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "0xdeadbeef/" -``` - -Test (b) is what Stage 6's static-IAM path can't prove. Cloud-enforced, zero app-side trust. The phase 1 broker's `assume-role` path **does** issue scoped creds, but isolation enforcement still relies on the operator's IAM trust policy alone — phase 2 moves enforcement into AWS itself. - -#### 5. Swap the on-disk keypair for a TEE-derived signer +If `.issuer` doesn't match the URL byte-for-byte, fix `BROKER_OIDC_ISSUER` on the host before [§4](./cloud-setup.md#4-oidc-federation-stage-7) — AWS rejects mismatches at `AssumeRoleWithWebIdentity` time. -When [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes, replace `crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate` with a call to the TEE's `derive("oidc/issuer/v1")`. JWKS, JWT shape, STS exchange, and bucket-policy enforcement all stay identical — only the signing backend changes. 
+## Operations -## Operational follow-ups (post Phase 2) +- **Start, supervise, rotate, audit** → [`operator-runbook.md`](./operator-runbook.md). +- **Cloud-account provisioning + OIDC federation** → [`cloud-setup.md`](./cloud-setup.md). +- **Don't expose `:8091` ingress.** Host firewall must drop `:8091` from anywhere except `127.0.0.1`. Nginx is the only legitimate caller. +- **Cert renewal.** Certbot's renewal timer ships with the package (`sudo systemctl list-timers | grep certbot`). AWS doesn't pin the cert; thumbprint persistence comes from the LE intermediate CA. -Phase 2 architecture is complete. The remaining items are deployment and hardening tasks, scoped per-operator: +## Operational follow-ups -- **Public TLS hosting** — terminate TLS at a reverse proxy in front of the Rust broker (nginx + Let's Encrypt, AWS ALB + ACM, Caddy, etc.), or absorb the issuer endpoints behind a CloudFront+ALB pair so `oidc.agentkeys.dev` (or chosen issuer URL) resolves to the broker's `/.well-known/*` surface. Required for AWS `create-open-id-connect-provider` registration. -- **TEE signer swap** — replace the on-disk ES256 keypair with a TEE-derived `oidc/issuer/v1` key when [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes. Hardening, not a Stage-7 prerequisite — see §"Cloud federation deployment" above. -- **Audit-destination swap** — point the audit log at a chain (Heima, Ethereum, Solana, permissioned) or a sealed log service per the [pluggable audit destination](spec/architecture.md#11-audit-destination-is-pluggable) framing. Configuration choice, not a Stage-7 redesign. -- **GCP / Ali Cloud federation recipes** — equivalent of the AWS §"Cloud federation deployment" recipe for GCP Workload Identity Federation and Ali Cloud RAM. The OIDC discovery + JWT shape work cross-cloud unchanged; only the IAM-side registration step differs. 
-- **Promote phase 1 + 2 doc** — once the live three-terminal demo passes for a non-operator developer (with no AWS env vars on their machine), promote [`docs/operator-runbook.md`](./operator-runbook.md) from WIP to canonical.
-- **Stage 8 hand-off** — the bucket prefix `s3://agentkeys-vault/<wallet>/` is the reuse point with Stage 8; ciphertext + per-epoch DEK rotation live in [`stage8-wip.md`](./stage8-wip.md), not here.
+- **GCP / Tencent federation recipes** — equivalent of [`cloud-setup.md` §4](./cloud-setup.md#4-oidc-federation-stage-7) for Workload Identity Federation and Tencent CAM. JWT/JWKS shape works cross-cloud unchanged; only the registration step differs.
+- **TEE-derived signer** — replace [`crates/agentkeys-broker-server/src/oidc.rs::OidcKeypair::load_or_generate`](../crates/agentkeys-broker-server/src/oidc.rs) with a TEE oracle when [`heima-gaps §3`](./spec/heima-gaps-vs-desired-architecture.md) closes. JWKS, JWT shape, STS exchange, and bucket-policy enforcement stay identical.
+- **Audit-destination swap** — point the audit log at a chain or sealed log per the [pluggable framing](spec/architecture.md#11-audit-destination-is-pluggable). Configuration choice, not a redesign.
+- **Stage 8 hand-off** — `s3://agentkeys-vault/<wallet>/` is the reuse point with [`stage8-wip.md`](./stage8-wip.md); ciphertext + per-epoch DEK rotation live there, not here.
+- [`docs/cloud-setup.md`](./cloud-setup.md) — AWS infra for SES + S3 (singleton); the same AWS account hosts the vault bucket. - [`docs/spec/heima-gaps-vs-desired-architecture.md`](./spec/heima-gaps-vs-desired-architecture.md) — needs new gap entry for `pallet-vault-pointers`. - [`docs/spec/credential-backend-interface.md`](./spec/credential-backend-interface.md) — `store_credential` / `read_credential` semantics translate cleanly; mapping table updated. - [`docs/spec/plans/development-stages.md`](./spec/plans/development-stages.md) — Stage 8 entry, post-renumber. diff --git a/scripts/setup-broker-host.sh b/scripts/setup-broker-host.sh index 37ea9bc..f54d92a 100755 --- a/scripts/setup-broker-host.sh +++ b/scripts/setup-broker-host.sh @@ -2,8 +2,9 @@ # AgentKeys broker-host bootstrap. # # Provisions a fresh Linux host into a running broker. Automates the manual -# steps in docs/stage7-wip.md "Remote deployment" §1-7. Idempotent — safe -# to re-run after partial failures. +# steps in docs/stage7-wip.md "Remote deployment". Idempotent — safe to +# re-run after partial failures. Cloud-account setup (IAM, SES, S3, OIDC +# federation) lives in docs/cloud-setup.md. # # Run with no flags on a TTY for an interactive walk-through that explains # each decision before it's made. Pass flags / --non-interactive for CI. @@ -178,10 +179,11 @@ if $INTERACTIVE; then ================================================================================ AgentKeys broker host bootstrap — interactive ================================================================================ -This script walks through the steps in docs/stage7-wip.md "Remote deployment" -on this host. It will install packages, create a system user, drop systemd -units, and (optionally) configure nginx + certbot. Re-runs are safe; existing -files won't be overwritten without your input. +This script walks through the host-side bootstrap from docs/stage7-wip.md +"Remote deployment". 
It will install packages, create a system user, drop +systemd units, and (optionally) configure nginx + certbot. Re-runs are safe; +existing files won't be overwritten without your input. Cloud-account setup +(IAM, SES, S3, OIDC federation) is separate — see docs/cloud-setup.md. You'll be asked about each optional step before it happens. Pass --help for the non-interactive flag set. @@ -698,8 +700,8 @@ cat < Date: Tue, 28 Apr 2026 15:32:08 +0800 Subject: [PATCH 14/15] docs(dev-setup): clarify devs and end users self-mint bearers identically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The role table and §4.1 / §5.3 had been framing the developer's bearer as something the operator "hands out per-developer" while the end-user bearer was minted via `agentkeys init`. That implied an architectural split where there isn't one — the two roles use the same code path, the same backend endpoint, and the same OS-keychain storage. Reframe: • §3 role table: dev's "What you hold" matches end user's. Adds an inline callout: bearers are self-minted. Operator's job is making the backend reachable, not pre-issuing tokens. v0.1 vs v0.2+ table explains today's loopback friction vs tomorrow's chain RPC. • §4.1 "What you need from the operator": replace AGENTKEYS_BEARER_TOKEN-from-operator with AGENTKEYS_BACKEND_URL + a self-served `agentkeys init` snippet. Adds a "why isn't it public" callout that names the bearer's role as the per-user identity gate. • §5.3 retitled "How developers get bearer tokens" (was "Hand off bearer tokens to your developers"). Operator's job is *backend reachability*, not token distribution. • §6 end-user opener now explicitly notes the token is the same kind the developer holds in §4. No code changes; documentation-only. 
--- docs/dev-setup.md | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/docs/dev-setup.md b/docs/dev-setup.md index 0aef101..63716c4 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -80,9 +80,11 @@ AgentKeys has three roles. Each runs a different set of processes and holds a di | Role | What you run | What you hold | Read | |---|---|---|---| -| **App developer** — building an agent against AgentKeys | `agentkeys-daemon` + an agent process | A short-lived bearer token from the operator. **Zero AWS credentials.** | §4 | -| **App owner / operator** — running the broker for a team | `agentkeys-broker-server` (+ optionally the mock backend in dev) | Long-lived `agentkeys-daemon` AWS access key (persisted in `~/.zshenv` or supervisor-managed env). The broker's own master session. | §5 | -| **End user** — using a credential-brokered agent | `agentkeys` CLI | A 30-day master session token in OS keychain. | §6 | +| **App developer** — building an agent against AgentKeys | `agentkeys-daemon` + an agent process | A 30-day session token in OS keychain (minted by `agentkeys init` against the backend, same mechanism end users use). **Zero AWS credentials.** | §4 | +| **App owner / operator** — running the broker for a team | `agentkeys-broker-server` (+ optionally the mock backend in dev) | Long-lived `agentkeys-daemon` AWS access key (named profile in `~/.aws/credentials` or EC2 instance profile). | §5 | +| **End user** — using a credential-brokered agent | `agentkeys` CLI | A 30-day session token in OS keychain (same as the developer's). | §6 | + +> **Bearer tokens are self-minted, not operator-issued.** Both developer and end user run `agentkeys init` against the **backend** (mock-server in v0.1, Heima chain in v0.2+) to mint their own session tokens. The two roles use *exactly the same code path*. 
The only practical difference today is that v0.1's mock backend listens on the broker host's loopback (not public), so anyone calling `agentkeys init` either runs on the broker host, SSH-tunnels to its `:8090`, or talks to a backend the operator chose to expose on a private network. Once the chain backend lands, both roles call `agentkeys init` against the public chain RPC — fully self-serve, no operator handoff for either. **Solo dev?** You'll wear all three hats. Read §5 first to stand up your own broker, then §4 to point a daemon at it, then §6 for the user-facing CLI. @@ -93,9 +95,18 @@ You're building an agent that needs OpenAI / OpenRouter / X / etc. credentials b ### 4.1 What you need from the operator - `AGENTKEYS_BROKER_URL` — e.g. `http://broker.local:8091` or `https://broker.litentry.org`. -- `AGENTKEYS_BEARER_TOKEN` — short-lived; the operator hands these out per-developer. +- `AGENTKEYS_BACKEND_URL` — where you mint your session token from. In v0.1 this is the operator's mock backend; in v0.2+ it's the chain RPC. + +That's it. No AWS keys, no `aws sts assume-role`, no `stage6-demo-env.sh` sourcing. You **mint your own bearer** via `agentkeys init` against the backend URL — the operator never hand-delivers tokens. + +```bash +agentkeys --backend "$AGENTKEYS_BACKEND_URL" init +# → session token minted by the backend, stored in OS keychain +``` + +The bearer is what the broker uses to identify which wallet you're acting as (cred-mint requests are scoped to that wallet). The broker validates it against the backend's `/session/validate` on every mint — drop the bearer, and the broker has no way to scope credentials per-user. -That's it. No AWS keys, no `aws sts assume-role`, no `stage6-demo-env.sh` sourcing. +> **Why isn't the bearer just public?** Because it's the per-user identity gate. Anyone with the bearer can act as your wallet against AgentKeys. 
Treat it like an SSH key: store in your OS keychain (which `agentkeys init` does automatically), don't paste into chat / git / Slack. ### 4.2 Run the daemon against the broker @@ -176,9 +187,15 @@ The broker: For runbook detail (start / supervise / rotate / monitor / migrate to hosted), see [`docs/operator-runbook.md`](./operator-runbook.md). For the automated remote-host bootstrap, see [`scripts/setup-broker-host.sh`](../scripts/setup-broker-host.sh). -### 5.3 Hand off bearer tokens to your developers +### 5.3 How developers get bearer tokens + +Developers self-mint by running `agentkeys init` against the backend — the operator does **not** hand-deliver tokens. The operator's job is to make the backend reachable to the people who should have access: + +| v0.1 (mock-server) | v0.2+ (Heima chain) | +|---|---| +| Backend listens on `127.0.0.1:8090` of the broker host. Choices for giving developers access: (a) SSH-tunnel `:8090` to the dev's machine; (b) expose `:8090` on a private network / VPN; (c) put it behind a separate auth-gating reverse proxy. The mock accepts any `auth_token` string with no real auth, so don't expose it publicly. | Backend = chain. Public RPC. Developers call `agentkeys init` directly with their wallet signature; identity gating is enforced by the chain. No operator-side reachability work. | -For v0.1 each developer gets a session token by running `agentkeys init` against your mock backend (or the real chain backend). The token they receive is what they paste into `AGENTKEYS_BEARER_TOKEN` per §4.1. Token TTL is 30 days per [`wiki/session-token.md`](../wiki/session-token.md). +Token TTL is 30 days per [`wiki/session-token.md`](../wiki/session-token.md). Same code path, same token shape, same storage location for both developers and end users — the only thing that varies is how each one reaches the backend. 
### 5.4 Solo-dev mock-backend loop @@ -203,7 +220,7 @@ Then in a fourth terminal you wear the **app-developer** hat (§4): point a daem ## 6. End user -You're using an agent that's been provisioned via AgentKeys. Your only commitment is a 30-day session token that lives in your OS keychain. Your agent's daemon goes through someone else's broker — you don't run any AWS yourself. +You're using an agent that's been provisioned via AgentKeys. Your only commitment is a 30-day session token in your OS keychain — the **same kind of token a developer holds** (§4), minted by the same `agentkeys init` command against the same backend. Your agent's daemon goes through someone else's broker; you don't run any AWS yourself. ```bash BIN=$(pwd)/target/release/agentkeys From 0b58b456a6d4225a1cc58cac17c29d0ab2c91a5f Mon Sep 17 00:00:00 2001 From: wildmeta-agent Date: Tue, 28 Apr 2026 15:46:29 +0800 Subject: [PATCH 15/15] =?UTF-8?q?docs(dev-setup):=20rewrite=20=C2=A74=20ar?= =?UTF-8?q?ound=20three=20dev=20contexts?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous §4 framed the app developer as a passive consumer of broker URLs the operator handed out. That misses the actual workflow: devs are hacking on daemon code, MCP integrations, and provisioner scrapers — they need to iterate on the binary AND test against real SES/S3, neither of which the old text addressed clearly. Rewrite around three contexts that match what a dev is actually doing right now: • Context A — pure-local code loop (mock-server + broker on the laptop, --skip-startup-check, stub creds). Use for control-plane iteration: session create, JWT mint, audit-row writes. • Context B — local daemon, hosted email pipeline. Daemon runs locally; broker, backend, and AWS account belong to the operator. PrincipalTag scopes the dev's wallet to its own S3 prefix even though the AWS account is shared. 
This is the realistic loop — full SES → S3 → key-extract works because the operator's email infra is real. The local-only attempt at the email pipeline is explicitly called out as not-feasible (SES wants real DNS + MX + receipt rule). • Context C — operator runtime. Code-identical to B; differs only in being unattended. Also calls out the permanent vs transitional pieces: broker mints creds, backend issues sessions — both forever. Mock-server is the v0.1 stand-in for Heima chain RPC; goes away when chain lands. Also includes the operator-runbook.md §1.1 callout from the prior turn (session bearers — how callers get them) that didn't get committed. --- docs/dev-setup.md | 94 ++++++++++++++++++++++++++++------------ docs/operator-runbook.md | 26 +++++++++++ 2 files changed, 92 insertions(+), 28 deletions(-) diff --git a/docs/dev-setup.md b/docs/dev-setup.md index 63716c4..9dc78bb 100644 --- a/docs/dev-setup.md +++ b/docs/dev-setup.md @@ -90,58 +90,96 @@ AgentKeys has three roles. Each runs a different set of processes and holds a di ## 4. App developer -You're building an agent that needs OpenAI / OpenRouter / X / etc. credentials brokered through AgentKeys. You do **not** run AWS. You do **not** hold long-lived credentials. You run a daemon and point it at a broker your operator already provisioned. +You're building or integrating an agent against AgentKeys. You're hacking on daemon code, scraper code, MCP integrations — not running AWS, not holding long-lived credentials. The system you talk to is **always**: a broker (auth + cred-mint) + a backend (sessions). You pick which instance of each you talk to depending on what you're testing. -### 4.1 What you need from the operator +### 4.1 What stays the same forever -- `AGENTKEYS_BROKER_URL` — e.g. `http://broker.local:8091` or `https://broker.litentry.org`. -- `AGENTKEYS_BACKEND_URL` — where you mint your session token from. In v0.1 this is the operator's mock backend; in v0.2+ it's the chain RPC. 
+Two things are permanent regardless of stage: -That's it. No AWS keys, no `aws sts assume-role`, no `stage6-demo-env.sh` sourcing. You **mint your own bearer** via `agentkeys init` against the backend URL — the operator never hand-delivers tokens. +- **The broker mints credentials, the backend issues sessions.** You always run `agentkeys init` against a backend to get a session bearer; you always present that bearer to a broker to get scoped AWS creds. Both endpoints are open-source binaries (`agentkeys-broker-server`, `agentkeys-mock-server`). +- **You never hold long-lived AWS keys.** The broker holds them; you hold a 30-day session bearer in your OS keychain. The bearer is what the broker uses to identify which wallet your daemon is acting as. -```bash -agentkeys --backend "$AGENTKEYS_BACKEND_URL" init -# → session token minted by the backend, stored in OS keychain -``` +What changes between stages: the **backend** is the mock-server today; it'll be the Heima chain RPC in v0.2+. The broker stays. When Heima ships, the mock-server goes away (devs and users mint sessions by signing with their wallet against the chain). + +### 4.2 Three dev contexts + +Pick the one that matches what you're testing right now. They differ in *what's local vs hosted*, not in the daemon binary or its API. -The bearer is what the broker uses to identify which wallet you're acting as (cred-mint requests are scoped to that wallet). The broker validates it against the backend's `/session/validate` on every mint — drop the bearer, and the broker has no way to scope credentials per-user. +| Context | Backend | Broker | AWS account | Email pipeline | Use when | +|---|---|---|---|---|---| +| **A. Pure-local code loop** | Local mock-server `127.0.0.1:8090` | Local broker `127.0.0.1:8091` | Stub creds (`--skip-startup-check`) | Stubbed — no real SES/S3 | Iterating on daemon/CLI/MCP code paths. Logic, error handling, JWT shape, audit-row writes. | +| **B. 
Local code, hosted email** | Operator's backend (via `https://broker.litentry.org/session/create` if proxied, or SSH-tunnel) | `https://broker.litentry.org` | Operator's AWS, scoped per-wallet by PrincipalTag | **Real** SES/S3 — full signup → verification email → key extract | Exercising the actual provider signup flow end-to-end. The realistic loop. | +| **C. Operator runtime** | Same as B | Same as B | Same as B | Same as B | When your daemon is unattended in CI / a sandbox / a server. Identical to B; just not hand-driven. | -> **Why isn't the bearer just public?** Because it's the per-user identity gate. Anyone with the bearer can act as your wallet against AgentKeys. Treat it like an SSH key: store in your OS keychain (which `agentkeys init` does automatically), don't paste into chat / git / Slack. +The **email pipeline can't be made fully local** — SES needs a verified DNS domain, real MX records, and a receipt rule that writes to an accessible S3 bucket. That's the operator's responsibility (see [`cloud-setup.md`](./cloud-setup.md)). For dev signups against real services like OpenRouter, you need context B. -### 4.2 Run the daemon against the broker +### 4.3 Context A — pure-local code loop + +Stand up everything on your laptop. 
Three terminals: ```bash -export AGENTKEYS_BROKER_URL=http://broker.local:8091 -export AGENTKEYS_BEARER_TOKEN= +# Terminal 1 — backend (mock) +cargo run --release -p agentkeys-mock-server -- --port 8090 -BIN=$(pwd)/target/release/agentkeys-daemon -$BIN --broker-url "$AGENTKEYS_BROKER_URL" --session "$AGENTKEYS_BEARER_TOKEN" --stdio +# Terminal 2 — broker (offline path; no AWS round-trip) +export BROKER_BACKEND_URL=http://127.0.0.1:8090 +export BROKER_OIDC_ISSUER=http://localhost:8091 # http for dev only +export ACCOUNT_ID=000000000000 # stub +cargo run --release -p agentkeys-broker-server -- --port 8091 --skip-startup-check + +# Terminal 3 — your daemon / CLI +export AGENTKEYS_BACKEND=http://127.0.0.1:8090 +export AGENTKEYS_BROKER_URL=http://127.0.0.1:8091 +agentkeys init --mock-token dev-loop +# → session minted, stored in keychain. Now use the daemon normally. ``` -When the daemon needs to access the operator's S3 vault (to read or store a credential), it calls the broker's `POST /v1/mint-aws-creds` with the bearer token. The broker exchanges it for a 1-hour scoped AWS session and hands it back — you never touch the long-lived daemon AWS key. +What works in A: the entire control plane — session create, JWT mint, AWS-cred mint (returns stub creds), audit-row write. What doesn't: any provisioner that needs a real provider signup (no email lands anywhere because there's no SES). Use unit tests + the offline E2E in [`stage7-wip.md` §"Operator end-to-end test"](./stage7-wip.md#operator-end-to-end-test) to exercise this path. -### 4.3 Provision a new service +### 4.4 Context B — local daemon, hosted email pipeline -The provisioner scripts run unchanged from your machine. With `--broker-url` set, the daemon (or the `agentkeys` CLI directly) calls the broker's `POST /v1/mint-aws-creds` right before spawning the scraper subprocess and injects 1-hour scoped `AWS_*` env vars into the child process. 
**You no longer need to source `scripts/stage6-demo-env.sh`** — that path is the legacy fallback for ops who run without a broker. +Your daemon runs locally; the broker, backend, and AWS account belong to the operator. This is the dev loop that actually exercises SES → S3. -```bash -$BIN --broker-url "$AGENTKEYS_BROKER_URL" --session "$AGENTKEYS_BEARER_TOKEN" \ - provision openrouter --identity bot-$(date +%s)@bots.example.dev -``` +What you need from the operator: + +- `AGENTKEYS_BACKEND` — usually `https://broker.litentry.org/session/create` (if the operator proxies it through nginx) or `http://127.0.0.1:8090` via an SSH tunnel they grant you. +- `AGENTKEYS_BROKER_URL` — `https://broker.litentry.org`. +- An email subdomain you can use, e.g. `bots.litentry.org`. -Or via the CLI: +Then: ```bash -agentkeys --broker-url "$AGENTKEYS_BROKER_URL" provision openrouter +# Mint a session — same code path as the end-user flow. +agentkeys --backend "$AGENTKEYS_BACKEND" init +# → session in keychain. + +# Run your daemon against the hosted broker. AWS creds for the SES/S3 +# pipeline are minted by the broker, scoped to your wallet via PrincipalTag. +export AGENTKEYS_BROKER_URL=https://broker.litentry.org +agentkeys --broker-url "$AGENTKEYS_BROKER_URL" provision openrouter \ + --identity bot-$(date +%s)@bots.litentry.org ``` -Success criteria: +The provisioner subprocess gets 1-hour scoped `AWS_*` env vars injected by the daemon — your code never sees a long-lived key, the operator's audit log records the mint, and (with [federation](./cloud-setup.md#4-oidc-federation-stage-7) wired) the cloud rejects any cross-prefix S3 access at the IAM layer. **Drop `scripts/stage6-demo-env.sh`** — that's the legacy no-broker fallback. + +If you want to keep editing daemon code while you test, skip the `cargo install` and run the daemon binary straight from `target/release/agentkeys-daemon` after each `cargo build --release -p agentkeys-daemon`. + +### 4.5 Context C — operator runtime -1. 
The scraper exits 0 with a key on stdout. -2. `agentkeys read openrouter` returns that same key. +Identical to Context B from your code's point of view. Difference is operational: the daemon runs unattended (a systemd unit, a Kubernetes deployment, a CI job, etc.), and the bearer is provisioned out-of-band into whatever secret store that runtime uses (`LoadCredential=` for systemd, a Kubernetes Secret, GitHub Actions secret, etc.). The bearer's 30-day TTL gives you a comfortable rotation window. + +### 4.6 Provisioning a new service — success criteria + +Whichever context you're in: + +1. The scraper subprocess exits 0 with the captured API key on stdout. +2. `agentkeys read ` returns the same key. +3. (Context B/C only) The audit DB on the broker host has a fresh row with `outcome=ok` and your wallet in `requester_wallet`. If the scraper fails, see §8 troubleshooting. +> **Why the bearer isn't just public.** It's the per-user identity gate — anyone with your bearer can act as your wallet against the broker. Treat it like an SSH private key: store in OS keychain (which `agentkeys init` does automatically), never paste into chat / git / Slack. + ## 5. App owner / operator You operate the AgentKeys infrastructure for a team. You hold the long-lived `agentkeys-daemon` AWS key. You run the broker server. Other developers point their daemons at your broker. diff --git a/docs/operator-runbook.md b/docs/operator-runbook.md index ffefa96..03cf0f5 100644 --- a/docs/operator-runbook.md +++ b/docs/operator-runbook.md @@ -29,6 +29,32 @@ For v0.1: run on a host you trust, rotate the daemon key on a schedule (§3), wa | Stage 7 design + acceptance test | [`stage7-wip.md`](./stage7-wip.md) | | Three-role mental model (operator vs developer vs end-user) | [`dev-setup.md`](./dev-setup.md) | +### 1.1 Session bearers — how callers get them + +Both **developers** building agents and **end users** running them mint their own session bearers via `agentkeys init` against the **backend**. 
The two roles use exactly the same code path; from the broker's point of view they're indistinguishable. The bearer goes into the OS keychain on the caller's machine; the operator never hand-delivers tokens. + +The friction in v0.1 is not the bearer — it's *reaching the backend's `/session/create`*: + +| Backend exposure | Who can `agentkeys init` | When to use | +|---|---|---| +| **Loopback only** (default in `setup-broker-host.sh`) | Operator only, plus anyone the operator gives SSH access to (forward `-L 8090:127.0.0.1:8090`) | Conservative default. Pick this until you have either real backend auth or a trust boundary you're comfortable with. | +| **Public via nginx proxy** — add a `location /session/` block on `broker.litentry.org` that proxies to `http://127.0.0.1:8090` | Anyone with the URL | Acceptable for an internal team you already trust, **not** for an open service. The mock-server's `/session/create` accepts any `auth_token` string with zero validation; making it public means "anyone can mint a session against any wallet." | +| **Heima chain RPC** (v0.2+) | Anyone with a wallet (chain validates the signature) | Production. Fully self-serve; no operator handoff for either role. | + +Today, an operator standing up `broker.litentry.org` for a small trusted team can either: + +1. **Keep the backend loopback-only** and hand each developer either an SSH key (so they tunnel to `:8090` and self-mint) or a one-shot bearer minted on their behalf (`curl 127.0.0.1:8090/session/create` from the host). +2. **Proxy `/session/create` through nginx** by adding to `/etc/nginx/sites-available/agentkeys-broker`: + ```nginx + location = /session/create { + proxy_pass http://127.0.0.1:8090; + proxy_set_header Host $host; + } + ``` + Then `agentkeys init --backend https://broker.litentry.org` works for everyone — same flow as the end-user `agentkeys init` path. Do **not** also proxy `/session/validate` — only the broker on loopback should call that. 
+ +Once Heima's chain backend lands, this knob disappears: the chain's RPC is public by construction, identity is gated by wallet signature, and `agentkeys init` is fully self-serve for both roles. + --- ## 2. AWS credentials