diff --git a/content/Agents/creating-agents.mdx b/content/Agents/creating-agents.mdx
index c6ad7334..3534af31 100644
--- a/content/Agents/creating-agents.mdx
+++ b/content/Agents/creating-agents.mdx
@@ -197,7 +197,15 @@ All implement [StandardSchema](https://github.com/standard-schema/standard-schem
### Type Inference
-TypeScript automatically infers types from your schemas:
+TypeScript automatically infers types from your schemas. Don't add explicit type annotations to handler parameters:
+
+```typescript
+// Good: types inferred from schema
+handler: async (ctx, input) => { ... }
+
+// Bad: explicit types can cause issues
+handler: async (ctx: AgentContext, input: MyInput) => { ... }
+```
```typescript
const agent = createAgent('Search', {
diff --git a/content/Agents/evaluations.mdx b/content/Agents/evaluations.mdx
index fd0c1804..e8c6267b 100644
--- a/content/Agents/evaluations.mdx
+++ b/content/Agents/evaluations.mdx
@@ -5,6 +5,21 @@ description: Automatically test and validate agent outputs for quality and compl
Evaluations (evals) are automated tests that run after your agent completes. They validate output quality, check compliance, and monitor performance without blocking agent responses.
+## Why Evals?
+
+Most evaluation tools test the LLM: did the model respond appropriately? That's fine for chatbots, but agents aren't single LLM calls. They're entire runs with multiple model calls, tool executions, and orchestration working together.
+
+Agent failures can happen anywhere in the run—a tool call that returned bad data, a state bug that corrupted context, and more. Testing just the LLM response misses most of this.
+
+Agentuity evals test the whole run—every tool call, state change, and orchestration step. They run on every session in production, so you catch issues with real traffic.
+
+**The result:**
+
+- **Full-run evaluation**: Test the entire agent execution, not just LLM responses
+- **Production monitoring**: Once configured, evals run automatically on every session
+- **Async by default**: Evals don't block responses, so users aren't waiting
+- **Preset library**: Common checks (PII, safety, hallucination) available out of the box
+
Evals come in two types: **binary** (pass/fail) for yes/no criteria, and **score** (0-1) for quality gradients.
diff --git a/content/Agents/standalone-execution.mdx b/content/Agents/standalone-execution.mdx
index c402cb4b..5076fca6 100644
--- a/content/Agents/standalone-execution.mdx
+++ b/content/Agents/standalone-execution.mdx
@@ -16,10 +16,20 @@ import { createAgentContext } from '@agentuity/runtime';
import chatAgent from '@agent/chat';
const ctx = createAgentContext();
-const result = await ctx.invoke(() => chatAgent.run({ message: 'Hello' }));
+const result = await ctx.run(chatAgent, { message: 'Hello' });
```
-The `invoke()` method executes your agent with full infrastructure support: tracing, session management, and access to all storage services.
+The `run()` method executes your agent with full infrastructure support: tracing, session management, and access to all storage services.
+
+For agents that don't require input:
+
+```typescript
+const result = await ctx.run(statusAgent);
+```
+
+
+The older `ctx.invoke(() => agent.run(input))` pattern still works, but `ctx.run(agent, input)` is preferred for its cleaner syntax.
+
## Options
@@ -45,10 +55,7 @@ await createApp();
// Run cleanup every hour
cron.schedule('0 * * * *', async () => {
const ctx = createAgentContext({ trigger: 'cron' });
-
- await ctx.invoke(async () => {
- await cleanupAgent.run({ task: 'expired-sessions' });
- });
+ await ctx.run(cleanupAgent, { task: 'expired-sessions' });
});
```
@@ -58,25 +65,23 @@ For most scheduled tasks, use the [`cron()` middleware](/Routes/cron) instead. I
## Multiple Agents in Sequence
-Run multiple agents within a single `invoke()` call to share the same session and tracing context:
+Run multiple agents in sequence with the same context:
```typescript
const ctx = createAgentContext();
-const result = await ctx.invoke(async () => {
- // First agent analyzes the input
- const analysis = await analyzeAgent.run({ text: userInput });
-
- // Second agent generates response based on analysis
- const response = await respondAgent.run({
- analysis: analysis.summary,
- sentiment: analysis.sentiment,
- });
+// First agent analyzes the input
+const analysis = await ctx.run(analyzeAgent, { text: userInput });
- return response;
+// Second agent generates response based on analysis
+const response = await ctx.run(respondAgent, {
+ analysis: analysis.summary,
+ sentiment: analysis.sentiment,
});
```
+Each `ctx.run()` call shares the same session and tracing context.
+
## Reusing Contexts
Create a context once and reuse it for multiple invocations:
@@ -84,9 +89,9 @@ Create a context once and reuse it for multiple invocations:
```typescript
const ctx = createAgentContext({ trigger: 'websocket' });
-// Each invoke() gets its own session and tracing span
+// Each run() gets its own session and tracing span
websocket.on('message', async (data) => {
- const result = await ctx.invoke(() => messageAgent.run(data));
+ const result = await ctx.run(messageAgent, data);
websocket.send(result);
});
```
@@ -104,6 +109,28 @@ Standalone contexts provide the same infrastructure as HTTP request handlers:
- **Session events**: Start/complete events for observability
+## Detecting Runtime Context
+
+Use `isInsideAgentRuntime()` to check if code is running within the Agentuity runtime:
+
+```typescript
+import { isInsideAgentRuntime, createAgentContext } from '@agentuity/runtime';
+import myAgent from '@agent/my-agent';
+
+async function processRequest(data: unknown) {
+ if (isInsideAgentRuntime()) {
+ // Already in runtime context, call agent directly
+ return myAgent.run(data);
+ }
+
+ // Outside runtime, create context first
+ const ctx = createAgentContext();
+ return ctx.run(myAgent, data);
+}
+```
+
+This is useful for writing utility functions that work both inside agent handlers and in standalone scripts.
+
## Next Steps
- [Calling Other Agents](/Agents/calling-other-agents): Agent-to-agent communication patterns
diff --git a/content/Agents/streaming-responses.mdx b/content/Agents/streaming-responses.mdx
index 9edc0b70..6c041339 100644
--- a/content/Agents/streaming-responses.mdx
+++ b/content/Agents/streaming-responses.mdx
@@ -50,6 +50,10 @@ Streaming requires both: `schema.stream: true` in your agent (so the handler ret
Enable streaming by setting `stream: true` in your schema and returning a `textStream`:
+
+The `textStream` from AI SDK's `streamText()` works directly with Agentuity's streaming middleware. Return it from your handler without additional processing.
+
+
```typescript
import { createAgent } from '@agentuity/runtime';
import { streamText } from 'ai';
diff --git a/content/Agents/workbench.mdx b/content/Agents/workbench.mdx
index c45ce29c..f0e1e01c 100644
--- a/content/Agents/workbench.mdx
+++ b/content/Agents/workbench.mdx
@@ -5,6 +5,19 @@ description: Use the built-in development UI to test agents, validate schemas, a
Workbench is a built-in UI for testing your agents during development. It automatically discovers your agents, displays their input/output schemas, and lets you execute them with real inputs.
+## Why Workbench?
+
+Testing agents isn't like testing traditional APIs. You need to validate input schemas, see how responses format, test multi-turn conversations, and understand execution timing. Using `curl` or Postman means manually constructing JSON payloads and parsing responses.
+
+Workbench understands your agents. It reads your schemas, generates test forms, maintains conversation threads, and shows execution metrics. When something goes wrong, you see exactly what the agent received and returned.
+
+**Key capabilities:**
+
+- **Schema-aware testing**: Input forms generated from your actual schemas
+- **Thread persistence**: Test multi-turn conversations without manual state tracking
+- **Execution metrics**: See token usage and response times for every request
+- **Quick iteration**: Test prompts display in the UI for one-click execution
+
## Enabling Workbench
Add a `workbench` section to your `agentuity.config.ts`:
diff --git a/content/Learn/Cookbook/Patterns/server-utilities.mdx b/content/Learn/Cookbook/Patterns/server-utilities.mdx
index 90480c73..983f3577 100644
--- a/content/Learn/Cookbook/Patterns/server-utilities.mdx
+++ b/content/Learn/Cookbook/Patterns/server-utilities.mdx
@@ -1,6 +1,6 @@
---
title: SDK Utilities for External Apps
-description: Use storage, logging, error handling, and schema utilities from external backends like Next.js or Express
+description: Use storage, queues, logging, and error handling utilities from external backends like Next.js or Express
---
Use `@agentuity/server` and `@agentuity/core` utilities in external apps, scripts, or backends that integrate with Agentuity.
@@ -122,6 +122,146 @@ export async function GET(request: NextRequest) {
}
```
+## Queue Management
+
+Manage queues programmatically from external apps or scripts using `APIClient`:
+
+```typescript title="lib/agentuity-queues.ts"
+import { APIClient, createLogger, getServiceUrls } from '@agentuity/server';
+
+export const logger = createLogger('info');
+const urls = getServiceUrls(process.env.AGENTUITY_REGION!);
+
+export const client = new APIClient(
+ urls.catalyst,
+ logger,
+ process.env.AGENTUITY_SDK_KEY
+);
+```
+
+### Creating and Managing Queues
+
+```typescript
+import {
+ createQueue,
+ listQueues,
+ deleteQueue,
+ pauseQueue,
+ resumeQueue,
+} from '@agentuity/server';
+import { client } from '@/lib/agentuity-queues';
+
+// Create a worker queue
+const queue = await createQueue(client, {
+ name: 'order-processing',
+ queue_type: 'worker',
+ settings: {
+ default_max_retries: 5,
+ default_visibility_timeout_seconds: 60,
+ },
+});
+
+// List all queues
+const { queues } = await listQueues(client);
+
+// Pause and resume
+await pauseQueue(client, 'order-processing');
+await resumeQueue(client, 'order-processing');
+
+// Delete a queue
+await deleteQueue(client, 'old-queue');
+```
+
+### Dead Letter Queue Operations
+
+```typescript
+import {
+ listDeadLetterMessages,
+ replayDeadLetterMessage,
+ purgeDeadLetter,
+} from '@agentuity/server';
+import { client, logger } from '@/lib/agentuity-queues';
+
+// List failed messages
+const { messages } = await listDeadLetterMessages(client, 'order-processing');
+
+for (const msg of messages) {
+ logger.warn('Failed message', { id: msg.id, reason: msg.failure_reason });
+
+ // Replay back to the queue
+ await replayDeadLetterMessage(client, 'order-processing', msg.id);
+}
+
+// Purge all DLQ messages
+await purgeDeadLetter(client, 'order-processing');
+```
+
+### Webhook Destinations
+
+```typescript
+import { createDestination } from '@agentuity/server';
+import { client } from '@/lib/agentuity-queues';
+
+await createDestination(client, 'order-processing', {
+ destination_type: 'http',
+ config: {
+ url: 'https://api.example.com/webhook/orders',
+ method: 'POST',
+ headers: { 'X-API-Key': 'secret' },
+ timeout_ms: 30000,
+ retry_policy: {
+ max_attempts: 5,
+ initial_backoff_ms: 1000,
+ max_backoff_ms: 60000,
+ backoff_multiplier: 2.0,
+ },
+ },
+});
+```
+
+### HTTP Ingestion Sources
+
+```typescript
+import { createSource } from '@agentuity/server';
+import { client, logger } from '@/lib/agentuity-queues';
+
+const source = await createSource(client, 'webhook-queue', {
+ name: 'stripe-webhooks',
+ description: 'Receives Stripe payment events',
+ auth_type: 'header',
+ auth_value: 'Bearer whsec_...',
+});
+
+// External services POST to this URL
+logger.info('Source created', { url: source.url });
+```
+
+### Pull-Based Consumption
+
+For workers that pull and acknowledge messages:
+
+```typescript
+import { receiveMessage, ackMessage, nackMessage } from '@agentuity/server';
+import { client } from '@/lib/agentuity-queues';
+
+// Receive a message (blocks until available or timeout)
+const message = await receiveMessage(client, 'order-processing');
+
+if (message) {
+ try {
+ await processOrder(message.payload);
+ await ackMessage(client, 'order-processing', message.id);
+ } catch (error) {
+ // Message returns to queue for retry
+ await nackMessage(client, 'order-processing', message.id);
+ }
+}
+```
+
+
+For one-off queue management, use the CLI instead: `agentuity cloud queue create`, `agentuity cloud queue dlq`, etc. See [Queues](/Services/queues) for CLI commands.
+
+
## Alternative: HTTP Routes
If you want to centralize storage logic in your Agentuity project (for [middleware](/Routes/middleware), sharing across multiple apps, or avoiding SDK key distribution), use [HTTP routes](/Routes/http) instead.
@@ -182,7 +322,8 @@ export default router;
Add authentication middleware to protect storage endpoints:
```typescript title="src/api/sessions/route.ts"
-import { createRouter, createMiddleware } from '@agentuity/runtime';
+import { createRouter } from '@agentuity/runtime';
+import { createMiddleware } from 'hono/factory';
const router = createRouter();
@@ -330,6 +471,7 @@ const jsonSchema = toJSONSchema(schema);
## See Also
+- [Queues](/Services/queues): Queue concepts and CLI commands
- [HTTP Routes](/Routes/http): Route creation with `createRouter`
- [Route Middleware](/Routes/middleware): Authentication patterns
- [RPC Client](/Frontend/rpc-client): Typed client generation
diff --git a/content/Reference/CLI/configuration.mdx b/content/Reference/CLI/configuration.mdx
index 812c850b..198d4efd 100644
--- a/content/Reference/CLI/configuration.mdx
+++ b/content/Reference/CLI/configuration.mdx
@@ -118,6 +118,58 @@ agentuity cloud secret import .env.secrets
**In agents:** Access secrets via `process.env.API_KEY`. Secrets are injected at runtime and never logged.
+## Organization-Level Configuration
+
+Set environment variables and secrets at the organization level to share them across all projects in that organization. Use the `--org` flag with any `env` or `secret` command.
+
+### Set Org-Level Variables
+
+```bash
+# Set using your default org
+agentuity cloud env set DATABASE_URL "postgresql://..." --org
+
+# Set for a specific org
+agentuity cloud env set DATABASE_URL "postgresql://..." --org org_abc123
+```
+
+### Set Org-Level Secrets
+
+```bash
+# Set shared secret for default org
+agentuity cloud secret set SHARED_API_KEY "sk_..." --org
+
+# Set for specific org
+agentuity cloud secret set SHARED_API_KEY "sk_..." --org org_abc123
+```
+
+### List Org-Level Values
+
+```bash
+# List org environment variables
+agentuity cloud env list --org
+
+# List org secrets
+agentuity cloud secret list --org
+```
+
+### Get/Delete Org-Level Values
+
+```bash
+# Get an org variable
+agentuity cloud env get DATABASE_URL --org
+
+# Delete an org secret
+agentuity cloud secret delete OLD_KEY --org
+```
+
+
+Organization-level values are inherited by all projects in that organization. Project-level values take precedence over organization-level values when both are set.
+
+
+
+Set a default organization with `agentuity auth org select` to avoid specifying `--org` on every command. See [Getting Started](/Reference/CLI/getting-started) for details.
+
+
## API Keys
Create and manage API keys for programmatic access to your project.
diff --git a/content/Reference/CLI/deployment.mdx b/content/Reference/CLI/deployment.mdx
index 2ae513da..2c0da001 100644
--- a/content/Reference/CLI/deployment.mdx
+++ b/content/Reference/CLI/deployment.mdx
@@ -5,6 +5,19 @@ description: Deploy your agents to Agentuity Cloud with automatic infrastructure
Deploy your Agentuity project to the cloud with a single command. The platform handles infrastructure, scaling, and monitoring automatically.
+## Why Deploy to Agentuity?
+
+Deploying agents *should be* as easy as deploying a web app. But agents need long-running processes, persistent storage, sandbox environments, and observability built in. Setting this up yourself means stitching together containers, databases, secret management, and monitoring.
+
+Agentuity handles all of this automatically. You push code, and the platform provisions everything your agents need: compute, storage, networking, and observability. No infrastructure configuration, no Docker files, no Kubernetes.
+
+**What this gives you:**
+
+- **Single-command deployment**: `agentuity deploy` handles build, upload, and provisioning
+- **Automatic HTTPS**: Every deployment gets secure URLs with SSL certificates
+- **Built-in rollback**: Revert to any previous deployment instantly
+- **Zero infrastructure management**: No containers, orchestration, or scaling to configure
+
## Deploy Your Project
```bash
@@ -124,16 +137,19 @@ agentuity cloud deployment show dep_abc123xyz --project-id=proj_abc123
- Active status
- Tags and custom domains
- Cloud region
+- DNS records (for custom domain configuration)
- Git metadata (repo, commit, branch, PR)
- Build information (SDK version, runtime, platform)
```
-ID: dep_abc123xyz
-Project: proj_456def
-State: completed
-Active: Yes
-Created: 12/1/25, 3:45 PM
-Tags: staging, hotfix-123
+ID: dep_abc123xyz
+Project: proj_456def
+State: completed
+Active: Yes
+Created: 12/1/25, 3:45 PM
+Tags: staging, hotfix-123
+Domains: api.example.com
+DNS Records: api.example.com CNAME p1234567890.agentuity.cloud
Git Information
Repo: myorg/myapp
@@ -148,6 +164,10 @@ Build Information
Arch: x64
```
+
+When custom domains are configured, the `show` command displays the required DNS records. Use this to verify your CNAME configuration.
+
+
## Viewing Logs
Fetch logs for a deployment:
diff --git a/content/Reference/CLI/development.mdx b/content/Reference/CLI/development.mdx
index c5f0b8e8..7b40e933 100644
--- a/content/Reference/CLI/development.mdx
+++ b/content/Reference/CLI/development.mdx
@@ -150,7 +150,18 @@ agentuity dev
agentuity dev --public
```
-The `--public` flag creates a secure tunnel through Agentuity's edge network, giving your local server a public HTTPS URL for testing webhooks, sharing with teammates, or external integrations.
+### Why Public URLs?
+
+Testing webhooks and external integrations during local development is painful. You either deploy constantly, configure port forwarding, or pay for third-party tunneling services. Each option adds friction to the development loop.
+
+Agentuity's Gravity network handles this automatically. When you run `agentuity dev --public`, your local server gets a public HTTPS URL instantly. No configuration, no separate tools, no accounts to manage. External services can reach your local agents as if they were already deployed.
+
+**This means:**
+
+- **Instant HTTPS URLs**: Automatic certificate generation
+- **Zero setup**: Works out of the box, no firewall rules or port forwarding
+- **Secure tunneling**: Encrypted connections through Agentuity's edge network
+- **Automatic reconnection**: Handles network interruptions gracefully
**Example output:**
```
diff --git a/content/Reference/CLI/getting-started.mdx b/content/Reference/CLI/getting-started.mdx
index ed49eb60..83b5fa2c 100644
--- a/content/Reference/CLI/getting-started.mdx
+++ b/content/Reference/CLI/getting-started.mdx
@@ -92,6 +92,38 @@ agentuity auth logout
agentuity logout
```
+## Preferences
+
+Set default organization and region to avoid specifying them on every command.
+
+### Organization Preferences
+
+```bash
+# Set default organization
+agentuity auth org select org_abc123
+
+# Show current default organization
+agentuity auth org current
+
+# Clear default organization
+agentuity auth org unselect
+```
+
+When a default organization is set, commands that support `--org` will use it automatically.
+
+### Region Preferences
+
+```bash
+# Set default region for deployments
+agentuity cloud region select us-west-1
+
+# Show current default region
+agentuity cloud region current
+
+# Clear default region
+agentuity cloud region unselect
+```
+
## Creating Projects
### Create a New Project
@@ -130,12 +162,17 @@ The agent uses the OpenAI SDK routed through Agentuity's AI Gateway. This means
### Project Creation Options
-- `--name `: Project name
-- `--dir `: Directory to create the project in
-- `--template `: Template to use (optional, defaults to "default")
-- `--no-install`: Skip dependency installation
-- `--no-build`: Skip initial build
-- `--no-register`: Don't register the project with Agentuity Cloud
+| Option | Description |
+|--------|-------------|
+| `--name ` | Project name |
+| `--dir ` | Directory to create the project in |
+| `--template ` | Template to use (optional, defaults to "default") |
+| `--no-install` | Skip dependency installation |
+| `--no-build` | Skip initial build |
+| `--no-register` | Don't register the project with Agentuity Cloud |
+| `--database ` | Database option: `skip`, `new`, or existing database name |
+| `--storage ` | Storage option: `skip`, `new`, or existing bucket name |
+| `--enable-auth` | Enable Agentuity Auth for the project |
Example:
@@ -146,6 +183,35 @@ agentuity project create \
--no-build
```
+### Headless Project Creation
+
+Create projects non-interactively for CI/CD pipelines and automation:
+
+```bash
+# Create with new database and storage
+agentuity project create \
+ --name my-agent \
+ --database new \
+ --storage new \
+ --enable-auth
+
+# Skip optional resources
+agentuity project create \
+ --name my-agent \
+ --database skip \
+ --storage skip
+
+# Use existing resources
+agentuity project create \
+ --name my-agent \
+ --database my-existing-db \
+ --storage my-bucket
+```
+
+
+The `--database` and `--storage` options require authentication. Run `agentuity login` first, or the command will fail.
+
+
## Managing Projects
### List Projects
@@ -194,6 +260,27 @@ agentuity project delete --confirm
Project deletion is permanent and cannot be undone. Deployed agents and associated resources will be removed.
+### Import a Project
+
+Register an existing local Agentuity project with the cloud:
+
+```bash
+# Import current directory
+agentuity project import
+
+# Import from specific directory
+agentuity project import --dir ./my-existing-project
+
+# Validate without importing
+agentuity project import --validate-only
+```
+
+The import command verifies that the directory contains a valid Agentuity project (requires `@agentuity/runtime` dependency and either `agentuity.config.ts` or an `agentuity/` directory).
+
+
+When you run `agentuity dev` or `agentuity deploy` on an unregistered project while authenticated, the CLI will prompt you to import it automatically.
+
+
## Command Shortcuts
Several common commands have shortcuts that let you skip the subcommand prefix:
diff --git a/content/Reference/CLI/storage.mdx b/content/Reference/CLI/storage.mdx
index a19c6764..76b64e84 100644
--- a/content/Reference/CLI/storage.mdx
+++ b/content/Reference/CLI/storage.mdx
@@ -13,6 +13,10 @@ All storage commands require the `cloud` prefix. For example: `agentuity cloud k
Inspect and manage key-value data organized into namespaces.
+
+KV storage is scoped to your organization, not individual projects. You can run KV commands from any directory without needing a project context.
+
+
### Interactive REPL
Start an interactive session for faster exploration:
diff --git a/content/Routes/sse.mdx b/content/Routes/sse.mdx
index 2841c1a7..8f78943a 100644
--- a/content/Routes/sse.mdx
+++ b/content/Routes/sse.mdx
@@ -229,6 +229,35 @@ Or with cURL:
curl -N https://your-project.agentuity.cloud/agent-name
```
+## Streaming LLM Responses
+
+Use SSE to stream AI SDK responses to clients:
+
+```typescript
+import { createRouter, sse } from '@agentuity/runtime';
+import { streamText } from 'ai';
+import { anthropic } from '@ai-sdk/anthropic';
+
+const router = createRouter();
+
+router.post('/chat', sse(async (c, stream) => {
+ const { message } = await c.req.json();
+
+ const result = streamText({
+ model: anthropic('claude-sonnet-4-5'),
+ prompt: message,
+ });
+
+ for await (const chunk of result.textStream) {
+ await stream.write(chunk);
+ }
+
+ stream.close();
+}));
+
+export default router;
+```
+
## SSE vs WebSocket
| Aspect | SSE | WebSocket |
diff --git a/content/Services/Observability/sessions-debugging.mdx b/content/Services/Observability/sessions-debugging.mdx
index 97006031..7d118ec1 100644
--- a/content/Services/Observability/sessions-debugging.mdx
+++ b/content/Services/Observability/sessions-debugging.mdx
@@ -5,6 +5,21 @@ description: Debug agents using session IDs, CLI commands, and trace timelines
Every request to your agents gets a unique session ID (`sess_...`). Sessions link logs, traces, and state, making them essential for debugging.
+## Why Sessions?
+
+Traditional observability tracks individual HTTP requests. That works for stateless web servers. Agents go beyond single requests—a conversation might span dozens of LLM calls, tool executions, and orchestration steps across multiple interactions.
+
+Without sessions, debugging becomes guesswork. You see individual logs but can't connect them. Was that error from the same conversation? What happened before it failed? Which user was affected?
+
+Agentuity tracks all of this automatically. Every run ties to a session. Every conversation thread is preserved. You get full visibility into what happened, when, and why—*without* writing tracking code.
+
+**In practice:**
+
+- **Unified tracing**: All logs, spans, and state from a single request are linked by session ID
+- **Conversation context**: Sessions group into threads, so you can trace multi-turn conversations
+- **Automatic correlation**: No manual tracking code—every call in a session is connected
+- **Session inspection**: Review what happened in a session to reproduce issues
+
## Sessions vs Threads
| Scope | Lifetime | ID Prefix | Use For |
diff --git a/content/Services/Sandbox/index.mdx b/content/Services/Sandbox/index.mdx
index 8b6845d3..0c2bd377 100644
--- a/content/Services/Sandbox/index.mdx
+++ b/content/Services/Sandbox/index.mdx
@@ -5,6 +5,21 @@ description: Run code in isolated, secure containers with configurable resources
Execute code in isolated Linux containers with configurable resource limits, network controls, and execution timeouts.
+## Why Sandboxes?
+
+Agents that reason about code need somewhere safe to execute it. Whether generating Python scripts, validating builds, or running user-provided code, you can't let arbitrary execution happen on your infrastructure.
+
+The pattern keeps repeating: spin up a secure environment, run code, tear it down. Without proper isolation, a single bad script could access sensitive data, exhaust resources, or compromise your systems.
+
+Agentuity sandboxes handle this automatically. Every execution runs in an isolated container with its own filesystem, configurable resource limits, and network controls. When execution completes, the environment is destroyed.
+
+**What this gives you:**
+
+- **Security by default**: Network disabled, filesystem isolated, resource limits enforced
+- **No infrastructure management**: Containers spin up and tear down automatically
+- **Multi-language support**: Your agents are TypeScript, but sandboxes can run Python, Node.js, shell scripts, or anything available via `apt install`
+- **Consistent environments**: Use snapshots to get the same setup every time, with dependencies pre-installed
+
## Three Ways to Use Sandboxes
| Method | Best For |
@@ -62,6 +77,21 @@ Pre-configured AI coding assistants:
Run `agentuity cloud sandbox runtime list` to see all available runtimes, or view them in the [Agentuity App](https://app-v1.agentuity.com) under **Services > Sandbox > Runtimes**.
+### Runtime Metadata
+
+Each runtime includes metadata for identification and resource planning:
+
+| Field | Description |
+|-------|-------------|
+| `description` | What the runtime provides |
+| `icon` | URL to runtime icon |
+| `brandColor` | Hex color for UI display |
+| `documentationUrl` | Link to runtime documentation |
+| `tags` | Categories like `language`, `ai-agent` |
+| `requirements` | Minimum memory, CPU, disk, and network needs |
+
+View runtime details with `agentuity cloud sandbox runtime list --json`.
+
## Snapshots
A snapshot captures the filesystem state of a sandbox. You create new sandboxes *from* a snapshot rather than running it directly.
@@ -143,6 +173,8 @@ The API is identical in both contexts.
| `resources.cpu` | CPU limit in millicores | `'500m'`, `'1000m'` |
| `resources.disk` | Disk space limit | `'1Gi'` |
| `network.enabled` | Allow outbound network | `true` (default: `false`) |
+| `network.port` | Port to expose to internet (1024-65535) | `3000` |
+| `projectId` | Associate sandbox with a project | `'proj_abc123'` |
| `timeout.idle` | Idle timeout before cleanup | `'10m'`, `'1h'` |
| `timeout.execution` | Max execution time per command | `'5m'`, `'30s'` |
| `dependencies` | Apt packages to install | `['python3', 'git']` |
diff --git a/content/Services/Sandbox/sdk-usage.mdx b/content/Services/Sandbox/sdk-usage.mdx
index c0d2c313..20889d2f 100644
--- a/content/Services/Sandbox/sdk-usage.mdx
+++ b/content/Services/Sandbox/sdk-usage.mdx
@@ -121,6 +121,49 @@ await sandbox.writeFiles([
]);
```
+### Exposing Ports
+
+Expose a port from the sandbox to make it accessible via a public URL:
+
+```typescript
+const sandbox = await ctx.sandbox.create({
+ network: {
+ enabled: true,
+ port: 3000, // Expose port 3000 (valid range: 1024-65535)
+ },
+ resources: { memory: '512Mi' },
+});
+
+// Start a web server inside the sandbox
+await sandbox.execute({ command: ['npm', 'run', 'serve'] });
+
+// Get the public URL
+const info = await ctx.sandbox.get(sandbox.id);
+if (info.url) {
+ ctx.logger.info('Server accessible at', { url: info.url, port: info.networkPort });
+}
+```
+
+
+Setting `network.port` requires `network.enabled: true`. The sandbox must have network access to expose ports.
+
+
+### Project Association
+
+Associate sandboxes with a project for organization and filtering:
+
+```typescript
+const sandbox = await ctx.sandbox.create({
+ projectId: 'proj_abc123', // Associate with project
+ resources: { memory: '512Mi' },
+});
+
+// List sandboxes by project
+const { sandboxes } = await ctx.sandbox.list({
+ projectId: 'proj_abc123',
+});
+```
+
### Reading Files
Read files from the sandbox as streams:
@@ -350,6 +393,8 @@ await ctx.sandbox.destroy('sbx_abc123');
| `resources.cpu` | `string` | CPU in millicores: `'500m'`, `'1000m'` |
| `resources.disk` | `string` | Disk limit: `'512Mi'`, `'2Gi'` |
| `network.enabled` | `boolean` | Enable outbound network (default: `false`) |
+| `network.port` | `number` | Port to expose to internet (1024-65535) |
+| `projectId` | `string` | Associate sandbox with a project |
| `timeout.idle` | `string` | Auto-destroy after idle: `'10m'`, `'1h'` |
| `timeout.execution` | `string` | Max command duration: `'30s'`, `'5m'` |
| `dependencies` | `string[]` | Apt packages: `['python3', 'git']` |
@@ -378,6 +423,9 @@ Returned by `sandbox.execute()`:
| `durationMs` | `number` | Execution duration in milliseconds |
| `stdoutStreamUrl` | `string` | URL to fetch stdout stream |
| `stderrStreamUrl` | `string` | URL to fetch stderr stream |
+| `cpuTimeMs` | `number` | CPU time consumed in milliseconds |
+| `memoryByteSec` | `number` | Memory usage in byte-seconds |
+| `networkEgressBytes` | `number` | Outbound network traffic in bytes |
### SandboxRunResult
@@ -388,6 +436,53 @@ Returned by `sandbox.execute()`:
| `durationMs` | `number` | Execution duration |
| `stdout` | `string` | Captured stdout (if available) |
| `stderr` | `string` | Captured stderr (if available) |
+| `cpuTimeMs` | `number` | CPU time consumed in milliseconds |
+| `memoryByteSec` | `number` | Memory usage in byte-seconds |
+| `networkEgressBytes` | `number` | Outbound network traffic in bytes |
+
+### SandboxInfo
+
+Returned by `ctx.sandbox.get()` and in list results:
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `sandboxId` | `string` | Unique sandbox identifier |
+| `status` | `string` | `'idle'`, `'busy'`, `'terminated'` |
+| `createdAt` | `string` | ISO timestamp |
+| `snapshotId` | `string` | Source snapshot (if created from snapshot) |
+| `networkPort` | `number` | Port exposed from sandbox (if configured) |
+| `url` | `string` | Public URL (when port is configured) |
+| `user` | `SandboxUserInfo` | User who created the sandbox |
+| `agent` | `SandboxAgentInfo` | Agent that created the sandbox |
+| `project` | `SandboxProjectInfo` | Associated project |
+| `org` | `SandboxOrgInfo` | Organization (always present) |
+
+Access context information from sandbox info:
+
+```typescript
+const info = await ctx.sandbox.get('sbx_abc123');
+
+// Organization is always present
+ctx.logger.info('Organization', { id: info.org.id, name: info.org.name });
+
+// User info (when created by a user)
+if (info.user) {
+ ctx.logger.info('Created by', {
+ userId: info.user.id,
+ name: `${info.user.firstName} ${info.user.lastName}`,
+ });
+}
+
+// Agent info (when created by an agent)
+if (info.agent) {
+ ctx.logger.info('Agent', { id: info.agent.id, name: info.agent.name });
+}
+
+// Project info (when associated with a project)
+if (info.project) {
+ ctx.logger.info('Project', { id: info.project.id, name: info.project.name });
+}
+```
## Best Practices
diff --git a/content/Services/Storage/durable-streams.mdx b/content/Services/Storage/durable-streams.mdx
index 413836c4..29c8caf1 100644
--- a/content/Services/Storage/durable-streams.mdx
+++ b/content/Services/Storage/durable-streams.mdx
@@ -3,7 +3,22 @@ title: Durable Streams
description: Streaming storage for large exports, audit logs, and real-time data
---
-Durable streams provide streaming storage for large data exports, audit logs, and real-time processing. Streams follow a **write-once, read-many** pattern: once data is written and the stream is closed, the content is immutable and can be accessed via a permanent public URL.
+Durable streams provide streaming storage for large data exports, audit logs, and real-time processing. Streams follow a **write-once, read-many** pattern: once data is written and the stream is closed, the content is immutable and accessible via URL until deleted.
+
+## Why Durable Streams?
+
+WebSocket and SSE connections are straightforward to set up, but they're fragile in practice. Tabs get suspended, networks disconnect, pages get refreshed. When the connection drops, any in-flight data is lost—unless you build custom reconnection logic on top.
+
+This becomes a real problem when you're streaming LLM responses. Token streaming is often the primary UI for chat applications, and agentic apps stream tool outputs and progress events over long-running sessions. If someone refreshes mid-generation, you're faced with two bad options: re-run an expensive inference call, or lose the response entirely.
+
+Durable streams solve this by decoupling the data from the connection. Instead of streaming directly to the client, you write to persistent storage and return a URL. The stream continues writing in the background regardless of what happens on the client side.
+
+**What this gives you:**
+
+- **Refresh-safe**: If someone refreshes the page mid-stream, the URL still works and the content is preserved. The expensive work you already did isn't wasted.
+- **Background processing**: Return the stream URL immediately and write data asynchronously with `ctx.waitUntil()`. Your handler responds fast while the stream continues writing.
+- **Shareable URLs**: A stream is just a URL. You can share it, open it on another device, or have multiple viewers access the same output.
+- **Durable artifacts**: Once closed, streams are immutable and remain accessible via their URL until deleted. Useful for audit logs, exports, and generated reports.
## When to Use Durable Streams
diff --git a/content/Services/Storage/key-value.mdx b/content/Services/Storage/key-value.mdx
index 577cad1f..33d4b68c 100644
--- a/content/Services/Storage/key-value.mdx
+++ b/content/Services/Storage/key-value.mdx
@@ -152,8 +152,15 @@ Create and delete namespaces programmatically:
```typescript
const agent = createAgent('NamespaceManager', {
handler: async (ctx, input) => {
- // Create a new namespace
- await ctx.kv.createNamespace('tenant-123');
+ // Create a namespace with default TTL for all keys
+ await ctx.kv.createNamespace('cache', {
+ defaultTTLSeconds: 3600, // all keys expire in 1 hour by default
+ });
+
+ // Create a namespace with no expiration
+ await ctx.kv.createNamespace('config', {
+ defaultTTLSeconds: 0, // keys never expire
+ });
// Delete a namespace (removes all keys)
await ctx.kv.deleteNamespace('old-cache');
@@ -163,13 +170,25 @@ const agent = createAgent('NamespaceManager', {
});
```
+**Default TTL options:**
+
+| Value | Behavior |
+|-------|----------|
+| `undefined` | Keys expire after 7 days (default) |
+| `0` | Keys never expire |
+| `60` - `7776000` | Custom TTL in seconds (1 minute to 90 days) |
+
+
+When a key is read with less than 50% of its TTL remaining, the expiration is automatically extended. This keeps frequently-accessed data alive without manual renewal.
+
+
`deleteNamespace()` permanently removes the namespace and all its keys. This operation cannot be undone.
## TTL Strategy
-Keys persist indefinitely by default. Use TTL for temporary data:
+Keys expire after 7 days by default unless a namespace-level or per-key TTL is set. Use TTL for temporary data:
| Data Type | Suggested TTL |
|-----------|---------------|
diff --git a/content/Services/Storage/vector.mdx b/content/Services/Storage/vector.mdx
index 97ebb43e..abb87374 100644
--- a/content/Services/Storage/vector.mdx
+++ b/content/Services/Storage/vector.mdx
@@ -143,6 +143,8 @@ const agent = createAgent('BatchRetriever', {
### exists() - Check Namespace
+Check if a namespace contains any vectors:
+
```typescript
const hasData = await ctx.vector.exists('knowledge-base');
if (!hasData) {
@@ -150,6 +152,10 @@ if (!hasData) {
}
```
+
+`exists()` returns `false` for namespaces that exist but contain no vectors. Use this to verify your knowledge base has been populated with data before searching.
+
+
## Deleting Vectors
```typescript
diff --git a/content/Services/queues.mdx b/content/Services/queues.mdx
new file mode 100644
index 00000000..20f6ba2d
--- /dev/null
+++ b/content/Services/queues.mdx
@@ -0,0 +1,229 @@
+---
+title: Queues
+description: Publish messages for async processing, webhooks, and event-driven workflows
+---
+
+Queues enable asynchronous message processing for background tasks, webhooks, and event-driven workflows. Publish messages from agents and consume them with workers or webhook destinations.
+
+## When to Use Queues
+
+| Pattern | Best For |
+|---------|----------|
+| **Queues** | Background jobs, webhooks, event-driven processing, decoupled services |
+| [Durable Streams](/Services/Storage/durable-streams) | Large exports, audit logs, streaming data |
+| [Key-Value](/Services/Storage/key-value) | Fast lookups, caching, configuration |
+
+**Use queues when you need to:**
+- Process tasks asynchronously (email sending, report generation)
+- Decouple services with reliable message delivery
+- Deliver webhooks to external endpoints
+- Handle bursty workloads with rate limiting
+- Retry failed operations with exponential backoff
+
+## Queue Types
+
+| Type | Behavior |
+|------|----------|
+| `worker` | Each message is processed by exactly one consumer. Use for background jobs. |
+| `pubsub` | Messages are broadcast to all subscribers. Use for event notifications. |
+
+## Publishing Messages
+
+Publish messages from agents using `ctx.queue.publish()`:
+
+```typescript
+import { createAgent } from '@agentuity/runtime';
+
+const agent = createAgent('OrderProcessor', {
+ handler: async (ctx, input) => {
+ // Queue an email to be sent asynchronously
+ const result = await ctx.queue.publish('email-queue', {
+ to: input.customerEmail,
+ subject: 'Order Confirmed',
+ orderId: input.orderId,
+ });
+
+ ctx.logger.info('Email queued', { messageId: result.id });
+
+ return { success: true, messageId: result.id };
+ },
+});
+```
+
+### Publish Options
+
+```typescript
+const agent = createAgent('TaskScheduler', {
+ handler: async (ctx, input) => {
+ await ctx.queue.publish('task-queue', input.task, {
+ // Attach metadata for filtering or routing
+ metadata: { priority: 'high', region: 'us-west' },
+
+ // Guarantee ordering for messages with the same key
+ partitionKey: input.customerId,
+
+ // Prevent duplicate messages
+ idempotencyKey: `task-${input.taskId}-v1`,
+
+ // Auto-expire after 1 hour
+ ttl: 3600,
+ });
+
+ return { queued: true };
+ },
+});
+```
+
+| Option | Description |
+|--------|-------------|
+| `metadata` | Key-value pairs for routing or filtering |
+| `partitionKey` | Messages with the same key are processed in order |
+| `idempotencyKey` | Prevents duplicate messages if the same key is published again |
+| `ttl` | Time-to-live in seconds before the message expires |
+
+### Publish Result
+
+```typescript
+interface QueuePublishResult {
+ id: string; // Unique message ID (msg_...)
+ offset: number; // Sequential position in the queue
+ publishedAt: string; // ISO 8601 timestamp
+}
+```
+
+## Error Handling
+
+```typescript
+import { QueueNotFoundError, QueueValidationError } from '@agentuity/core';
+
+const agent = createAgent('SafePublisher', {
+ handler: async (ctx, input) => {
+ try {
+ await ctx.queue.publish('notifications', input.notification);
+ return { success: true };
+ } catch (error) {
+ if (error instanceof QueueNotFoundError) {
+ ctx.logger.error('Queue does not exist', { queue: 'notifications' });
+ return { success: false, error: 'Queue not found' };
+ }
+ if (error instanceof QueueValidationError) {
+ ctx.logger.error('Invalid message', { field: error.field });
+ return { success: false, error: 'Validation failed' };
+ }
+ throw error;
+ }
+ },
+});
+```
+
+## Queue Management
+
+Create and manage queues using the CLI or `@agentuity/server` package.
+
+### CLI Commands
+
+```bash
+# Create a worker queue
+agentuity cloud queue create --name order-processing --type worker
+
+# Create a pubsub queue for broadcasting
+agentuity cloud queue create --name events --type pubsub
+
+# List all queues
+agentuity cloud queue list
+
+# Get queue details and stats
+agentuity cloud queue get order-processing
+
+# Pause/resume processing
+agentuity cloud queue pause order-processing
+agentuity cloud queue resume order-processing
+
+# Delete a queue
+agentuity cloud queue delete order-processing
+```
+
+For programmatic queue management, see [SDK Utilities for External Apps](/Learn/Cookbook/Patterns/server-utilities#queue-management).
+
+## Consuming Messages
+
+### Webhook Destinations
+
+Configure webhook destinations to automatically deliver messages to HTTP endpoints. Set these up in the [App](https://app-v1.agentuity.com) or [programmatically](/Learn/Cookbook/Patterns/server-utilities#webhook-destinations).
+
+Webhook destinations support:
+- Custom headers and authentication
+- Configurable timeouts (up to 30 seconds)
+- Retry policies with exponential backoff
+
+### Pull-Based Consumption
+
+For workers that pull and process messages, see [Pull-Based Consumption](/Learn/Cookbook/Patterns/server-utilities#pull-based-consumption). This pattern is useful for long-running workers that need fine-grained control over message processing.
+
+## Dead Letter Queue
+
+Messages that exceed the retry limit are moved to the dead letter queue (DLQ). Inspect and replay failed messages:
+
+```bash
+# List failed messages
+agentuity cloud queue dlq order-processing
+
+# Replay a message back to the queue
+agentuity cloud queue dlq replay order-processing --message-id msg_abc123
+
+# Purge all DLQ messages
+agentuity cloud queue dlq purge order-processing
+```
+
+For programmatic DLQ access, see [Dead Letter Queue Operations](/Learn/Cookbook/Patterns/server-utilities#dead-letter-queue-operations).
+
+## HTTP Ingestion Sources
+
+Create public HTTP endpoints to ingest data into queues from external services. Configure these in the [App](https://app-v1.agentuity.com) or [programmatically](/Learn/Cookbook/Patterns/server-utilities#http-ingestion-sources).
+
+| Auth Type | Description |
+|-----------|-------------|
+| `none` | No authentication |
+| `basic` | HTTP Basic Auth (`username:password`) |
+| `header` | Custom header value (`Bearer token`) |
+
+## Queue Settings
+
+Configure queue behavior when creating or updating:
+
+| Setting | Default | Description |
+|---------|---------|-------------|
+| `default_ttl_seconds` | null | Message expiration (null = never) |
+| `default_visibility_timeout_seconds` | 30 | Processing timeout before message returns to queue |
+| `default_max_retries` | 5 | Attempts before moving to DLQ |
+| `default_retry_backoff_ms` | 1000 | Initial retry delay |
+| `default_retry_max_backoff_ms` | 60000 | Maximum retry delay |
+| `default_retry_multiplier` | 2.0 | Exponential backoff multiplier |
+| `max_in_flight_per_client` | 10 | Concurrent messages per consumer |
+| `retention_seconds` | 2592000 | How long to keep acknowledged messages (30 days) |
+
+## Validation Limits
+
+| Limit | Value |
+|-------|-------|
+| Queue name length | 1-256 characters |
+| Queue name format | Lowercase letters, digits, underscores, hyphens. Must start with a letter or underscore. |
+| Payload size | 1 MB max |
+| Partition key length | 256 characters max |
+| Idempotency key length | 256 characters max |
+| Batch size | 1000 messages max |
+
+## Best Practices
+
+- **Use idempotency keys** for operations that shouldn't be duplicated (payments, emails)
+- **Set appropriate TTLs** for time-sensitive messages
+- **Use partition keys** when message ordering matters within a group
+- **Monitor DLQ** regularly to catch and fix processing failures
+- **Configure webhook retry policies** to handle transient failures gracefully
+
+## Next Steps
+
+- [Durable Streams](/Services/Storage/durable-streams): Streaming storage for large exports
+- [Key-Value Storage](/Services/Storage/key-value): Fast caching and configuration
+- [Background Tasks](/Learn/Cookbook/Patterns/background-tasks): Patterns for async processing
+- [Webhook Handler](/Learn/Cookbook/Patterns/webhook-handler): Receiving external webhooks
diff --git a/package-lock.json b/package-lock.json
index 11105c1e..51cf7968 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -213,6 +213,7 @@
"resolved": "https://registry.npmjs.org/@agentuity/react/-/react-0.1.20.tgz",
"integrity": "sha512-2Ae9T3zZu8hWkj/RCGUs5EkLwJPGeAyVQiZw3sd3ZgunapDsAhrHuYS8clWsthxWsxxdwmJe1HxdQph17Um4Fw==",
"license": "Apache-2.0",
+ "peer": true,
"dependencies": {
"@agentuity/core": "0.1.20",
"@agentuity/frontend": "0.1.20"
@@ -8904,6 +8905,7 @@
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@better-auth/core/-/core-1.4.15.tgz",
"integrity": "sha512-uAvq8YA7SaS7v+TrvH/Kwt7LAJihzUqB3FX8VweDsqu3gn5t51M+Bve+V1vVWR9qBAtC6cN68V6b+scxZxDY4A==",
+ "peer": true,
"dependencies": {
"@standard-schema/spec": "^1.0.0",
"zod": "^4.1.12"
@@ -8933,12 +8935,14 @@
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/@better-auth/utils/-/utils-0.3.0.tgz",
"integrity": "sha512-W+Adw6ZA6mgvnSnhOki270rwJ42t4XzSK6YWGF//BbVXL6SwCLWfyzBc1lN2m/4RM28KubdBKQ4X5VMoLRNPQw==",
- "license": "MIT"
+ "license": "MIT",
+ "peer": true
},
"node_modules/@better-fetch/fetch": {
"version": "1.1.21",
"resolved": "https://registry.npmjs.org/@better-fetch/fetch/-/fetch-1.1.21.tgz",
- "integrity": "sha512-/ImESw0sskqlVR94jB+5+Pxjf+xBwDZF/N5+y2/q4EqD7IARUTSpPfIo8uf39SYpCxyOCtbyYpUrZ3F/k0zT4A=="
+ "integrity": "sha512-/ImESw0sskqlVR94jB+5+Pxjf+xBwDZF/N5+y2/q4EqD7IARUTSpPfIo8uf39SYpCxyOCtbyYpUrZ3F/k0zT4A==",
+ "peer": true
},
"node_modules/@biomejs/biome": {
"version": "2.1.2",
@@ -10633,6 +10637,7 @@
"integrity": "sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==",
"dev": true,
"license": "MIT",
+ "peer": true,
"engines": {
"node": "^14.21.3 || >=16"
},
@@ -14018,6 +14023,7 @@
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.8.tgz",
"integrity": "sha512-3MbSL37jEchWZz2p2mjntRZtPt837ij10ApxKfgmXCTuHWagYg7iA5bqPw6C8BMPfwidlvfPI/fxOc42HLhcyg==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"csstype": "^3.2.2"
}
@@ -14028,6 +14034,7 @@
"integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
"license": "MIT",
+ "peer": true,
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -14095,6 +14102,7 @@
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"license": "MIT",
+ "peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -14241,6 +14249,7 @@
"resolved": "https://registry.npmjs.org/better-call/-/better-call-1.1.8.tgz",
"integrity": "sha512-XMQ2rs6FNXasGNfMjzbyroSwKwYbZ/T3IxruSS6U2MJRsSYh3wYtG3o6H00ZlKZ/C/UPOAD97tqgQJNsxyeTXw==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"@better-auth/utils": "^0.3.0",
"@better-fetch/fetch": "^1.1.4",
@@ -14865,6 +14874,7 @@
"resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz",
"integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==",
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=0.10"
}
@@ -15286,6 +15296,7 @@
"resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
"integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
"license": "ISC",
+ "peer": true,
"engines": {
"node": ">=12"
}
@@ -15516,6 +15527,7 @@
"resolved": "https://registry.npmjs.org/drizzle-orm/-/drizzle-orm-0.45.1.tgz",
"integrity": "sha512-Te0FOdKIistGNPMq2jscdqngBRfBpC8uMFVwqjf6gtTVJHIQ/dosgV/CLBU2N4ZJBsXL5savCba9b0YJskKdcA==",
"license": "Apache-2.0",
+ "peer": true,
"peerDependencies": {
"@aws-sdk/client-rds-data": ">=3",
"@cloudflare/workers-types": ">=4",
@@ -16405,6 +16417,7 @@
"resolved": "https://registry.npmjs.org/fumadocs-core/-/fumadocs-core-15.2.6.tgz",
"integrity": "sha512-5+Bq8iQGXAQ5K5igw612rzGPdup1bnROWa3F0UbdECsSiSQvNkHLlCL0Hyptvll1NV4zx6YFMf8vQRt1aSt4vA==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"@formatjs/intl-localematcher": "^0.6.1",
"@orama/orama": "^3.1.4",
@@ -16519,6 +16532,7 @@
"resolved": "https://registry.npmjs.org/fumadocs-ui/-/fumadocs-ui-15.8.5.tgz",
"integrity": "sha512-9pyB+9rOOsrFnmmZ9xREp/OgVhyaSq2ocEpqTNbeQ7tlJ6JWbdFWfW0C9lRXprQEB6DJWUDtDxqKS5QXLH0EGA==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"@radix-ui/react-accordion": "^1.2.12",
"@radix-ui/react-collapsible": "^1.1.12",
@@ -17298,6 +17312,7 @@
"resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz",
"integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==",
"license": "MIT",
+ "peer": true,
"funding": {
"url": "https://github.com/sponsors/panva"
}
@@ -17369,6 +17384,7 @@
"resolved": "https://registry.npmjs.org/kysely/-/kysely-0.28.10.tgz",
"integrity": "sha512-ksNxfzIW77OcZ+QWSAPC7yDqUSaIVwkTWnTPNiIy//vifNbwsSgQ57OkkncHxxpcBHM3LRfLAZVEh7kjq5twVA==",
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=20.0.0"
}
@@ -17704,6 +17720,7 @@
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.525.0.tgz",
"integrity": "sha512-Tm1txJ2OkymCGkvwoHt33Y2JpN5xucVq1slHcgE6Lk0WjDfjgKWor5CdVER8U6DvcfMwh4M8XxmpTiyzfmfDYQ==",
"license": "ISC",
+ "peer": true,
"peerDependencies": {
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
@@ -19064,6 +19081,7 @@
}
],
"license": "MIT",
+ "peer": true,
"engines": {
"node": "^20.0.0 || >=22.0.0"
}
@@ -19082,6 +19100,7 @@
"resolved": "https://registry.npmjs.org/next/-/next-15.3.8.tgz",
"integrity": "sha512-L+4c5Hlr84fuaNADZbB9+ceRX9/CzwxJ+obXIGHupboB/Q1OLbSUapFs4bO8hnS/E6zV/JDX7sG1QpKVR2bguA==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"@next/env": "15.3.8",
"@swc/counter": "0.1.3",
@@ -19427,6 +19446,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=12"
},
@@ -19594,6 +19614,7 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz",
"integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==",
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -19603,6 +19624,7 @@
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz",
"integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"scheduler": "^0.27.0"
},
@@ -20604,7 +20626,8 @@
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz",
"integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==",
"devOptional": true,
- "license": "MIT"
+ "license": "MIT",
+ "peer": true
},
"node_modules/tapable": {
"version": "2.3.0",
@@ -20765,6 +20788,7 @@
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"license": "Apache-2.0",
+ "peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -20802,6 +20826,7 @@
"integrity": "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"pathe": "^2.0.3"
}
@@ -21147,6 +21172,7 @@
"dev": true,
"hasInstallScript": true,
"license": "Apache-2.0",
+ "peer": true,
"bin": {
"workerd": "bin/workerd"
},
@@ -21167,6 +21193,7 @@
"integrity": "sha512-Z4xn6jFZTaugcOKz42xvRAYKgkVUERHVbuCJ5+f+gK+R6k12L02unakPGOA0L0ejhUl16dqDjKe4tmL9sedHcw==",
"dev": true,
"license": "MIT OR Apache-2.0",
+ "peer": true,
"dependencies": {
"@cloudflare/kv-asset-handler": "0.4.2",
"@cloudflare/unenv-preset": "2.10.0",