From 30426eca464d2f10c22c5ba6e5ad0266abd035c8 Mon Sep 17 00:00:00 2001 From: Universe Date: Wed, 8 Apr 2026 22:47:31 +0900 Subject: [PATCH 1/8] feat: implement document synchronization protocol with clock and diff utilities - Add DocumentClock class for managing monotonic document clocks. - Introduce diff utilities for computing, applying, and composing document diffs. - Create protocol types for defining the wire protocol between client and server. - Implement transport layer with WebSocketTransport for communication. - Add presence management for tracking user presence in the document. - Include validation logic for incoming diffs to ensure structural integrity. - Set up TypeScript configuration for the project. --- .../__tests__/client.test.ts | 446 ++++++++++++ .../grida-canvas-sync/__tests__/diff.test.ts | 420 +++++++++++ .../grida-canvas-sync/__tests__/helpers.ts | 346 +++++++++ .../__tests__/integration.test.ts | 670 ++++++++++++++++++ .../__tests__/validate.test.ts | 168 +++++ packages/grida-canvas-sync/package.json | 34 + packages/grida-canvas-sync/src/client.ts | 428 +++++++++++ packages/grida-canvas-sync/src/clock.ts | 34 + packages/grida-canvas-sync/src/diff.ts | 364 ++++++++++ packages/grida-canvas-sync/src/index.ts | 100 +++ packages/grida-canvas-sync/src/presence.ts | 29 + packages/grida-canvas-sync/src/protocol.ts | 254 +++++++ packages/grida-canvas-sync/src/transport.ts | 167 +++++ packages/grida-canvas-sync/src/validate.ts | 168 +++++ packages/grida-canvas-sync/tsconfig.json | 17 + 15 files changed, 3645 insertions(+) create mode 100644 packages/grida-canvas-sync/__tests__/client.test.ts create mode 100644 packages/grida-canvas-sync/__tests__/diff.test.ts create mode 100644 packages/grida-canvas-sync/__tests__/helpers.ts create mode 100644 packages/grida-canvas-sync/__tests__/integration.test.ts create mode 100644 packages/grida-canvas-sync/__tests__/validate.test.ts create mode 100644 packages/grida-canvas-sync/package.json create mode 100644 
packages/grida-canvas-sync/src/client.ts create mode 100644 packages/grida-canvas-sync/src/clock.ts create mode 100644 packages/grida-canvas-sync/src/diff.ts create mode 100644 packages/grida-canvas-sync/src/index.ts create mode 100644 packages/grida-canvas-sync/src/presence.ts create mode 100644 packages/grida-canvas-sync/src/protocol.ts create mode 100644 packages/grida-canvas-sync/src/transport.ts create mode 100644 packages/grida-canvas-sync/src/validate.ts create mode 100644 packages/grida-canvas-sync/tsconfig.json diff --git a/packages/grida-canvas-sync/__tests__/client.test.ts b/packages/grida-canvas-sync/__tests__/client.test.ts new file mode 100644 index 000000000..e8ed584b6 --- /dev/null +++ b/packages/grida-canvas-sync/__tests__/client.test.ts @@ -0,0 +1,446 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { SyncClient, type SyncClientStatus } from "../src/client"; +import type { DocumentState } from "../src/diff"; +import type { + ClientMessage, + ServerMessage, + SerializedNode, + DocumentDiff, +} from "../src/protocol"; +import type { ISyncTransport, TransportStatus } from "../src/transport"; + +// --------------------------------------------------------------------------- +// MockTransport +// --------------------------------------------------------------------------- + +class MockTransport implements ISyncTransport { + status: TransportStatus = "disconnected"; + sent: ClientMessage[] = []; + + private _messageHandlers = new Set<(msg: ServerMessage) => void>(); + private _statusHandlers = new Set<(status: TransportStatus) => void>(); + + send(message: ClientMessage): void { + this.sent.push(message); + } + + onMessage(handler: (msg: ServerMessage) => void): () => void { + this._messageHandlers.add(handler); + return () => this._messageHandlers.delete(handler); + } + + onStatusChange(handler: (status: TransportStatus) => void): () => void { + this._statusHandlers.add(handler); + return () => 
this._statusHandlers.delete(handler); + } + + connect(): void { + this._setStatus("connecting"); + // Simulate async connect + queueMicrotask(() => this._setStatus("connected")); + } + + disconnect(): void { + this._setStatus("disconnected"); + } + + // --- Test helpers --- + + /** Simulate the transport becoming connected (synchronously). */ + simulateConnected(): void { + this._setStatus("connected"); + } + + /** Deliver a server message to the client. */ + deliver(msg: ServerMessage): void { + for (const h of this._messageHandlers) h(msg); + } + + private _setStatus(s: TransportStatus): void { + this.status = s; + for (const h of this._statusHandlers) h(s); + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function makeNode( + id: string, + props: Record<string, unknown> = {} +): SerializedNode { + return { type: "rectangle", id, ...props } as SerializedNode; +} + +function emptyState(): DocumentState { + return { nodes: {}, scenes: [] }; +} + +function createClientAndTransport( + initialState: DocumentState = emptyState(), + lastClock = 0 +) { + const transport = new MockTransport(); + const client = new SyncClient({ + schema: "0.91.0-test", + transport, + initialState, + lastClock, + pushInterval: -1, // Synchronous flush for deterministic tests + }); + return { transport, client }; +} + +/** Connect the client through the full handshake. 
*/ +function connectClient( + transport: MockTransport, + client: SyncClient, + serverState?: { nodes: Record<string, SerializedNode>; scenes: string[] } +) { + transport.simulateConnected(); + // Client should have sent a connect message + const connectMsg = transport.sent.find((m) => m.type === "connect"); + expect(connectMsg).toBeDefined(); + + // Server responds with connect_ok + if (serverState) { + transport.deliver({ + type: "connect_ok", + clock: 1, + state: serverState.nodes, + scenes: serverState.scenes, + }); + } else { + transport.deliver({ + type: "connect_ok", + clock: 0, + }); + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe("SyncClient", () => { + describe("connection lifecycle", () => { + it("starts disconnected", () => { + const { client } = createClientAndTransport(); + expect(client.status).toBe("disconnected"); + }); + + it("transitions through connecting → syncing → ready", () => { + const { transport, client } = createClientAndTransport(); + const statuses: SyncClientStatus[] = []; + client.on("statusChange", (s) => statuses.push(s)); + + transport.simulateConnected(); + expect(statuses).toContain("syncing"); + + transport.deliver({ type: "connect_ok", clock: 0 }); + expect(statuses).toContain("ready"); + expect(client.status).toBe("ready"); + }); + + it("sends connect message with schema and lastClock", () => { + const { transport, client } = createClientAndTransport(emptyState(), 42); + transport.simulateConnected(); + + const msg = transport.sent.find((m) => m.type === "connect"); + expect(msg).toEqual({ + type: "connect", + schema: "0.91.0-test", + lastClock: 42, + }); + }); + + it("applies server state on connect_ok with full state", () => { + const { transport, client } = createClientAndTransport(); + const node = makeNode("n1", { width: 100 }); + connectClient(transport, client, { + nodes: { n1: node }, + scenes: 
["s1"], + }); + + expect(client.state.nodes["n1"]).toEqual(node); + expect(client.state.scenes).toEqual(["s1"]); + expect(client.serverClock).toBe(1); + }); + + it("applies incremental diff on connect_ok", () => { + const initialState: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: ["s1"], + }; + const { transport, client } = createClientAndTransport(initialState, 5); + transport.simulateConnected(); + + // Server sends incremental diff + transport.deliver({ + type: "connect_ok", + clock: 7, + diff: { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 200 } } }, + }, + }, + }); + + expect(client.state.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + expect(client.serverClock).toBe(7); + }); + }); + + describe("push and ack", () => { + it("pushes a diff and gets commit ack", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + transport.sent = []; // Clear connect messages + + client.pushDiff({ + nodes: { n1: { op: "put", node: makeNode("n1", { width: 100 }) } }, + }); + + // With pushInterval: -1, push is sent synchronously + const pushMsg = transport.sent.find((m) => m.type === "push")!; + expect(pushMsg).toBeDefined(); + expect(pushMsg.type).toBe("push"); + + // Server acks with commit + transport.deliver({ + type: "push_ok", + serverClock: 1, + clientClock: (pushMsg as { clientClock: number }).clientClock, + result: "commit", + }); + + expect(client.isDirty).toBe(false); + expect(client.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 100 }) + ); + }); + + it("handles rebase response", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + transport.sent = []; + + client.pushDiff({ + nodes: { n1: { op: "put", node: makeNode("n1", { width: 100 }) } }, + }); + + // Server rebases — normalizes width to a clamped value + transport.deliver({ + type: "push_ok", + serverClock: 1, + clientClock: 1, + 
result: "rebase", + diff: { + nodes: { + n1: { op: "put", node: makeNode("n1", { width: 50 }) }, + }, + }, + }); + + // Canonical should use server's version + expect(client.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 50 }) + ); + }); + + it("handles discard response", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + transport.sent = []; + + client.pushDiff({ + nodes: { n1: { op: "put", node: makeNode("n1") } }, + }); + + transport.deliver({ + type: "push_ok", + serverClock: 0, + clientClock: 1, + result: "discard", + }); + + // Canonical should be unchanged + expect(client.canonical.nodes["n1"]).toBeUndefined(); + // Local state should also reflect the discard (speculative removed) + expect(client.state.nodes["n1"]).toBeUndefined(); + }); + }); + + describe("remote patches", () => { + it("applies remote patch to canonical and recomputes local", () => { + const initialState: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const { transport, client } = createClientAndTransport(initialState); + connectClient(transport, client); + + const stateChanges: DocumentState[] = []; + client.on("stateChange", (s) => stateChanges.push(s)); + + // Remote patch from another client + transport.deliver({ + type: "patch", + serverClock: 1, + diff: { + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }, + }); + + expect(client.state.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + expect(client.serverClock).toBe(1); + expect(stateChanges.length).toBeGreaterThan(0); + }); + + it("preserves speculative changes over remote patches", () => { + const initialState: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100, height: 50 }), + }, + scenes: [], + }; + const { transport, client } = createClientAndTransport(initialState); + connectClient(transport, client); + + // Local change (not yet ack'd) + client.pushDiff({ + 
nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 999 } }, + }, + }, + }); + + // Remote change to a DIFFERENT field + transport.deliver({ + type: "patch", + serverClock: 1, + diff: { + nodes: { + n1: { + op: "patch", + fields: { height: { op: "put", value: 200 } }, + }, + }, + }, + }); + + // Local state should have BOTH: local width=999 AND remote height=200 + expect(client.state.nodes["n1"]).toEqual( + makeNode("n1", { width: 999, height: 200 }) + ); + }); + }); + + describe("optimistic state", () => { + it("local state reflects unsent changes immediately", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + + client.pushDiff({ + nodes: { n1: { op: "put", node: makeNode("n1", { width: 100 }) } }, + }); + + // Before push is flushed, local state should already include the change + expect(client.state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + expect(client.isDirty).toBe(true); + }); + + it("composes multiple rapid local changes", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + + client.pushDiff({ + nodes: { n1: { op: "put", node: makeNode("n1", { width: 100 }) } }, + }); + client.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + + expect(client.state.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + }); + }); + + describe("presence", () => { + it("emits presenceChange on server presence message", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + + const presenceEvents: Record<string, unknown>[] = []; + client.on("presenceChange", (p) => presenceEvents.push(p)); + + transport.deliver({ + type: "presence", + peers: { + peer1: { + cursor: { cursor_id: "c1", x: 10, y: 20, t: Date.now() }, + profile: { name: "Alice", color: "#ff0000" }, + }, + }, + }); + + expect(presenceEvents).toHaveLength(1); + 
expect(presenceEvents[0]).toHaveProperty("peer1"); + }); + }); + + describe("error handling", () => { + it("emits error on server error message", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + + const errors: { code: string; message: string }[] = []; + client.on("error", (e) => errors.push(e)); + + transport.deliver({ + type: "error", + code: "SCHEMA_MISMATCH", + message: "Incompatible schema version", + }); + + expect(errors).toEqual([ + { code: "SCHEMA_MISMATCH", message: "Incompatible schema version" }, + ]); + }); + }); + + describe("cleanup", () => { + it("destroy clears all handlers and disconnects", () => { + const { transport, client } = createClientAndTransport(); + connectClient(transport, client); + + const handler = vi.fn(); + client.on("stateChange", handler); + + client.destroy(); + + // Delivering a message after destroy should not call the handler + transport.deliver({ + type: "patch", + serverClock: 1, + diff: { nodes: { n1: { op: "remove" } } }, + }); + + // Handler might have been called during destroy's disconnect, + // but the point is the event system is torn down + expect(client.status).toBe("disconnected"); + }); + }); +}); diff --git a/packages/grida-canvas-sync/__tests__/diff.test.ts b/packages/grida-canvas-sync/__tests__/diff.test.ts new file mode 100644 index 000000000..224aa458d --- /dev/null +++ b/packages/grida-canvas-sync/__tests__/diff.test.ts @@ -0,0 +1,420 @@ +import { describe, it, expect } from "vitest"; +import { + computeDiff, + applyDiff, + composeDiffs, + isDiffEmpty, + jsonEqual, + type DocumentState, +} from "../src/diff"; +import type { DocumentDiff, SerializedNode } from "../src/protocol"; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function makeNode( + id: string, + props: Record<string, unknown> = {} +): SerializedNode { + return { type: 
"rectangle", id, ...props } as SerializedNode; +} + +function emptyState(): DocumentState { + return { nodes: {}, scenes: [] }; +} + +// --------------------------------------------------------------------------- +// jsonEqual +// --------------------------------------------------------------------------- + +describe("jsonEqual", () => { + it("primitives", () => { + expect(jsonEqual(1, 1)).toBe(true); + expect(jsonEqual("a", "a")).toBe(true); + expect(jsonEqual(true, true)).toBe(true); + expect(jsonEqual(null, null)).toBe(true); + expect(jsonEqual(1, 2)).toBe(false); + expect(jsonEqual("a", "b")).toBe(false); + expect(jsonEqual(null, 0)).toBe(false); + }); + + it("arrays", () => { + expect(jsonEqual([1, 2], [1, 2])).toBe(true); + expect(jsonEqual([1, 2], [1, 3])).toBe(false); + expect(jsonEqual([1], [1, 2])).toBe(false); + expect(jsonEqual([], [])).toBe(true); + }); + + it("objects", () => { + expect(jsonEqual({ a: 1 }, { a: 1 })).toBe(true); + expect(jsonEqual({ a: 1 }, { a: 2 })).toBe(false); + expect(jsonEqual({ a: 1 }, { a: 1, b: 2 })).toBe(false); + expect(jsonEqual({}, {})).toBe(true); + }); + + it("nested", () => { + expect(jsonEqual({ a: [1, { b: 2 }] }, { a: [1, { b: 2 }] })).toBe(true); + expect(jsonEqual({ a: [1, { b: 2 }] }, { a: [1, { b: 3 }] })).toBe(false); + }); +}); + +// --------------------------------------------------------------------------- +// computeDiff +// --------------------------------------------------------------------------- + +describe("computeDiff", () => { + it("returns null for identical states", () => { + const state: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: ["s1"], + }; + expect(computeDiff(state, state)).toBeNull(); + }); + + it("returns null for deep-equal states", () => { + const a: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100, fill: { r: 1, g: 0, b: 0 } }) }, + scenes: ["s1"], + }; + const b: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100, fill: { r: 1, g: 
0, b: 0 } }) }, + scenes: ["s1"], + }; + expect(computeDiff(a, b)).toBeNull(); + }); + + it("detects added nodes", () => { + const before = emptyState(); + const after: DocumentState = { + nodes: { n1: makeNode("n1") }, + scenes: [], + }; + const diff = computeDiff(before, after)!; + expect(diff).not.toBeNull(); + expect(diff.nodes!["n1"]).toEqual({ op: "put", node: makeNode("n1") }); + }); + + it("detects removed nodes", () => { + const before: DocumentState = { + nodes: { n1: makeNode("n1") }, + scenes: [], + }; + const after = emptyState(); + const diff = computeDiff(before, after)!; + expect(diff.nodes!["n1"]).toEqual({ op: "remove" }); + }); + + it("detects field changes as patch", () => { + const before: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const after: DocumentState = { + nodes: { n1: makeNode("n1", { width: 200 }) }, + scenes: [], + }; + const diff = computeDiff(before, after)!; + expect(diff.nodes!["n1"]).toEqual({ + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }); + }); + + it("detects deleted fields", () => { + const before: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100, height: 50 }) }, + scenes: [], + }; + const after: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const diff = computeDiff(before, after)!; + expect(diff.nodes!["n1"]).toEqual({ + op: "patch", + fields: { height: { op: "delete" } }, + }); + }); + + it("detects type change as put (full replacement)", () => { + const before: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const after: DocumentState = { + nodes: { + n1: { type: "ellipse", id: "n1", radius: 50 } as SerializedNode, + }, + scenes: [], + }; + const diff = computeDiff(before, after)!; + expect(diff.nodes!["n1"].op).toBe("put"); + }); + + it("detects scene reordering", () => { + const before: DocumentState = { + nodes: {}, + scenes: ["s1", "s2"], + }; + const after: 
DocumentState = { + nodes: {}, + scenes: ["s2", "s1"], + }; + const diff = computeDiff(before, after)!; + expect(diff.scenes).toEqual([{ op: "reorder", ids: ["s2", "s1"] }]); + }); + + it("detects scene additions", () => { + const before: DocumentState = { nodes: {}, scenes: ["s1"] }; + const after: DocumentState = { nodes: {}, scenes: ["s1", "s2"] }; + const diff = computeDiff(before, after)!; + expect(diff.scenes).toContainEqual({ op: "add", id: "s2" }); + }); + + it("detects scene removals", () => { + const before: DocumentState = { nodes: {}, scenes: ["s1", "s2"] }; + const after: DocumentState = { nodes: {}, scenes: ["s1"] }; + const diff = computeDiff(before, after)!; + expect(diff.scenes).toContainEqual({ op: "remove", id: "s2" }); + }); +}); + +// --------------------------------------------------------------------------- +// applyDiff +// --------------------------------------------------------------------------- + +describe("applyDiff", () => { + it("puts a new node", () => { + const state = emptyState(); + const node = makeNode("n1", { width: 100 }); + const result = applyDiff(state, { nodes: { n1: { op: "put", node } } }); + expect(result.nodes["n1"]).toEqual(node); + }); + + it("patches an existing node", () => { + const state: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100, height: 50 }) }, + scenes: [], + }; + const result = applyDiff(state, { + nodes: { + n1: { + op: "patch", + fields: { + width: { op: "put", value: 200 }, + height: { op: "delete" }, + }, + }, + }, + }); + expect(result.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + }); + + it("removes a node", () => { + const state: DocumentState = { + nodes: { n1: makeNode("n1") }, + scenes: [], + }; + const result = applyDiff(state, { nodes: { n1: { op: "remove" } } }); + expect(result.nodes["n1"]).toBeUndefined(); + }); + + it("adds a scene", () => { + const state: DocumentState = { nodes: {}, scenes: ["s1"] }; + const result = applyDiff(state, { scenes: [{ op: "add", id: 
"s2" }] }); + expect(result.scenes).toEqual(["s1", "s2"]); + }); + + it("removes a scene", () => { + const state: DocumentState = { nodes: {}, scenes: ["s1", "s2"] }; + const result = applyDiff(state, { scenes: [{ op: "remove", id: "s1" }] }); + expect(result.scenes).toEqual(["s2"]); + }); + + it("reorders scenes", () => { + const state: DocumentState = { nodes: {}, scenes: ["s1", "s2", "s3"] }; + const result = applyDiff(state, { + scenes: [{ op: "reorder", ids: ["s3", "s1", "s2"] }], + }); + expect(result.scenes).toEqual(["s3", "s1", "s2"]); + }); + + it("does not mutate the input state", () => { + const state: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: ["s1"], + }; + const nodesBefore = state.nodes; + const scenesBefore = state.scenes; + applyDiff(state, { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 200 } } }, + }, + }); + expect(state.nodes).toBe(nodesBefore); + expect(state.scenes).toBe(scenesBefore); + expect(state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + }); + + it("skips patch on non-existent node", () => { + const state = emptyState(); + const result = applyDiff(state, { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 100 } } }, + }, + }); + expect(result.nodes["n1"]).toBeUndefined(); + }); + + it("handles add of already-existing scene (no duplicate)", () => { + const state: DocumentState = { nodes: {}, scenes: ["s1"] }; + const result = applyDiff(state, { scenes: [{ op: "add", id: "s1" }] }); + expect(result.scenes).toEqual(["s1"]); + }); +}); + +// --------------------------------------------------------------------------- +// computeDiff + applyDiff round-trip +// --------------------------------------------------------------------------- + +describe("diff round-trip", () => { + it("apply(before, computeDiff(before, after)) === after", () => { + const before: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100, height: 50 }), + n2: makeNode("n2", { 
x: 10 }), + }, + scenes: ["s1", "s2"], + }; + const after: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 200, height: 50 }), + n3: makeNode("n3", { color: "red" }), + }, + scenes: ["s2"], + }; + const diff = computeDiff(before, after)!; + const result = applyDiff(before, diff); + expect(result.nodes).toEqual(after.nodes); + expect(result.scenes).toEqual(after.scenes); + }); +}); + +// --------------------------------------------------------------------------- +// composeDiffs +// --------------------------------------------------------------------------- + +describe("composeDiffs", () => { + it("composes two patches on the same node", () => { + const a: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 200 } } }, + }, + }; + const b: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { height: { op: "put", value: 100 } } }, + }, + }; + const composed = composeDiffs(a, b); + expect(composed.nodes!["n1"]).toEqual({ + op: "patch", + fields: { + width: { op: "put", value: 200 }, + height: { op: "put", value: 100 }, + }, + }); + }); + + it("b's put overrides a's patch", () => { + const a: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 200 } } }, + }, + }; + const b: DocumentDiff = { + nodes: { n1: { op: "put", node: makeNode("n1", { width: 300 }) } }, + }; + const composed = composeDiffs(a, b); + expect(composed.nodes!["n1"]).toEqual({ + op: "put", + node: makeNode("n1", { width: 300 }), + }); + }); + + it("b's remove overrides a's put", () => { + const a: DocumentDiff = { + nodes: { n1: { op: "put", node: makeNode("n1") } }, + }; + const b: DocumentDiff = { + nodes: { n1: { op: "remove" } }, + }; + const composed = composeDiffs(a, b); + expect(composed.nodes!["n1"]).toEqual({ op: "remove" }); + }); + + it("b's patch on a's put merges into the put", () => { + const a: DocumentDiff = { + nodes: { n1: { op: "put", node: makeNode("n1", { width: 100 }) } }, + }; + const b: 
DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 200 } } }, + }, + }; + const composed = composeDiffs(a, b); + expect(composed.nodes!["n1"]).toEqual({ + op: "put", + node: makeNode("n1", { width: 200 }), + }); + }); + + it("composes disjoint node ops", () => { + const a: DocumentDiff = { + nodes: { n1: { op: "put", node: makeNode("n1") } }, + }; + const b: DocumentDiff = { + nodes: { n2: { op: "put", node: makeNode("n2") } }, + }; + const composed = composeDiffs(a, b); + expect(Object.keys(composed.nodes!)).toEqual(["n1", "n2"]); + }); + + it("concatenates scene ops", () => { + const a: DocumentDiff = { scenes: [{ op: "add", id: "s1" }] }; + const b: DocumentDiff = { scenes: [{ op: "add", id: "s2" }] }; + const composed = composeDiffs(a, b); + expect(composed.scenes).toEqual([ + { op: "add", id: "s1" }, + { op: "add", id: "s2" }, + ]); + }); + + it("composes empty diffs", () => { + const composed = composeDiffs({}, {}); + expect(isDiffEmpty(composed)).toBe(true); + }); +}); + +// --------------------------------------------------------------------------- +// isDiffEmpty +// --------------------------------------------------------------------------- + +describe("isDiffEmpty", () => { + it("empty object is empty", () => { + expect(isDiffEmpty({})).toBe(true); + }); + + it("empty nodes is empty", () => { + expect(isDiffEmpty({ nodes: {} })).toBe(true); + }); + + it("non-empty nodes is not empty", () => { + expect(isDiffEmpty({ nodes: { n1: { op: "remove" } } })).toBe(false); + }); + + it("empty scenes is empty", () => { + expect(isDiffEmpty({ scenes: [] })).toBe(true); + }); +}); diff --git a/packages/grida-canvas-sync/__tests__/helpers.ts b/packages/grida-canvas-sync/__tests__/helpers.ts new file mode 100644 index 000000000..2800afbd8 --- /dev/null +++ b/packages/grida-canvas-sync/__tests__/helpers.ts @@ -0,0 +1,346 @@ +/** + * Test harness: MockTransport + MockServer for multi-client integration tests. 
+ * + * MockServer simulates the SyncRoom (Durable Object) behavior: + * - Maintains canonical state + * - Processes pushes (validate → apply → ack + broadcast) + * - Handles connect handshakes + * - Relays presence + * + * MockTransport is a synchronous in-memory transport that delivers messages + * immediately (no async, no timers), making tests deterministic. + */ + +import type { + ClientMessage, + ServerMessage, + DocumentDiff, + NodeId, + SerializedNode, + PushMessage, + ConnectMessage, + PresenceState, +} from "../src/protocol"; +import { type DocumentState, applyDiff, isDiffEmpty } from "../src/diff"; +import { validateDiff } from "../src/validate"; +import { DocumentClock } from "../src/clock"; +import { SyncClient } from "../src/client"; +import type { ISyncTransport, TransportStatus } from "../src/transport"; + +// --------------------------------------------------------------------------- +// MockTransport — synchronous, deterministic +// --------------------------------------------------------------------------- + +export class MockTransport implements ISyncTransport { + status: TransportStatus = "disconnected"; + sent: ClientMessage[] = []; + + private _messageHandlers = new Set<(msg: ServerMessage) => void>(); + private _statusHandlers = new Set<(status: TransportStatus) => void>(); + + /** Callback wired by MockServer to receive client messages. 
*/ + _onClientMessage: ((msg: ClientMessage) => void) | null = null; + + send(message: ClientMessage): void { + this.sent.push(message); + // Deliver to server immediately (synchronous) + this._onClientMessage?.(message); + } + + onMessage(handler: (msg: ServerMessage) => void): () => void { + this._messageHandlers.add(handler); + return () => this._messageHandlers.delete(handler); + } + + onStatusChange(handler: (status: TransportStatus) => void): () => void { + this._statusHandlers.add(handler); + return () => this._statusHandlers.delete(handler); + } + + connect(): void { + // no-op — MockServer controls status via simulateConnected() + } + + disconnect(): void { + this._setStatus("disconnected"); + } + + // --- Test helpers --- + + simulateConnected(): void { + this._setStatus("connected"); + } + + simulateDisconnected(): void { + this._setStatus("disconnected"); + } + + /** Deliver a server message to the client. */ + deliver(msg: ServerMessage): void { + for (const h of this._messageHandlers) h(msg); + } + + private _setStatus(s: TransportStatus): void { + if (this.status === s) return; + this.status = s; + for (const h of this._statusHandlers) h(s); + } +} + +// --------------------------------------------------------------------------- +// MockServer — simulates SyncRoom behavior +// --------------------------------------------------------------------------- + +interface MockSession { + id: string; + transport: MockTransport; + presence?: PresenceState; +} + +export class MockServer { + canonical: DocumentState; + clock: DocumentClock; + private sessions: Map<string, MockSession> = new Map(); + + constructor(initialState: DocumentState = { nodes: {}, scenes: [] }) { + this.canonical = initialState; + this.clock = new DocumentClock(0); + } + + /** Register a client transport with this server. Returns the session id. 
*/ + addSession(sessionId: string, transport: MockTransport): void { + const session: MockSession = { id: sessionId, transport }; + this.sessions.set(sessionId, session); + + // Wire the transport to deliver messages to our handler + transport._onClientMessage = (msg) => + this._handleClientMessage(sessionId, msg); + } + + /** Simulate the client connecting (transport goes connected → client sends connect → server replies). */ + connectSession(sessionId: string): void { + const session = this.sessions.get(sessionId); + if (!session) throw new Error(`Unknown session: ${sessionId}`); + session.transport.simulateConnected(); + } + + /** Disconnect a session. */ + disconnectSession(sessionId: string): void { + const session = this.sessions.get(sessionId); + if (!session) return; + session.transport.simulateDisconnected(); + } + + /** Process all pending work (for tests that use pushInterval > 0). */ + // In our tests, pushInterval=-1 and delivery is synchronous, so this is usually not needed. 
+ + // ------------------------------------------------------------------------- + // Message handling — mirrors SyncRoom logic + // ------------------------------------------------------------------------- + + private _handleClientMessage(sessionId: string, msg: ClientMessage): void { + const session = this.sessions.get(sessionId); + if (!session) return; + + switch (msg.type) { + case "connect": + this._handleConnect(session, msg); + break; + case "push": + this._handlePush(session, msg); + break; + case "ping": + session.transport.deliver({ type: "pong" }); + break; + case "presence_update": + this._handlePresenceUpdate(session, msg.presence); + break; + } + } + + private _handleConnect(session: MockSession, msg: ConnectMessage): void { + if (msg.lastClock === 0 || msg.lastClock < this.clock.value) { + // Send full state + session.transport.deliver({ + type: "connect_ok", + clock: this.clock.value, + state: this.canonical.nodes, + scenes: this.canonical.scenes, + }); + } else { + // Client is up to date + session.transport.deliver({ + type: "connect_ok", + clock: this.clock.value, + }); + } + } + + private _handlePush(session: MockSession, msg: PushMessage): void { + const validation = validateDiff(this.canonical, msg.diff); + + if (!validation.valid) { + session.transport.deliver({ + type: "push_ok", + serverClock: this.clock.value, + clientClock: msg.clientClock, + result: "discard", + }); + return; + } + + // Apply the diff to canonical + const newClock = this.clock.tick(); + this.canonical = applyDiff(this.canonical, msg.diff); + + // Ack the pusher + session.transport.deliver({ + type: "push_ok", + serverClock: newClock, + clientClock: msg.clientClock, + result: "commit", + }); + + // Broadcast to all OTHER sessions + for (const [id, other] of this.sessions) { + if (id === session.id) continue; + if (other.transport.status !== "connected") continue; + other.transport.deliver({ + type: "patch", + serverClock: newClock, + diff: msg.diff, + }); + } + + // 
Handle presence piggy-backed on push + if (msg.presence) { + this._handlePresenceUpdate(session, msg.presence); + } + } + + private _handlePresenceUpdate( + session: MockSession, + presence: PresenceState + ): void { + session.presence = presence; + // Broadcast presence to all OTHER sessions + const peers: Record = {}; + for (const [id, s] of this.sessions) { + if (id === session.id) continue; + if (s.presence) peers[id] = s.presence; + } + // Also include the sender's presence for others + for (const [id, other] of this.sessions) { + if (id === session.id) continue; + if (other.transport.status !== "connected") continue; + other.transport.deliver({ + type: "presence", + peers: { ...peers, [session.id]: presence }, + }); + } + } +} + +// --------------------------------------------------------------------------- +// Factory helpers +// --------------------------------------------------------------------------- + +export function makeNode( + id: string, + props: Record = {} +): SerializedNode { + return { type: "rectangle", id, ...props } as SerializedNode; +} + +export function emptyState(): DocumentState { + return { nodes: {}, scenes: [] }; +} + +/** + * Create a full test setup: server + N clients, all connected. 
+ */ +export function createRoom( + clientCount: number, + initialState: DocumentState = emptyState() +): { + server: MockServer; + clients: SyncClient[]; + transports: MockTransport[]; +} { + const server = new MockServer(initialState); + const clients: SyncClient[] = []; + const transports: MockTransport[] = []; + + for (let i = 0; i < clientCount; i++) { + const transport = new MockTransport(); + const client = new SyncClient({ + schema: "0.91.0-test", + transport, + initialState, + lastClock: 0, + pushInterval: -1, // Synchronous flush for deterministic tests + }); + + server.addSession(`client-${i}`, transport); + clients.push(client); + transports.push(transport); + } + + return { server, clients, transports }; +} + +/** Connect all clients in a room (handshake completes synchronously). */ +export function connectAll( + server: MockServer, + transports: MockTransport[] +): void { + for (let i = 0; i < transports.length; i++) { + server.connectSession(`client-${i}`); + } +} + +/** + * Assert that all clients and the server have converged to the same state. 
+ */ +export function assertConvergence( + server: MockServer, + clients: SyncClient[] +): void { + for (let i = 0; i < clients.length; i++) { + const clientNodes = clients[i].state.nodes; + const serverNodes = server.canonical.nodes; + + // Same set of node IDs + const clientIds = Object.keys(clientNodes).sort(); + const serverIds = Object.keys(serverNodes).sort(); + if (clientIds.join(",") !== serverIds.join(",")) { + throw new Error( + `Client ${i} node IDs [${clientIds}] !== server [${serverIds}]` + ); + } + + // Same field values for each node + for (const id of serverIds) { + const sNode = serverNodes[id]; + const cNode = clientNodes[id]; + for (const key of Object.keys(sNode)) { + const sv = JSON.stringify(sNode[key]); + const cv = JSON.stringify(cNode[key]); + if (sv !== cv) { + throw new Error( + `Client ${i} node "${id}" field "${key}": ${cv} !== server ${sv}` + ); + } + } + } + + // Same scenes + const clientScenes = clients[i].state.scenes.join(","); + const serverScenes = server.canonical.scenes.join(","); + if (clientScenes !== serverScenes) { + throw new Error( + `Client ${i} scenes [${clientScenes}] !== server [${serverScenes}]` + ); + } + } +} diff --git a/packages/grida-canvas-sync/__tests__/integration.test.ts b/packages/grida-canvas-sync/__tests__/integration.test.ts new file mode 100644 index 000000000..f681f36d1 --- /dev/null +++ b/packages/grida-canvas-sync/__tests__/integration.test.ts @@ -0,0 +1,670 @@ +import { describe, it, expect } from "vitest"; +import { + MockServer, + MockTransport, + createRoom, + connectAll, + assertConvergence, + makeNode, + emptyState, +} from "./helpers"; +import type { DocumentState } from "../src/diff"; +import type { DocumentDiff } from "../src/protocol"; + +// --------------------------------------------------------------------------- +// Multi-client integration tests +// +// These tests simulate real collaboration scenarios with 2-3 clients +// connected through a MockServer that implements SyncRoom 
semantics. +// All delivery is synchronous, so tests are deterministic. +// --------------------------------------------------------------------------- + +describe("multi-client integration", () => { + // ----------------------------------------------------------------------- + // Basic two-client collaboration + // ----------------------------------------------------------------------- + + describe("basic two-client workflow", () => { + it("client A creates a node, client B sees it", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ + nodes: { + n1: { op: "put", node: makeNode("n1", { width: 100 }) }, + }, + }); + + // After synchronous flush + server broadcast: + // A should have n1 (committed), B should have n1 (via patch) + expect(A.state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + expect(B.state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + assertConvergence(server, clients); + }); + + it("both clients create different nodes concurrently", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + + // Both push "simultaneously" — but with synchronous delivery, + // A's push processes first, then B's + A.pushDiff({ + nodes: { nA: { op: "put", node: makeNode("nA", { x: 10 }) } }, + }); + B.pushDiff({ + nodes: { nB: { op: "put", node: makeNode("nB", { y: 20 }) } }, + }); + + // Both nodes should exist on both clients + expect(A.state.nodes["nA"]).toBeDefined(); + expect(A.state.nodes["nB"]).toBeDefined(); + expect(B.state.nodes["nA"]).toBeDefined(); + expect(B.state.nodes["nB"]).toBeDefined(); + assertConvergence(server, clients); + }); + + it("client A modifies, client B modifies a different node", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100 }), + n2: makeNode("n2", { height: 50 }), + }, + scenes: [], + }; + const { server, clients, 
transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + B.pushDiff({ + nodes: { + n2: { + op: "patch", + fields: { height: { op: "put", value: 100 } }, + }, + }, + }); + + expect(A.state.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + expect(A.state.nodes["n2"]).toEqual(makeNode("n2", { height: 100 })); + assertConvergence(server, clients); + }); + }); + + // ----------------------------------------------------------------------- + // Concurrent edits to the same node + // ----------------------------------------------------------------------- + + describe("concurrent edits to the same node", () => { + it("different fields of the same node — both survive", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100, height: 50 }), + }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + + B.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { height: { op: "put", value: 100 } }, + }, + }, + }); + + // Both changes should be present (non-conflicting fields) + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 200, height: 100 }) + ); + }); + + it("same field of the same node — last-write-wins (server order)", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100 }), + }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + // A sets width to 200, then B sets width to 300 + // Server processes A first, then B. B wins (LWW). 
+ A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + + B.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 300 } }, + }, + }, + }); + + // Server should have B's value (processed second) + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 300 }) + ); + }); + }); + + // ----------------------------------------------------------------------- + // Add vs delete conflicts + // ----------------------------------------------------------------------- + + describe("add vs delete conflicts", () => { + it("A deletes a node, B patches it — delete wins (A processed first)", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100 }), + }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + // A deletes n1 + A.pushDiff({ nodes: { n1: { op: "remove" } } }); + + // B tries to patch n1 — but it's already deleted on the server. + // The server should validate and discard B's push. 
+ B.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + + // n1 should be deleted + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toBeUndefined(); + }); + + it("A and B both delete the same node — idempotent", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1"), + }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ nodes: { n1: { op: "remove" } } }); + B.pushDiff({ nodes: { n1: { op: "remove" } } }); + + // Both clients should see n1 removed + // B's push is discarded by the server (REMOVE_MISSING_NODE validation) + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toBeUndefined(); + }); + + it("A creates node, B creates a different node with same content — both survive (different IDs)", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ + nodes: { nA: { op: "put", node: makeNode("nA", { color: "red" }) } }, + }); + B.pushDiff({ + nodes: { nB: { op: "put", node: makeNode("nB", { color: "red" }) } }, + }); + + assertConvergence(server, clients); + expect(Object.keys(server.canonical.nodes).sort()).toEqual(["nA", "nB"]); + }); + }); + + // ----------------------------------------------------------------------- + // Reconnection scenarios + // ----------------------------------------------------------------------- + + describe("reconnection", () => { + it("client disconnects, other edits, client reconnects and catches up", () => { + const initial: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + // Disconnect client B + server.disconnectSession("client-1"); + + // 
A makes some edits while B is offline + A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + A.pushDiff({ + nodes: { + n2: { op: "put", node: makeNode("n2", { x: 50 }) }, + }, + }); + + // B's state is stale — still has old width, no n2 + expect(B.state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + expect(B.state.nodes["n2"]).toBeUndefined(); + + // Reconnect B — server sends full state + server.connectSession("client-1"); + + // Now B should have caught up + expect(B.state.nodes["n1"]).toEqual(makeNode("n1", { width: 200 })); + expect(B.state.nodes["n2"]).toEqual(makeNode("n2", { x: 50 })); + assertConvergence(server, clients); + }); + + it("client has unsent local changes when reconnecting — they get pushed after handshake", () => { + const initial: DocumentState = { + nodes: { n1: makeNode("n1", { width: 100 }) }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + // Disconnect A + server.disconnectSession("client-0"); + + // A makes a local edit while disconnected (optimistic) + A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 999 } }, + }, + }, + }); + + // A's local state shows the edit + expect(A.state.nodes["n1"]).toEqual(makeNode("n1", { width: 999 })); + + // But server and B don't have it + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 100 }) + ); + + // Reconnect A — with pushInterval: -1, unsent changes flush + // synchronously during the connect_ok handler + server.connectSession("client-0"); + + // Now check convergence + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 999 }) + ); + assertConvergence(server, clients); + }); + }); + + // ----------------------------------------------------------------------- + // Rapid burst editing + // 
----------------------------------------------------------------------- + + describe("rapid burst editing", () => { + it("many rapid local changes compose into one push", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + transports[0].sent = []; // Clear handshake messages + + // Simulate rapid property changes (e.g., dragging a resize handle) + for (let i = 0; i < 50; i++) { + A.pushDiff({ + nodes: { + n1: + i === 0 + ? { op: "put", node: makeNode("n1", { width: i * 10 }) } + : { + op: "patch", + fields: { width: { op: "put", value: i * 10 } }, + }, + }, + }); + } + + // With pushInterval: 0, each pushDiff triggers a flush. + // But the first push locks (pushInFlight), so subsequent changes + // compose into the unsent buffer and push after ack. + // The exact number of push messages depends on synchronous ack timing. + + // The important assertion: all clients converge to the final value + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 490 }) + ); + }); + + it("interleaved edits from both clients during a burst", () => { + const initial: DocumentState = { + nodes: { n1: makeNode("n1", { width: 0, height: 0 }) }, + scenes: [], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + // A updates width, B updates height, alternating + for (let i = 1; i <= 10; i++) { + A.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: i * 10 } }, + }, + }, + }); + B.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { height: { op: "put", value: i * 5 } }, + }, + }, + }); + } + + assertConvergence(server, clients); + // Both final values should be present + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { width: 100, height: 50 }) + ); + }); + }); + + // 
----------------------------------------------------------------------- + // Three-client scenarios + // ----------------------------------------------------------------------- + + describe("three clients", () => { + it("three clients each create a node — all three exist everywhere", () => { + const { server, clients, transports } = createRoom(3); + connectAll(server, transports); + + const [A, B, C] = clients; + + A.pushDiff({ + nodes: { nA: { op: "put", node: makeNode("nA") } }, + }); + B.pushDiff({ + nodes: { nB: { op: "put", node: makeNode("nB") } }, + }); + C.pushDiff({ + nodes: { nC: { op: "put", node: makeNode("nC") } }, + }); + + assertConvergence(server, clients); + expect(Object.keys(server.canonical.nodes).sort()).toEqual([ + "nA", + "nB", + "nC", + ]); + }); + + it("three clients edit the same node's different fields — all fields survive", () => { + const initial: DocumentState = { + nodes: { + n1: makeNode("n1", { x: 0, y: 0, width: 100 }), + }, + scenes: [], + }; + const { server, clients, transports } = createRoom(3, initial); + connectAll(server, transports); + + const [A, B, C] = clients; + + A.pushDiff({ + nodes: { + n1: { op: "patch", fields: { x: { op: "put", value: 50 } } }, + }, + }); + B.pushDiff({ + nodes: { + n1: { op: "patch", fields: { y: { op: "put", value: 75 } } }, + }, + }); + C.pushDiff({ + nodes: { + n1: { + op: "patch", + fields: { width: { op: "put", value: 200 } }, + }, + }, + }); + + assertConvergence(server, clients); + expect(server.canonical.nodes["n1"]).toEqual( + makeNode("n1", { x: 50, y: 75, width: 200 }) + ); + }); + }); + + // ----------------------------------------------------------------------- + // Scene operations + // ----------------------------------------------------------------------- + + describe("scene operations", () => { + it("client A adds a scene, client B sees it", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + + 
A.pushDiff({ + nodes: { + s1: { + op: "put", + node: { type: "scene", id: "s1", name: "Page 1" } as any, + }, + }, + scenes: [{ op: "add", id: "s1" }], + }); + + assertConvergence(server, clients); + expect(server.canonical.scenes).toContain("s1"); + expect(B.state.scenes).toContain("s1"); + }); + + it("client A removes a scene, client B sees it", () => { + const initial: DocumentState = { + nodes: { + s1: { type: "scene", id: "s1", name: "Page 1" } as any, + s2: { type: "scene", id: "s2", name: "Page 2" } as any, + }, + scenes: ["s1", "s2"], + }; + const { server, clients, transports } = createRoom(2, initial); + connectAll(server, transports); + + const [A, B] = clients; + + A.pushDiff({ + nodes: { s1: { op: "remove" } }, + scenes: [{ op: "remove", id: "s1" }], + }); + + assertConvergence(server, clients); + expect(server.canonical.scenes).toEqual(["s2"]); + expect(server.canonical.nodes["s1"]).toBeUndefined(); + }); + }); + + // ----------------------------------------------------------------------- + // Complex real-world scenario + // ----------------------------------------------------------------------- + + describe("complex real-world scenario", () => { + it("multi-step workflow: create, modify, group, delete across two clients", () => { + const { server, clients, transports } = createRoom(2); + connectAll(server, transports); + + const [A, B] = clients; + + // Step 1: A creates a scene and two rectangles + A.pushDiff({ + nodes: { + s1: { + op: "put", + node: { type: "scene", id: "s1", name: "Main" } as any, + }, + rect1: { + op: "put", + node: makeNode("rect1", { + width: 100, + height: 50, + x: 0, + y: 0, + parent_id: "s1", + }), + }, + rect2: { + op: "put", + node: makeNode("rect2", { + width: 200, + height: 100, + x: 150, + y: 0, + parent_id: "s1", + }), + }, + }, + scenes: [{ op: "add", id: "s1" }], + }); + + assertConvergence(server, clients); + expect(Object.keys(B.state.nodes)).toContain("rect1"); + 
expect(Object.keys(B.state.nodes)).toContain("rect2"); + + // Step 2: B resizes rect1 while A changes rect2's color + B.pushDiff({ + nodes: { + rect1: { + op: "patch", + fields: { + width: { op: "put", value: 300 }, + height: { op: "put", value: 150 }, + }, + }, + }, + }); + A.pushDiff({ + nodes: { + rect2: { + op: "patch", + fields: { fill: { op: "put", value: "#ff0000" } }, + }, + }, + }); + + assertConvergence(server, clients); + expect(server.canonical.nodes["rect1"]).toEqual( + makeNode("rect1", { + width: 300, + height: 150, + x: 0, + y: 0, + parent_id: "s1", + }) + ); + expect(server.canonical.nodes["rect2"]).toMatchObject({ + fill: "#ff0000", + }); + + // Step 3: A creates a group containing both rects + A.pushDiff({ + nodes: { + group1: { + op: "put", + node: { + type: "group", + id: "group1", + parent_id: "s1", + } as any, + }, + rect1: { + op: "patch", + fields: { parent_id: { op: "put", value: "group1" } }, + }, + rect2: { + op: "patch", + fields: { parent_id: { op: "put", value: "group1" } }, + }, + }, + }); + + assertConvergence(server, clients); + expect(B.state.nodes["group1"]).toBeDefined(); + expect(B.state.nodes["rect1"]).toMatchObject({ parent_id: "group1" }); + + // Step 4: B deletes rect2 + B.pushDiff({ + nodes: { rect2: { op: "remove" } }, + }); + + assertConvergence(server, clients); + expect(server.canonical.nodes["rect2"]).toBeUndefined(); + expect(A.state.nodes["rect2"]).toBeUndefined(); + + // Final: 3 nodes remain (s1, rect1, group1) + expect(Object.keys(server.canonical.nodes).sort()).toEqual([ + "group1", + "rect1", + "s1", + ]); + }); + }); +}); diff --git a/packages/grida-canvas-sync/__tests__/validate.test.ts b/packages/grida-canvas-sync/__tests__/validate.test.ts new file mode 100644 index 000000000..80c6b8415 --- /dev/null +++ b/packages/grida-canvas-sync/__tests__/validate.test.ts @@ -0,0 +1,168 @@ +import { describe, it, expect } from "vitest"; +import { validateDiff } from "../src/validate"; +import type { DocumentState } from 
"../src/diff"; +import type { DocumentDiff, SerializedNode } from "../src/protocol"; + +function makeNode( + id: string, + type = "rectangle", + props: Record = {} +): SerializedNode { + return { type, id, ...props } as SerializedNode; +} + +function stateWith( + nodes: Record, + scenes: string[] = [] +): DocumentState { + return { nodes, scenes }; +} + +describe("validateDiff", () => { + it("valid put passes", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { n1: { op: "put", node: makeNode("n1") } }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(true); + expect(result.errors).toHaveLength(0); + }); + + it("valid patch on existing node passes", () => { + const state = stateWith({ n1: makeNode("n1") }); + const diff: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 100 } } }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(true); + }); + + it("valid remove of existing node passes", () => { + const state = stateWith({ n1: makeNode("n1") }); + const diff: DocumentDiff = { nodes: { n1: { op: "remove" } } }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(true); + }); + + it("patch on non-existent node fails", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 100 } } }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PATCH_MISSING_NODE"); + }); + + it("remove of non-existent node fails", () => { + const state = stateWith({}); + const diff: DocumentDiff = { nodes: { n1: { op: "remove" } } }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("REMOVE_MISSING_NODE"); + }); + + it("put without type fails", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { + 
n1: { op: "put", node: { id: "n1" } as SerializedNode }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PUT_MISSING_TYPE"); + }); + + it("put without id fails", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { + n1: { op: "put", node: { type: "rectangle" } as SerializedNode }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PUT_MISSING_ID"); + }); + + it("put with mismatched id fails", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { + n1: { op: "put", node: makeNode("n2") }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PUT_ID_MISMATCH"); + }); + + it("patching immutable field 'id' fails", () => { + const state = stateWith({ n1: makeNode("n1") }); + const diff: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { id: { op: "put", value: "n2" } } }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PATCH_IMMUTABLE_FIELD"); + }); + + it("patching immutable field 'type' fails", () => { + const state = stateWith({ n1: makeNode("n1") }); + const diff: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { type: { op: "put", value: "ellipse" } } }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("PATCH_IMMUTABLE_FIELD"); + }); + + it("scene add for non-existent node fails", () => { + const state = stateWith({}, []); + const diff: DocumentDiff = { scenes: [{ op: "add", id: "s1" }] }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("SCENE_ADD_MISSING_NODE"); + }); + + it("scene add for node created in same diff passes", () => { + const 
state = stateWith({}, []); + const diff: DocumentDiff = { + nodes: { s1: { op: "put", node: makeNode("s1", "scene") } }, + scenes: [{ op: "add", id: "s1" }], + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(true); + }); + + it("scene remove for non-existent scene fails", () => { + const state = stateWith({}, ["s1"]); + const diff: DocumentDiff = { scenes: [{ op: "remove", id: "s2" }] }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("SCENE_REMOVE_MISSING"); + }); + + it("collects multiple errors", () => { + const state = stateWith({}); + const diff: DocumentDiff = { + nodes: { + n1: { op: "patch", fields: { width: { op: "put", value: 100 } } }, + n2: { op: "remove" }, + }, + }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors.length).toBe(2); + }); +}); diff --git a/packages/grida-canvas-sync/package.json b/packages/grida-canvas-sync/package.json new file mode 100644 index 000000000..5af10afff --- /dev/null +++ b/packages/grida-canvas-sync/package.json @@ -0,0 +1,34 @@ +{ + "name": "@grida/canvas-sync", + "version": "0.0.0", + "private": true, + "description": "Server-authoritative document sync for Grida Canvas", + "license": "Apache-2.0", + "author": "softmarshmallow", + "repository": "https://github.com/gridaco/grida", + "files": [ + "dist" + ], + "main": "./dist/index.js", + "module": "./dist/index.mjs", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.mjs", + "require": "./dist/index.js" + } + }, + "scripts": { + "build": "tsup src/index.ts --format cjs,esm --dts", + "dev": "tsup src/index.ts --format cjs,esm --dts --watch", + "test": "vitest run", + "test:watch": "vitest", + "typecheck": "tsc --noEmit" + }, + "devDependencies": { + "tsup": "^8.0.0", + "typescript": "^5.0.0", + "vitest": "^4" + } +} diff --git 
a/packages/grida-canvas-sync/src/client.ts b/packages/grida-canvas-sync/src/client.ts new file mode 100644 index 000000000..5c2b6c51e --- /dev/null +++ b/packages/grida-canvas-sync/src/client.ts @@ -0,0 +1,428 @@ +/** + * @module client + * + * `SyncClient` — the client-side sync engine. + * + * Maintains three layers of state: + * canonical — last server-confirmed state + * speculative — diffs that have been pushed but not yet ack'd + * unsent — local changes not yet pushed + * + * The "local state" that the editor renders is: + * apply(canonical, compose(...speculative, unsent)) + * + * On server ack/patch, the client rebases: undo speculative, apply server + * truth, re-apply remaining speculative + unsent. + */ + +import type { + DocumentDiff, + PresenceState, + ServerMessage, + ClientMessage, + ConnectOkMessage, + PushOkMessage, + PatchMessage, + NodeId, + SerializedNode, +} from "./protocol"; +import { + type DocumentState, + applyDiff, + composeDiffs, + isDiffEmpty, +} from "./diff"; +import type { ISyncTransport, TransportStatus } from "./transport"; + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +export interface SyncClientOptions { + /** Schema version for the connect handshake. */ + readonly schema: string; + /** Transport to use for sending/receiving messages. */ + readonly transport: ISyncTransport; + /** Initial document state (from OPFS cache or empty). */ + readonly initialState: DocumentState; + /** Last known server clock (from OPFS sync-state.json). 0 for fresh. */ + readonly lastClock?: number; + /** + * Push debounce interval in ms. Default: 50. + * Set to a negative value (e.g. -1) for synchronous flush (useful for tests). 
+ */ + readonly pushInterval?: number; +} + +export type SyncClientStatus = + | "disconnected" + | "connecting" + | "syncing" // connect handshake sent, waiting for connect_ok + | "ready"; // connected and in sync + +export type SyncClientEventMap = { + /** Fired when the merged local state changes (canonical + speculative + unsent). */ + stateChange: DocumentState; + /** Fired on presence updates from peers. */ + presenceChange: Record; + /** Fired on status changes. */ + statusChange: SyncClientStatus; + /** Fired on server errors. */ + error: { code: string; message: string }; +}; + +type EventHandler = ( + data: SyncClientEventMap[K] +) => void; + +// --------------------------------------------------------------------------- +// SyncClient +// --------------------------------------------------------------------------- + +export class SyncClient { + // -- State layers -- + private _canonical: DocumentState; + private _speculative: DocumentDiff[] = []; + private _unsent: DocumentDiff = {}; + private _localState: DocumentState; + + // -- Clock -- + private _serverClock: number; + private _clientClock: number = 0; + + // -- Transport -- + private readonly _transport: ISyncTransport; + private readonly _schema: string; + private readonly _pushInterval: number; + + // -- Push scheduling -- + private _pushTimer: ReturnType | null = null; + private _pushInFlight = false; + + // -- Status -- + private _status: SyncClientStatus = "disconnected"; + + // -- Event handlers -- + private _handlers: { + [K in keyof SyncClientEventMap]: Set>; + } = { + stateChange: new Set(), + presenceChange: new Set(), + statusChange: new Set(), + error: new Set(), + }; + + // -- Cleanup -- + private _unsubscribeMessage: (() => void) | null = null; + private _unsubscribeStatus: (() => void) | null = null; + + constructor(options: SyncClientOptions) { + this._canonical = options.initialState; + this._localState = options.initialState; + this._serverClock = options.lastClock ?? 
0; + this._schema = options.schema; + this._transport = options.transport; + this._pushInterval = options.pushInterval ?? 50; + + // Wire up transport + this._unsubscribeMessage = this._transport.onMessage( + this._handleServerMessage.bind(this) + ); + this._unsubscribeStatus = this._transport.onStatusChange( + this._handleTransportStatus.bind(this) + ); + } + + // ------------------------------------------------------------------------- + // Public API + // ------------------------------------------------------------------------- + + /** Current sync status. */ + get status(): SyncClientStatus { + return this._status; + } + + /** Last known server clock. */ + get serverClock(): number { + return this._serverClock; + } + + /** The merged local state (what the editor should render). */ + get state(): DocumentState { + return this._localState; + } + + /** The canonical (server-confirmed) state. */ + get canonical(): DocumentState { + return this._canonical; + } + + /** Whether there are unsent or unacknowledged changes. */ + get isDirty(): boolean { + return this._speculative.length > 0 || !isDiffEmpty(this._unsent); + } + + /** Subscribe to events. Returns an unsubscribe function. */ + on( + event: K, + handler: EventHandler + ): () => void { + (this._handlers[event] as Set>).add(handler); + return () => + (this._handlers[event] as Set>).delete(handler); + } + + /** + * Push a local diff. This is the primary mutation API. + * The diff is applied optimistically and scheduled for push to server. + */ + pushDiff(diff: DocumentDiff): void { + if (isDiffEmpty(diff)) return; + + // Compose into unsent buffer + this._unsent = composeDiffs(this._unsent, diff); + + // Recompute local state + this._recomputeLocalState(); + + // Schedule a push + this._schedulePush(); + } + + /** Update local presence (sent with the next push or immediately). 
*/ + setPresence(presence: PresenceState): void { + if (this._status === "ready") { + this._transport.send({ + type: "presence_update", + presence, + }); + } + } + + /** Connect to the server. */ + connect(): void { + this._transport.connect(); + } + + /** Disconnect from the server. */ + disconnect(): void { + if (this._pushTimer !== null) { + clearTimeout(this._pushTimer); + this._pushTimer = null; + } + this._transport.disconnect(); + } + + /** Tear down all subscriptions. Call this when done. */ + destroy(): void { + this.disconnect(); + this._unsubscribeMessage?.(); + this._unsubscribeStatus?.(); + this._unsubscribeMessage = null; + this._unsubscribeStatus = null; + for (const set of Object.values(this._handlers)) { + set.clear(); + } + } + + // ------------------------------------------------------------------------- + // Server message handling + // ------------------------------------------------------------------------- + + private _handleServerMessage(msg: ServerMessage): void { + switch (msg.type) { + case "connect_ok": + this._handleConnectOk(msg); + break; + case "push_ok": + this._handlePushOk(msg); + break; + case "patch": + this._handlePatch(msg); + break; + case "presence": + this._emit("presenceChange", { ...msg.peers }); + break; + case "pong": + // Heartbeat response — no action needed + break; + case "error": + this._emit("error", { code: msg.code, message: msg.message }); + break; + } + } + + private _handleConnectOk(msg: ConnectOkMessage): void { + if (msg.state) { + // Full state — server sent everything + this._canonical = { + nodes: msg.state as Record, + scenes: msg.scenes ?? 
[], + }; + } else if (msg.diff) { + // Incremental — apply catch-up diff + this._canonical = applyDiff(this._canonical, msg.diff); + if (msg.scenes) { + this._canonical = { ...this._canonical, scenes: msg.scenes }; + } + } + + this._serverClock = msg.clock; + this._setStatus("ready"); + this._recomputeLocalState(); + + // If we have unsent changes from before reconnect, push them + if (!isDiffEmpty(this._unsent)) { + this._schedulePush(); + } + } + + private _handlePushOk(msg: PushOkMessage): void { + // Find and remove the acknowledged speculative diff + // Speculative diffs are in push order; the first one matches the ack + if (this._speculative.length === 0) return; + + switch (msg.result) { + case "commit": + // Server accepted our diff as-is — apply to canonical + this._canonical = applyDiff(this._canonical, this._speculative[0]); + this._speculative.shift(); + break; + case "rebase": + // Server modified our diff — use server's version + if (msg.diff) { + this._canonical = applyDiff(this._canonical, msg.diff); + } + this._speculative.shift(); + break; + case "discard": + // Server rejected our diff — drop it + this._speculative.shift(); + break; + } + + this._serverClock = msg.serverClock; + this._pushInFlight = false; + this._recomputeLocalState(); + + // If there's more to push, schedule it + if (!isDiffEmpty(this._unsent) || this._speculative.length > 0) { + this._schedulePush(); + } + } + + private _handlePatch(msg: PatchMessage): void { + // Another client's change, broadcast by the server + this._canonical = applyDiff(this._canonical, msg.diff); + this._serverClock = msg.serverClock; + this._recomputeLocalState(); + } + + // ------------------------------------------------------------------------- + // Transport status handling + // ------------------------------------------------------------------------- + + private _handleTransportStatus(status: TransportStatus): void { + switch (status) { + case "connecting": + this._setStatus("connecting"); + break; 
+ case "connected": + // Send the connect handshake + this._setStatus("syncing"); + this._transport.send({ + type: "connect", + schema: this._schema, + lastClock: this._serverClock, + }); + break; + case "disconnected": + this._pushInFlight = false; + this._setStatus("disconnected"); + break; + } + } + + // ------------------------------------------------------------------------- + // Push scheduling + // ------------------------------------------------------------------------- + + private _schedulePush(): void { + if (this._pushTimer !== null) return; // Already scheduled + if (this._status !== "ready") return; // Not connected + + if (this._pushInterval < 0) { + // Synchronous flush (test mode) + this._flush(); + } else { + this._pushTimer = setTimeout(() => { + this._pushTimer = null; + this._flush(); + }, this._pushInterval); + } + } + + private _flush(): void { + if (this._status !== "ready") return; + if (this._pushInFlight) return; // Wait for ack before sending another + if (isDiffEmpty(this._unsent)) return; // Nothing to send + + const diff = this._unsent; + this._unsent = {}; + this._speculative.push(diff); + this._pushInFlight = true; + + const clientClock = ++this._clientClock; + + this._transport.send({ + type: "push", + clientClock, + diff, + }); + } + + // ------------------------------------------------------------------------- + // State recomputation + // ------------------------------------------------------------------------- + + /** + * Recompute localState from canonical + speculative + unsent. + * Emits "stateChange" if the state actually changed. 
+ */ + private _recomputeLocalState(): void { + let merged = this._canonical; + + for (const spec of this._speculative) { + merged = applyDiff(merged, spec); + } + + if (!isDiffEmpty(this._unsent)) { + merged = applyDiff(merged, this._unsent); + } + + // Ref check — if nothing changed, skip the event + if (merged === this._localState) return; + + this._localState = merged; + this._emit("stateChange", merged); + } + + // ------------------------------------------------------------------------- + // Event emission + // ------------------------------------------------------------------------- + + private _emit( + event: K, + data: SyncClientEventMap[K] + ): void { + for (const handler of this._handlers[event] as Set>) { + handler(data); + } + } + + private _setStatus(status: SyncClientStatus): void { + if (this._status === status) return; + this._status = status; + this._emit("statusChange", status); + } +} diff --git a/packages/grida-canvas-sync/src/clock.ts b/packages/grida-canvas-sync/src/clock.ts new file mode 100644 index 000000000..2a6f2b59a --- /dev/null +++ b/packages/grida-canvas-sync/src/clock.ts @@ -0,0 +1,34 @@ +/** + * @module clock + * + * Monotonic clock utilities for the sync protocol. + * The server maintains a single monotonic clock per document/room. + * Clients track the last-seen server clock for reconnection. + */ + +/** + * A monotonic document clock. Each committed change increments the clock by 1. + * Used by the server to order changes and by clients to request deltas on reconnect. + */ +export class DocumentClock { + private _value: number; + + constructor(initial: number = 0) { + this._value = initial; + } + + /** Current clock value. */ + get value(): number { + return this._value; + } + + /** Increment and return the new value. */ + tick(): number { + return ++this._value; + } + + /** Reset to a specific value (used when loading from storage). 
*/ + reset(value: number): void { + this._value = value; + } +} diff --git a/packages/grida-canvas-sync/src/diff.ts b/packages/grida-canvas-sync/src/diff.ts new file mode 100644 index 000000000..3d49c3628 --- /dev/null +++ b/packages/grida-canvas-sync/src/diff.ts @@ -0,0 +1,364 @@ +/** + * @module diff + * + * Pure functions for computing, applying, and composing document diffs. + * + * All functions operate on a flat record map (`Record`) + * plus a scene ordering array. This is the "sync-friendly" representation; + * conversion to/from the editor's `grida.program.document.Document` lives in + * {@link serialize}. + */ + +import type { + NodeId, + SerializedNode, + DocumentDiff, + NodeOp, + FieldOp, + SceneOp, + JsonValue, +} from "./protocol"; + +// --------------------------------------------------------------------------- +// Document state — the flat representation used by the sync layer +// --------------------------------------------------------------------------- + +export interface DocumentState { + readonly nodes: Readonly>; + readonly scenes: readonly NodeId[]; +} + +// --------------------------------------------------------------------------- +// computeDiff +// --------------------------------------------------------------------------- + +/** + * Compute the diff needed to transform `before` into `after`. + * Returns `null` if the two states are identical. 
+ */ +export function computeDiff( + before: DocumentState, + after: DocumentState +): DocumentDiff | null { + const nodeOps: Record = {}; + let hasNodeOps = false; + + // Detect removed nodes (in before but not in after) + for (const id of Object.keys(before.nodes)) { + if (!(id in after.nodes)) { + nodeOps[id] = { op: "remove" }; + hasNodeOps = true; + } + } + + // Detect added or changed nodes + for (const [id, afterNode] of Object.entries(after.nodes)) { + const beforeNode = before.nodes[id]; + if (!beforeNode) { + // New node + nodeOps[id] = { op: "put", node: afterNode }; + hasNodeOps = true; + } else { + // Existing node — compute field-level diff + const patch = computeNodePatch(beforeNode, afterNode); + if (patch) { + nodeOps[id] = patch; + hasNodeOps = true; + } + } + } + + // Detect scene ordering changes + const sceneOps = computeSceneDiff(before.scenes, after.scenes); + const hasSceneOps = sceneOps !== null; + + if (!hasNodeOps && !hasSceneOps) { + return null; + } + + const diff: DocumentDiff = {}; + if (hasNodeOps) { + (diff as { nodes: typeof nodeOps }).nodes = nodeOps; + } + if (hasSceneOps) { + (diff as { scenes: typeof sceneOps }).scenes = sceneOps; + } + return diff; +} + +/** + * Compute a field-level patch for a single node. + * Returns `null` if the nodes are identical. 
+ */ +function computeNodePatch( + before: SerializedNode, + after: SerializedNode +): NodeOp | null { + // If the type changed, it's a full replacement + if (before.type !== after.type) { + return { op: "put", node: after }; + } + + const fields: Record = {}; + let hasFields = false; + + // Check all keys in `after` for changes or additions + for (const [key, afterVal] of Object.entries(after)) { + if (key === "id") continue; // id never changes + const beforeVal = before[key]; + if (!jsonEqual(beforeVal, afterVal)) { + fields[key] = { op: "put", value: afterVal }; + hasFields = true; + } + } + + // Check for deleted keys (in before but not in after, excluding id) + for (const key of Object.keys(before)) { + if (key === "id") continue; + if (!(key in after)) { + fields[key] = { op: "delete" }; + hasFields = true; + } + } + + if (!hasFields) return null; + return { op: "patch", fields }; +} + +/** + * Compute scene diff. Returns scene ops or null if identical. + */ +function computeSceneDiff( + before: readonly NodeId[], + after: readonly NodeId[] +): SceneOp[] | null { + // Quick equality check + if ( + before.length === after.length && + before.every((id, i) => id === after[i]) + ) { + return null; + } + + const ops: SceneOp[] = []; + const beforeSet = new Set(before); + const afterSet = new Set(after); + + // Removed scenes + for (const id of before) { + if (!afterSet.has(id)) { + ops.push({ op: "remove", id }); + } + } + + // Added scenes + for (const id of after) { + if (!beforeSet.has(id)) { + ops.push({ op: "add", id }); + } + } + + // If the set is the same but order changed, emit a reorder + if (ops.length === 0) { + ops.push({ op: "reorder", ids: after }); + } + + return ops; +} + +// --------------------------------------------------------------------------- +// applyDiff +// --------------------------------------------------------------------------- + +/** + * Apply a diff to a document state, producing a new state. 
+ * This is a pure function — the input state is not mutated. + */ +export function applyDiff( + state: DocumentState, + diff: DocumentDiff +): DocumentState { + let nodes = { ...state.nodes }; + let scenes = [...state.scenes]; + + // Apply node operations + if (diff.nodes) { + for (const [id, op] of Object.entries(diff.nodes)) { + switch (op.op) { + case "put": + nodes[id] = op.node; + break; + case "patch": + if (id in nodes) { + nodes[id] = applyFieldOps(nodes[id], op.fields); + } + // If node doesn't exist, skip (server should have validated) + break; + case "remove": + delete nodes[id]; + break; + } + } + } + + // Apply scene operations (in order) + if (diff.scenes) { + for (const sceneOp of diff.scenes) { + switch (sceneOp.op) { + case "add": + if (!scenes.includes(sceneOp.id)) { + scenes.push(sceneOp.id); + } + break; + case "remove": + scenes = scenes.filter((id) => id !== sceneOp.id); + break; + case "reorder": + scenes = [...sceneOp.ids]; + break; + } + } + } + + return { nodes, scenes }; +} + +/** + * Apply field-level operations to a serialized node, returning a new node. + */ +function applyFieldOps( + node: SerializedNode, + fields: Readonly> +): SerializedNode { + const result: Record = { ...node }; + for (const [key, op] of Object.entries(fields)) { + switch (op.op) { + case "put": + result[key] = op.value; + break; + case "delete": + delete result[key]; + break; + } + } + return result as SerializedNode; +} + +// --------------------------------------------------------------------------- +// composeDiffs +// --------------------------------------------------------------------------- + +/** + * Compose two diffs into a single diff that has the same effect as + * applying `a` followed by `b`. + * + * This is used by SyncClient to merge unsent local changes into one diff. 
+ */ +export function composeDiffs(a: DocumentDiff, b: DocumentDiff): DocumentDiff { + const nodes: Record = {}; + + // Start with all ops from `a` + if (a.nodes) { + for (const [id, op] of Object.entries(a.nodes)) { + nodes[id] = op; + } + } + + // Merge ops from `b` + if (b.nodes) { + for (const [id, bOp] of Object.entries(b.nodes)) { + const aOp = nodes[id]; + if (!aOp) { + nodes[id] = bOp; + continue; + } + nodes[id] = composeNodeOps(aOp, bOp); + } + } + + // Scene ops: just concatenate (they are applied in order) + const scenes = + a.scenes || b.scenes + ? [...(a.scenes ?? []), ...(b.scenes ?? [])] + : undefined; + + const result: DocumentDiff = {}; + if (Object.keys(nodes).length > 0) { + (result as { nodes: typeof nodes }).nodes = nodes; + } + if (scenes && scenes.length > 0) { + (result as { scenes: typeof scenes }).scenes = scenes; + } + return result; +} + +/** + * Compose two node-level operations. + */ +function composeNodeOps(a: NodeOp, b: NodeOp): NodeOp { + // If b is a full put or remove, it overrides anything + if (b.op === "put" || b.op === "remove") { + return b; + } + + // b is "patch" + if (a.op === "remove") { + // Can't patch a removed node — the patch wins (implies re-creation path) + return b; + } + + if (a.op === "put") { + // Apply b's patches to a's node snapshot + const patched = applyFieldOps(a.node, b.fields); + return { op: "put", node: patched }; + } + + // Both are "patch" — merge field ops (b overrides a for same keys) + const fields: Record = { ...a.fields, ...b.fields }; + return { op: "patch", fields }; +} + +// --------------------------------------------------------------------------- +// Utilities +// --------------------------------------------------------------------------- + +/** Check if a diff is empty (no effective operations). 
*/ +export function isDiffEmpty(diff: DocumentDiff): boolean { + const hasNodes = diff.nodes && Object.keys(diff.nodes).length > 0; + const hasScenes = diff.scenes && diff.scenes.length > 0; + const hasMeta = diff.metadata && Object.keys(diff.metadata).length > 0; + return !hasNodes && !hasScenes && !hasMeta; +} + +/** + * Deep equality check for JSON values. + * Used to detect whether a field has actually changed. + */ +export function jsonEqual(a: unknown, b: unknown): boolean { + if (a === b) return true; + if (a === null || b === null) return false; + if (typeof a !== typeof b) return false; + + if (Array.isArray(a)) { + if (!Array.isArray(b) || a.length !== b.length) return false; + for (let i = 0; i < a.length; i++) { + if (!jsonEqual(a[i], b[i])) return false; + } + return true; + } + + if (typeof a === "object" && typeof b === "object") { + const aObj = a as Record; + const bObj = b as Record; + const aKeys = Object.keys(aObj); + const bKeys = Object.keys(bObj); + if (aKeys.length !== bKeys.length) return false; + for (const key of aKeys) { + if (!Object.prototype.hasOwnProperty.call(bObj, key)) return false; + if (!jsonEqual(aObj[key], bObj[key])) return false; + } + return true; + } + + return false; +} diff --git a/packages/grida-canvas-sync/src/index.ts b/packages/grida-canvas-sync/src/index.ts new file mode 100644 index 000000000..466110c5f --- /dev/null +++ b/packages/grida-canvas-sync/src/index.ts @@ -0,0 +1,100 @@ +/** + * @grida/canvas-sync + * + * Server-authoritative document sync for Grida Canvas. 
+ * + * @example + * ```ts + * import { SyncClient, WebSocketTransport } from "@grida/canvas-sync"; + * + * const transport = new WebSocketTransport({ + * url: "wss://live.grida.co/room/my-room", + * }); + * + * const client = new SyncClient({ + * schema: "0.91.0-beta+20260311", + * transport, + * initialState: { nodes: {}, scenes: [] }, + * }); + * + * client.on("stateChange", (state) => { + * // Update the editor with the new state + * }); + * + * client.connect(); + * ``` + */ + +// Protocol types +export type { + NodeId, + JsonValue, + FieldOp, + FieldPut, + FieldDelete, + SerializedNode, + NodeOp, + NodePut, + NodePatch, + NodeRemove, + SceneOp, + SceneAdd, + SceneRemove, + SceneReorder, + DocumentDiff, + CursorPresence, + PresenceState, + ClientMessage, + ConnectMessage, + PushMessage, + PingMessage, + PresenceUpdateMessage, + ServerMessage, + ConnectOkMessage, + PushOkMessage, + PatchMessage, + PresenceBroadcastMessage, + PongMessage, + ErrorMessage, + PushResult, +} from "./protocol"; + +// Diff operations +export { + computeDiff, + applyDiff, + composeDiffs, + isDiffEmpty, + jsonEqual, +} from "./diff"; +export type { DocumentState } from "./diff"; + +// Validation +export { validateDiff } from "./validate"; +export type { + ValidationResult, + ValidationError, + ValidationErrorCode, +} from "./validate"; + +// Clock +export { DocumentClock } from "./clock"; + +// Transport +export { WebSocketTransport } from "./transport"; +export type { + ISyncTransport, + TransportStatus, + WebSocketTransportOptions, +} from "./transport"; + +// Presence +export { mergePresence, hasVisibleCursor } from "./presence"; + +// Client +export { SyncClient } from "./client"; +export type { + SyncClientOptions, + SyncClientStatus, + SyncClientEventMap, +} from "./client"; diff --git a/packages/grida-canvas-sync/src/presence.ts b/packages/grida-canvas-sync/src/presence.ts new file mode 100644 index 000000000..7c8f34432 --- /dev/null +++ 
b/packages/grida-canvas-sync/src/presence.ts @@ -0,0 +1,29 @@ +/** + * @module presence + * + * Presence state management. Presence is ephemeral (not persisted) and + * relayed by the server to all peers in the same room. + */ + +import type { PresenceState } from "./protocol"; + +/** + * Merge incoming peer presence states with the current known set. + * Entries not in `incoming` are removed (the server sends the full peer map). + */ +export function mergePresence( + incoming: Readonly> +): Record { + // The server sends a full snapshot of all peer presence states. + // We just accept it as-is — no local merging needed. + return { ...incoming }; +} + +/** Check if a presence state has a visible cursor. */ +export function hasVisibleCursor(p: PresenceState): boolean { + return ( + p.cursor !== undefined && + p.profile !== undefined && + p.profile.color !== undefined + ); +} diff --git a/packages/grida-canvas-sync/src/protocol.ts b/packages/grida-canvas-sync/src/protocol.ts new file mode 100644 index 000000000..c86863ee7 --- /dev/null +++ b/packages/grida-canvas-sync/src/protocol.ts @@ -0,0 +1,254 @@ +/** + * @module protocol + * + * Wire protocol types for the Grida Canvas sync system. + * + * This module defines the contract between SyncClient (browser) and + * SyncRoom (Cloudflare Durable Object). All types here are pure data — + * no behavior, no dependencies beyond JSON-serializable primitives. + * + * The protocol is server-authoritative: the server validates and may + * modify pushed diffs before committing them. Clients optimistically + * apply their own changes and rebase when the server responds. + */ + +// --------------------------------------------------------------------------- +// Identifiers +// --------------------------------------------------------------------------- + +/** Node identifier — currently a string, will migrate to packed u32 later. 
*/ +export type NodeId = string; + +// --------------------------------------------------------------------------- +// JSON primitives +// --------------------------------------------------------------------------- + +/** + * Any JSON-serializable value. Used for field values in diffs. + * Intentionally loose — validation is done by {@link validate}. + */ +export type JsonValue = + | string + | number + | boolean + | null + | JsonValue[] + | { [key: string]: JsonValue }; + +// --------------------------------------------------------------------------- +// Field-level operations +// --------------------------------------------------------------------------- + +/** Replace a field's value. */ +export interface FieldPut { + readonly op: "put"; + readonly value: JsonValue; +} + +/** Delete (unset) a field. */ +export interface FieldDelete { + readonly op: "delete"; +} + +/** A single field-level operation. */ +export type FieldOp = FieldPut | FieldDelete; + +// --------------------------------------------------------------------------- +// Node-level operations +// --------------------------------------------------------------------------- + +/** + * Serialized node — a plain JSON object representing a full node snapshot. + * The `type` discriminant and `id` are always present. + */ +export interface SerializedNode { + readonly type: string; + readonly id: NodeId; + readonly [key: string]: JsonValue; +} + +/** Insert a new node (or replace an existing one wholesale). */ +export interface NodePut { + readonly op: "put"; + readonly node: SerializedNode; +} + +/** Patch individual fields of an existing node. */ +export interface NodePatch { + readonly op: "patch"; + readonly fields: Readonly>; +} + +/** Remove (tombstone) a node. */ +export interface NodeRemove { + readonly op: "remove"; +} + +/** A single node-level operation. 
*/ +export type NodeOp = NodePut | NodePatch | NodeRemove; + +// --------------------------------------------------------------------------- +// Scene operations +// --------------------------------------------------------------------------- + +export interface SceneAdd { + readonly op: "add"; + readonly id: NodeId; +} + +export interface SceneRemove { + readonly op: "remove"; + readonly id: NodeId; +} + +export interface SceneReorder { + readonly op: "reorder"; + readonly ids: readonly NodeId[]; +} + +export type SceneOp = SceneAdd | SceneRemove | SceneReorder; + +// --------------------------------------------------------------------------- +// Document diff +// --------------------------------------------------------------------------- + +/** + * A diff describing changes to a document. + * + * - `nodes` — per-node operations (insert, patch, or remove) + * - `scenes` — ordered list of scene-level operations + * - `metadata` — document-level metadata changes (keyed by metadata key) + * + * An empty diff (all fields undefined or empty) is a no-op. + */ +export interface DocumentDiff { + readonly nodes?: Readonly>; + readonly scenes?: readonly SceneOp[]; + readonly metadata?: Readonly>; +} + +// --------------------------------------------------------------------------- +// Presence (ephemeral, not persisted) +// --------------------------------------------------------------------------- + +export interface CursorPresence { + readonly cursor_id: string; + readonly x: number; + readonly y: number; + /** Epoch ms — used for tie-breaking when multiple entries share a cursor_id. */ + readonly t: number; +} + +export interface PresenceState { + readonly cursor?: CursorPresence; + readonly selection?: readonly NodeId[]; + readonly scene_id?: string; + readonly viewport?: { + readonly x: number; + readonly y: number; + readonly zoom: number; + }; + /** Palette / display name for the cursor badge. 
*/ + readonly profile?: { + readonly name?: string; + readonly color?: string; + }; +} + +// --------------------------------------------------------------------------- +// Wire messages: Client → Server +// --------------------------------------------------------------------------- + +export interface ConnectMessage { + readonly type: "connect"; + /** Schema version string (e.g. "0.91.0-beta+20260311"). */ + readonly schema: string; + /** Last known server clock. 0 for a fresh connection. */ + readonly lastClock: number; +} + +export interface PushMessage { + readonly type: "push"; + /** Client-assigned sequence number for this push (monotonically increasing). */ + readonly clientClock: number; + readonly diff: DocumentDiff; + readonly presence?: PresenceState; +} + +export interface PingMessage { + readonly type: "ping"; +} + +export interface PresenceUpdateMessage { + readonly type: "presence_update"; + readonly presence: PresenceState; +} + +export type ClientMessage = + | ConnectMessage + | PushMessage + | PingMessage + | PresenceUpdateMessage; + +// --------------------------------------------------------------------------- +// Wire messages: Server → Client +// --------------------------------------------------------------------------- + +export type PushResult = "commit" | "discard" | "rebase"; + +export interface ConnectOkMessage { + readonly type: "connect_ok"; + /** Current server clock. */ + readonly clock: number; + /** If the client is behind, this contains the catch-up diff. */ + readonly diff?: DocumentDiff; + /** + * Full document state. Sent when the client's lastClock is too stale + * for an incremental diff (or on first connect with lastClock=0). + */ + readonly state?: Readonly>; + /** Scene ref ordering. */ + readonly scenes?: readonly NodeId[]; +} + +export interface PushOkMessage { + readonly type: "push_ok"; + readonly serverClock: number; + /** The client clock this is acknowledging. 
*/ + readonly clientClock: number; + readonly result: PushResult; + /** + * When result is "rebase", this contains the server's version of the diff + * (which may differ from what the client sent). + */ + readonly diff?: DocumentDiff; +} + +export interface PatchMessage { + readonly type: "patch"; + readonly serverClock: number; + readonly diff: DocumentDiff; +} + +export interface PresenceBroadcastMessage { + readonly type: "presence"; + readonly peers: Readonly>; +} + +export interface PongMessage { + readonly type: "pong"; +} + +export interface ErrorMessage { + readonly type: "error"; + readonly code: string; + readonly message: string; +} + +export type ServerMessage = + | ConnectOkMessage + | PushOkMessage + | PatchMessage + | PresenceBroadcastMessage + | PongMessage + | ErrorMessage; diff --git a/packages/grida-canvas-sync/src/transport.ts b/packages/grida-canvas-sync/src/transport.ts new file mode 100644 index 000000000..641c9d9fd --- /dev/null +++ b/packages/grida-canvas-sync/src/transport.ts @@ -0,0 +1,167 @@ +/** + * @module transport + * + * Transport abstraction for the sync protocol. + * + * `SyncClient` depends on `ISyncTransport`, not on WebSocket directly. + * This allows unit testing with `MockTransport` and future transport + * swaps (SharedWorker, WebRTC, etc.) without changing the client. + */ + +import type { ClientMessage, ServerMessage } from "./protocol"; + +// --------------------------------------------------------------------------- +// Transport interface +// --------------------------------------------------------------------------- + +export type TransportStatus = "disconnected" | "connecting" | "connected"; + +export interface ISyncTransport { + /** Current connection status. */ + readonly status: TransportStatus; + + /** Send a message to the server. Throws if not connected. */ + send(message: ClientMessage): void; + + /** Register a handler for incoming server messages. Returns unsubscribe fn. 
*/ + onMessage(handler: (message: ServerMessage) => void): () => void; + + /** Register a handler for status changes. Returns unsubscribe fn. */ + onStatusChange(handler: (status: TransportStatus) => void): () => void; + + /** Open the connection. */ + connect(): void; + + /** Close the connection. */ + disconnect(): void; +} + +// --------------------------------------------------------------------------- +// WebSocket transport +// --------------------------------------------------------------------------- + +export interface WebSocketTransportOptions { + /** Full WebSocket URL (e.g. "wss://live.grida.co/room/abc"). */ + readonly url: string; + /** Reconnect delay in ms after an unexpected close. Default: 1000. */ + readonly reconnectDelay?: number; + /** Max reconnect attempts. Default: Infinity. */ + readonly maxReconnectAttempts?: number; +} + +export class WebSocketTransport implements ISyncTransport { + private _status: TransportStatus = "disconnected"; + private _ws: WebSocket | null = null; + private _messageHandlers = new Set<(msg: ServerMessage) => void>(); + private _statusHandlers = new Set<(status: TransportStatus) => void>(); + private _reconnectAttempts = 0; + private _reconnectTimer: ReturnType | null = null; + private _intentionalClose = false; + + private readonly _url: string; + private readonly _reconnectDelay: number; + private readonly _maxReconnectAttempts: number; + + constructor(options: WebSocketTransportOptions) { + this._url = options.url; + this._reconnectDelay = options.reconnectDelay ?? 1000; + this._maxReconnectAttempts = options.maxReconnectAttempts ?? 
Infinity; + } + + get status(): TransportStatus { + return this._status; + } + + send(message: ClientMessage): void { + if (!this._ws || this._ws.readyState !== WebSocket.OPEN) { + throw new Error("WebSocketTransport: not connected"); + } + this._ws.send(JSON.stringify(message)); + } + + onMessage(handler: (msg: ServerMessage) => void): () => void { + this._messageHandlers.add(handler); + return () => this._messageHandlers.delete(handler); + } + + onStatusChange(handler: (status: TransportStatus) => void): () => void { + this._statusHandlers.add(handler); + return () => this._statusHandlers.delete(handler); + } + + connect(): void { + if (this._status !== "disconnected") return; + this._intentionalClose = false; + this._openSocket(); + } + + disconnect(): void { + this._intentionalClose = true; + if (this._reconnectTimer !== null) { + clearTimeout(this._reconnectTimer); + this._reconnectTimer = null; + } + if (this._ws) { + this._ws.close(); + this._ws = null; + } + this._setStatus("disconnected"); + } + + // ------------------------------------------------------------------------- + // Internal + // ------------------------------------------------------------------------- + + private _openSocket(): void { + this._setStatus("connecting"); + const ws = new WebSocket(this._url); + + ws.onopen = () => { + this._reconnectAttempts = 0; + this._setStatus("connected"); + }; + + ws.onmessage = (event) => { + try { + const msg = JSON.parse(event.data as string) as ServerMessage; + for (const handler of this._messageHandlers) { + handler(msg); + } + } catch { + // Malformed message — ignore + } + }; + + ws.onclose = () => { + this._ws = null; + this._setStatus("disconnected"); + if (!this._intentionalClose) { + this._scheduleReconnect(); + } + }; + + ws.onerror = () => { + // onerror is always followed by onclose in browsers + }; + + this._ws = ws; + } + + private _scheduleReconnect(): void { + if (this._reconnectAttempts >= this._maxReconnectAttempts) return; + 
this._reconnectAttempts++; + const delay = this._reconnectDelay * Math.min(this._reconnectAttempts, 10); + this._reconnectTimer = setTimeout(() => { + this._reconnectTimer = null; + this._openSocket(); + }, delay); + } + + private _setStatus(status: TransportStatus): void { + if (this._status === status) return; + this._status = status; + for (const handler of this._statusHandlers) { + handler(status); + } + } +} diff --git a/packages/grida-canvas-sync/src/validate.ts b/packages/grida-canvas-sync/src/validate.ts new file mode 100644 index 000000000..7bac86e27 --- /dev/null +++ b/packages/grida-canvas-sync/src/validate.ts @@ -0,0 +1,168 @@ +/** + * @module validate + * + * Server-side validation of incoming diffs against canonical state. + * The server calls `validateDiff` before applying a push. Invalid + * operations are collected as errors; the server can then decide to + * discard the entire push or strip invalid ops. + */ + +import type { DocumentDiff, NodeId, NodeOp } from "./protocol"; +import type { DocumentState } from "./diff"; + +// --------------------------------------------------------------------------- +// Validation result +// --------------------------------------------------------------------------- + +export interface ValidationError { + /** The node ID (or "__document__" for doc-level issues). 
*/ + readonly target: string; + readonly code: ValidationErrorCode; + readonly message: string; +} + +export type ValidationErrorCode = + | "PATCH_MISSING_NODE" // Trying to patch a node that doesn't exist + | "REMOVE_MISSING_NODE" // Trying to remove a node that doesn't exist + | "PUT_MISSING_TYPE" // Put node is missing the `type` field + | "PUT_MISSING_ID" // Put node is missing the `id` field + | "PUT_ID_MISMATCH" // Put node's id doesn't match the key in the diff + | "PATCH_IMMUTABLE_FIELD" // Trying to patch `id` or `type` + | "SCENE_ADD_MISSING_NODE" // Adding a scene ref for a node that doesn't exist + | "SCENE_ADD_NOT_SCENE" // Adding a scene ref for a node that isn't a scene type + | "SCENE_REMOVE_MISSING"; // Removing a scene ref that doesn't exist + +export interface ValidationResult { + readonly valid: boolean; + readonly errors: readonly ValidationError[]; +} + +// --------------------------------------------------------------------------- +// validateDiff +// --------------------------------------------------------------------------- + +/** + * Validate a diff against the current canonical state. + * + * Returns a result with `valid: true` and empty errors if everything checks out. + * Otherwise returns the list of issues found. + * + * This does NOT check authorization — only structural validity. 
+ */ +export function validateDiff( + state: DocumentState, + diff: DocumentDiff +): ValidationResult { + const errors: ValidationError[] = []; + + if (diff.nodes) { + for (const [id, op] of Object.entries(diff.nodes)) { + validateNodeOp(state, id, op, errors); + } + } + + if (diff.scenes) { + // Build a projected node set (after applying node ops from this diff) + const projectedNodes = new Set(Object.keys(state.nodes)); + if (diff.nodes) { + for (const [id, op] of Object.entries(diff.nodes)) { + if (op.op === "put") projectedNodes.add(id); + if (op.op === "remove") projectedNodes.delete(id); + } + } + + const currentScenes = new Set(state.scenes); + for (const sceneOp of diff.scenes) { + switch (sceneOp.op) { + case "add": + if (!projectedNodes.has(sceneOp.id)) { + errors.push({ + target: sceneOp.id, + code: "SCENE_ADD_MISSING_NODE", + message: `Scene add references non-existent node "${sceneOp.id}"`, + }); + } + break; + case "remove": + if (!currentScenes.has(sceneOp.id)) { + errors.push({ + target: sceneOp.id, + code: "SCENE_REMOVE_MISSING", + message: `Scene remove references non-existent scene "${sceneOp.id}"`, + }); + } + break; + // "reorder" — no structural validation needed (just a permutation) + } + } + } + + return { + valid: errors.length === 0, + errors, + }; +} + +function validateNodeOp( + state: DocumentState, + id: NodeId, + op: NodeOp, + errors: ValidationError[] +): void { + switch (op.op) { + case "put": { + if (!op.node.type) { + errors.push({ + target: id, + code: "PUT_MISSING_TYPE", + message: `Put for "${id}" is missing the "type" field`, + }); + } + if (!op.node.id) { + errors.push({ + target: id, + code: "PUT_MISSING_ID", + message: `Put for "${id}" is missing the "id" field`, + }); + } + if (op.node.id && op.node.id !== id) { + errors.push({ + target: id, + code: "PUT_ID_MISMATCH", + message: `Put for "${id}" has mismatched id "${op.node.id}"`, + }); + } + break; + } + case "patch": { + if (!(id in state.nodes)) { + errors.push({ + 
target: id, + code: "PATCH_MISSING_NODE", + message: `Patch targets non-existent node "${id}"`, + }); + } + // Check for immutable field mutations + for (const key of Object.keys(op.fields)) { + if (key === "id" || key === "type") { + errors.push({ + target: id, + code: "PATCH_IMMUTABLE_FIELD", + message: `Patch for "${id}" attempts to change immutable field "${key}"`, + }); + } + } + break; + } + case "remove": { + if (!(id in state.nodes)) { + errors.push({ + target: id, + code: "REMOVE_MISSING_NODE", + message: `Remove targets non-existent node "${id}"`, + }); + } + break; + } + } +} diff --git a/packages/grida-canvas-sync/tsconfig.json b/packages/grida-canvas-sync/tsconfig.json new file mode 100644 index 000000000..c9fce898e --- /dev/null +++ b/packages/grida-canvas-sync/tsconfig.json @@ -0,0 +1,17 @@ +{ + "compilerOptions": { + "target": "es2020", + "module": "esnext", + "moduleResolution": "bundler", + "types": ["vitest/globals"], + "skipLibCheck": true, + "esModuleInterop": true, + "noImplicitAny": true, + "strict": true, + "outDir": "./dist", + "rootDir": "./src", + "declaration": true + }, + "include": ["src"], + "exclude": ["dist", "__tests__"] +} From 5f9e7aa5e988ef09008fc8baca56723d804c5d03 Mon Sep 17 00:00:00 2001 From: Universe Date: Wed, 8 Apr 2026 22:47:48 +0900 Subject: [PATCH 2/8] feat: refactor document synchronization with new SyncRoom and storage implementation --- .../package.json | 6 +- .../grida-canvas-document-worker-cf/src/do.ts | 157 -------- .../src/index.ts | 109 +++--- .../src/lib/index.ts | 2 - .../src/lib/storage.ts | 90 ----- .../src/lib/websocket.ts | 143 ------- .../src/room.ts | 361 ++++++++++++++++++ .../src/storage.ts | 283 ++++++++++++++ .../tsconfig.json | 24 +- 9 files changed, 699 insertions(+), 476 deletions(-) delete mode 100644 services/grida-canvas-document-worker-cf/src/do.ts delete mode 100644 services/grida-canvas-document-worker-cf/src/lib/index.ts delete mode 100644 
services/grida-canvas-document-worker-cf/src/lib/storage.ts delete mode 100644 services/grida-canvas-document-worker-cf/src/lib/websocket.ts create mode 100644 services/grida-canvas-document-worker-cf/src/room.ts create mode 100644 services/grida-canvas-document-worker-cf/src/storage.ts diff --git a/services/grida-canvas-document-worker-cf/package.json b/services/grida-canvas-document-worker-cf/package.json index 848b7bd63..44e6f8aa1 100644 --- a/services/grida-canvas-document-worker-cf/package.json +++ b/services/grida-canvas-document-worker-cf/package.json @@ -10,10 +10,8 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "hono": "^4.9.8", - "lib0": "^0.2.114", - "y-protocols": "^1.0.6", - "yjs": "^13.6.27" + "@grida/canvas-sync": "workspace:*", + "hono": "^4.9.8" }, "devDependencies": { "typescript": "^5", diff --git a/services/grida-canvas-document-worker-cf/src/do.ts b/services/grida-canvas-document-worker-cf/src/do.ts deleted file mode 100644 index 5372604db..000000000 --- a/services/grida-canvas-document-worker-cf/src/do.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { DurableObject } from "cloudflare:workers"; -import { removeAwarenessStates } from "y-protocols/awareness"; -import { applyUpdate, encodeStateAsUpdate } from "yjs"; -import { WSSharedDoc, setupWSConnection } from "./lib/websocket"; -import { YTransactionStorage } from "./lib/storage"; -import type { Env } from "hono"; -import { Hono } from "hono"; - -const createApp = (createRoom: (roomId: string) => WebSocket) => { - const app = new Hono(); - - return app.get("/rooms/:roomId", async (c) => { - const roomId = c.req.param("roomId"); - const client = createRoom(roomId); - - return new Response(null, { - webSocket: client, - status: 101, - statusText: "Switching Protocols", - }); - }); -}; - -export class G1DO extends DurableObject { - protected app = createApp(this.createRoom.bind(this)); - protected doc = new WSSharedDoc(); - protected storage = new YTransactionStorage(this.state.storage); - 
protected sessions = new Map void>(); - private awarenessClients = new Set(); - - constructor( - public state: DurableObjectState, - public env: T["Bindings"] - ) { - super(state, env); - - void this.state.blockConcurrencyWhile(this.onStart.bind(this)); - } - - protected async onStart(): Promise { - const doc = await this.storage.getYDoc(); - applyUpdate(this.doc, encodeStateAsUpdate(doc)); - - for (const ws of this.state.getWebSockets()) { - this.registerWebSocket(ws); - } - - this.doc.on("update", async (update) => { - await this.storage.storeUpdate(update); - }); - this.doc.awareness.on( - "update", - async ({ - added, - removed, - updated, - }: { - added: number[]; - removed: number[]; - updated: number[]; - }) => { - for (const client of [...added, ...updated]) { - this.awarenessClients.add(client); - } - for (const client of removed) { - this.awarenessClients.delete(client); - } - } - ); - } - - protected createRoom(roomId: string) { - const pair = new WebSocketPair(); - const client = pair[0]; - const server = pair[1]; - server.serializeAttachment({ - roomId, - connectedAt: new Date(), - }); - - this.state.acceptWebSocket(server); - this.registerWebSocket(server); - - return client; - } - - fetch(request: Request): Response | Promise { - return this.app.request(request, undefined, this.env); - } - - async updateYDoc(update: Uint8Array): Promise { - this.doc.update(update); - await this.cleanup(); - } - async getYDoc(): Promise { - return encodeStateAsUpdate(this.doc); - } - - async webSocketMessage( - ws: WebSocket, - message: string | ArrayBuffer - ): Promise { - if (!(message instanceof ArrayBuffer)) return; - - // Basic message size validation for security - if (message.byteLength > 1024 * 1024) { - // 1MB limit - console.warn("Message too large, ignoring"); - return; - } - - const update = new Uint8Array(message); - await this.updateYDoc(update); - } - - async webSocketError(ws: WebSocket): Promise { - await this.unregisterWebSocket(ws); - await 
this.cleanup(); - } - - async webSocketClose(ws: WebSocket): Promise { - await this.unregisterWebSocket(ws); - await this.cleanup(); - } - - protected registerWebSocket(ws: WebSocket) { - setupWSConnection(ws, this.doc); - const s = this.doc.notify((message) => { - ws.send(message); - }); - this.sessions.set(ws, s); - } - - protected async unregisterWebSocket(ws: WebSocket) { - try { - const dispose = this.sessions.get(ws); - dispose?.(); - this.sessions.delete(ws); - const clientIds = this.awarenessClients; - - removeAwarenessStates(this.doc.awareness, Array.from(clientIds), null); - } catch (e) { - console.error("Error unregistering WebSocket:", e); - // Continue cleanup even if awareness removal fails - } - } - - protected async cleanup() { - if (this.sessions.size < 1) { - try { - await this.storage.commit(); - } catch (error) { - console.error("Error during cleanup commit:", error); - } - } - } -} diff --git a/services/grida-canvas-document-worker-cf/src/index.ts b/services/grida-canvas-document-worker-cf/src/index.ts index 25fd92f03..01b981ad7 100644 --- a/services/grida-canvas-document-worker-cf/src/index.ts +++ b/services/grida-canvas-document-worker-cf/src/index.ts @@ -1,57 +1,52 @@ -import { type Context, Hono } from "hono"; -import { cors } from "hono/cors"; -import { G1DO } from "./do"; - -const app = new Hono(); -app.use("*", cors()); - -const route = app.route( - "/editor", - app.get("/:id", async (c: Context) => { - try { - if (c.req.header("Upgrade") !== "websocket") { - return c.body("Expected websocket", { - status: 426, - statusText: "Upgrade Required", - }); - } - - const roomId = c.req.param("id"); - if (!roomId || roomId.length === 0) { - return c.body("Invalid room ID", { - status: 400, - statusText: "Bad Request", - }); - } - - // Basic room ID validation for security - if (!/^[a-zA-Z0-9_-]+$/.test(roomId) || roomId.length > 100) { - return c.body("Invalid room ID format", { - status: 400, - statusText: "Bad Request", - }); - } - - const obj 
= c.env.G1; - const stub = obj.get(obj.idFromName(roomId)); - - // Create websocket connection directly - const client = (stub as any).createRoom(roomId); - - return new Response(null, { - webSocket: client, - status: 101, - statusText: "Switching Protocols", - }); - } catch (error) { - console.error("WebSocket connection error:", error); - return c.body("Internal Server Error", { - status: 500, - statusText: "Internal Server Error", - }); - } - }) -); - -export default route; -export { G1DO }; +/** + * Grida Canvas Document Worker — Cloudflare Worker entrypoint. + * + * Routes: + * GET /room/:roomId → WebSocket upgrade to the SyncRoom Durable Object + * GET /health → 200 OK + * + * Legacy route (kept for backward compatibility during migration): + * GET /editor/:roomId → Same as /room/:roomId + */ + +import { Hono } from "hono"; + +export { G1DO } from "./room"; + +const app = new Hono<{ Bindings: Env }>(); + +// Health check +app.get("/health", (c) => c.text("ok")); + +// WebSocket upgrade → Durable Object +app.get("/room/:roomId", (c) => { + return upgradeToRoom(c.env, c.req.raw, c.req.param("roomId")); +}); + +// Legacy route (the old YJS service used /editor/:roomId) +app.get("/editor/:roomId", (c) => { + return upgradeToRoom(c.env, c.req.raw, c.req.param("roomId")); +}); + +function upgradeToRoom( + env: Env, + request: Request, + roomId: string +): Response | Promise { + // Validate room ID + if (!roomId || roomId.length > 100 || !/^[a-zA-Z0-9_-]+$/.test(roomId)) { + return new Response("Invalid room ID", { status: 400 }); + } + + // Must be a WebSocket upgrade + if (request.headers.get("Upgrade") !== "websocket") { + return new Response("Expected WebSocket upgrade", { status: 426 }); + } + + // Route to the Durable Object for this room + const id = env.G1.idFromName(roomId); + const stub = env.G1.get(id); + return stub.fetch(request); +} + +export default app; diff --git a/services/grida-canvas-document-worker-cf/src/lib/index.ts 
b/services/grida-canvas-document-worker-cf/src/lib/index.ts deleted file mode 100644 index 4db48051d..000000000 --- a/services/grida-canvas-document-worker-cf/src/lib/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export { WSSharedDoc, setupWSConnection } from "./websocket"; -export { YTransactionStorage } from "./storage"; diff --git a/services/grida-canvas-document-worker-cf/src/lib/storage.ts b/services/grida-canvas-document-worker-cf/src/lib/storage.ts deleted file mode 100644 index 993f07d73..000000000 --- a/services/grida-canvas-document-worker-cf/src/lib/storage.ts +++ /dev/null @@ -1,90 +0,0 @@ -import { Doc, applyUpdate, encodeStateAsUpdate } from "yjs"; - -export class YTransactionStorage { - private readonly MAX_BYTES = 10 * 1024; // 10KB - private readonly MAX_UPDATES = 500; - - constructor(private readonly storage: DurableObjectStorage) {} - - private storageKey(type: "update" | "state", name?: string | number): string { - return `ydoc:${type}:${name ?? ""}`; - } - - async getYDoc(): Promise { - const snapshot = (await this.storage.get( - this.storageKey("state", "doc") - )) as Uint8Array | undefined; - const data = (await this.storage.list({ - prefix: this.storageKey("update"), - })) as Map; - - const updates: Uint8Array[] = Array.from(data.values()); - const doc = new Doc(); - - doc.transact(() => { - if (snapshot) { - applyUpdate(doc, snapshot); - } - for (const update of updates) { - applyUpdate(doc, update); - } - }); - - return doc; - } - - async storeUpdate(update: Uint8Array): Promise { - if (update.byteLength === 0) { - return; // Skip empty updates - } - - try { - return await this.storage.transaction(async (tx) => { - const bytes = - ((await tx.get(this.storageKey("state", "bytes"))) as number) ?? 0; - const count = - ((await tx.get(this.storageKey("state", "count"))) as number) ?? 
0; - - const updateBytes = bytes + update.byteLength; - const updateCount = count + 1; - - if (updateBytes > this.MAX_BYTES || updateCount > this.MAX_UPDATES) { - const doc = await this.getYDoc(); - applyUpdate(doc, update); - await this._commit(doc, tx); - } else { - await tx.put(this.storageKey("state", "bytes"), updateBytes); - await tx.put(this.storageKey("state", "count"), updateCount); - await tx.put(this.storageKey("update", updateCount), update); - } - }); - } catch (error) { - console.error("Error storing update:", error); - throw error; // Re-throw to let caller handle - } - } - - async commit(): Promise { - const doc = await this.getYDoc(); - return this.storage.transaction(async (tx) => { - await this._commit(doc, tx); - }); - } - - private async _commit(doc: Doc, tx: DurableObjectTransaction) { - const data = (await tx.list({ - prefix: this.storageKey("update"), - })) as Map; - - for (const update of data.values()) { - applyUpdate(doc, update); - } - - const update = encodeStateAsUpdate(doc); - - await tx.delete(Array.from(data.keys())); - await tx.put(this.storageKey("state", "bytes"), 0); - await tx.put(this.storageKey("state", "count"), 0); - await tx.put(this.storageKey("state", "doc"), update); - } -} diff --git a/services/grida-canvas-document-worker-cf/src/lib/websocket.ts b/services/grida-canvas-document-worker-cf/src/lib/websocket.ts deleted file mode 100644 index 402662b4f..000000000 --- a/services/grida-canvas-document-worker-cf/src/lib/websocket.ts +++ /dev/null @@ -1,143 +0,0 @@ -import { createDecoder, readVarUint, readVarUint8Array } from "lib0/decoding"; -import { - createEncoder, - length, - toUint8Array, - writeVarUint, - writeVarUint8Array, -} from "lib0/encoding"; -import { - applyAwarenessUpdate, - Awareness, - encodeAwarenessUpdate, -} from "y-protocols/awareness"; -import { readSyncMessage, writeSyncStep1, writeUpdate } from "y-protocols/sync"; -import { Doc } from "yjs"; - -// Message types -const MESSAGE_TYPES = { - sync: 0, - 
awareness: 1, -} as const; - -type MessageType = keyof typeof MESSAGE_TYPES; - -function createTypedEncoder(type: MessageType) { - const encoder = createEncoder(); - writeVarUint(encoder, MESSAGE_TYPES[type]); - return encoder; -} - -export class WSSharedDoc extends Doc { - private listeners = new Set<(message: Uint8Array) => void>(); - readonly awareness = new Awareness(this); - - constructor(gc = true) { - super({ gc }); - this.awareness.setLocalState(null); - - // Awareness updates - this.awareness.on( - "update", - (changes: { added: number[]; updated: number[]; removed: number[] }) => { - this.awarenessChangeHandler(changes); - } - ); - - // Document updates - this.on("update", (update: Uint8Array) => { - this.syncMessageHandler(update); - }); - } - - update(message: Uint8Array) { - const encoder = createEncoder(); - const decoder = createDecoder(message); - const type = readVarUint(decoder); - - switch (type) { - case MESSAGE_TYPES.sync: { - writeVarUint(encoder, MESSAGE_TYPES.sync); - readSyncMessage(decoder, encoder, this, null); - - if (length(encoder) > 1) { - this._notify(toUint8Array(encoder)); - } - break; - } - case MESSAGE_TYPES.awareness: { - applyAwarenessUpdate(this.awareness, readVarUint8Array(decoder), null); - break; - } - } - } - - notify(listener: (message: Uint8Array) => void) { - this.listeners.add(listener); - return () => { - this.listeners.delete(listener); - }; - } - - private syncMessageHandler(update: Uint8Array) { - const encoder = createTypedEncoder("sync"); - writeUpdate(encoder, update); - this._notify(toUint8Array(encoder)); - } - - private awarenessChangeHandler({ - added, - updated, - removed, - }: { - added: number[]; - updated: number[]; - removed: number[]; - }) { - const changed = [...added, ...updated, ...removed]; - const encoder = createTypedEncoder("awareness"); - const update = encodeAwarenessUpdate( - this.awareness, - changed, - this.awareness.states - ); - writeVarUint8Array(encoder, update); - 
this._notify(toUint8Array(encoder)); - } - - private _notify(message: Uint8Array) { - // Use for...of for better performance with large listener sets - for (const subscriber of this.listeners) { - try { - subscriber(message); - } catch (error) { - console.error("Error notifying subscriber:", error); - // Remove faulty subscriber to prevent future errors - this.listeners.delete(subscriber); - } - } - } -} - -export function setupWSConnection(ws: WebSocket, doc: WSSharedDoc) { - // Send initial sync - { - const encoder = createTypedEncoder("sync"); - writeSyncStep1(encoder, doc); - ws.send(toUint8Array(encoder)); - } - - // Send awareness states - { - const states = doc.awareness.getStates(); - if (states.size > 0) { - const encoder = createTypedEncoder("awareness"); - const update = encodeAwarenessUpdate( - doc.awareness, - Array.from(states.keys()) - ); - writeVarUint8Array(encoder, update); - ws.send(toUint8Array(encoder)); - } - } -} diff --git a/services/grida-canvas-document-worker-cf/src/room.ts b/services/grida-canvas-document-worker-cf/src/room.ts new file mode 100644 index 000000000..e5c2f68cf --- /dev/null +++ b/services/grida-canvas-document-worker-cf/src/room.ts @@ -0,0 +1,361 @@ +/** + * @module room + * + * SyncRoom — the Cloudflare Durable Object that owns a single document. + * + * One SyncRoom instance per document/room. 
It: + * - Holds the canonical document state in memory + * - Processes client pushes (validate → apply → ack → broadcast) + * - Handles WebSocket lifecycle with hibernation support + * - Persists state to embedded SQLite via SyncStorage + * - Relays presence (volatile, not persisted) + */ + +import type { + ClientMessage, + ServerMessage, + DocumentDiff, + PresenceState, + NodeId, + SerializedNode, +} from "@grida/canvas-sync"; +import { + DocumentClock, + applyDiff, + validateDiff, + type DocumentState, +} from "@grida/canvas-sync"; +import { SyncStorage } from "./storage"; + +// --------------------------------------------------------------------------- +// Session metadata (attached to WebSocket via tags) +// --------------------------------------------------------------------------- + +const SESSION_TAG_PREFIX = "session:"; + +interface SessionState { + schemaVersion?: string; + presence?: PresenceState; +} + +// --------------------------------------------------------------------------- +// G1DO — the Durable Object class +// --------------------------------------------------------------------------- + +export class G1DO implements DurableObject { + private readonly state: DurableObjectState; + private storage!: SyncStorage; + private clock!: DocumentClock; + private canonical!: DocumentState; + + /** Per-session ephemeral state (keyed by session ID). 
*/ + private sessions = new Map(); + + private initialized = false; + + constructor(state: DurableObjectState, _env: Env) { + this.state = state; + // Block all requests until initialization is done + this.state.blockConcurrencyWhile(async () => { + this._initialize(); + }); + } + + private _initialize(): void { + if (this.initialized) return; + + this.storage = new SyncStorage(this.state.storage.sql); + const stored = this.storage.getFullState(); + + this.canonical = { + nodes: stored.nodes, + scenes: stored.scenes, + }; + this.clock = new DocumentClock(stored.clock); + this.initialized = true; + + // Recover sessions from hibernated WebSockets + for (const ws of this.state.getWebSockets()) { + const tags = this.state.getTags(ws); + const sessionTag = tags.find((t) => t.startsWith(SESSION_TAG_PREFIX)); + if (sessionTag) { + const sessionId = sessionTag.slice(SESSION_TAG_PREFIX.length); + if (!this.sessions.has(sessionId)) { + this.sessions.set(sessionId, {}); + } + } + } + } + + // ------------------------------------------------------------------------- + // HTTP handler (WebSocket upgrade) + // ------------------------------------------------------------------------- + + async fetch(request: Request): Promise { + const url = new URL(request.url); + + // Health check + if (url.pathname === "/health") { + return new Response("ok", { status: 200 }); + } + + // WebSocket upgrade + if (request.headers.get("Upgrade") === "websocket") { + return this._handleWebSocketUpgrade(request); + } + + return new Response("Expected WebSocket", { status: 426 }); + } + + private _handleWebSocketUpgrade(_request: Request): Response { + const pair = new WebSocketPair(); + const [client, server] = [pair[0], pair[1]]; + + // Generate a unique session ID + const sessionId = crypto.randomUUID(); + + // Accept with hibernation support and tag with session ID + this.state.acceptWebSocket(server, [SESSION_TAG_PREFIX + sessionId]); + + this.sessions.set(sessionId, {}); + + return new 
Response(null, { status: 101, webSocket: client }); + } + + // ------------------------------------------------------------------------- + // Hibernatable WebSocket handlers + // ------------------------------------------------------------------------- + + async webSocketMessage( + ws: WebSocket, + message: string | ArrayBuffer + ): Promise { + if (typeof message !== "string") return; + + let msg: ClientMessage; + try { + msg = JSON.parse(message) as ClientMessage; + } catch { + this._send(ws, { + type: "error", + code: "INVALID_JSON", + message: "Could not parse message as JSON", + }); + return; + } + + const sessionId = this._getSessionId(ws); + if (!sessionId) return; + + switch (msg.type) { + case "connect": + this._handleConnect(ws, sessionId, msg); + break; + case "push": + this._handlePush(ws, sessionId, msg); + break; + case "ping": + this._send(ws, { type: "pong" }); + break; + case "presence_update": + this._handlePresenceUpdate(sessionId, msg.presence); + break; + } + } + + async webSocketClose( + ws: WebSocket, + _code: number, + _reason: string, + _wasClean: boolean + ): Promise { + const sessionId = this._getSessionId(ws); + if (sessionId) { + this.sessions.delete(sessionId); + // Broadcast updated presence (peer left) + this._broadcastPresence(); + } + } + + async webSocketError(ws: WebSocket, _error: unknown): Promise { + const sessionId = this._getSessionId(ws); + if (sessionId) { + this.sessions.delete(sessionId); + } + ws.close(1011, "WebSocket error"); + } + + // ------------------------------------------------------------------------- + // Protocol handlers + // ------------------------------------------------------------------------- + + private _handleConnect( + ws: WebSocket, + sessionId: string, + msg: { schema: string; lastClock: number } + ): void { + const session = this.sessions.get(sessionId); + if (session) { + session.schemaVersion = msg.schema; + } + + if (msg.lastClock === 0 || msg.lastClock < this.clock.value) { + // Client needs 
a full state or delta + const delta = + msg.lastClock > 0 ? this.storage.getDelta(msg.lastClock) : null; + + if (delta && msg.lastClock > 0) { + // Incremental catch-up + this._send(ws, { + type: "connect_ok", + clock: this.clock.value, + diff: delta, + scenes: this.canonical.scenes, + }); + } else { + // Full state + this._send(ws, { + type: "connect_ok", + clock: this.clock.value, + state: this.canonical.nodes, + scenes: this.canonical.scenes, + }); + } + } else { + // Client is up to date + this._send(ws, { + type: "connect_ok", + clock: this.clock.value, + }); + } + } + + private _handlePush( + ws: WebSocket, + sessionId: string, + msg: { clientClock: number; diff: DocumentDiff; presence?: PresenceState } + ): void { + // Validate the diff + const validation = validateDiff(this.canonical, msg.diff); + + if (!validation.valid) { + this._send(ws, { + type: "push_ok", + serverClock: this.clock.value, + clientClock: msg.clientClock, + result: "discard", + }); + return; + } + + // Apply the diff + const newClock = this.clock.tick(); + this.canonical = applyDiff(this.canonical, msg.diff); + + // Persist to SQLite + this.storage.applyDiff(msg.diff, newClock); + + // Ack the pusher + this._send(ws, { + type: "push_ok", + serverClock: newClock, + clientClock: msg.clientClock, + result: "commit", + }); + + // Broadcast to all other sessions + this._broadcastExcept(sessionId, { + type: "patch", + serverClock: newClock, + diff: msg.diff, + }); + + // Handle presence piggy-backed on push + if (msg.presence) { + this._handlePresenceUpdate(sessionId, msg.presence); + } + } + + private _handlePresenceUpdate( + sessionId: string, + presence: PresenceState + ): void { + const session = this.sessions.get(sessionId); + if (session) { + session.presence = presence; + } + this._broadcastPresence(); + } + + // ------------------------------------------------------------------------- + // Broadcasting + // ------------------------------------------------------------------------- + + 
/** Send a message to all connected WebSockets except the given session. */ + private _broadcastExcept(excludeSessionId: string, msg: ServerMessage): void { + const payload = JSON.stringify(msg); + for (const ws of this.state.getWebSockets()) { + const sid = this._getSessionId(ws); + if (sid && sid !== excludeSessionId) { + try { + ws.send(payload); + } catch { + // WebSocket may have closed between getWebSockets() and send() + } + } + } + } + + /** Broadcast current presence state to all sessions. */ + private _broadcastPresence(): void { + // Build per-session presence views (each session gets everyone else's presence) + const allPresence: Record = {}; + for (const [sid, session] of this.sessions) { + if (session.presence) { + allPresence[sid] = session.presence; + } + } + + for (const ws of this.state.getWebSockets()) { + const sid = this._getSessionId(ws); + if (!sid) continue; + + // Build peers map excluding self + const peers: Record = {}; + for (const [peerId, presence] of Object.entries(allPresence)) { + if (peerId !== sid) { + peers[peerId] = presence; + } + } + + // Only send if there are peers with presence + if (Object.keys(peers).length > 0) { + try { + ws.send(JSON.stringify({ type: "presence", peers })); + } catch { + // WebSocket may have closed + } + } + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** Extract the session ID from a WebSocket's tags. */ + private _getSessionId(ws: WebSocket): string | null { + const tags = this.state.getTags(ws); + const tag = tags.find((t) => t.startsWith(SESSION_TAG_PREFIX)); + return tag ? tag.slice(SESSION_TAG_PREFIX.length) : null; + } + + /** Send a typed message to a WebSocket. 
*/ + private _send(ws: WebSocket, msg: ServerMessage): void { + try { + ws.send(JSON.stringify(msg)); + } catch { + // WebSocket may have closed + } + } +} diff --git a/services/grida-canvas-document-worker-cf/src/storage.ts b/services/grida-canvas-document-worker-cf/src/storage.ts new file mode 100644 index 000000000..cbbf31281 --- /dev/null +++ b/services/grida-canvas-document-worker-cf/src/storage.ts @@ -0,0 +1,283 @@ +/** + * @module storage + * + * SQLite storage adapter for the SyncRoom Durable Object. + * + * Uses the DO's embedded SQLite database (`state.storage.sql`) for persistence. + * Three tables: + * - `records` — current node state (node_id → serialized JSON, clock) + * - `tombstones` — deleted node IDs (for reconnecting clients to detect deletes) + * - `meta` — key-value metadata (document clock, schema version, scenes) + * + * All writes happen synchronously via `sql.exec()` inside the DO's single-threaded model. + */ + +import type { + NodeId, + SerializedNode, + DocumentDiff, + NodeOp, +} from "@grida/canvas-sync"; + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +export interface StoredDocument { + nodes: Record; + scenes: NodeId[]; + clock: number; +} + +// --------------------------------------------------------------------------- +// SyncStorage +// --------------------------------------------------------------------------- + +export class SyncStorage { + private readonly sql: SqlStorage; + + constructor(sql: SqlStorage) { + this.sql = sql; + this._ensureSchema(); + } + + // ------------------------------------------------------------------------- + // Schema + // ------------------------------------------------------------------------- + + private _ensureSchema(): void { + this.sql.exec(` + CREATE TABLE IF NOT EXISTS records ( + node_id TEXT PRIMARY KEY, + data TEXT NOT NULL, + clock INTEGER NOT NULL DEFAULT 0 + ); + 
CREATE TABLE IF NOT EXISTS tombstones ( + node_id TEXT PRIMARY KEY, + clock INTEGER NOT NULL DEFAULT 0 + ); + CREATE TABLE IF NOT EXISTS meta ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); + `); + } + + // ------------------------------------------------------------------------- + // Full state load (on DO startup / hibernation wake) + // ------------------------------------------------------------------------- + + /** Load the entire document state from SQLite. */ + getFullState(): StoredDocument { + const nodes: Record = {}; + + const rows = this.sql.exec("SELECT node_id, data FROM records").toArray(); + for (const row of rows) { + const id = row.node_id as string; + nodes[id] = JSON.parse(row.data as string) as SerializedNode; + } + + const clock = this._getMetaInt("clock", 0); + const scenes = this._getMetaJson("scenes", []); + + return { nodes, scenes, clock }; + } + + // ------------------------------------------------------------------------- + // Diff application + // ------------------------------------------------------------------------- + + /** Apply a diff and persist to SQLite. Returns the new clock value. 
*/ + applyDiff(diff: DocumentDiff, clock: number): void { + // Apply node operations + if (diff.nodes) { + for (const [id, op] of Object.entries(diff.nodes)) { + this._applyNodeOp(id, op, clock); + } + } + + // Apply scene operations + if (diff.scenes) { + let scenes = this._getMetaJson("scenes", []); + for (const sceneOp of diff.scenes) { + switch (sceneOp.op) { + case "add": + if (!scenes.includes(sceneOp.id)) { + scenes.push(sceneOp.id); + } + break; + case "remove": + scenes = scenes.filter((id) => id !== sceneOp.id); + break; + case "reorder": + scenes = [...sceneOp.ids]; + break; + } + } + this._setMetaJson("scenes", scenes); + } + + // Update clock + this._setMetaInt("clock", clock); + + // Prune tombstones if too many + this._pruneTombstones(5000); + } + + private _applyNodeOp(id: NodeId, op: NodeOp, clock: number): void { + switch (op.op) { + case "put": { + const data = JSON.stringify(op.node); + this.sql.exec( + "INSERT OR REPLACE INTO records (node_id, data, clock) VALUES (?, ?, ?)", + id, + data, + clock + ); + // Remove from tombstones if it was previously deleted + this.sql.exec("DELETE FROM tombstones WHERE node_id = ?", id); + break; + } + case "patch": { + // Read current, apply patches, write back + const row = this.sql + .exec("SELECT data FROM records WHERE node_id = ?", id) + .toArray(); + if (row.length === 0) break; // Skip if node doesn't exist + const node = JSON.parse(row[0].data as string) as Record< + string, + unknown + >; + for (const [key, fieldOp] of Object.entries(op.fields)) { + switch (fieldOp.op) { + case "put": + node[key] = fieldOp.value; + break; + case "delete": + delete node[key]; + break; + } + } + this.sql.exec( + "UPDATE records SET data = ?, clock = ? 
WHERE node_id = ?", + JSON.stringify(node), + clock, + id + ); + break; + } + case "remove": { + this.sql.exec("DELETE FROM records WHERE node_id = ?", id); + this.sql.exec( + "INSERT OR REPLACE INTO tombstones (node_id, clock) VALUES (?, ?)", + id, + clock + ); + break; + } + } + } + + // ------------------------------------------------------------------------- + // Delta queries (for reconnecting clients) + // ------------------------------------------------------------------------- + + /** + * Get all changes since a given clock. + * Returns a diff containing: + * - Nodes that were added or modified since `sinceClock` + * - Nodes that were deleted since `sinceClock` (as remove ops) + * + * Returns null if the clock is current (no changes). + */ + getDelta(sinceClock: number): DocumentDiff | null { + const currentClock = this._getMetaInt("clock", 0); + if (sinceClock >= currentClock) return null; + + const nodeOps: Record = {}; + let hasOps = false; + + // Changed/added records + const changed = this.sql + .exec("SELECT node_id, data FROM records WHERE clock > ?", sinceClock) + .toArray(); + for (const row of changed) { + const id = row.node_id as string; + const node = JSON.parse(row.data as string) as SerializedNode; + nodeOps[id] = { op: "put", node }; + hasOps = true; + } + + // Deleted records (tombstones) + const deleted = this.sql + .exec("SELECT node_id FROM tombstones WHERE clock > ?", sinceClock) + .toArray(); + for (const row of deleted) { + const id = row.node_id as string; + nodeOps[id] = { op: "remove" }; + hasOps = true; + } + + if (!hasOps) return null; + return { nodes: nodeOps }; + } + + // ------------------------------------------------------------------------- + // Meta helpers + // ------------------------------------------------------------------------- + + private _getMetaInt(key: string, defaultValue: number): number { + const rows = this.sql + .exec("SELECT value FROM meta WHERE key = ?", key) + .toArray(); + if (rows.length === 0) 
return defaultValue; + return parseInt(rows[0].value as string, 10); + } + + private _setMetaInt(key: string, value: number): void { + this.sql.exec( + "INSERT OR REPLACE INTO meta (key, value) VALUES (?, ?)", + key, + value.toString() + ); + } + + private _getMetaJson(key: string, defaultValue: T): T { + const rows = this.sql + .exec("SELECT value FROM meta WHERE key = ?", key) + .toArray(); + if (rows.length === 0) return defaultValue; + try { + return JSON.parse(rows[0].value as string) as T; + } catch { + return defaultValue; + } + } + + private _setMetaJson(key: string, value: unknown): void { + this.sql.exec( + "INSERT OR REPLACE INTO meta (key, value) VALUES (?, ?)", + key, + JSON.stringify(value) + ); + } + + // ------------------------------------------------------------------------- + // Tombstone pruning + // ------------------------------------------------------------------------- + + private _pruneTombstones(maxCount: number): void { + const countRows = this.sql + .exec("SELECT COUNT(*) as cnt FROM tombstones") + .toArray(); + const count = (countRows[0]?.cnt as number) ?? 0; + if (count <= maxCount) return; + + // Delete the oldest tombstones beyond the limit + const toDelete = count - maxCount; + this.sql.exec( + "DELETE FROM tombstones WHERE node_id IN (SELECT node_id FROM tombstones ORDER BY clock ASC LIMIT ?)", + toDelete + ); + } +} diff --git a/services/grida-canvas-document-worker-cf/tsconfig.json b/services/grida-canvas-document-worker-cf/tsconfig.json index a3f664611..b58fab82f 100644 --- a/services/grida-canvas-document-worker-cf/tsconfig.json +++ b/services/grida-canvas-document-worker-cf/tsconfig.json @@ -1,40 +1,18 @@ { "compilerOptions": { - /* Visit https://aka.ms/tsconfig.json to read more about this file */ - - /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. 
*/ "target": "es2021", - /* Specify a set of bundled library declaration files that describe the target runtime environment. */ "lib": ["es2021"], - /* Specify what JSX code is generated. */ "jsx": "react-jsx", - - /* Specify what module code is generated. */ "module": "es2022", - /* Specify how TypeScript looks up a file from a given module specifier. */ - "moduleResolution": "node", - /* Enable importing .json files */ + "moduleResolution": "bundler", "resolveJsonModule": true, - - /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ "allowJs": true, - /* Enable error reporting in type-checked JavaScript files. */ "checkJs": false, - - /* Disable emitting files from a compilation. */ "noEmit": true, - - /* Ensure that each file can be safely transpiled without relying on other imports. */ "isolatedModules": true, - /* Allow 'import x from y' when a module doesn't have a default export. */ "allowSyntheticDefaultImports": true, - /* Ensure that casing is correct in imports. */ "forceConsistentCasingInFileNames": true, - - /* Enable all strict type-checking options. */ "strict": true, - - /* Skip type checking all .d.ts files. */ "skipLibCheck": true, "types": ["./worker-configuration.d.ts"] } From 618259e6c2624a887c0208a40bfc990376d7914c Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 01:16:24 +0900 Subject: [PATCH 3/8] refactor: remove Yjs integration and related patches - Deleted y-document.ts and y-patches.ts files, removing Yjs dependencies. - Updated package.json to remove Yjs and related packages. - Introduced @grida/canvas-sync as a new dependency for document synchronization. - Refactored integration tests to utilize a new makeNode helper function for creating nodes. - Enhanced document worker to include CORS support and improved WebSocket message handling. - Implemented atomic write operations in SyncStorage to ensure data integrity during diff applications. 
--- .../playground/playground.tsx | 8 +- .../headless/sync-integration.test.ts | 465 ++++++++++++++++++ .../plugins/sync/document-sync.ts | 130 +++++ editor/grida-canvas/plugins/sync/index.ts | 80 +++ .../plugins/sync/presence-sync.ts | 201 ++++++++ editor/grida-canvas/plugins/sync/serialize.ts | 106 ++++ .../plugins/yjs/__tests__/y-patches.test.ts | 230 --------- editor/grida-canvas/plugins/yjs/index.ts | 56 --- .../grida-canvas/plugins/yjs/y-awareness.ts | 177 ------- editor/grida-canvas/plugins/yjs/y-document.ts | 165 ------- editor/grida-canvas/plugins/yjs/y-patches.ts | 330 ------------- editor/package.json | 5 +- .../grida-canvas-sync/__tests__/helpers.ts | 5 +- .../__tests__/integration.test.ts | 14 +- pnpm-lock.yaml | 120 +---- .../src/index.ts | 2 + .../src/room.ts | 64 ++- .../src/storage.ts | 75 +-- 18 files changed, 1101 insertions(+), 1132 deletions(-) create mode 100644 editor/grida-canvas/__tests__/headless/sync-integration.test.ts create mode 100644 editor/grida-canvas/plugins/sync/document-sync.ts create mode 100644 editor/grida-canvas/plugins/sync/index.ts create mode 100644 editor/grida-canvas/plugins/sync/presence-sync.ts create mode 100644 editor/grida-canvas/plugins/sync/serialize.ts delete mode 100644 editor/grida-canvas/plugins/yjs/__tests__/y-patches.test.ts delete mode 100644 editor/grida-canvas/plugins/yjs/index.ts delete mode 100644 editor/grida-canvas/plugins/yjs/y-awareness.ts delete mode 100644 editor/grida-canvas/plugins/yjs/y-document.ts delete mode 100644 editor/grida-canvas/plugins/yjs/y-patches.ts diff --git a/editor/grida-canvas-hosted/playground/playground.tsx b/editor/grida-canvas-hosted/playground/playground.tsx index cf7a9a993..abcaa5925 100644 --- a/editor/grida-canvas-hosted/playground/playground.tsx +++ b/editor/grida-canvas-hosted/playground/playground.tsx @@ -98,7 +98,7 @@ import { DarwinSidebarHeaderDragArea } from "../../host/desktop"; import { editor } from "@/grida-canvas"; import useDisableSwipeBack from 
"@/grida-canvas-react/viewport/hooks/use-disable-browser-swipe-back"; import { WindowGlobalCurrentEditorProvider } from "@/grida-canvas-react/devtools/global-api-host"; -import { EditorYSyncPlugin } from "@/grida-canvas/plugins/yjs"; +import { EditorSyncPlugin } from "@/grida-canvas/plugins/sync"; import { Editor } from "@/grida-canvas/editor"; import { PlayerAvatar } from "@/components/multiplayer/avatar"; import grida from "@grida/schema"; @@ -276,7 +276,7 @@ const get_or_create_demo_session_cursor_id = (): string => { }; function useSyncMultiplayerCursors(editor: Editor, room_id?: string) { - const pluginRef = useRef(null); + const pluginRef = useRef(null); useEffect(() => { if (!room_id) return; @@ -284,7 +284,7 @@ function useSyncMultiplayerCursors(editor: Editor, room_id?: string) { const cursorId = get_or_create_demo_session_cursor_id(); if (!pluginRef.current) { - pluginRef.current = new EditorYSyncPlugin(editor, room_id, { + pluginRef.current = new EditorSyncPlugin(editor, room_id, { cursor_id: cursorId, palette: colors[randomcolorname({ exclude: neutral_colors })], }); @@ -327,7 +327,7 @@ export type CanvasPlaygroundProps = { export default function CanvasPlayground({ document = distro.playground.EMPTY_DOCUMENT, - backend = "dom", + backend = "canvas", templates, src, room_id, diff --git a/editor/grida-canvas/__tests__/headless/sync-integration.test.ts b/editor/grida-canvas/__tests__/headless/sync-integration.test.ts new file mode 100644 index 000000000..53f656e5e --- /dev/null +++ b/editor/grida-canvas/__tests__/headless/sync-integration.test.ts @@ -0,0 +1,465 @@ +/** + * End-to-end sync integration tests with headless editors. 
+ * + * Two (or three) real Editor instances wired through MockServer + + * DocumentSyncAdapter — testing the full pipeline: + * + * Editor dispatch → Zustand store → documentToState → computeDiff + * → SyncClient → MockServer → broadcast → SyncClient → stateToDocument + * → applyDocumentPatches → Editor state + * + * This catches bugs that unit-level sync tests miss: serialization + * round-trip issues, mutex feedback loops, __doc_meta__ handling, etc. + */ + +import { describe, it, expect, afterEach } from "vitest"; +import type grida from "@grida/schema"; +import { Editor } from "@/grida-canvas/editor"; +import { createHeadlessEditor } from "@/grida-canvas/__tests__/utils"; +import { createDocumentWithRects } from "@/grida-canvas/__tests__/utils/fixtures"; +import { rectNode } from "@/grida-canvas/__tests__/utils/factories"; +import { + SyncClient, + computeDiff, + type DocumentState, + type ClientMessage, + type ServerMessage, + type DocumentDiff, + DocumentClock, + applyDiff, + validateDiff, +} from "@grida/canvas-sync"; +import type { + ISyncTransport, + TransportStatus, + PresenceState, +} from "@grida/canvas-sync"; +import { DocumentSyncAdapter } from "@/grida-canvas/plugins/sync/document-sync"; +import { + documentToState, + stateToDocument, +} from "@/grida-canvas/plugins/sync/serialize"; + +// --------------------------------------------------------------------------- +// MockTransport (same pattern as the sync package tests) +// --------------------------------------------------------------------------- + +class MockTransport implements ISyncTransport { + status: TransportStatus = "disconnected"; + sent: ClientMessage[] = []; + private _messageHandlers = new Set<(msg: ServerMessage) => void>(); + private _statusHandlers = new Set<(status: TransportStatus) => void>(); + _onClientMessage: ((msg: ClientMessage) => void) | null = null; + + send(message: ClientMessage): void { + this.sent.push(message); + this._onClientMessage?.(message); + } + 
onMessage(handler: (msg: ServerMessage) => void): () => void { + this._messageHandlers.add(handler); + return () => this._messageHandlers.delete(handler); + } + onStatusChange(handler: (status: TransportStatus) => void): () => void { + this._statusHandlers.add(handler); + return () => this._statusHandlers.delete(handler); + } + connect(): void {} + disconnect(): void { + this._setStatus("disconnected"); + } + simulateConnected(): void { + this._setStatus("connected"); + } + simulateDisconnected(): void { + this._setStatus("disconnected"); + } + deliver(msg: ServerMessage): void { + for (const h of this._messageHandlers) h(msg); + } + private _setStatus(s: TransportStatus): void { + if (this.status === s) return; + this.status = s; + for (const h of this._statusHandlers) h(s); + } +} + +// --------------------------------------------------------------------------- +// MockServer (mirrors SyncRoom logic) +// --------------------------------------------------------------------------- + +interface MockSession { + id: string; + transport: MockTransport; + presence?: PresenceState; +} + +class MockServer { + canonical: DocumentState; + clock: DocumentClock; + private _sessions = new Map(); + + constructor(initialState: DocumentState = { nodes: {}, scenes: [] }) { + this.canonical = initialState; + this.clock = new DocumentClock(0); + } + + addSession(sessionId: string, transport: MockTransport): void { + const session: MockSession = { id: sessionId, transport }; + this._sessions.set(sessionId, session); + transport._onClientMessage = (msg) => + this._handleClientMessage(sessionId, msg); + } + + connectSession(sessionId: string): void { + const session = this._sessions.get(sessionId); + if (!session) throw new Error(`Unknown session: ${sessionId}`); + session.transport.simulateConnected(); + } + + /** Find the session ID for a given transport instance. 
*/ + findSessionIdByTransport(transport: MockTransport): string | undefined { + for (const [id, session] of this._sessions) { + if (session.transport === transport) return id; + } + return undefined; + } + + private _handleClientMessage(sessionId: string, msg: ClientMessage): void { + const session = this._sessions.get(sessionId); + if (!session) return; + switch (msg.type) { + case "connect": + this._handleConnect(session, msg); + break; + case "push": + this._handlePush(session, msg); + break; + case "ping": + session.transport.deliver({ type: "pong" }); + break; + } + } + + private _handleConnect( + session: MockSession, + msg: { schema: string; lastClock: number } + ): void { + session.transport.deliver({ + type: "connect_ok", + clock: this.clock.value, + state: this.canonical.nodes, + scenes: this.canonical.scenes, + }); + } + + private _handlePush( + session: MockSession, + msg: { clientClock: number; diff: DocumentDiff; presence?: PresenceState } + ): void { + const validation = validateDiff(this.canonical, msg.diff); + if (!validation.valid) { + session.transport.deliver({ + type: "push_ok", + serverClock: this.clock.value, + clientClock: msg.clientClock, + result: "discard", + }); + return; + } + const newClock = this.clock.tick(); + this.canonical = applyDiff(this.canonical, msg.diff); + session.transport.deliver({ + type: "push_ok", + serverClock: newClock, + clientClock: msg.clientClock, + result: "commit", + }); + for (const [id, other] of this._sessions) { + if (id === session.id) continue; + if (other.transport.status !== "connected") continue; + other.transport.deliver({ + type: "patch", + serverClock: newClock, + diff: msg.diff, + }); + } + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +interface SyncedEditor { + editor: Editor; + client: SyncClient; + transport: MockTransport; + adapter: DocumentSyncAdapter; +} + 
/** Build a headless editor wired to the mock server via a fresh transport/client/adapter. */
function createSyncedEditor(
  server: MockServer,
  sessionId: string,
  doc: grida.program.document.Document
): SyncedEditor {
  const editor = createHeadlessEditor({ document: doc });
  const transport = new MockTransport();
  const initialState = documentToState(editor.doc.state.document);
  const client = new SyncClient({
    schema: "test",
    transport,
    initialState,
    lastClock: 0,
    pushInterval: -1, // synchronous for deterministic tests
  });
  const adapter = new DocumentSyncAdapter(editor, client);
  server.addSession(sessionId, transport);
  return { editor, client, transport, adapter };
}

/** Mark the synced editor's transport as connected on the server side. */
function connectAndSync(server: MockServer, se: SyncedEditor): void {
  const sessionId = server.findSessionIdByTransport(se.transport);
  if (!sessionId) throw new Error("Transport not registered with server");
  server.connectSession(sessionId);
}

/**
 * Flush a synced editor's pending changes to the server.
 * With pushInterval: -1, pushDiff is synchronous, but the throttled
 * editor subscription fires asynchronously. We force it by calling
 * computeDiff + pushDiff manually.
 */
function flushEditorToServer(se: SyncedEditor): void {
  const currentState = documentToState(se.editor.doc.state.document);
  const diff = computeDiff(se.adapter.lastSyncedState, currentState);
  if (diff) {
    se.adapter.lastSyncedState = currentState;
    se.client.pushDiff(diff);
  }
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

describe("Sync integration with headless editors", () => {
  // Editors created per-test; disposed in afterEach to avoid leaking subscriptions.
  const editors: Editor[] = [];

  afterEach(() => {
    for (const ed of editors) {
      try {
        ed.dispose();
      } catch {}
    }
    editors.length = 0;
  });

  // -----------------------------------------------------------------------
  // serialize round-trip
  // -----------------------------------------------------------------------

  describe("serialize round-trip", () => {
    it("documentToState → stateToDocument preserves nodes", () => {
      const doc = createDocumentWithRects(3);
      const state = documentToState(doc);
      const restored = stateToDocument(state);

      expect(Object.keys(restored.nodes).sort()).toEqual(
        Object.keys(doc.nodes).sort()
      );
      // Check a specific node
      expect(restored.nodes["rect-0"]).toEqual(doc.nodes["rect-0"]);
    });

    it("preserves scenes_ref", () => {
      const doc = createDocumentWithRects(2);
      const state = documentToState(doc);
      const restored = stateToDocument(state);
      expect(restored.scenes_ref).toEqual(doc.scenes_ref);
    });

    it("preserves links", () => {
      const doc = createDocumentWithRects(2);
      const state = documentToState(doc);
      const restored = stateToDocument(state);
      expect(restored.links).toEqual(doc.links);
    });

    it("preserves images and bitmaps", () => {
      const doc = createDocumentWithRects(1);
      doc.images = {
        "img-1": {
          type: "image/png",
          url: "test.png",
          width: 100,
          height: 100,
          bytes: 1000,
        },
      };
      const state = documentToState(doc);
      const restored =
 stateToDocument(state);
      expect(restored.images).toEqual(doc.images);
    });

    it("__doc_meta__ is excluded from restored nodes", () => {
      const doc = createDocumentWithRects(1);
      const state = documentToState(doc);
      // The synthetic meta node exists on the wire format...
      expect(state.nodes["__doc_meta__"]).toBeDefined();
      const restored = stateToDocument(state);
      // ...but must never leak back into the editor document.
      expect(restored.nodes["__doc_meta__"]).toBeUndefined();
    });
  });

  // -----------------------------------------------------------------------
  // Two-editor sync
  // -----------------------------------------------------------------------

  describe("two-editor sync", () => {
    it("editor A creates a node, editor B receives it", () => {
      const doc = createDocumentWithRects(1);
      const server = new MockServer(documentToState(doc));

      const A = createSyncedEditor(server, "A", doc);
      const B = createSyncedEditor(server, "B", doc);
      editors.push(A.editor, B.editor);

      connectAndSync(server, A);
      connectAndSync(server, B);

      // A adds a new rectangle via dispatch
      A.editor.doc.dispatch({
        type: "insert",
        id: "new-rect",
        prototype: rectNode("new-rect", {
          x: 200,
          y: 200,
          width: 150,
          height: 75,
        }),
        target: "scene",
      });

      // Flush A's changes to the server
      flushEditorToServer(A);

      // B should now have the new node
      const bNodes = B.editor.doc.state.document.nodes;
      expect(bNodes["new-rect"]).toBeDefined();
      expect(bNodes["new-rect"].type).toBe("rectangle");
    });

    it("both editors modify different nodes — both changes survive", () => {
      const doc = createDocumentWithRects(2);
      const server = new MockServer(documentToState(doc));

      const A = createSyncedEditor(server, "A", doc);
      const B = createSyncedEditor(server, "B", doc);
      editors.push(A.editor, B.editor);

      connectAndSync(server, A);
      connectAndSync(server, B);

      // A modifies rect-0
      A.editor.doc.dispatch({
        type: "node/change/*",
        node_id: "rect-0",
        name: "Renamed by A",
      });
      flushEditorToServer(A);

      // B modifies rect-1
      B.editor.doc.dispatch({
        type: "node/change/*",
        node_id: "rect-1",
        name: "Renamed by B",
      });
      flushEditorToServer(B);

      // Both changes should be present on both editors
      expect(A.editor.doc.state.document.nodes["rect-0"].name).toBe(
        "Renamed by A"
      );
      expect(A.editor.doc.state.document.nodes["rect-1"].name).toBe(
        "Renamed by B"
      );
      expect(B.editor.doc.state.document.nodes["rect-0"].name).toBe(
        "Renamed by A"
      );
      expect(B.editor.doc.state.document.nodes["rect-1"].name).toBe(
        "Renamed by B"
      );
    });

    it("editor A deletes a node, editor B sees it disappear", () => {
      const doc = createDocumentWithRects(2);
      const server = new MockServer(documentToState(doc));

      const A = createSyncedEditor(server, "A", doc);
      const B = createSyncedEditor(server, "B", doc);
      editors.push(A.editor, B.editor);

      connectAndSync(server, A);
      connectAndSync(server, B);

      // A deletes rect-1
      A.editor.doc.delete(["rect-1"]);
      flushEditorToServer(A);

      // B should no longer have rect-1
      expect(B.editor.doc.state.document.nodes["rect-1"]).toBeUndefined();
      // But rect-0 should still exist
      expect(B.editor.doc.state.document.nodes["rect-0"]).toBeDefined();
    });

    it("server state is consistent with both editors", () => {
      const doc = createDocumentWithRects(3);
      const server = new MockServer(documentToState(doc));

      const A = createSyncedEditor(server, "A", doc);
      const B = createSyncedEditor(server, "B", doc);
      editors.push(A.editor, B.editor);

      connectAndSync(server, A);
      connectAndSync(server, B);

      // A renames rect-0
      A.editor.doc.dispatch({
        type: "node/change/*",
        node_id: "rect-0",
        name: "AAA",
      });
      flushEditorToServer(A);

      // B renames rect-2
      B.editor.doc.dispatch({
        type: "node/change/*",
        node_id: "rect-2",
        name: "BBB",
      });
      flushEditorToServer(B);

      // A deletes rect-1
      A.editor.doc.delete(["rect-1"]);
      flushEditorToServer(A);

      // All three should agree
      const aNodeIds = Object.keys(A.editor.doc.state.document.nodes).sort();
      const bNodeIds = Object.keys(B.editor.doc.state.document.nodes).sort();
      // Server nodes exclude __doc_meta__
      const serverNodeIds = Object.keys(server.canonical.nodes)
        .filter((id) => id !== "__doc_meta__")
        .sort();

      expect(aNodeIds).toEqual(bNodeIds);
      expect(aNodeIds).toEqual(expect.arrayContaining(serverNodeIds));

      // Verify specific values
      expect(A.editor.doc.state.document.nodes["rect-0"].name).toBe("AAA");
      expect(B.editor.doc.state.document.nodes["rect-0"].name).toBe("AAA");
      expect(A.editor.doc.state.document.nodes["rect-1"]).toBeUndefined();
      expect(B.editor.doc.state.document.nodes["rect-1"]).toBeUndefined();
      expect(A.editor.doc.state.document.nodes["rect-2"].name).toBe("BBB");
      expect(B.editor.doc.state.document.nodes["rect-2"].name).toBe("BBB");
    });
  });
});
diff --git a/editor/grida-canvas/plugins/sync/document-sync.ts b/editor/grida-canvas/plugins/sync/document-sync.ts
new file mode 100644
index 000000000..4a7d13ef7
--- /dev/null
+++ b/editor/grida-canvas/plugins/sync/document-sync.ts
@@ -0,0 +1,130 @@
/**
 * @module document-sync
 *
 * Bridges the SyncClient ↔ EditorDocumentStore.
 *
 * Local → Remote: subscribes to editor document changes, computes diffs,
 * pushes to the SyncClient.
 *
 * Remote → Local: listens for SyncClient state changes, converts back to
 * Immer patches, applies via editor.doc.applyDocumentPatches().
 */

import type { Editor, EditorDocumentStore } from "@/grida-canvas/editor";
import { editor } from "@/grida-canvas/editor.i";
import type grida from "@grida/schema";
import {
  SyncClient,
  computeDiff,
  type DocumentState,
} from "@grida/canvas-sync";
import { documentToState, stateToDocument } from "./serialize";

export class DocumentSyncAdapter {
  private _unsubscribeEditor: (() => void) | null = null;
  private _unsubscribeClient: (() => void) | null = null;

  /**
   * Mutex to prevent feedback loops:
   * local edit → push to sync → sync fires stateChange → apply to editor → editor fires subscription → ...
   */
  private readonly _mutex = editor.createMutex();

  /**
   * Last-seen editor transaction ID. Used to skip duplicate subscription fires.
   */
  private _lastTid: number = 0;

  /**
   * Last DocumentState we synced to the client. Used to compute diffs.
   * Public for test access (flushEditorToServer needs to read/write this).
   */
  lastSyncedState: DocumentState;

  constructor(
    private readonly _editor: Editor,
    private readonly _client: SyncClient
  ) {
    // Baseline snapshot — the first local diff is computed against this.
    this.lastSyncedState = documentToState(this._editor.doc.state.document);

    this._setupEditorToClient();
    this._setupClientToEditor();
  }

  // -----------------------------------------------------------------------
  // Local → Remote (editor changes → SyncClient push)
  // -----------------------------------------------------------------------

  private _setupEditorToClient(): void {
    this._unsubscribeEditor = this._editor.doc.subscribeWithSelector(
      (state) => state.document,
      editor.throttle(
        (
          store: EditorDocumentStore,
          _next: grida.program.document.Document,
          _prev: grida.program.document.Document
        ) => {
          if (store.locked) return;
          // Skip duplicate fires for the same transaction.
          if (this._lastTid === store.tid) return;
          this._lastTid = store.tid;

          this._mutex(() => {
            const currentState = documentToState(
              this._editor.doc.state.document
            );
            const diff = computeDiff(this.lastSyncedState, currentState);
            if (diff) {
              this.lastSyncedState = currentState;
              this._client.pushDiff(diff);
            }
          });
        },
        30, // 30ms throttle, same as old YJS plugin
        { trailing: true }
      )
    );
  }

  // -----------------------------------------------------------------------
  // Remote → Local (SyncClient state changes → editor)
  // -----------------------------------------------------------------------

  private _setupClientToEditor(): void {
    this._unsubscribeClient = this._client.on("stateChange", (newState) => {
      this._mutex(
        () => {
          // Convert sync state back to editor Document
          const newDoc = stateToDocument(newState);

          // Apply as a wholesale document replacement
          this._editor.doc.applyDocumentPatches([
            {
              op: "replace",
              path: ["document"],
              value: newDoc,
            },
          ]);

          // Update our tracked state so next local diff is correct
          this.lastSyncedState = newState;
        },
        () => {
          // Mutex locked — this stateChange was triggered by our own push.
          // Just update the tracked state.
          this.lastSyncedState = newState;
        }
      );
    });
  }

  // -----------------------------------------------------------------------
  // Cleanup
  // -----------------------------------------------------------------------

  destroy(): void {
    this._unsubscribeEditor?.();
    this._unsubscribeClient?.();
    this._unsubscribeEditor = null;
    this._unsubscribeClient = null;
  }
}
diff --git a/editor/grida-canvas/plugins/sync/index.ts b/editor/grida-canvas/plugins/sync/index.ts
new file mode 100644
index 000000000..15c9d1b0d
--- /dev/null
+++ b/editor/grida-canvas/plugins/sync/index.ts
@@ -0,0 +1,80 @@
/**
 * @module sync
 *
 * EditorSyncPlugin — replaces EditorYSyncPlugin.
 *
 * Wires a SyncClient (from @grida/canvas-sync) to the editor, handling
 * both document synchronization and cursor/presence relay.
 */

import type { Editor } from "@/grida-canvas/editor";
import type { editor } from "@/grida-canvas/editor.i";
import {
  SyncClient,
  WebSocketTransport,
  type DocumentState,
} from "@grida/canvas-sync";
import { documentToState } from "./serialize";
import { DocumentSyncAdapter } from "./document-sync";
import { PresenceSyncAdapter } from "./presence-sync";

// Room endpoint: local wrangler dev server in development, hosted edge in prod.
const SYNC_URL =
  process.env.NODE_ENV === "development"
    ? "ws://localhost:8787/room"
    : "wss://live.grida.co/room";

export class EditorSyncPlugin {
  public readonly client: SyncClient;
  private readonly _transport: WebSocketTransport;
  private readonly _documentSync: DocumentSyncAdapter;
  private readonly _presenceSync: PresenceSyncAdapter;

  constructor(
    private readonly _editor: Editor,
    private readonly roomId: string,
    private readonly cursor: {
      palette: editor.state.MultiplayerCursorColorPalette;
      cursor_id: string;
    }
  ) {
    console.log("sync::constructor", roomId);

    // Build initial state from editor's current document
    const initialState: DocumentState = documentToState(
      this._editor.doc.state.document
    );

    // Create transport and client
    this._transport = new WebSocketTransport({
      url: `${SYNC_URL}/${this.roomId}`,
      reconnectDelay: 1000,
      maxReconnectAttempts: 50,
    });

    this.client = new SyncClient({
      schema: "0.91.0-beta+20260311", // TODO: import from @grida/schema
      transport: this._transport,
      initialState,
      lastClock: 0, // TODO: load from OPFS sync-state.json
      pushInterval: 50,
    });

    // Wire up adapters
    this._documentSync = new DocumentSyncAdapter(this._editor, this.client);
    this._presenceSync = new PresenceSyncAdapter(
      this._editor,
      this.client,
      this.cursor
    );

    // Connect
    this.client.connect();
  }

  public destroy(): void {
    console.log("sync::destroy");
    this._documentSync.destroy();
    this._presenceSync.destroy();
    this.client.destroy();
  }
}
diff --git a/editor/grida-canvas/plugins/sync/presence-sync.ts
b/editor/grida-canvas/plugins/sync/presence-sync.ts new file mode 100644 index 000000000..e95b56528 --- /dev/null +++ b/editor/grida-canvas/plugins/sync/presence-sync.ts @@ -0,0 +1,201 @@ +/** + * @module presence-sync + * + * Bridges cursor/presence between the editor and SyncClient. + * + * Reuses the same subscription patterns as the old YJS AwarenessSyncManager + * but sends/receives via SyncClient.setPresence() / onPresenceChange(). + */ + +import type { Editor } from "@/grida-canvas/editor"; +import { editor } from "@/grida-canvas/editor.i"; +import type { SyncClient, PresenceState } from "@grida/canvas-sync"; +import type cmath from "@grida/cmath"; +import equal from "fast-deep-equal"; + +export class PresenceSyncAdapter { + private _unsubscribeGeo: (() => void) | null = null; + private _unsubscribeFocus: (() => void) | null = null; + private _unsubscribeCursorChat: (() => void) | null = null; + private _unsubscribePresence: (() => void) | null = null; + + private _currentGeo: { + position: [number, number]; + marquee_a: [number, number] | null; + transform: cmath.Transform; + } = { + position: [0, 0], + marquee_a: null, + transform: [ + [1, 0, 0], + [0, 1, 0], + ], + }; + + private _currentFocus: { + scene_id: string | undefined; + selection: string[]; + } = { + scene_id: undefined, + selection: [], + }; + + private _currentCursorChat: { txt: string; ts: number } | null = null; + + constructor( + private readonly _editor: Editor, + private readonly _client: SyncClient, + private readonly _cursor: { + cursor_id: string; + palette: editor.state.MultiplayerCursorColorPalette; + } + ) { + this._setupLocalToRemote(); + this._setupRemoteToLocal(); + } + + // ----------------------------------------------------------------------- + // Local → Remote + // ----------------------------------------------------------------------- + + private _setupLocalToRemote(): void { + // High-frequency: pointer, marquee, camera + this._unsubscribeGeo = 
this._editor.doc.subscribeWithSelector( + (state) => ({ + pointer: state.pointer, + marquee: state.marquee, + transform: state.transform, + }), + (_store, next) => { + const { pointer, marquee, transform } = next; + this._currentGeo = { + position: pointer.position, + marquee_a: marquee?.a ?? null, + transform, + }; + this._pushPresence(); + }, + equal + ); + + // Medium-frequency: selection, scene + this._unsubscribeFocus = this._editor.doc.subscribeWithSelector( + (state) => ({ + selection: state.selection, + scene_id: state.scene_id, + }), + (_store, next) => { + this._currentFocus = { + scene_id: next.scene_id, + selection: next.selection, + }; + this._pushPresence(); + }, + equal + ); + + // Low-frequency: cursor chat + this._unsubscribeCursorChat = this._editor.doc.subscribeWithSelector( + (state) => state.local_cursor_chat, + (_store, next) => { + this._currentCursorChat = next.message + ? { txt: next.message, ts: next.last_modified || Date.now() } + : null; + this._pushPresence(); + }, + equal + ); + } + + private _pushPresence(): void { + this._client.setPresence({ + cursor: { + cursor_id: this._cursor.cursor_id, + x: this._currentGeo.position[0], + y: this._currentGeo.position[1], + t: Date.now(), + }, + selection: this._currentFocus.selection, + scene_id: this._currentFocus.scene_id, + viewport: { + x: this._currentGeo.transform[0]?.[2] ?? 0, + y: this._currentGeo.transform[1]?.[2] ?? 0, + zoom: this._currentGeo.transform[0]?.[0] ?? 
1, + }, + profile: { + name: this._cursor.cursor_id, + color: this._cursor.palette["500"], + }, + }); + } + + // ----------------------------------------------------------------------- + // Remote → Local + // ----------------------------------------------------------------------- + + private _setupRemoteToLocal(): void { + this._unsubscribePresence = this._client.on("presenceChange", (peers) => { + const cursors: Record = {}; + + for (const [peerId, presence] of Object.entries(peers)) { + if (!presence.cursor || !presence.profile?.color) continue; + + const cursor = presence.cursor; + cursors[cursor.cursor_id] = { + t: cursor.t, + id: cursor.cursor_id, + position: [cursor.x, cursor.y], + palette: this._colorToPalette(presence.profile.color), + transform: presence.viewport + ? [ + [presence.viewport.zoom, 0, presence.viewport.x], + [0, presence.viewport.zoom, presence.viewport.y], + ] + : null, + selection: [...(presence.selection ?? [])], + scene_id: presence.scene_id, + marquee: null, // TODO: encode marquee in presence if needed + ephemeral_chat: null, // TODO: encode cursor chat in presence + }; + } + + this._editor.surface.__sync_cursors(cursors); + }); + } + + /** + * Build a minimal palette from a single color string. + * The old YJS system sent the full palette; the new protocol sends + * just the primary color. We synthesize a palette by using the color + * for all slots. This is good enough for rendering — the cursor badge + * primarily uses the "500" slot. 
+ */ + private _colorToPalette( + color: string + ): editor.state.MultiplayerCursorColorPalette { + return { + "50": color, + "100": color, + "200": color, + "300": color, + "400": color, + "500": color, + "600": color, + "700": color, + "800": color, + "900": color, + "950": color, + }; + } + + // ----------------------------------------------------------------------- + // Cleanup + // ----------------------------------------------------------------------- + + destroy(): void { + this._unsubscribeGeo?.(); + this._unsubscribeFocus?.(); + this._unsubscribeCursorChat?.(); + this._unsubscribePresence?.(); + } +} diff --git a/editor/grida-canvas/plugins/sync/serialize.ts b/editor/grida-canvas/plugins/sync/serialize.ts new file mode 100644 index 000000000..4ca47989d --- /dev/null +++ b/editor/grida-canvas/plugins/sync/serialize.ts @@ -0,0 +1,106 @@ +/** + * @module serialize + * + * Converts between the editor's `grida.program.document.Document` and the + * sync layer's `DocumentState` (flat record map + scene ordering). + * + * This is the bridge between the editor's rich type system and the sync + * protocol's JSON-serializable flat representation. + */ + +import type grida from "@grida/schema"; +import type { DocumentState, SerializedNode, NodeId } from "@grida/canvas-sync"; + +type BitmapsMap = grida.program.document.Document["bitmaps"]; + +/** + * Shape of the `__doc_meta__` pseudo-node used to round-trip + * document-level data through the sync layer's flat record map. + */ +interface DocMetaRecord { + readonly type: "__doc_meta__"; + readonly id: "__doc_meta__"; + readonly links: Record; + readonly images: Record; + readonly bitmaps: BitmapsMap; + readonly properties: grida.program.schema.Properties; + readonly metadata: grida.program.document.INodeMetadata["metadata"]; + readonly entry_scene_id: string | null; +} + +/** + * Convert the editor's Document model into the sync-friendly DocumentState. 
+ * + * Nodes are serialized as plain JSON objects (they already are JSON-compatible). + * The `links` adjacency list, images, bitmaps, properties, and metadata are + * stored as a special `__doc_meta__` pseudo-node so they round-trip through + * the sync layer. + */ +export function documentToState( + doc: grida.program.document.Document +): DocumentState { + const nodes: Record = {}; + + // Serialize all nodes + for (const [id, node] of Object.entries(doc.nodes)) { + nodes[id] = node as unknown as SerializedNode; + } + + // Store document-level data as a special meta record + const meta: DocMetaRecord = { + type: "__doc_meta__", + id: "__doc_meta__", + links: doc.links ?? {}, + images: doc.images ?? {}, + bitmaps: doc.bitmaps ?? {}, + properties: doc.properties ?? {}, + metadata: doc.metadata, + entry_scene_id: doc.entry_scene_id ?? null, + }; + nodes["__doc_meta__"] = meta as unknown as SerializedNode; + + return { + nodes, + scenes: doc.scenes_ref ?? [], + }; +} + +/** + * Convert a DocumentState back into the editor's Document model. + */ +export function stateToDocument( + state: DocumentState +): grida.program.document.Document { + const nodes: Record = {}; + let links: Record = {}; + let images: Record = {}; + let bitmaps: BitmapsMap = {}; + let properties: grida.program.schema.Properties = {}; + let metadata: grida.program.document.INodeMetadata["metadata"] = undefined; + let entry_scene_id: string | undefined = undefined; + + for (const [id, serialized] of Object.entries(state.nodes)) { + if (id === "__doc_meta__") { + const meta = serialized as unknown as DocMetaRecord; + links = meta.links ?? {}; + images = meta.images ?? {}; + bitmaps = meta.bitmaps ?? {}; + properties = meta.properties ?? {}; + metadata = meta.metadata ?? undefined; + entry_scene_id = meta.entry_scene_id ?? 
undefined; + continue; + } + nodes[id] = serialized as unknown as grida.program.nodes.Node; + } + + return { + nodes, + links, + scenes_ref: [...state.scenes], + images, + bitmaps, + properties, + metadata, + entry_scene_id, + }; +} diff --git a/editor/grida-canvas/plugins/yjs/__tests__/y-patches.test.ts b/editor/grida-canvas/plugins/yjs/__tests__/y-patches.test.ts deleted file mode 100644 index 23a7da85f..000000000 --- a/editor/grida-canvas/plugins/yjs/__tests__/y-patches.test.ts +++ /dev/null @@ -1,230 +0,0 @@ -import * as Y from "yjs"; -import type { Patch } from "immer"; - -import { YPatchBinder, applyPatchToTarget } from "../y-patches"; -import { extractDocumentPatches } from "../y-document"; - -describe("applyPatchToTarget", () => { - it("updates nested values without clobbering siblings", () => { - const doc = new Y.Doc(); - const target = doc.getMap("test"); - - applyPatchToTarget(target, { - op: "replace", - path: [], - value: { - node: { id: "node", x: 0, y: 0 }, - }, - }); - - applyPatchToTarget(target, { - op: "replace", - path: ["node", "x"], - value: 42, - }); - - const node = target.get("node") as Y.Map; - expect(node.get("x")).toBe(42); - expect(node.get("y")).toBe(0); - }); - - it("handles array operations", () => { - const doc = new Y.Doc(); - const target = doc.getArray("test"); - - applyPatchToTarget(target, { - op: "replace", - path: [], - value: [1, 2, 3], - }); - - applyPatchToTarget(target, { - op: "add", - path: [1], - value: 99, - }); - - expect(target.toArray()).toEqual([1, 99, 2, 3]); - }); - - it("handles empty patches gracefully", () => { - const doc = new Y.Doc(); - const target = doc.getMap("test"); - - expect(() => { - applyPatchToTarget(target, { - op: "replace", - path: [], - value: {}, - }); - }).not.toThrow(); - }); -}); - -describe("YPatchBinder", () => { - it("applies local patches to Y structures", () => { - const doc = new Y.Doc(); - const nodes = doc.getMap("nodes"); - const binder = new YPatchBinder(nodes, {}, "client-a", 
() => {}); - - binder.applyLocalPatches([ - { - op: "add", - path: ["node-1"], - value: { id: "node-1", x: 0, y: 0 }, - }, - ]); - - binder.applyLocalPatches([ - { - op: "replace", - path: ["node-1", "x"], - value: 100, - }, - ]); - - const node = nodes.get("node-1") as Y.Map; - expect(node.get("x")).toBe(100); - expect(node.get("y")).toBe(0); - }); - - it("handles empty patch arrays", () => { - const doc = new Y.Doc(); - const nodes = doc.getMap("nodes"); - const binder = new YPatchBinder(nodes, {}, "client-a", () => {}); - - expect(() => { - binder.applyLocalPatches([]); - }).not.toThrow(); - }); - - it("maintains snapshot consistency", () => { - const doc = new Y.Doc(); - const nodes = doc.getMap("nodes"); - const binder = new YPatchBinder( - nodes, - { existing: "data" }, - "client-a", - () => {} - ); - - const snapshot = binder.getSnapshot(); - expect(snapshot).toEqual({ existing: "data" }); - }); - - it("emits patches for remote updates", () => { - const docA = new Y.Doc(); - const docB = new Y.Doc(); - - const nodesA = docA.getMap("nodes"); - const nodesB = docB.getMap("nodes"); - - const received: Patch[][] = []; - const binder = new YPatchBinder(nodesA, {}, "client-a", (patches) => { - received.push(patches); - }); - - binder.applyLocalPatches([ - { - op: "add", - path: ["node-1"], - value: { id: "node-1", x: 0, y: 0 }, - }, - ]); - - Y.applyUpdate(docB, Y.encodeStateAsUpdate(docA)); - - docB.transact(() => { - const node = nodesB.get("node-1") as Y.Map; - node.set("y", 88); - }, "client-b"); - - Y.applyUpdate(docA, Y.encodeStateAsUpdate(docB)); - - expect(received).toHaveLength(1); - expect(received[0][0]).toMatchObject({ - path: ["node-1", "y"], - value: 88, - }); - }); -}); - -describe("extractDocumentPatches", () => { - it("extracts document patches and removes document prefix", () => { - const patches: Patch[] = [ - { - op: "replace", - path: ["document", "nodes", "node-1", "x"], - value: 10, - }, - { - op: "replace", - path: ["document", "scenes", 
"scene-1"], - value: { id: "scene-1" }, - }, - ]; - - const result = extractDocumentPatches(patches); - - expect(result).toEqual([ - expect.objectContaining({ - path: ["nodes", "node-1", "x"], - value: 10, - }), - expect.objectContaining({ - path: ["scenes", "scene-1"], - value: { id: "scene-1" }, - }), - ]); - }); - - it("filters out non-document patches", () => { - const patches: Patch[] = [ - { - op: "replace", - path: ["selection"], - value: ["node-1"], - }, - { - op: "replace", - path: ["document", "nodes", "node-1"], - value: { id: "node-1" }, - }, - ]; - - const result = extractDocumentPatches(patches); - - expect(result).toHaveLength(1); - expect(result[0].path).toEqual(["nodes", "node-1"]); - }); - - it("handles empty patches", () => { - const result = extractDocumentPatches([]); - expect(result).toHaveLength(0); - }); - - it("handles full document replacement", () => { - const patches: Patch[] = [ - { - op: "replace", - path: ["document"], - value: { - nodes: { "node-1": { id: "node-1" } }, - scenes: { "scene-1": { id: "scene-1" } }, - }, - }, - ]; - - const result = extractDocumentPatches(patches); - - expect(result).toEqual([ - expect.objectContaining({ - path: [], - value: { - nodes: { "node-1": { id: "node-1" } }, - scenes: { "scene-1": { id: "scene-1" } }, - }, - }), - ]); - }); -}); diff --git a/editor/grida-canvas/plugins/yjs/index.ts b/editor/grida-canvas/plugins/yjs/index.ts deleted file mode 100644 index f007c804b..000000000 --- a/editor/grida-canvas/plugins/yjs/index.ts +++ /dev/null @@ -1,56 +0,0 @@ -import * as Y from "yjs"; -import { WebsocketProvider } from "y-websocket"; -import type { Awareness } from "y-protocols/awareness"; -import type { Editor } from "@/grida-canvas/editor"; -import type { editor } from "@/grida-canvas/editor.i"; -import { AwarenessSyncManager } from "./y-awareness"; -import { DocumentSyncManager } from "./y-document"; - -export class EditorYSyncPlugin { - public readonly doc: Y.Doc; - public readonly provider: 
WebsocketProvider; - public readonly awareness: Awareness; - private readonly _documentSync: DocumentSyncManager; - private readonly _awarenessSync: AwarenessSyncManager; - - constructor( - private readonly _editor: Editor, - private readonly room_id: string, - private readonly cursor: { - palette: editor.state.MultiplayerCursorColorPalette; - cursor_id: string; - } - ) { - console.log("sync-y::constructor"); - this.doc = new Y.Doc(); - this.provider = new WebsocketProvider( - process.env.NODE_ENV === "development" - ? "wss://localhost:8787/editor" - : "wss://live.grida.co/editor", - this.room_id, - this.doc - ); - - this.awareness = this.provider.awareness; - - // Initialize sub-managers - this._documentSync = new DocumentSyncManager(this._editor, this.doc); - this._awarenessSync = new AwarenessSyncManager( - this._editor, - this.awareness, - this.cursor - ); - } - - public destroy() { - // Clean up awareness state immediately - this.awareness.setLocalState(null); - - // Destroy sub-managers - this._documentSync.destroy(); - this._awarenessSync.destroy(); - - this.provider.destroy(); - this.doc.destroy(); - } -} diff --git a/editor/grida-canvas/plugins/yjs/y-awareness.ts b/editor/grida-canvas/plugins/yjs/y-awareness.ts deleted file mode 100644 index 9a2ad7dfd..000000000 --- a/editor/grida-canvas/plugins/yjs/y-awareness.ts +++ /dev/null @@ -1,177 +0,0 @@ -import type { Awareness } from "y-protocols/awareness"; -import type { Editor } from "@/grida-canvas/editor"; -import { editor } from "@/grida-canvas/editor.i"; -import equal from "fast-deep-equal"; - -/** - * class for managing awareness/cursor synchronization - */ -export class AwarenessSyncManager { - private __unsubscribe_geo_change!: () => void; - private __unsubscribe_focus_change!: () => void; - - private _currentState: Partial< - Omit - > = {}; - - private __unsubscribe_cursor_chat_change!: () => void; - - constructor( - private readonly _editor: Editor, - private readonly _awareness: Awareness, - private 
readonly _cursor: { - palette: editor.state.MultiplayerCursorColorPalette; - cursor_id: string; - } - ) { - this._setupAwarenessSync(); - } - - private _setupAwarenessSync() { - const aware = () => { - const states = Array.from(this._awareness.getStates().entries()) - .filter(([id]) => id !== this._awareness.clientID) - .filter(([_, state]) => { - // Only process states that have a complete player object with palette - return state && state.profile?.palette; - }) - .map((_: any) => { - const [id, state] = _ as [ - string, - editor.multiplayer.AwarenessPayload, - ]; - const { - cursor_id, - profile: { palette }, - focus: { scene_id, selection }, - geo: { transform, position = [0, 0], marquee_a }, - cursor_chat, - } = state; - - const marquee = marquee_a ? { a: marquee_a, b: position } : null; - - return { - t: Date.now(), - id: cursor_id, // Use cursor_id instead of awareness clientID - position, - palette, - marquee: marquee, - transform, - selection, - scene_id, - ephemeral_chat: cursor_chat, - } satisfies editor.state.MultiplayerCursor; - }); - - // Convert to object format {[cursorId]: cursor} with timestamp-based conflict resolution - const cursorsObject = states.reduce( - (acc, state) => { - const existing = acc[state.id]; - if (!existing || state.t > existing.t) { - acc[state.id] = state; - } - return acc; - }, - {} as Record - ); - - this._editor.surface.__sync_cursors(cursorsObject); - }; - - this._awareness.on("change", aware); - this._awareness.on("remove", aware); - aware(); - - this._setupGeoAwarenessSync(); - this._setupFocusAwarenessSync(); - this._setupCursorChatAwarenessSync(); - } - - private _setupGeoAwarenessSync() { - // High-frequency updates for geometric data (mouse movement, camera) - this.__unsubscribe_geo_change = this._editor.doc.subscribeWithSelector( - (state) => ({ - pointer: state.pointer, - marquee: state.marquee, - transform: state.transform, - }), - (editor, next) => { - if (editor.locked) return; - const { pointer, marquee, 
transform } = next; - - this._currentState.geo = { - transform, - position: pointer.position, - marquee_a: marquee?.a ?? null, - }; - - this._syncAwarenessState(); - }, - equal - ); - } - - private _setupFocusAwarenessSync() { - // Medium-frequency updates for focus changes (page switches, selections) - this.__unsubscribe_focus_change = this._editor.doc.subscribeWithSelector( - (state) => ({ - selection: state.selection, - scene_id: state.scene_id, - }), - (editor, next) => { - if (editor.locked) return; - const { selection, scene_id } = next; - - this._currentState.focus = { - scene_id, - selection, - }; - - this._syncAwarenessState(); - }, - equal - ); - } - - private _setupCursorChatAwarenessSync() { - // Sync cursor chat state from editor to awareness - this.__unsubscribe_cursor_chat_change = - this._editor.doc.subscribeWithSelector( - (state) => state.local_cursor_chat, - (editor, next) => { - if (editor.locked) return; - const { message, last_modified } = next; - - this._currentState.cursor_chat = message - ? 
{ txt: message, ts: last_modified || Date.now() } - : null; - - this._syncAwarenessState(); - }, - equal - ); - } - - private _syncAwarenessState() { - this._awareness.setLocalState({ - cursor_id: this._cursor.cursor_id, - profile: { palette: this._cursor.palette }, - focus: this._currentState.focus || { scene_id: undefined, selection: [] }, - geo: this._currentState.geo || { - transform: [ - [1, 0, 0], - [0, 1, 0], - ], - position: [0, 0], - marquee_a: null, - }, - cursor_chat: this._currentState.cursor_chat || null, - } satisfies editor.multiplayer.AwarenessPayload); - } - - public destroy() { - this.__unsubscribe_geo_change(); - this.__unsubscribe_focus_change(); - this.__unsubscribe_cursor_chat_change(); - } -} diff --git a/editor/grida-canvas/plugins/yjs/y-document.ts b/editor/grida-canvas/plugins/yjs/y-document.ts deleted file mode 100644 index dab55df61..000000000 --- a/editor/grida-canvas/plugins/yjs/y-document.ts +++ /dev/null @@ -1,165 +0,0 @@ -import * as Y from "yjs"; -import { editor } from "@/grida-canvas/editor.i"; -import type { Editor, EditorDocumentStore } from "@/grida-canvas/editor"; -import type grida from "@grida/schema"; -import type { Patch } from "immer"; -import { YPatchBinder } from "./y-patches"; -import assert from "assert"; - -/** - * Filters and transforms patches to only include document-level changes, - * removing the "document" prefix from the path - */ -export function extractDocumentPatches(patches: Patch[]): Patch[] { - const documentPatches: Patch[] = []; - - for (const patch of patches) { - // Skip non-document patches - if (patch.path[0] !== "document") { - continue; - } - - // Remove the "document" prefix from the path - const [, ...rest] = patch.path; - documentPatches.push({ - ...patch, - path: rest, - }); - } - - return documentPatches; -} - -/** - * class for managing document synchronization - */ -export class DocumentSyncManager { - private __unsubscribe_document_change!: () => void; - private readonly ymap_document: 
Y.Map; - private throttle_ms: number = 30; - private readonly documentBinder: YPatchBinder>; - - /** - * Unique origin identifier for this client's transactions - * This is used by YJS to track which transactions originated from this client - */ - private readonly origin: string = `client-${Math.random().toString(36).slice(2)}`; - - /** - * Mutex to prevent feedback loops between editor changes and Y.js changes - */ - private readonly mutex = editor.createMutex(); - private rc: number = 0; - constructor( - private readonly _editor: Editor, - doc: Y.Doc - ) { - this.ymap_document = doc.getMap("document"); - - const initialDocument = this.ymap_document.toJSON() as Record; - - this.documentBinder = new YPatchBinder( - this.ymap_document, - initialDocument, - this.origin, - (patches) => this._handleRemotePatches(patches) - ); - - this._initializeStateFromSources(); - this._setupDocumentSync(); - } - - private _setupDocumentSync() { - // Subscribe to editor document changes and sync to Y.Doc - this.__unsubscribe_document_change = this._editor.doc.subscribeWithSelector( - (state) => state.document, - editor.throttle( - ( - editorStore: EditorDocumentStore, - next: grida.program.document.Document, - prev: grida.program.document.Document, - _action, - patches = [] - ) => { - if (editorStore.locked) return; - if (this.rc === editorStore.tid) return; - this.rc = editorStore.tid; - - const documentPatches = extractDocumentPatches(patches); - - this.mutex(() => { - if (documentPatches.length) { - this.documentBinder.applyLocalPatches(documentPatches); - } - }); - }, - this.throttle_ms, - { trailing: true } - ) - ); - } - - public destroy() { - this.documentBinder.destroy(); - this.__unsubscribe_document_change(); - } - - private _initializeStateFromSources() { - const localDocument = this._editor.doc.state.document; - const remoteDocument = this.documentBinder.getSnapshot(); - - const hasRemoteData = Object.keys(remoteDocument ?? 
{}).length > 0; - const hasLocalData = - Object.keys(localDocument.nodes).length > 0 || - (localDocument.scenes_ref?.length ?? 0) > 0; - - // If remote is empty but local has data, push local to remote - if (this.ymap_document.size === 0 && hasLocalData) { - this.documentBinder.applyLocalPatches([ - { - op: "replace", - path: [], - value: { - nodes: localDocument.nodes, - scenes_ref: localDocument.scenes_ref, - links: localDocument.links, - metadata: localDocument.metadata, - }, - }, - ]); - } - // If remote has data, pull it to local - else if (hasRemoteData) { - this.mutex(() => { - this._editor.doc.applyDocumentPatches([ - { - op: "replace", - path: ["document"], - value: remoteDocument, - }, - ]); - }); - } - } - - private _handleRemotePatches(patches: Patch[]) { - if (!patches.length) { - return; - } - - // Add "document" prefix to all patches - const prefixed = patches.map((patch) => ({ - ...patch, - path: ["document", ...patch.path], - })); - - this.mutex( - () => { - this._editor.doc.applyDocumentPatches(prefixed); - }, - () => { - console.log("sync:down skipped (mutex locked)"); - } - ); - } -} diff --git a/editor/grida-canvas/plugins/yjs/y-patches.ts b/editor/grida-canvas/plugins/yjs/y-patches.ts deleted file mode 100644 index bac66ba2a..000000000 --- a/editor/grida-canvas/plugins/yjs/y-patches.ts +++ /dev/null @@ -1,330 +0,0 @@ -import { applyPatches, enablePatches, Patch, produceWithPatches } from "immer"; -import * as Y from "yjs"; -import assert from "assert"; - -enablePatches(); - -export type JSONValue = - | string - | number - | boolean - | null - | undefined - | JSONObject - | JSONArray; -export type JSONObject = { [key: string]: JSONValue }; -export type JSONArray = JSONValue[]; - -function isJSONObject(value: unknown): value is JSONObject { - return typeof value === "object" && value !== null && !Array.isArray(value); -} - -function isJSONArray(value: unknown): value is JSONArray { - return Array.isArray(value); -} - -function 
toPlainValue(value: any): JSONValue { - if (value instanceof Y.Map || value instanceof Y.Array) { - return value.toJSON(); - } - return value as JSONValue; -} - -function toYDataType(value: JSONValue): any { - if (value === undefined) { - return null; - } - if (isJSONArray(value)) { - const arr = new Y.Array(); - arr.push(value.map(toYDataType)); - return arr; - } - if (isJSONObject(value)) { - const map = new Y.Map(); - for (const [k, v] of Object.entries(value)) { - map.set(k, toYDataType(v)); - } - return map; - } - return value; -} - -/** - * Ensures a Yjs container exists at the given key, creating it if necessary. - * Uses `nextKey` to determine whether to create a Y.Map (string key) or Y.Array (number key). - */ -function ensureContainer( - base: Y.Map | Y.Array, - key: string | number, - nextKey: string | number | undefined -): Y.Map | Y.Array { - if (base instanceof Y.Map && typeof key === "string") { - const value = base.get(key); - if (value instanceof Y.AbstractType) { - return value as Y.Map | Y.Array; - } - if (value === undefined) { - const created = typeof nextKey === "number" ? new Y.Array() : new Y.Map(); - base.set(key, created); - return created; - } - if (value === null) { - const created = typeof nextKey === "number" ? new Y.Array() : new Y.Map(); - base.set(key, created); - return created; - } - const created = toYDataType(value as JSONValue); - base.set(key, created); - return created as Y.Map | Y.Array; - } - - if (base instanceof Y.Array && typeof key === "number") { - const value = base.get(key); - if (value instanceof Y.AbstractType) { - return value as Y.Map | Y.Array; - } - if (value === undefined || value === null) { - const created = typeof nextKey === "number" ? 
new Y.Array() : new Y.Map(); - if (key >= base.length) { - base.insert(key, [created]); - } else { - base.delete(key); - base.insert(key, [created]); - } - return created; - } - const created = toYDataType(value as JSONValue); - base.delete(key); - base.insert(key, [created]); - return created as Y.Map | Y.Array; - } - - assert.fail("Unsupported container traversal"); -} - -export function applyPatchToTarget( - target: Y.Map | Y.Array, - patch: Patch -) { - const { op, path, value } = patch; - - if (!path.length) { - assert.strictEqual(op, "replace", "Root level patch must be replace"); - - if (target instanceof Y.Map && isJSONObject(value)) { - target.clear(); - for (const [k, v] of Object.entries(value)) { - target.set(k, toYDataType(v)); - } - return; - } - - if (target instanceof Y.Array && isJSONArray(value)) { - target.delete(0, target.length); - target.insert(0, value.map(toYDataType)); - return; - } - - assert.fail("Unsupported root patch value"); - } - - let base: Y.Map | Y.Array = target; - for (let i = 0; i < path.length - 1; i++) { - const step = path[i]; - const nextKey = path[i + 1]; - if (base instanceof Y.Map && typeof step === "string") { - const nextValue = base.get(step); - if (nextValue instanceof Y.AbstractType) { - base = nextValue as Y.Map | Y.Array; - continue; - } - if (nextValue === undefined || nextValue === null) { - base = ensureContainer(base, step, nextKey); - continue; - } - if (isJSONObject(nextValue) || isJSONArray(nextValue)) { - const created = toYDataType(nextValue as JSONValue); - base.set(step, created); - base = created as Y.Map | Y.Array; - continue; - } - assert.fail("Cannot traverse primitive value"); - } else if (base instanceof Y.Array && typeof step === "number") { - const nextValue = base.get(step); - if (nextValue instanceof Y.AbstractType) { - base = nextValue as Y.Map | Y.Array; - continue; - } - if (nextValue === undefined || nextValue === null) { - base = ensureContainer(base, step, nextKey); - continue; - } - if 
(isJSONObject(nextValue) || isJSONArray(nextValue)) { - const created = toYDataType(nextValue as JSONValue); - base.delete(step); - base.insert(step, [created]); - base = created as Y.Map | Y.Array; - continue; - } - assert.fail("Cannot traverse primitive value"); - } else { - assert.fail("Unsupported traversal path"); - } - } - - const property = path[path.length - 1]; - - if (base instanceof Y.Map && typeof property === "string") { - switch (op) { - case "add": - case "replace": - base.set(property, toYDataType(value as JSONValue)); - break; - case "remove": - base.delete(property); - break; - } - return; - } - - if (base instanceof Y.Array && typeof property === "number") { - switch (op) { - case "add": - base.insert(property, [toYDataType(value as JSONValue)]); - break; - case "replace": - base.delete(property); - base.insert(property, [toYDataType(value as JSONValue)]); - break; - case "remove": - base.delete(property); - break; - } - return; - } - - if (base instanceof Y.Array && property === "length") { - if (typeof value === "number" && value < base.length) { - base.delete(value, base.length - value); - } - return; - } - - assert.fail("Unsupported patch application"); -} - -function applyYEvent(base: any, event: Y.YEvent) { - if (event instanceof Y.YMapEvent && isJSONObject(base)) { - const source = event.target as Y.Map; - event.changes.keys.forEach((change, key) => { - switch (change.action) { - case "add": - case "update": - base[key] = toPlainValue(source.get(key)); - break; - case "delete": - delete base[key]; - break; - } - }); - } else if (event instanceof Y.YArrayEvent && isJSONArray(base)) { - const arr = base as any[]; - let retain = 0; - event.changes.delta.forEach((change) => { - if (change.retain) { - retain += change.retain; - } - if (change.delete) { - arr.splice(retain, change.delete); - } - if (change.insert) { - if (Array.isArray(change.insert)) { - arr.splice(retain, 0, ...change.insert.map(toPlainValue)); - retain += 
change.insert.length; - } else { - arr.splice(retain, 0, toPlainValue(change.insert)); - retain += 1; - } - } - }); - } -} - -function applyYEventsWithPatches( - snapshot: S, - events: Y.YEvent[] -): [S, Patch[]] { - const [result, patches] = produceWithPatches(snapshot, (draft: any) => { - for (const event of events) { - let base = draft; - for (const step of event.path) { - base = base[step as any]; - } - applyYEvent(base, event); - } - }); - return [result, patches]; -} - -export class YPatchBinder { - private snapshot: S; - private readonly observer: (events: Y.YEvent[]) => void; - - constructor( - private readonly source: Y.Map | Y.Array, - initialSnapshot: S, - private readonly origin: string, - private readonly onRemotePatches: (patches: Patch[]) => void - ) { - this.snapshot = initialSnapshot; - - this.observer = (events) => { - if (!events.length) return; - const transaction = events[0].transaction; - if (!transaction) return; - if (transaction.local) return; - - const [nextSnapshot, patches] = applyYEventsWithPatches( - this.snapshot, - events - ); - if (patches.length === 0) { - return; - } - this.snapshot = nextSnapshot; - this.onRemotePatches(patches); - }; - - this.source.observeDeep(this.observer); - } - - getSnapshot(): S { - return this.snapshot; - } - - applyLocalPatches(patches: Patch[]) { - if (!patches.length) { - return; - } - - const nextSnapshot = applyPatches(this.snapshot, patches) as S; - const doc = this.source.doc; - const apply = () => { - for (const patch of patches) { - applyPatchToTarget(this.source, patch); - } - }; - - if (doc) { - doc.transact(apply, this.origin); - } else { - apply(); - } - - this.snapshot = nextSnapshot; - } - - destroy() { - this.source.unobserveDeep(this.observer); - } -} diff --git a/editor/package.json b/editor/package.json index 6597cc522..3f2af2d1b 100644 --- a/editor/package.json +++ b/editor/package.json @@ -32,6 +32,7 @@ "@fingerprintjs/fingerprintjs": "^4.2.2", "@formatjs/intl-localematcher": 
"^0.6.1", "@grida/bitmap": "workspace:*", + "@grida/canvas-sync": "workspace:*", "@grida/canvas-wasm": "workspace:*", "@grida/cg": "workspace:*", "@grida/cmath": "workspace:*", @@ -207,10 +208,6 @@ "uuid": "^9.0.1", "validator": "^13.12.0", "vaul": "^0.9.9", - "y-protocols": "^1.0.6", - "y-webrtc": "^10.3.0", - "y-websocket": "^3.0.0", - "yjs": "^13.6.27", "zod": "^4.3.6", "zustand": "^5.0.3" }, diff --git a/packages/grida-canvas-sync/__tests__/helpers.ts b/packages/grida-canvas-sync/__tests__/helpers.ts index 2800afbd8..ed9d9effb 100644 --- a/packages/grida-canvas-sync/__tests__/helpers.ts +++ b/packages/grida-canvas-sync/__tests__/helpers.ts @@ -247,9 +247,10 @@ export class MockServer { export function makeNode( id: string, - props: Record = {} + props: Record = {}, + type: string = "rectangle" ): SerializedNode { - return { type: "rectangle", id, ...props } as SerializedNode; + return { type, id, ...props } as SerializedNode; } export function emptyState(): DocumentState { diff --git a/packages/grida-canvas-sync/__tests__/integration.test.ts b/packages/grida-canvas-sync/__tests__/integration.test.ts index f681f36d1..753eae749 100644 --- a/packages/grida-canvas-sync/__tests__/integration.test.ts +++ b/packages/grida-canvas-sync/__tests__/integration.test.ts @@ -508,7 +508,7 @@ describe("multi-client integration", () => { nodes: { s1: { op: "put", - node: { type: "scene", id: "s1", name: "Page 1" } as any, + node: makeNode("s1", { name: "Page 1" }, "scene"), }, }, scenes: [{ op: "add", id: "s1" }], @@ -522,8 +522,8 @@ describe("multi-client integration", () => { it("client A removes a scene, client B sees it", () => { const initial: DocumentState = { nodes: { - s1: { type: "scene", id: "s1", name: "Page 1" } as any, - s2: { type: "scene", id: "s2", name: "Page 2" } as any, + s1: makeNode("s1", { name: "Page 1" }, "scene"), + s2: makeNode("s2", { name: "Page 2" }, "scene"), }, scenes: ["s1", "s2"], }; @@ -559,7 +559,7 @@ describe("multi-client integration", () => 
{ nodes: { s1: { op: "put", - node: { type: "scene", id: "s1", name: "Main" } as any, + node: makeNode("s1", { name: "Main" }, "scene"), }, rect1: { op: "put", @@ -629,11 +629,7 @@ describe("multi-client integration", () => { nodes: { group1: { op: "put", - node: { - type: "group", - id: "group1", - parent_id: "s1", - } as any, + node: makeNode("group1", { parent_id: "s1" }, "group"), }, rect1: { op: "patch", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dacc61402..9b5075444 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -357,6 +357,9 @@ importers: '@grida/bitmap': specifier: workspace:* version: link:../packages/grida-canvas-bitmap + '@grida/canvas-sync': + specifier: workspace:* + version: link:../packages/grida-canvas-sync '@grida/canvas-wasm': specifier: workspace:* version: link:../crates/grida-canvas-wasm @@ -882,18 +885,6 @@ importers: vaul: specifier: ^0.9.9 version: 0.9.9(@types/react-dom@19.1.3(@types/react@19.1.3))(@types/react@19.1.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - y-protocols: - specifier: ^1.0.6 - version: 1.0.7(yjs@13.6.29) - y-webrtc: - specifier: ^10.3.0 - version: 10.3.0(yjs@13.6.29) - y-websocket: - specifier: ^3.0.0 - version: 3.0.0(yjs@13.6.29) - yjs: - specifier: ^13.6.27 - version: 13.6.29 zod: specifier: ^4.3.6 version: 4.3.6 @@ -1200,6 +1191,18 @@ importers: packages/grida-canvas-sequence: {} + packages/grida-canvas-sync: + devDependencies: + tsup: + specifier: ^8.0.0 + version: 8.5.1(jiti@2.6.1)(postcss@8.5.6)(tsx@4.21.0)(typescript@5.9.3)(yaml@2.7.0) + typescript: + specifier: 5.9.3 + version: 5.9.3 + vitest: + specifier: ^4 + version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@24.12.2)(jiti@2.6.1)(jsdom@20.0.3(canvas@2.11.2(encoding@0.1.13)))(lightningcss@1.30.2)(msw@2.12.10(@types/node@24.12.2)(typescript@5.9.3))(terser@5.39.0)(tsx@4.21.0)(yaml@2.7.0) + packages/grida-canvas-tailwind: {} packages/grida-canvas-tailwind-colors: {} @@ -1303,18 +1306,12 @@ importers: services/grida-canvas-document-worker-cf: 
dependencies: + '@grida/canvas-sync': + specifier: workspace:* + version: link:../../packages/grida-canvas-sync hono: specifier: ^4.9.8 version: 4.11.8 - lib0: - specifier: ^0.2.114 - version: 0.2.117 - y-protocols: - specifier: ^1.0.6 - version: 1.0.7(yjs@13.6.29) - yjs: - specifier: ^13.6.27 - version: 13.6.29 devDependencies: typescript: specifier: 5.9.3 @@ -8949,9 +8946,6 @@ packages: resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} engines: {node: '>=6'} - err-code@3.0.1: - resolution: {integrity: sha512-GiaH0KJUewYok+eeY05IIgjtAe4Yltygk9Wqp1V5yVWLdhf0hYZchRjNIT9bb0mSwRcIusT3cx7PJUf3zEIfUA==} - error-ex@1.3.2: resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} @@ -9594,9 +9588,6 @@ packages: resolution: {integrity: sha512-/Bx5lEn+qRF4TfQ5aLu6NH+UKtvIv7Lhc487y/c8BdludrCTpiWf9wyI0RTyqg49MFefIAvFDuEi5Dfd/zgNxQ==} engines: {node: '>= 0.10'} - get-browser-rtc@1.1.0: - resolution: {integrity: sha512-MghbMJ61EJrRsDe7w1Bvqt3ZsBuqhce5nrn/XAwgwOXhcsz53/ltdxOse1h/8eKXj5slzxdsz56g5rzOFSGwfQ==} - get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -10687,11 +10678,6 @@ packages: resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} - lib0@0.2.107: - resolution: {integrity: sha512-2xih/AugT0dJSgeSfsW/bqIPILlsqzEtmw8hXzWEnMLrOz12DTK5z9rjNgUT21/HkBjHSznOQBr67bcZdc8Ltg==} - engines: {node: '>=16'} - hasBin: true - lib0@0.2.114: resolution: {integrity: sha512-gcxmNFzA4hv8UYi8j43uPlQ7CGcyMJ2KQb5kZASw6SnAKAf10hK12i2fjrS3Cl/ugZa5Ui6WwIu1/6MIXiHttQ==} engines: {node: '>=16'} @@ -13545,9 +13531,6 @@ packages: simple-get@4.0.1: resolution: {integrity: 
sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==} - simple-peer@9.11.1: - resolution: {integrity: sha512-D1SaWpOW8afq1CZGWB8xTfrT3FekjQmPValrqncJMX7QFl8YwhrPTZvMCANLtgBwwdS+7zURyqxDDEmY558tTw==} - simplesignal@2.1.7: resolution: {integrity: sha512-PEo2qWpUke7IMhlqiBxrulIFvhJRLkl1ih52Rwa+bPjzhJepcd4GIjn2RiQmFSx3dQvsEAgF0/lXMwMN7vODaA==} @@ -14918,18 +14901,6 @@ packages: utf-8-validate: optional: true - ws@8.18.2: - resolution: {integrity: sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==} - engines: {node: '>=10.0.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: '>=5.0.2' - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - ws@8.19.0: resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} engines: {node: '>=10.0.0'} @@ -14985,19 +14956,6 @@ packages: peerDependencies: yjs: ^13.0.0 - y-webrtc@10.3.0: - resolution: {integrity: sha512-KalJr7dCgUgyVFxoG3CQYbpS0O2qybegD0vI4bYnYHI0MOwoVbucED3RZ5f2o1a5HZb1qEssUKS0H/Upc6p1lA==} - engines: {node: '>=12'} - hasBin: true - peerDependencies: - yjs: ^13.6.8 - - y-websocket@3.0.0: - resolution: {integrity: sha512-mUHy7AzkOZ834T/7piqtlA8Yk6AchqKqcrCXjKW8J1w2lPtRDjz8W5/CvXz9higKAHgKRKqpI3T33YkRFLkPtg==} - engines: {node: '>=16.0.0', npm: '>=8.0.0'} - peerDependencies: - yjs: ^13.5.6 - y18n@5.0.8: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'} @@ -24346,8 +24304,6 @@ snapshots: env-paths@2.2.1: {} - err-code@3.0.1: {} - error-ex@1.3.2: dependencies: is-arrayish: 0.2.1 @@ -25354,8 +25310,6 @@ snapshots: geojson@0.5.0: {} - get-browser-rtc@1.1.0: {} - get-caller-file@2.0.5: {} get-east-asian-width@1.4.0: {} @@ -26581,10 +26535,6 @@ snapshots: prelude-ls: 1.2.1 type-check: 0.4.0 - lib0@0.2.107: - dependencies: - isomorphic.js: 
0.2.5 - lib0@0.2.114: dependencies: isomorphic.js: 0.2.5 @@ -30334,18 +30284,6 @@ snapshots: simple-concat: 1.0.1 optional: true - simple-peer@9.11.1: - dependencies: - buffer: 6.0.3 - debug: 4.4.3 - err-code: 3.0.1 - get-browser-rtc: 1.1.0 - queue-microtask: 1.2.3 - randombytes: 2.1.0 - readable-stream: 3.6.2 - transitivePeerDependencies: - - supports-color - simplesignal@2.1.7: {} sirv@2.0.4: @@ -31909,9 +31847,6 @@ snapshots: ws@8.18.0: {} - ws@8.18.2: - optional: true - ws@8.19.0: {} wsl-utils@0.1.0: @@ -31951,25 +31886,6 @@ snapshots: lib0: 0.2.114 yjs: 13.6.29 - y-webrtc@10.3.0(yjs@13.6.29): - dependencies: - lib0: 0.2.107 - simple-peer: 9.11.1 - y-protocols: 1.0.7(yjs@13.6.29) - yjs: 13.6.29 - optionalDependencies: - ws: 8.18.2 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - - y-websocket@3.0.0(yjs@13.6.29): - dependencies: - lib0: 0.2.107 - y-protocols: 1.0.7(yjs@13.6.29) - yjs: 13.6.29 - y18n@5.0.8: {} yallist@3.1.1: {} diff --git a/services/grida-canvas-document-worker-cf/src/index.ts b/services/grida-canvas-document-worker-cf/src/index.ts index 01b981ad7..b1c275926 100644 --- a/services/grida-canvas-document-worker-cf/src/index.ts +++ b/services/grida-canvas-document-worker-cf/src/index.ts @@ -10,10 +10,12 @@ */ import { Hono } from "hono"; +import { cors } from "hono/cors"; export { G1DO } from "./room"; const app = new Hono<{ Bindings: Env }>(); +app.use("*", cors()); // Health check app.get("/health", (c) => c.text("ok")); diff --git a/services/grida-canvas-document-worker-cf/src/room.ts b/services/grida-canvas-document-worker-cf/src/room.ts index e5c2f68cf..1cc05d929 100644 --- a/services/grida-canvas-document-worker-cf/src/room.ts +++ b/services/grida-canvas-document-worker-cf/src/room.ts @@ -16,8 +16,6 @@ import type { ServerMessage, DocumentDiff, PresenceState, - NodeId, - SerializedNode, } from "@grida/canvas-sync"; import { DocumentClock, @@ -28,11 +26,18 @@ import { import { SyncStorage } from "./storage"; 
// --------------------------------------------------------------------------- -// Session metadata (attached to WebSocket via tags) +// Constants // --------------------------------------------------------------------------- const SESSION_TAG_PREFIX = "session:"; +/** Maximum incoming WebSocket message size in bytes (1 MB). */ +const MAX_MESSAGE_SIZE = 1024 * 1024; + +// --------------------------------------------------------------------------- +// Session metadata (attached to WebSocket via tags) +// --------------------------------------------------------------------------- + interface SessionState { schemaVersion?: string; presence?: PresenceState; @@ -55,16 +60,13 @@ export class G1DO implements DurableObject { constructor(state: DurableObjectState, _env: Env) { this.state = state; - // Block all requests until initialization is done - this.state.blockConcurrencyWhile(async () => { - this._initialize(); - }); + this.state.blockConcurrencyWhile(() => this._initializeAsync()); } - private _initialize(): void { + private async _initializeAsync(): Promise { if (this.initialized) return; - this.storage = new SyncStorage(this.state.storage.sql); + this.storage = new SyncStorage(this.state.storage); const stored = this.storage.getFullState(); this.canonical = { @@ -132,6 +134,16 @@ export class G1DO implements DurableObject { ): Promise { if (typeof message !== "string") return; + // Guard: reject oversized messages + if (message.length > MAX_MESSAGE_SIZE) { + this._send(ws, { + type: "error", + code: "MESSAGE_TOO_LARGE", + message: `Message exceeds ${MAX_MESSAGE_SIZE} byte limit`, + }); + return; + } + let msg: ClientMessage; try { msg = JSON.parse(message) as ClientMessage; @@ -172,7 +184,8 @@ export class G1DO implements DurableObject { const sessionId = this._getSessionId(ws); if (sessionId) { this.sessions.delete(sessionId); - // Broadcast updated presence (peer left) + // Broadcast updated presence (peer left) — always send, even if + // the peers map is now 
empty, so clients clear stale cursors. this._broadcastPresence(); } } @@ -181,8 +194,14 @@ export class G1DO implements DurableObject { const sessionId = this._getSessionId(ws); if (sessionId) { this.sessions.delete(sessionId); + // Broadcast presence removal before closing + this._broadcastPresence(); + } + try { + ws.close(1011, "WebSocket error"); + } catch { + // Already closed } - ws.close(1011, "WebSocket error"); } // ------------------------------------------------------------------------- @@ -248,11 +267,9 @@ export class G1DO implements DurableObject { return; } - // Apply the diff + // Apply the diff (in-memory + SQLite, atomically) const newClock = this.clock.tick(); this.canonical = applyDiff(this.canonical, msg.diff); - - // Persist to SQLite this.storage.applyDiff(msg.diff, newClock); // Ack the pusher @@ -306,9 +323,12 @@ export class G1DO implements DurableObject { } } - /** Broadcast current presence state to all sessions. */ + /** + * Broadcast current presence state to all sessions. + * Always sends, even when peers map is empty — this signals to clients + * that a peer has left and stale cursors should be cleared. 
+ */ private _broadcastPresence(): void { - // Build per-session presence views (each session gets everyone else's presence) const allPresence: Record = {}; for (const [sid, session] of this.sessions) { if (session.presence) { @@ -328,13 +348,11 @@ export class G1DO implements DurableObject { } } - // Only send if there are peers with presence - if (Object.keys(peers).length > 0) { - try { - ws.send(JSON.stringify({ type: "presence", peers })); - } catch { - // WebSocket may have closed - } + // Always send — empty peers signals "everyone left" + try { + ws.send(JSON.stringify({ type: "presence", peers })); + } catch { + // WebSocket may have closed } } } diff --git a/services/grida-canvas-document-worker-cf/src/storage.ts b/services/grida-canvas-document-worker-cf/src/storage.ts index cbbf31281..61fdd36a6 100644 --- a/services/grida-canvas-document-worker-cf/src/storage.ts +++ b/services/grida-canvas-document-worker-cf/src/storage.ts @@ -10,6 +10,7 @@ * - `meta` — key-value metadata (document clock, schema version, scenes) * * All writes happen synchronously via `sql.exec()` inside the DO's single-threaded model. + * Multi-statement writes are wrapped in `transactionSync()` for atomicity. */ import type { @@ -17,6 +18,7 @@ import type { SerializedNode, DocumentDiff, NodeOp, + SceneOp, } from "@grida/canvas-sync"; // --------------------------------------------------------------------------- @@ -35,9 +37,11 @@ export interface StoredDocument { export class SyncStorage { private readonly sql: SqlStorage; + private readonly storage: DurableObjectStorage; - constructor(sql: SqlStorage) { - this.sql = sql; + constructor(storage: DurableObjectStorage) { + this.storage = storage; + this.sql = storage.sql; this._ensureSchema(); } @@ -87,41 +91,46 @@ export class SyncStorage { // Diff application // ------------------------------------------------------------------------- - /** Apply a diff and persist to SQLite. Returns the new clock value. 
*/ + /** + * Apply a diff and persist to SQLite atomically. + * Wrapped in a synchronous transaction to prevent partial writes on crash. + */ applyDiff(diff: DocumentDiff, clock: number): void { - // Apply node operations - if (diff.nodes) { - for (const [id, op] of Object.entries(diff.nodes)) { - this._applyNodeOp(id, op, clock); + this.storage.transactionSync(() => { + // Apply node operations + if (diff.nodes) { + for (const [id, op] of Object.entries(diff.nodes)) { + this._applyNodeOp(id, op, clock); + } } - } - // Apply scene operations - if (diff.scenes) { - let scenes = this._getMetaJson("scenes", []); - for (const sceneOp of diff.scenes) { - switch (sceneOp.op) { - case "add": - if (!scenes.includes(sceneOp.id)) { - scenes.push(sceneOp.id); - } - break; - case "remove": - scenes = scenes.filter((id) => id !== sceneOp.id); - break; - case "reorder": - scenes = [...sceneOp.ids]; - break; + // Apply scene operations + if (diff.scenes) { + let scenes = this._getMetaJson("scenes", []); + for (const sceneOp of diff.scenes) { + switch (sceneOp.op) { + case "add": + if (!scenes.includes(sceneOp.id)) { + scenes.push(sceneOp.id); + } + break; + case "remove": + scenes = scenes.filter((id) => id !== sceneOp.id); + break; + case "reorder": + scenes = [...sceneOp.ids]; + break; + } } + this._setMetaJson("scenes", scenes); } - this._setMetaJson("scenes", scenes); - } - // Update clock - this._setMetaInt("clock", clock); + // Update clock + this._setMetaInt("clock", clock); - // Prune tombstones if too many - this._pruneTombstones(5000); + // Prune tombstones if too many + this._pruneTombstones(5000); + }); } private _applyNodeOp(id: NodeId, op: NodeOp, clock: number): void { @@ -189,6 +198,12 @@ export class SyncStorage { * - Nodes that were deleted since `sinceClock` (as remove ops) * * Returns null if the clock is current (no changes). 
+ * + * Note: scene ordering is NOT included in the delta — the caller + * (SyncRoom._handleConnect) sends `scenes` separately as a full snapshot + * alongside the diff. This is intentional: scene ordering is small enough + * that a full snapshot is simpler and more reliable than tracking incremental + * scene ops in the delta. */ getDelta(sinceClock: number): DocumentDiff | null { const currentClock = this._getMetaInt("clock", 0); From 68ce6851729be8478dfbc6ae1585a65dfdcaae29 Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 13:54:46 +0900 Subject: [PATCH 4/8] =?UTF-8?q?fix:=20address=20PR=20review=20findings=20?= =?UTF-8?q?=E2=80=94=20scene=20diff=20ordering,=20reconnect=20rebase,=20pe?= =?UTF-8?q?rsist-before-advance,=20and=20code=20quality?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix critical scene diff ordering bug: computeSceneDiff now always emits a single reorder op, preventing [a,b]→[c,b] from producing [b,c] - Fix reconnect state corruption: clear stale speculative diffs and requeue them as unsent on connect_ok - Fix persist-before-advance: storage.applyDiff runs before in-memory clock/canonical advance, so a storage throw leaves state consistent - Fix spurious stateChange events: fast-path ref equality when no speculative/unsent changes exist, skip applyDiff for empty diffs - Fix double-socket race: cancel pending reconnect timer in connect() - Fix tombstone pruning gap: record tombstone_floor clock, getDelta returns null (forcing full state) for stale sinceClock values - Fix message size guard: use UTF-8 byte length instead of UTF-16 - Implement SCENE_ADD_NOT_SCENE validation (was declared but unused) - Add clock validation guards (non-negative safe integer) - Fix assertConvergence to check keys from both server and client - Remove all unused imports/variables flagged by code quality checks --- .../plugins/sync/presence-sync.ts | 2 +- .../__tests__/client.test.ts | 4 +-- 
.../grida-canvas-sync/__tests__/diff.test.ts | 16 +++++++--- .../grida-canvas-sync/__tests__/helpers.ts | 7 ++-- .../__tests__/integration.test.ts | 16 +++------- .../__tests__/validate.test.ts | 8 +++++ packages/grida-canvas-sync/src/client.ts | 32 ++++++++++++++++--- packages/grida-canvas-sync/src/clock.ts | 13 ++++++-- packages/grida-canvas-sync/src/diff.ts | 32 +++++-------------- packages/grida-canvas-sync/src/transport.ts | 5 +++ packages/grida-canvas-sync/src/validate.ts | 19 ++++++++--- .../src/room.ts | 14 +++++--- .../src/storage.ts | 20 ++++++++++-- 13 files changed, 123 insertions(+), 65 deletions(-) diff --git a/editor/grida-canvas/plugins/sync/presence-sync.ts b/editor/grida-canvas/plugins/sync/presence-sync.ts index e95b56528..39b33e999 100644 --- a/editor/grida-canvas/plugins/sync/presence-sync.ts +++ b/editor/grida-canvas/plugins/sync/presence-sync.ts @@ -137,7 +137,7 @@ export class PresenceSyncAdapter { this._unsubscribePresence = this._client.on("presenceChange", (peers) => { const cursors: Record = {}; - for (const [peerId, presence] of Object.entries(peers)) { + for (const presence of Object.values(peers)) { if (!presence.cursor || !presence.profile?.color) continue; const cursor = presence.cursor; diff --git a/packages/grida-canvas-sync/__tests__/client.test.ts b/packages/grida-canvas-sync/__tests__/client.test.ts index e8ed584b6..0fd8fcfa5 100644 --- a/packages/grida-canvas-sync/__tests__/client.test.ts +++ b/packages/grida-canvas-sync/__tests__/client.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; +import { describe, it, expect, vi } from "vitest"; import { SyncClient, type SyncClientStatus } from "../src/client"; import type { DocumentState } from "../src/diff"; import type { @@ -144,7 +144,7 @@ describe("SyncClient", () => { }); it("sends connect message with schema and lastClock", () => { - const { transport, client } = createClientAndTransport(emptyState(), 42); + const { transport } = 
createClientAndTransport(emptyState(), 42); transport.simulateConnected(); const msg = transport.sent.find((m) => m.type === "connect"); diff --git a/packages/grida-canvas-sync/__tests__/diff.test.ts b/packages/grida-canvas-sync/__tests__/diff.test.ts index 224aa458d..bf9e791e3 100644 --- a/packages/grida-canvas-sync/__tests__/diff.test.ts +++ b/packages/grida-canvas-sync/__tests__/diff.test.ts @@ -165,18 +165,26 @@ describe("computeDiff", () => { expect(diff.scenes).toEqual([{ op: "reorder", ids: ["s2", "s1"] }]); }); - it("detects scene additions", () => { + it("detects scene additions via reorder", () => { const before: DocumentState = { nodes: {}, scenes: ["s1"] }; const after: DocumentState = { nodes: {}, scenes: ["s1", "s2"] }; const diff = computeDiff(before, after)!; - expect(diff.scenes).toContainEqual({ op: "add", id: "s2" }); + expect(diff.scenes).toEqual([{ op: "reorder", ids: ["s1", "s2"] }]); }); - it("detects scene removals", () => { + it("detects scene removals via reorder", () => { const before: DocumentState = { nodes: {}, scenes: ["s1", "s2"] }; const after: DocumentState = { nodes: {}, scenes: ["s1"] }; const diff = computeDiff(before, after)!; - expect(diff.scenes).toContainEqual({ op: "remove", id: "s2" }); + expect(diff.scenes).toEqual([{ op: "reorder", ids: ["s1"] }]); + }); + + it("preserves ordering with mixed add+remove", () => { + const before: DocumentState = { nodes: {}, scenes: ["a", "b", "c"] }; + const after: DocumentState = { nodes: {}, scenes: ["c", "b", "d"] }; + const diff = computeDiff(before, after)!; + const result = applyDiff(before, diff); + expect(result.scenes).toEqual(["c", "b", "d"]); }); }); diff --git a/packages/grida-canvas-sync/__tests__/helpers.ts b/packages/grida-canvas-sync/__tests__/helpers.ts index ed9d9effb..7f6f4c5a7 100644 --- a/packages/grida-canvas-sync/__tests__/helpers.ts +++ b/packages/grida-canvas-sync/__tests__/helpers.ts @@ -21,7 +21,7 @@ import type { ConnectMessage, PresenceState, } from 
"../src/protocol"; -import { type DocumentState, applyDiff, isDiffEmpty } from "../src/diff"; +import { type DocumentState, applyDiff } from "../src/diff"; import { validateDiff } from "../src/validate"; import { DocumentClock } from "../src/clock"; import { SyncClient } from "../src/client"; @@ -320,11 +320,12 @@ export function assertConvergence( ); } - // Same field values for each node + // Same field values for each node (check all keys from both sides) for (const id of serverIds) { const sNode = serverNodes[id]; const cNode = clientNodes[id]; - for (const key of Object.keys(sNode)) { + const allKeys = new Set([...Object.keys(sNode), ...Object.keys(cNode)]); + for (const key of allKeys) { const sv = JSON.stringify(sNode[key]); const cv = JSON.stringify(cNode[key]); if (sv !== cv) { diff --git a/packages/grida-canvas-sync/__tests__/integration.test.ts b/packages/grida-canvas-sync/__tests__/integration.test.ts index 753eae749..285f23779 100644 --- a/packages/grida-canvas-sync/__tests__/integration.test.ts +++ b/packages/grida-canvas-sync/__tests__/integration.test.ts @@ -1,13 +1,5 @@ import { describe, it, expect } from "vitest"; -import { - MockServer, - MockTransport, - createRoom, - connectAll, - assertConvergence, - makeNode, - emptyState, -} from "./helpers"; +import { createRoom, connectAll, assertConvergence, makeNode } from "./helpers"; import type { DocumentState } from "../src/diff"; import type { DocumentDiff } from "../src/protocol"; @@ -314,7 +306,7 @@ describe("multi-client integration", () => { const { server, clients, transports } = createRoom(2, initial); connectAll(server, transports); - const [A, B] = clients; + const [A] = clients; // Disconnect A server.disconnectSession("client-0"); @@ -358,7 +350,7 @@ describe("multi-client integration", () => { const { server, clients, transports } = createRoom(2); connectAll(server, transports); - const [A, B] = clients; + const [A] = clients; transports[0].sent = []; // Clear handshake messages // 
Simulate rapid property changes (e.g., dragging a resize handle) @@ -530,7 +522,7 @@ describe("multi-client integration", () => { const { server, clients, transports } = createRoom(2, initial); connectAll(server, transports); - const [A, B] = clients; + const [A] = clients; A.pushDiff({ nodes: { s1: { op: "remove" } }, diff --git a/packages/grida-canvas-sync/__tests__/validate.test.ts b/packages/grida-canvas-sync/__tests__/validate.test.ts index 80c6b8415..3170af7a6 100644 --- a/packages/grida-canvas-sync/__tests__/validate.test.ts +++ b/packages/grida-canvas-sync/__tests__/validate.test.ts @@ -145,6 +145,14 @@ describe("validateDiff", () => { expect(result.valid).toBe(true); }); + it("scene add for non-scene node type fails", () => { + const state = stateWith({ r1: makeNode("r1") }); // type is "rectangle" + const diff: DocumentDiff = { scenes: [{ op: "add", id: "r1" }] }; + const result = validateDiff(state, diff); + expect(result.valid).toBe(false); + expect(result.errors[0].code).toBe("SCENE_ADD_NOT_SCENE"); + }); + it("scene remove for non-existent scene fails", () => { const state = stateWith({}, ["s1"]); const diff: DocumentDiff = { scenes: [{ op: "remove", id: "s2" }] }; diff --git a/packages/grida-canvas-sync/src/client.ts b/packages/grida-canvas-sync/src/client.ts index 5c2b6c51e..bd1106c65 100644 --- a/packages/grida-canvas-sync/src/client.ts +++ b/packages/grida-canvas-sync/src/client.ts @@ -267,11 +267,24 @@ export class SyncClient { } } + // Any speculative diffs from the previous connection are stale — + // they were never ack'd, so the server doesn't have them. + // Requeue them into unsent so they get re-pushed. 
+ if (this._speculative.length > 0) { + let requeued = this._unsent; + for (const spec of this._speculative) { + requeued = composeDiffs(spec, requeued); + } + this._unsent = requeued; + this._speculative = []; + } + this._pushInFlight = false; + this._serverClock = msg.clock; this._setStatus("ready"); this._recomputeLocalState(); - // If we have unsent changes from before reconnect, push them + // If we have unsent changes (including requeued speculative), push them if (!isDiffEmpty(this._unsent)) { this._schedulePush(); } @@ -390,19 +403,28 @@ export class SyncClient { * Emits "stateChange" if the state actually changed. */ private _recomputeLocalState(): void { + // Fast path: if there are no speculative or unsent changes, + // the local state IS the canonical state (same reference). + if (this._speculative.length === 0 && isDiffEmpty(this._unsent)) { + if (this._localState === this._canonical) return; + this._localState = this._canonical; + this._emit("stateChange", this._localState); + return; + } + + // Slow path: apply all pending diffs on top of canonical. let merged = this._canonical; for (const spec of this._speculative) { - merged = applyDiff(merged, spec); + if (!isDiffEmpty(spec)) { + merged = applyDiff(merged, spec); + } } if (!isDiffEmpty(this._unsent)) { merged = applyDiff(merged, this._unsent); } - // Ref check — if nothing changed, skip the event - if (merged === this._localState) return; - this._localState = merged; this._emit("stateChange", merged); } diff --git a/packages/grida-canvas-sync/src/clock.ts b/packages/grida-canvas-sync/src/clock.ts index 2a6f2b59a..90f448ae4 100644 --- a/packages/grida-canvas-sync/src/clock.ts +++ b/packages/grida-canvas-sync/src/clock.ts @@ -14,7 +14,7 @@ export class DocumentClock { private _value: number; constructor(initial: number = 0) { - this._value = initial; + this._value = DocumentClock._validate(initial); } /** Current clock value. 
*/ @@ -29,6 +29,15 @@ export class DocumentClock { /** Reset to a specific value (used when loading from storage). */ reset(value: number): void { - this._value = value; + this._value = DocumentClock._validate(value); + } + + private static _validate(v: number): number { + if (!Number.isSafeInteger(v) || v < 0) { + throw new RangeError( + `DocumentClock value must be a non-negative safe integer, got ${v}` + ); + } + return v; } } diff --git a/packages/grida-canvas-sync/src/diff.ts b/packages/grida-canvas-sync/src/diff.ts index 3d49c3628..03b9ed05c 100644 --- a/packages/grida-canvas-sync/src/diff.ts +++ b/packages/grida-canvas-sync/src/diff.ts @@ -127,6 +127,11 @@ function computeNodePatch( /** * Compute scene diff. Returns scene ops or null if identical. + * + * Always emits a single `reorder` op containing the full target ordering. + * This is simpler and more correct than separate add/remove/reorder ops, + * because mixed membership + ordering changes (e.g. `[a,b] → [c,b]`) + * cannot be reliably reconstructed from individual add/remove ops alone. */ function computeSceneDiff( before: readonly NodeId[], @@ -140,30 +145,9 @@ function computeSceneDiff( return null; } - const ops: SceneOp[] = []; - const beforeSet = new Set(before); - const afterSet = new Set(after); - - // Removed scenes - for (const id of before) { - if (!afterSet.has(id)) { - ops.push({ op: "remove", id }); - } - } - - // Added scenes - for (const id of after) { - if (!beforeSet.has(id)) { - ops.push({ op: "add", id }); - } - } - - // If the set is the same but order changed, emit a reorder - if (ops.length === 0) { - ops.push({ op: "reorder", ids: after }); - } - - return ops; + // Emit a single reorder that captures the full target state. + // applyDiff treats "reorder" as a full replacement of the scenes array. 
+ return [{ op: "reorder", ids: after }]; } // --------------------------------------------------------------------------- diff --git a/packages/grida-canvas-sync/src/transport.ts b/packages/grida-canvas-sync/src/transport.ts index 641c9d9fd..32587daa8 100644 --- a/packages/grida-canvas-sync/src/transport.ts +++ b/packages/grida-canvas-sync/src/transport.ts @@ -92,6 +92,11 @@ export class WebSocketTransport implements ISyncTransport { connect(): void { if (this._status !== "disconnected") return; this._intentionalClose = false; + // Cancel any pending reconnect timer to prevent a second socket + if (this._reconnectTimer !== null) { + clearTimeout(this._reconnectTimer); + this._reconnectTimer = null; + } this._openSocket(); } diff --git a/packages/grida-canvas-sync/src/validate.ts b/packages/grida-canvas-sync/src/validate.ts index 7bac86e27..14d73deaa 100644 --- a/packages/grida-canvas-sync/src/validate.ts +++ b/packages/grida-canvas-sync/src/validate.ts @@ -62,12 +62,15 @@ export function validateDiff( } if (diff.scenes) { - // Build a projected node set (after applying node ops from this diff) - const projectedNodes = new Set(Object.keys(state.nodes)); + // Build a projected node set with types (after applying node ops from this diff) + const projectedNodeTypes = new Map(); + for (const [id, node] of Object.entries(state.nodes)) { + projectedNodeTypes.set(id, node.type); + } if (diff.nodes) { for (const [id, op] of Object.entries(diff.nodes)) { - if (op.op === "put") projectedNodes.add(id); - if (op.op === "remove") projectedNodes.delete(id); + if (op.op === "put") projectedNodeTypes.set(id, op.node.type); + if (op.op === "remove") projectedNodeTypes.delete(id); } } @@ -75,12 +78,18 @@ export function validateDiff( for (const sceneOp of diff.scenes) { switch (sceneOp.op) { case "add": - if (!projectedNodes.has(sceneOp.id)) { + if (!projectedNodeTypes.has(sceneOp.id)) { errors.push({ target: sceneOp.id, code: "SCENE_ADD_MISSING_NODE", message: `Scene add references 
non-existent node "${sceneOp.id}"`, }); + } else if (projectedNodeTypes.get(sceneOp.id) !== "scene") { + errors.push({ + target: sceneOp.id, + code: "SCENE_ADD_NOT_SCENE", + message: `Scene add references node "${sceneOp.id}" with type "${projectedNodeTypes.get(sceneOp.id)}", expected "scene"`, + }); } break; case "remove": diff --git a/services/grida-canvas-document-worker-cf/src/room.ts b/services/grida-canvas-document-worker-cf/src/room.ts index 1cc05d929..48beb176a 100644 --- a/services/grida-canvas-document-worker-cf/src/room.ts +++ b/services/grida-canvas-document-worker-cf/src/room.ts @@ -134,8 +134,8 @@ export class G1DO implements DurableObject { ): Promise { if (typeof message !== "string") return; - // Guard: reject oversized messages - if (message.length > MAX_MESSAGE_SIZE) { + // Guard: reject oversized messages (check UTF-8 byte length, not UTF-16 code units) + if (new TextEncoder().encode(message).byteLength > MAX_MESSAGE_SIZE) { this._send(ws, { type: "error", code: "MESSAGE_TOO_LARGE", @@ -267,11 +267,15 @@ export class G1DO implements DurableObject { return; } - // Apply the diff (in-memory + SQLite, atomically) - const newClock = this.clock.tick(); - this.canonical = applyDiff(this.canonical, msg.diff); + // Persist first — if storage throws, in-memory state stays consistent. + // Compute the next clock value without advancing yet. 
+ const newClock = this.clock.value + 1; this.storage.applyDiff(msg.diff, newClock); + // Only advance in-memory state after successful persist + this.clock.tick(); + this.canonical = applyDiff(this.canonical, msg.diff); + // Ack the pusher this._send(ws, { type: "push_ok", diff --git a/services/grida-canvas-document-worker-cf/src/storage.ts b/services/grida-canvas-document-worker-cf/src/storage.ts index 61fdd36a6..dc1d706e6 100644 --- a/services/grida-canvas-document-worker-cf/src/storage.ts +++ b/services/grida-canvas-document-worker-cf/src/storage.ts @@ -128,7 +128,7 @@ export class SyncStorage { // Update clock this._setMetaInt("clock", clock); - // Prune tombstones if too many + // Prune tombstones if too many, recording the floor clock this._pruneTombstones(5000); }); } @@ -209,6 +209,11 @@ export class SyncStorage { const currentClock = this._getMetaInt("clock", 0); if (sinceClock >= currentClock) return null; + // If the requested clock is older than the oldest retained tombstone, + // we can't guarantee a complete delta — return null to force full state. + const tombstoneFloor = this._getMetaInt("tombstone_floor", 0); + if (sinceClock < tombstoneFloor) return null; + const nodeOps: Record = {}; let hasOps = false; @@ -288,8 +293,19 @@ export class SyncStorage { const count = (countRows[0]?.cnt as number) ?? 0; if (count <= maxCount) return; - // Delete the oldest tombstones beyond the limit + // Find the clock of the oldest tombstone that will survive pruning. + // Any client with sinceClock < this value cannot get a reliable delta. 
const toDelete = count - maxCount; + const floorRows = this.sql + .exec( + "SELECT clock FROM tombstones ORDER BY clock ASC LIMIT 1 OFFSET ?", + toDelete + ) + .toArray(); + if (floorRows.length > 0) { + this._setMetaInt("tombstone_floor", floorRows[0].clock as number); + } + this.sql.exec( "DELETE FROM tombstones WHERE node_id IN (SELECT node_id FROM tombstones ORDER BY clock ASC LIMIT ?)", toDelete From 6e84bc9b0feede67a3c7da5dd144fae89165409b Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 14:26:07 +0900 Subject: [PATCH 5/8] wrangler bump --- pnpm-lock.yaml | 362 +- .../package.json | 6 +- .../worker-configuration.d.ts | 8414 +++++++++++++---- 3 files changed, 6499 insertions(+), 2283 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9b5075444..c5460b4d1 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1317,8 +1317,8 @@ importers: specifier: 5.9.3 version: 5.9.3 wrangler: - specifier: ^4.40.1 - version: 4.63.0 + specifier: ^4.81.0 + version: 4.81.0 packages: @@ -2298,41 +2298,41 @@ packages: resolution: {integrity: sha512-SIOD2DxrRRwQ+jgzlXCqoEFiKOFqaPjhnNTGKXSRLvp1HiOvapLaFG2kEr9dYQTYe8rKrd9uvDUzmAITeNyaHQ==} engines: {node: '>=18.0.0'} - '@cloudflare/unenv-preset@2.12.0': - resolution: {integrity: sha512-NK4vN+2Z/GbfGS4BamtbbVk1rcu5RmqaYGiyHJQrA09AoxdZPHDF3W/EhgI0YSK8p3vRo/VNCtbSJFPON7FWMQ==} + '@cloudflare/unenv-preset@2.16.0': + resolution: {integrity: sha512-8ovsRpwzPoEqPUzoErAYVv8l3FMZNeBVQfJTvtzP4AgLSRGZISRfuChFxHWUQd3n6cnrwkuTGxT+2cGo8EsyYg==} peerDependencies: unenv: 2.0.0-rc.24 - workerd: ^1.20260115.0 + workerd: 1.20260301.1 || ~1.20260302.1 || ~1.20260303.1 || ~1.20260304.1 || >1.20260305.0 <2.0.0-0 peerDependenciesMeta: workerd: optional: true - '@cloudflare/workerd-darwin-64@1.20260205.0': - resolution: {integrity: sha512-ToOItqcirmWPwR+PtT+Q4bdjTn/63ZxhJKEfW4FNn7FxMTS1Tw5dml0T0mieOZbCpcvY8BdvPKFCSlJuI8IVHQ==} + '@cloudflare/workerd-darwin-64@1.20260405.1': + resolution: {integrity: 
sha512-EbmdBcmeIGogKG4V1odSWQe7z4rHssUD4iaXv0cXA22/MFrzH3iQT0R+FJFyhucGtih/9B9E+6j0QbSQD8xT3w==} engines: {node: '>=16'} cpu: [x64] os: [darwin] - '@cloudflare/workerd-darwin-arm64@1.20260205.0': - resolution: {integrity: sha512-402ZqLz+LrG0NDXp7Hn7IZbI0DyhjNfjAlVenb0K3yod9KCuux0u3NksNBvqJx0mIGHvVR4K05h+jfT5BTHqGA==} + '@cloudflare/workerd-darwin-arm64@1.20260405.1': + resolution: {integrity: sha512-r44r418bOQtoP+Odu+L/BQM9q5cRSXRd1N167PgZQIo4MlqzTwHO4L0wwXhxbcV/PF46rrQre/uTFS8R0R+xSQ==} engines: {node: '>=16'} cpu: [arm64] os: [darwin] - '@cloudflare/workerd-linux-64@1.20260205.0': - resolution: {integrity: sha512-rz9jBzazIA18RHY+osa19hvsPfr0LZI1AJzIjC6UqkKKphcTpHBEQ25Xt8cIA34ivMIqeENpYnnmpDFesLkfcQ==} + '@cloudflare/workerd-linux-64@1.20260405.1': + resolution: {integrity: sha512-Aaq3RWnaTCzMBo77wC8fjOx+SFdO/rlcXa6HAf+PJs51LyMISFOBCJKqSlS6Irphen0WHHxFKPHUO9bjfj8g2g==} engines: {node: '>=16'} cpu: [x64] os: [linux] - '@cloudflare/workerd-linux-arm64@1.20260205.0': - resolution: {integrity: sha512-jr6cKpMM/DBEbL+ATJ9rYue758CKp0SfA/nXt5vR32iINVJrb396ye9iat2y9Moa/PgPKnTrFgmT6urUmG3IUg==} + '@cloudflare/workerd-linux-arm64@1.20260405.1': + resolution: {integrity: sha512-Lbp9Z2wiMzy3Sji3YwMHK5WDlejsH3jF4swAFEv7+jIf3NowZHga3GzwTypNRmcwnfz/XrqQ7Hc0Ul9OoU/lCw==} engines: {node: '>=16'} cpu: [arm64] os: [linux] - '@cloudflare/workerd-windows-64@1.20260205.0': - resolution: {integrity: sha512-SMPW5jCZYOG7XFIglSlsgN8ivcl0pCrSAYxCwxtWvZ88whhcDB/aISNtiQiDZujPH8tIo2hE5dEkxW7tGEwc3A==} + '@cloudflare/workerd-windows-64@1.20260405.1': + resolution: {integrity: sha512-FhE0kt93kj5JnSPVqi4BAXpQQENyKnuSOoJLd35mkMMGhtPrwv5EsReJdck0S8hUocCBlb+U0RmP8ta6k41HjQ==} engines: {node: '>=16'} cpu: [x64] os: [win32] @@ -2984,312 +2984,156 @@ packages: '@emotion/weak-memoize@0.2.5': resolution: {integrity: sha512-6U71C2Wp7r5XtFtQzYrW5iKFT67OixrSxjI4MptCHzdSVlgabczzqLe0ZSgnub/5Kp4hSbpDB1tMytZY9pwxxA==} - '@esbuild/aix-ppc64@0.27.0': - resolution: {integrity: 
sha512-KuZrd2hRjz01y5JK9mEBSD3Vj3mbCvemhT466rSuJYeE/hjuBrHfjjcjMdTm/sz7au+++sdbJZJmuBwQLuw68A==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [aix] - '@esbuild/aix-ppc64@0.27.3': resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} engines: {node: '>=18'} cpu: [ppc64] os: [aix] - '@esbuild/android-arm64@0.27.0': - resolution: {integrity: sha512-CC3vt4+1xZrs97/PKDkl0yN7w8edvU2vZvAFGD16n9F0Cvniy5qvzRXjfO1l94efczkkQE6g1x0i73Qf5uthOQ==} - engines: {node: '>=18'} - cpu: [arm64] - os: [android] - '@esbuild/android-arm64@0.27.3': resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} engines: {node: '>=18'} cpu: [arm64] os: [android] - '@esbuild/android-arm@0.27.0': - resolution: {integrity: sha512-j67aezrPNYWJEOHUNLPj9maeJte7uSMM6gMoxfPC9hOg8N02JuQi/T7ewumf4tNvJadFkvLZMlAq73b9uwdMyQ==} - engines: {node: '>=18'} - cpu: [arm] - os: [android] - '@esbuild/android-arm@0.27.3': resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} engines: {node: '>=18'} cpu: [arm] os: [android] - '@esbuild/android-x64@0.27.0': - resolution: {integrity: sha512-wurMkF1nmQajBO1+0CJmcN17U4BP6GqNSROP8t0X/Jiw2ltYGLHpEksp9MpoBqkrFR3kv2/te6Sha26k3+yZ9Q==} - engines: {node: '>=18'} - cpu: [x64] - os: [android] - '@esbuild/android-x64@0.27.3': resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} engines: {node: '>=18'} cpu: [x64] os: [android] - '@esbuild/darwin-arm64@0.27.0': - resolution: {integrity: sha512-uJOQKYCcHhg07DL7i8MzjvS2LaP7W7Pn/7uA0B5S1EnqAirJtbyw4yC5jQ5qcFjHK9l6o/MX9QisBg12kNkdHg==} - engines: {node: '>=18'} - cpu: [arm64] - os: [darwin] - '@esbuild/darwin-arm64@0.27.3': resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} engines: {node: '>=18'} cpu: 
[arm64] os: [darwin] - '@esbuild/darwin-x64@0.27.0': - resolution: {integrity: sha512-8mG6arH3yB/4ZXiEnXof5MK72dE6zM9cDvUcPtxhUZsDjESl9JipZYW60C3JGreKCEP+p8P/72r69m4AZGJd5g==} - engines: {node: '>=18'} - cpu: [x64] - os: [darwin] - '@esbuild/darwin-x64@0.27.3': resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} engines: {node: '>=18'} cpu: [x64] os: [darwin] - '@esbuild/freebsd-arm64@0.27.0': - resolution: {integrity: sha512-9FHtyO988CwNMMOE3YIeci+UV+x5Zy8fI2qHNpsEtSF83YPBmE8UWmfYAQg6Ux7Gsmd4FejZqnEUZCMGaNQHQw==} - engines: {node: '>=18'} - cpu: [arm64] - os: [freebsd] - '@esbuild/freebsd-arm64@0.27.3': resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-x64@0.27.0': - resolution: {integrity: sha512-zCMeMXI4HS/tXvJz8vWGexpZj2YVtRAihHLk1imZj4efx1BQzN76YFeKqlDr3bUWI26wHwLWPd3rwh6pe4EV7g==} - engines: {node: '>=18'} - cpu: [x64] - os: [freebsd] - '@esbuild/freebsd-x64@0.27.3': resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] - '@esbuild/linux-arm64@0.27.0': - resolution: {integrity: sha512-AS18v0V+vZiLJyi/4LphvBE+OIX682Pu7ZYNsdUHyUKSoRwdnOsMf6FDekwoAFKej14WAkOef3zAORJgAtXnlQ==} - engines: {node: '>=18'} - cpu: [arm64] - os: [linux] - '@esbuild/linux-arm64@0.27.3': resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} engines: {node: '>=18'} cpu: [arm64] os: [linux] - '@esbuild/linux-arm@0.27.0': - resolution: {integrity: sha512-t76XLQDpxgmq2cNXKTVEB7O7YMb42atj2Re2Haf45HkaUpjM2J0UuJZDuaGbPbamzZ7bawyGFUkodL+zcE+jvQ==} - engines: {node: '>=18'} - cpu: [arm] - os: [linux] - '@esbuild/linux-arm@0.27.3': resolution: {integrity: 
sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} engines: {node: '>=18'} cpu: [arm] os: [linux] - '@esbuild/linux-ia32@0.27.0': - resolution: {integrity: sha512-Mz1jxqm/kfgKkc/KLHC5qIujMvnnarD9ra1cEcrs7qshTUSksPihGrWHVG5+osAIQ68577Zpww7SGapmzSt4Nw==} - engines: {node: '>=18'} - cpu: [ia32] - os: [linux] - '@esbuild/linux-ia32@0.27.3': resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} engines: {node: '>=18'} cpu: [ia32] os: [linux] - '@esbuild/linux-loong64@0.27.0': - resolution: {integrity: sha512-QbEREjdJeIreIAbdG2hLU1yXm1uu+LTdzoq1KCo4G4pFOLlvIspBm36QrQOar9LFduavoWX2msNFAAAY9j4BDg==} - engines: {node: '>=18'} - cpu: [loong64] - os: [linux] - '@esbuild/linux-loong64@0.27.3': resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} engines: {node: '>=18'} cpu: [loong64] os: [linux] - '@esbuild/linux-mips64el@0.27.0': - resolution: {integrity: sha512-sJz3zRNe4tO2wxvDpH/HYJilb6+2YJxo/ZNbVdtFiKDufzWq4JmKAiHy9iGoLjAV7r/W32VgaHGkk35cUXlNOg==} - engines: {node: '>=18'} - cpu: [mips64el] - os: [linux] - '@esbuild/linux-mips64el@0.27.3': resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] - '@esbuild/linux-ppc64@0.27.0': - resolution: {integrity: sha512-z9N10FBD0DCS2dmSABDBb5TLAyF1/ydVb+N4pi88T45efQ/w4ohr/F/QYCkxDPnkhkp6AIpIcQKQ8F0ANoA2JA==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [linux] - '@esbuild/linux-ppc64@0.27.3': resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} engines: {node: '>=18'} cpu: [ppc64] os: [linux] - '@esbuild/linux-riscv64@0.27.0': - resolution: {integrity: sha512-pQdyAIZ0BWIC5GyvVFn5awDiO14TkT/19FTmFcPdDec94KJ1uZcmFs21Fo8auMXzD4Tt+diXu1LW1gHus9fhFQ==} - engines: {node: '>=18'} 
- cpu: [riscv64] - os: [linux] - '@esbuild/linux-riscv64@0.27.3': resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] - '@esbuild/linux-s390x@0.27.0': - resolution: {integrity: sha512-hPlRWR4eIDDEci953RI1BLZitgi5uqcsjKMxwYfmi4LcwyWo2IcRP+lThVnKjNtk90pLS8nKdroXYOqW+QQH+w==} - engines: {node: '>=18'} - cpu: [s390x] - os: [linux] - '@esbuild/linux-s390x@0.27.3': resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} engines: {node: '>=18'} cpu: [s390x] os: [linux] - '@esbuild/linux-x64@0.27.0': - resolution: {integrity: sha512-1hBWx4OUJE2cab++aVZ7pObD6s+DK4mPGpemtnAORBvb5l/g5xFGk0vc0PjSkrDs0XaXj9yyob3d14XqvnQ4gw==} - engines: {node: '>=18'} - cpu: [x64] - os: [linux] - '@esbuild/linux-x64@0.27.3': resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} engines: {node: '>=18'} cpu: [x64] os: [linux] - '@esbuild/netbsd-arm64@0.27.0': - resolution: {integrity: sha512-6m0sfQfxfQfy1qRuecMkJlf1cIzTOgyaeXaiVaaki8/v+WB+U4hc6ik15ZW6TAllRlg/WuQXxWj1jx6C+dfy3w==} - engines: {node: '>=18'} - cpu: [arm64] - os: [netbsd] - '@esbuild/netbsd-arm64@0.27.3': resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] - '@esbuild/netbsd-x64@0.27.0': - resolution: {integrity: sha512-xbbOdfn06FtcJ9d0ShxxvSn2iUsGd/lgPIO2V3VZIPDbEaIj1/3nBBe1AwuEZKXVXkMmpr6LUAgMkLD/4D2PPA==} - engines: {node: '>=18'} - cpu: [x64] - os: [netbsd] - '@esbuild/netbsd-x64@0.27.3': resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] - '@esbuild/openbsd-arm64@0.27.0': - resolution: {integrity: 
sha512-fWgqR8uNbCQ/GGv0yhzttj6sU/9Z5/Sv/VGU3F5OuXK6J6SlriONKrQ7tNlwBrJZXRYk5jUhuWvF7GYzGguBZQ==} - engines: {node: '>=18'} - cpu: [arm64] - os: [openbsd] - '@esbuild/openbsd-arm64@0.27.3': resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] - '@esbuild/openbsd-x64@0.27.0': - resolution: {integrity: sha512-aCwlRdSNMNxkGGqQajMUza6uXzR/U0dIl1QmLjPtRbLOx3Gy3otfFu/VjATy4yQzo9yFDGTxYDo1FfAD9oRD2A==} - engines: {node: '>=18'} - cpu: [x64] - os: [openbsd] - '@esbuild/openbsd-x64@0.27.3': resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} engines: {node: '>=18'} cpu: [x64] os: [openbsd] - '@esbuild/openharmony-arm64@0.27.0': - resolution: {integrity: sha512-nyvsBccxNAsNYz2jVFYwEGuRRomqZ149A39SHWk4hV0jWxKM0hjBPm3AmdxcbHiFLbBSwG6SbpIcUbXjgyECfA==} - engines: {node: '>=18'} - cpu: [arm64] - os: [openharmony] - '@esbuild/openharmony-arm64@0.27.3': resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} engines: {node: '>=18'} cpu: [arm64] os: [openharmony] - '@esbuild/sunos-x64@0.27.0': - resolution: {integrity: sha512-Q1KY1iJafM+UX6CFEL+F4HRTgygmEW568YMqDA5UV97AuZSm21b7SXIrRJDwXWPzr8MGr75fUZPV67FdtMHlHA==} - engines: {node: '>=18'} - cpu: [x64] - os: [sunos] - '@esbuild/sunos-x64@0.27.3': resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} engines: {node: '>=18'} cpu: [x64] os: [sunos] - '@esbuild/win32-arm64@0.27.0': - resolution: {integrity: sha512-W1eyGNi6d+8kOmZIwi/EDjrL9nxQIQ0MiGqe/AWc6+IaHloxHSGoeRgDRKHFISThLmsewZ5nHFvGFWdBYlgKPg==} - engines: {node: '>=18'} - cpu: [arm64] - os: [win32] - '@esbuild/win32-arm64@0.27.3': resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} engines: {node: 
'>=18'} cpu: [arm64] os: [win32] - '@esbuild/win32-ia32@0.27.0': - resolution: {integrity: sha512-30z1aKL9h22kQhilnYkORFYt+3wp7yZsHWus+wSKAJR8JtdfI76LJ4SBdMsCopTR3z/ORqVu5L1vtnHZWVj4cQ==} - engines: {node: '>=18'} - cpu: [ia32] - os: [win32] - '@esbuild/win32-ia32@0.27.3': resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} engines: {node: '>=18'} cpu: [ia32] os: [win32] - '@esbuild/win32-x64@0.27.0': - resolution: {integrity: sha512-aIitBcjQeyOhMTImhLZmtxfdOcuNRpwlPNmlFKPcHQYPhEssw75Cl1TSXJXpMkzaua9FUetx/4OQKq7eJul5Cg==} - engines: {node: '>=18'} - cpu: [x64] - os: [win32] - '@esbuild/win32-x64@0.27.3': resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} engines: {node: '>=18'} @@ -8999,11 +8843,6 @@ packages: esast-util-from-js@2.0.1: resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==} - esbuild@0.27.0: - resolution: {integrity: sha512-jd0f4NHbD6cALCyGElNpGAOtWxSq46l9X/sWB0Nzd5er4Kz2YTm+Vl0qKFT9KUJvD8+fiO8AvoHhFvEatfVixA==} - engines: {node: '>=18'} - hasBin: true - esbuild@0.27.3: resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} engines: {node: '>=18'} @@ -11297,8 +11136,8 @@ packages: peerDependencies: webpack: ^5.0.0 - miniflare@4.20260205.0: - resolution: {integrity: sha512-jG1TknEDeFqcq/z5gsOm1rKeg4cNG7ruWxEuiPxl3pnQumavxo8kFpeQC6XKVpAhh2PI9ODGyIYlgd77sTHl5g==} + miniflare@4.20260405.0: + resolution: {integrity: sha512-tpr4XdWMq7zFdsHH+CS0XS47nQzlRZH0rMJ1vobOZbkrs3cIj7qbD40ON616hDnzHxwqwB2qKHzmmuj6oRisSQ==} engines: {node: '>=18.0.0'} hasBin: true @@ -14301,8 +14140,8 @@ packages: undici-types@7.16.0: resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} - undici@7.18.2: - resolution: {integrity: 
sha512-y+8YjDFzWdQlSE9N5nzKMT3g4a5UBX1HKowfdXh0uvAnTaqqwqB92Jt4UXBAeKekDs5IaDKyJFR4X1gYVCgXcw==} + undici@7.24.4: + resolution: {integrity: sha512-BM/JzwwaRXxrLdElV2Uo6cTLEjhSb3WXboncJamZ15NgUURmvlXvxa6xkwIOILIjPNo9i8ku136ZvWV0Uly8+w==} engines: {node: '>=20.18.1'} unenv@2.0.0-rc.24: @@ -14844,17 +14683,17 @@ packages: wordwrap@1.0.0: resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} - workerd@1.20260205.0: - resolution: {integrity: sha512-CcMH5clHwrH8VlY7yWS9C/G/C8g9czIz1yU3akMSP9Z3CkEMFSoC3GGdj5G7Alw/PHEeez1+1IrlYger4pwu+w==} + workerd@1.20260405.1: + resolution: {integrity: sha512-bSaRWCv9iO8/FWpgZRjHLGZLolX5s1AErRSYaTECMMHOZKuCbl2+ehnSyc+ZZ/70y+9owADmN6HoYEWvBlJdYw==} engines: {node: '>=16'} hasBin: true - wrangler@4.63.0: - resolution: {integrity: sha512-+R04jF7Eb8K3KRMSgoXpcIdLb8GC62eoSGusYh1pyrSMm/10E0hbKkd7phMJO4HxXc6R7mOHC5SSoX9eof30Uw==} - engines: {node: '>=20.0.0'} + wrangler@4.81.0: + resolution: {integrity: sha512-9fLPDuDcb8Nu6iXrl5E3HGYt3TVhQr/UvqtTvWr9Nl1X7PlQrmWMwQCfSioqN8VHYyQCyESV5jQsoKg8Sx+sEA==} + engines: {node: '>=20.3.0'} hasBin: true peerDependencies: - '@cloudflare/workers-types': ^4.20260205.0 + '@cloudflare/workers-types': ^4.20260405.1 peerDependenciesMeta: '@cloudflare/workers-types': optional: true @@ -16494,25 +16333,25 @@ snapshots: '@cloudflare/kv-asset-handler@0.4.2': {} - '@cloudflare/unenv-preset@2.12.0(unenv@2.0.0-rc.24)(workerd@1.20260205.0)': + '@cloudflare/unenv-preset@2.16.0(unenv@2.0.0-rc.24)(workerd@1.20260405.1)': dependencies: unenv: 2.0.0-rc.24 optionalDependencies: - workerd: 1.20260205.0 + workerd: 1.20260405.1 - '@cloudflare/workerd-darwin-64@1.20260205.0': + '@cloudflare/workerd-darwin-64@1.20260405.1': optional: true - '@cloudflare/workerd-darwin-arm64@1.20260205.0': + '@cloudflare/workerd-darwin-arm64@1.20260405.1': optional: true - '@cloudflare/workerd-linux-64@1.20260205.0': + '@cloudflare/workerd-linux-64@1.20260405.1': optional: true - 
'@cloudflare/workerd-linux-arm64@1.20260205.0': + '@cloudflare/workerd-linux-arm64@1.20260405.1': optional: true - '@cloudflare/workerd-windows-64@1.20260205.0': + '@cloudflare/workerd-windows-64@1.20260405.1': optional: true '@colors/colors@1.5.0': @@ -17836,159 +17675,81 @@ snapshots: '@emotion/weak-memoize@0.2.5': {} - '@esbuild/aix-ppc64@0.27.0': - optional: true - '@esbuild/aix-ppc64@0.27.3': optional: true - '@esbuild/android-arm64@0.27.0': - optional: true - '@esbuild/android-arm64@0.27.3': optional: true - '@esbuild/android-arm@0.27.0': - optional: true - '@esbuild/android-arm@0.27.3': optional: true - '@esbuild/android-x64@0.27.0': - optional: true - '@esbuild/android-x64@0.27.3': optional: true - '@esbuild/darwin-arm64@0.27.0': - optional: true - '@esbuild/darwin-arm64@0.27.3': optional: true - '@esbuild/darwin-x64@0.27.0': - optional: true - '@esbuild/darwin-x64@0.27.3': optional: true - '@esbuild/freebsd-arm64@0.27.0': - optional: true - '@esbuild/freebsd-arm64@0.27.3': optional: true - '@esbuild/freebsd-x64@0.27.0': - optional: true - '@esbuild/freebsd-x64@0.27.3': optional: true - '@esbuild/linux-arm64@0.27.0': - optional: true - '@esbuild/linux-arm64@0.27.3': optional: true - '@esbuild/linux-arm@0.27.0': - optional: true - '@esbuild/linux-arm@0.27.3': optional: true - '@esbuild/linux-ia32@0.27.0': - optional: true - '@esbuild/linux-ia32@0.27.3': optional: true - '@esbuild/linux-loong64@0.27.0': - optional: true - '@esbuild/linux-loong64@0.27.3': optional: true - '@esbuild/linux-mips64el@0.27.0': - optional: true - '@esbuild/linux-mips64el@0.27.3': optional: true - '@esbuild/linux-ppc64@0.27.0': - optional: true - '@esbuild/linux-ppc64@0.27.3': optional: true - '@esbuild/linux-riscv64@0.27.0': - optional: true - '@esbuild/linux-riscv64@0.27.3': optional: true - '@esbuild/linux-s390x@0.27.0': - optional: true - '@esbuild/linux-s390x@0.27.3': optional: true - '@esbuild/linux-x64@0.27.0': - optional: true - '@esbuild/linux-x64@0.27.3': optional: true - 
'@esbuild/netbsd-arm64@0.27.0': - optional: true - '@esbuild/netbsd-arm64@0.27.3': optional: true - '@esbuild/netbsd-x64@0.27.0': - optional: true - '@esbuild/netbsd-x64@0.27.3': optional: true - '@esbuild/openbsd-arm64@0.27.0': - optional: true - '@esbuild/openbsd-arm64@0.27.3': optional: true - '@esbuild/openbsd-x64@0.27.0': - optional: true - '@esbuild/openbsd-x64@0.27.3': optional: true - '@esbuild/openharmony-arm64@0.27.0': - optional: true - '@esbuild/openharmony-arm64@0.27.3': optional: true - '@esbuild/sunos-x64@0.27.0': - optional: true - '@esbuild/sunos-x64@0.27.3': optional: true - '@esbuild/win32-arm64@0.27.0': - optional: true - '@esbuild/win32-arm64@0.27.3': optional: true - '@esbuild/win32-ia32@0.27.0': - optional: true - '@esbuild/win32-ia32@0.27.3': optional: true - '@esbuild/win32-x64@0.27.0': - optional: true - '@esbuild/win32-x64@0.27.3': optional: true @@ -24443,35 +24204,6 @@ snapshots: esast-util-from-estree: 2.0.0 vfile-message: 4.0.3 - esbuild@0.27.0: - optionalDependencies: - '@esbuild/aix-ppc64': 0.27.0 - '@esbuild/android-arm': 0.27.0 - '@esbuild/android-arm64': 0.27.0 - '@esbuild/android-x64': 0.27.0 - '@esbuild/darwin-arm64': 0.27.0 - '@esbuild/darwin-x64': 0.27.0 - '@esbuild/freebsd-arm64': 0.27.0 - '@esbuild/freebsd-x64': 0.27.0 - '@esbuild/linux-arm': 0.27.0 - '@esbuild/linux-arm64': 0.27.0 - '@esbuild/linux-ia32': 0.27.0 - '@esbuild/linux-loong64': 0.27.0 - '@esbuild/linux-mips64el': 0.27.0 - '@esbuild/linux-ppc64': 0.27.0 - '@esbuild/linux-riscv64': 0.27.0 - '@esbuild/linux-s390x': 0.27.0 - '@esbuild/linux-x64': 0.27.0 - '@esbuild/netbsd-arm64': 0.27.0 - '@esbuild/netbsd-x64': 0.27.0 - '@esbuild/openbsd-arm64': 0.27.0 - '@esbuild/openbsd-x64': 0.27.0 - '@esbuild/openharmony-arm64': 0.27.0 - '@esbuild/sunos-x64': 0.27.0 - '@esbuild/win32-arm64': 0.27.0 - '@esbuild/win32-ia32': 0.27.0 - '@esbuild/win32-x64': 0.27.0 - esbuild@0.27.3: optionalDependencies: '@esbuild/aix-ppc64': 0.27.3 @@ -27482,12 +27214,12 @@ snapshots: tapable: 
2.3.0 webpack: 5.98.0(esbuild@0.27.3) - miniflare@4.20260205.0: + miniflare@4.20260405.0: dependencies: '@cspotcode/source-map-support': 0.8.1 sharp: 0.34.5 - undici: 7.18.2 - workerd: 1.20260205.0 + undici: 7.24.4 + workerd: 1.20260405.1 ws: 8.18.0 youch: 4.1.0-beta.10 transitivePeerDependencies: @@ -31155,7 +30887,7 @@ snapshots: undici-types@7.16.0: {} - undici@7.18.2: {} + undici@7.24.4: {} unenv@2.0.0-rc.24: dependencies: @@ -31792,24 +31524,24 @@ snapshots: wordwrap@1.0.0: {} - workerd@1.20260205.0: + workerd@1.20260405.1: optionalDependencies: - '@cloudflare/workerd-darwin-64': 1.20260205.0 - '@cloudflare/workerd-darwin-arm64': 1.20260205.0 - '@cloudflare/workerd-linux-64': 1.20260205.0 - '@cloudflare/workerd-linux-arm64': 1.20260205.0 - '@cloudflare/workerd-windows-64': 1.20260205.0 + '@cloudflare/workerd-darwin-64': 1.20260405.1 + '@cloudflare/workerd-darwin-arm64': 1.20260405.1 + '@cloudflare/workerd-linux-64': 1.20260405.1 + '@cloudflare/workerd-linux-arm64': 1.20260405.1 + '@cloudflare/workerd-windows-64': 1.20260405.1 - wrangler@4.63.0: + wrangler@4.81.0: dependencies: '@cloudflare/kv-asset-handler': 0.4.2 - '@cloudflare/unenv-preset': 2.12.0(unenv@2.0.0-rc.24)(workerd@1.20260205.0) + '@cloudflare/unenv-preset': 2.16.0(unenv@2.0.0-rc.24)(workerd@1.20260405.1) blake3-wasm: 2.1.5 - esbuild: 0.27.0 - miniflare: 4.20260205.0 + esbuild: 0.27.3 + miniflare: 4.20260405.0 path-to-regexp: 6.3.0 unenv: 2.0.0-rc.24 - workerd: 1.20260205.0 + workerd: 1.20260405.1 optionalDependencies: fsevents: 2.3.3 transitivePeerDependencies: diff --git a/services/grida-canvas-document-worker-cf/package.json b/services/grida-canvas-document-worker-cf/package.json index 44e6f8aa1..6bf62156f 100644 --- a/services/grida-canvas-document-worker-cf/package.json +++ b/services/grida-canvas-document-worker-cf/package.json @@ -4,10 +4,10 @@ "private": true, "scripts": { "cf-typegen": "wrangler types", - "deploy": "wrangler deploy", "dev": "wrangler dev", "start": "wrangler dev", - 
"typecheck": "tsc --noEmit" + "typecheck": "tsc --noEmit", + "wrangler:deploy": "wrangler deploy" }, "dependencies": { "@grida/canvas-sync": "workspace:*", @@ -15,6 +15,6 @@ }, "devDependencies": { "typescript": "^5", - "wrangler": "^4.40.1" + "wrangler": "^4.81.0" } } diff --git a/services/grida-canvas-document-worker-cf/worker-configuration.d.ts b/services/grida-canvas-document-worker-cf/worker-configuration.d.ts index ae8497ba2..3f52569b0 100644 --- a/services/grida-canvas-document-worker-cf/worker-configuration.d.ts +++ b/services/grida-canvas-document-worker-cf/worker-configuration.d.ts @@ -1,6 +1,6 @@ /* eslint-disable */ // Generated by Wrangler by running `wrangler types` (hash: aa7fde5d227457130fc2a7608073a12b) -// Runtime types generated with workerd@1.20250924.0 2025-09-24 +// Runtime types generated with workerd@1.20260405.1 2025-09-24 declare namespace Cloudflare { interface GlobalProps { mainModule: typeof import("./src/index"); @@ -32,17 +32,26 @@ and limitations under the License. // noinspection JSUnusedGlobalSymbols declare var onmessage: never; /** - * An abnormal event (called an exception) which occurs as a result of calling a method or accessing a property of a web API. + * The **`DOMException`** interface represents an abnormal event (called an **exception**) that occurs as a result of calling a method or accessing a property of a web API. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException) */ declare class DOMException extends Error { constructor(message?: string, name?: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/message) */ + /** + * The **`message`** read-only property of the a message or description associated with the given error name. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/message) + */ readonly message: string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/name) */ + /** + * The **`name`** read-only property of the one of the strings associated with an error name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/name) + */ readonly name: string; /** + * The **`code`** read-only property of the DOMException interface returns one of the legacy error code constants, or `0` if none match. * @deprecated * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/code) @@ -86,45 +95,121 @@ type WorkerGlobalScopeEventMap = { declare abstract class WorkerGlobalScope extends EventTarget { EventTarget: typeof EventTarget; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console) */ +/* The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). * + * The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console) + */ interface Console { "assert"(condition?: boolean, ...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/clear_static) */ + /** + * The **`console.clear()`** static method clears the console if possible. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/clear_static) + */ clear(): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static) */ + /** + * The **`console.count()`** static method logs the number of times that this particular call to `count()` has been called. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static) + */ count(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static) */ + /** + * The **`console.countReset()`** static method resets counter used with console/count_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static) + */ countReset(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static) */ + /** + * The **`console.debug()`** static method outputs a message to the console at the 'debug' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static) + */ debug(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static) */ + /** + * The **`console.dir()`** static method displays a list of the properties of the specified JavaScript object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static) + */ dir(item?: any, options?: any): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dirxml_static) */ + /** + * The **`console.dirxml()`** static method displays an interactive tree of the descendant elements of the specified XML/HTML element. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dirxml_static) + */ dirxml(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static) */ + /** + * The **`console.error()`** static method outputs a message to the console at the 'error' log level. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static) + */ error(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static) */ + /** + * The **`console.group()`** static method creates a new inline group in the Web console log, causing any subsequent console messages to be indented by an additional level, until console/groupEnd_static is called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static) + */ group(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static) */ + /** + * The **`console.groupCollapsed()`** static method creates a new inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static) + */ groupCollapsed(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static) */ + /** + * The **`console.groupEnd()`** static method exits the current inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static) + */ groupEnd(): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/info_static) */ + /** + * The **`console.info()`** static method outputs a message to the console at the 'info' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/info_static) + */ info(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/log_static) */ + /** + * The **`console.log()`** static method outputs a message to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/log_static) + */ log(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/table_static) */ + /** + * The **`console.table()`** static method displays tabular data as a table. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/table_static) + */ table(tabularData?: any, properties?: string[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static) */ + /** + * The **`console.time()`** static method starts a timer you can use to track how long an operation takes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static) + */ time(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static) */ + /** + * The **`console.timeEnd()`** static method stops a timer that was previously started by calling console/time_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static) + */ timeEnd(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static) */ + /** + * The **`console.timeLog()`** static method logs the current value of a timer that was previously started by calling console/time_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static) + */ timeLog(label?: string, ...data: any[]): void; timeStamp(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static) */ + /** + * The **`console.trace()`** static method outputs a stack trace to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static) + */ trace(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/warn_static) */ + /** + * The **`console.warn()`** static method outputs a warning message to the console at the 'warning' log level. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/warn_static) + */ warn(...data: any[]): void; } declare const console: Console; @@ -198,7 +283,7 @@ declare namespace WebAssembly { function validate(bytes: BufferSource): boolean; } /** - * This ServiceWorker API interface represents the global execution context of a service worker. + * The **`ServiceWorkerGlobalScope`** interface of the Service Worker API represents the global execution context of a service worker. * Available only in secure contexts. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ServiceWorkerGlobalScope) @@ -287,7 +372,7 @@ interface ServiceWorkerGlobalScope extends WorkerGlobalScope { declare function addEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetAddEventListenerOptions | boolean): void; declare function removeEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetEventListenerOptions | boolean): void; /** - * Dispatches a synthetic event event to target and returns true if either event's cancelable attribute value is false or its preventDefault() method was not invoked, and false otherwise. + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) */ @@ -350,35 +435,30 @@ interface ExecutionContext { passThroughOnException(): void; readonly props: Props; } -type ExportedHandlerFetchHandler = (request: Request>, env: Env, ctx: ExecutionContext) => Response | Promise; -type ExportedHandlerTailHandler = (events: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; -type ExportedHandlerTraceHandler = (traces: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; -type ExportedHandlerTailStreamHandler = (event: TailStream.TailEvent, env: Env, ctx: ExecutionContext) => TailStream.TailEventHandlerType | Promise; -type ExportedHandlerScheduledHandler = (controller: ScheduledController, env: Env, ctx: ExecutionContext) => void | Promise; -type ExportedHandlerQueueHandler = (batch: MessageBatch, env: Env, ctx: ExecutionContext) => void | Promise; -type ExportedHandlerTestHandler = (controller: TestController, env: Env, ctx: ExecutionContext) => void | Promise; -interface ExportedHandler { - fetch?: ExportedHandlerFetchHandler; - tail?: ExportedHandlerTailHandler; - trace?: ExportedHandlerTraceHandler; - tailStream?: ExportedHandlerTailStreamHandler; - scheduled?: ExportedHandlerScheduledHandler; - test?: ExportedHandlerTestHandler; - email?: EmailExportedHandler; - queue?: ExportedHandlerQueueHandler; +type ExportedHandlerFetchHandler = (request: Request>, env: Env, ctx: ExecutionContext) => Response | Promise; +type ExportedHandlerConnectHandler = (socket: Socket, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTailHandler = (events: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTraceHandler = (traces: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTailStreamHandler = (event: TailStream.TailEvent, env: Env, ctx: ExecutionContext) => TailStream.TailEventHandlerType | Promise; +type 
ExportedHandlerScheduledHandler = (controller: ScheduledController, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerQueueHandler = (batch: MessageBatch, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTestHandler = (controller: TestController, env: Env, ctx: ExecutionContext) => void | Promise; +interface ExportedHandler { + fetch?: ExportedHandlerFetchHandler; + connect?: ExportedHandlerConnectHandler; + tail?: ExportedHandlerTailHandler; + trace?: ExportedHandlerTraceHandler; + tailStream?: ExportedHandlerTailStreamHandler; + scheduled?: ExportedHandlerScheduledHandler; + test?: ExportedHandlerTestHandler; + email?: EmailExportedHandler; + queue?: ExportedHandlerQueueHandler; } interface StructuredSerializeOptions { transfer?: any[]; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent) */ -declare abstract class PromiseRejectionEvent extends Event { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/promise) */ - readonly promise: Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/reason) */ - readonly reason: any; -} declare abstract class Navigator { - sendBeacon(url: string, body?: (ReadableStream | string | (ArrayBuffer | ArrayBufferView) | Blob | FormData | URLSearchParams | URLSearchParams)): boolean; + sendBeacon(url: string, body?: BodyInit): boolean; readonly userAgent: string; readonly hardwareConcurrency: number; readonly language: string; @@ -387,18 +467,20 @@ declare abstract class Navigator { interface AlarmInvocationInfo { readonly isRetry: boolean; readonly retryCount: number; + readonly scheduledTime: number; } interface Cloudflare { readonly compatibilityFlags: Record; } interface DurableObject { fetch(request: Request): Response | Promise; + connect?(socket: Socket): void | Promise; alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; webSocketMessage?(ws: WebSocket, message: 
string | ArrayBuffer): void | Promise; webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; webSocketError?(ws: WebSocket, error: unknown): void | Promise; } -type DurableObjectStub = Fetcher & { +type DurableObjectStub = Fetcher & { readonly id: DurableObjectId; readonly name?: string; }; @@ -406,6 +488,7 @@ interface DurableObjectId { toString(): string; equals(other: DurableObjectId): boolean; readonly name?: string; + readonly jurisdiction?: string; } declare abstract class DurableObjectNamespace { newUniqueId(options?: DurableObjectNamespaceNewUniqueIdOptions): DurableObjectId; @@ -420,8 +503,10 @@ interface DurableObjectNamespaceNewUniqueIdOptions { jurisdiction?: DurableObjectJurisdiction; } type DurableObjectLocationHint = "wnam" | "enam" | "sam" | "weur" | "eeur" | "apac" | "oc" | "afr" | "me"; +type DurableObjectRoutingMode = "primary-only"; interface DurableObjectNamespaceGetDurableObjectOptions { locationHint?: DurableObjectLocationHint; + routingMode?: DurableObjectRoutingMode; } interface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> { } @@ -516,116 +601,120 @@ interface AnalyticsEngineDataPoint { blobs?: ((ArrayBuffer | string) | null)[]; } /** - * An event which takes place in the DOM. + * The **`Event`** interface represents an event which takes place on an `EventTarget`. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event) */ declare class Event { constructor(type: string, init?: EventInit); /** - * Returns the type of event, e.g. "click", "hashchange", or "submit". + * The **`type`** read-only property of the Event interface returns a string containing the event's type. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/type) */ get type(): string; /** - * Returns the event's phase, which is one of NONE, CAPTURING_PHASE, AT_TARGET, and BUBBLING_PHASE. + * The **`eventPhase`** read-only property of the being evaluated. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/eventPhase) */ get eventPhase(): number; /** - * Returns true or false depending on how event was initialized. True if event invokes listeners past a ShadowRoot node that is the root of its target, and false otherwise. + * The read-only **`composed`** property of the or not the event will propagate across the shadow DOM boundary into the standard DOM. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composed) */ get composed(): boolean; /** - * Returns true or false depending on how event was initialized. True if event goes through its target's ancestors in reverse tree order, and false otherwise. + * The **`bubbles`** read-only property of the Event interface indicates whether the event bubbles up through the DOM tree or not. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/bubbles) */ get bubbles(): boolean; /** - * Returns true or false depending on how event was initialized. Its return value does not always carry meaning, but true can indicate that part of the operation during which event was dispatched, can be canceled by invoking the preventDefault() method. + * The **`cancelable`** read-only property of the Event interface indicates whether the event can be canceled, and therefore prevented as if the event never happened. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelable) */ get cancelable(): boolean; /** - * Returns true if preventDefault() was invoked successfully to indicate cancelation, and false otherwise. + * The **`defaultPrevented`** read-only property of the Event interface returns a boolean value indicating whether or not the call to Event.preventDefault() canceled the event. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/defaultPrevented) */ get defaultPrevented(): boolean; /** + * The Event property **`returnValue`** indicates whether the default action for this event has been prevented or not. * @deprecated * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/returnValue) */ get returnValue(): boolean; /** - * Returns the object whose event listener's callback is currently being invoked. + * The **`currentTarget`** read-only property of the Event interface identifies the element to which the event handler has been attached. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/currentTarget) */ get currentTarget(): EventTarget | undefined; /** - * Returns the object to which event is dispatched (its target). + * The read-only **`target`** property of the dispatched. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/target) */ get target(): EventTarget | undefined; /** + * The deprecated **`Event.srcElement`** is an alias for the Event.target property. * @deprecated * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/srcElement) */ get srcElement(): EventTarget | undefined; /** - * Returns the event's timestamp as the number of milliseconds measured relative to the time origin. + * The **`timeStamp`** read-only property of the Event interface returns the time (in milliseconds) at which the event was created. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/timeStamp) */ get timeStamp(): number; /** - * Returns true if event was dispatched by the user agent, and false otherwise. + * The **`isTrusted`** read-only property of the when the event was generated by the user agent (including via user actions and programmatic methods such as HTMLElement.focus()), and `false` when the event was dispatched via The only exception is the `click` event, which initializes the `isTrusted` property to `false` in user agents. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/isTrusted) */ get isTrusted(): boolean; /** + * The **`cancelBubble`** property of the Event interface is deprecated. * @deprecated * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) */ get cancelBubble(): boolean; /** + * The **`cancelBubble`** property of the Event interface is deprecated. * @deprecated * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) */ set cancelBubble(value: boolean); /** - * Invoking this method prevents event from reaching any registered event listeners after the current one finishes running and, when dispatched in a tree, also prevents event from reaching any other objects. + * The **`stopImmediatePropagation()`** method of the If several listeners are attached to the same element for the same event type, they are called in the order in which they were added. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopImmediatePropagation) */ stopImmediatePropagation(): void; /** - * If invoked when the cancelable attribute value is true, and while executing a listener for the event with passive set to false, signals to the operation that caused event to be dispatched that it needs to be canceled. + * The **`preventDefault()`** method of the Event interface tells the user agent that if the event does not get explicitly handled, its default action should not be taken as it normally would be. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/preventDefault) */ preventDefault(): void; /** - * When dispatched in a tree, invoking this method prevents event from reaching any objects other than the current object. + * The **`stopPropagation()`** method of the Event interface prevents further propagation of the current event in the capturing and bubbling phases. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopPropagation) */ stopPropagation(): void; /** - * Returns the invocation target objects of event's path (objects on which listeners will be invoked), except for any nodes in shadow trees of which the shadow root's mode is "closed" that are not reachable from event's currentTarget. + * The **`composedPath()`** method of the Event interface returns the event's path which is an array of the objects on which listeners will be invoked. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composedPath) */ @@ -646,38 +735,26 @@ interface EventListenerObject { } type EventListenerOrEventListenerObject = EventListener | EventListenerObject; /** - * EventTarget is a DOM interface implemented by objects that can receive events and may have listeners for them. + * The **`EventTarget`** interface is implemented by objects that can receive events and may have listeners for them. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget) */ declare class EventTarget = Record> { constructor(); /** - * Appends an event listener for events whose type attribute value is type. The callback argument sets the callback that will be invoked when the event is dispatched. - * - * The options argument sets listener-specific options. For compatibility this can be a boolean, in which case the method behaves exactly as if the value was specified as options's capture. - * - * When set to true, options's capture prevents callback from being invoked when the event's eventPhase attribute value is BUBBLING_PHASE. When false (or not present), callback will not be invoked when event's eventPhase attribute value is CAPTURING_PHASE. Either way, callback will be invoked if event's eventPhase attribute value is AT_TARGET. - * - * When set to true, options's passive indicates that the callback will not cancel the event by invoking preventDefault(). 
This is used to enable performance optimizations described in § 2.8 Observing event listeners. - * - * When set to true, options's once indicates that the callback will only be invoked once after which the event listener will be removed. - * - * If an AbortSignal is passed for options's signal, then the event listener will be removed when signal is aborted. - * - * The event listener is appended to target's event listener list and is not appended if it has the same type, callback, and capture. + * The **`addEventListener()`** method of the EventTarget interface sets up a function that will be called whenever the specified event is delivered to the target. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/addEventListener) */ addEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetAddEventListenerOptions | boolean): void; /** - * Removes the event listener in target's event listener list with the same type, callback, and options. + * The **`removeEventListener()`** method of the EventTarget interface removes an event listener previously registered with EventTarget.addEventListener() from the target. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/removeEventListener) */ removeEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetEventListenerOptions | boolean): void; /** - * Dispatches a synthetic event event to target and returns true if either event's cancelable attribute value is false or its preventDefault() method was not invoked, and false otherwise. + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) */ @@ -696,50 +773,70 @@ interface EventTargetHandlerObject { handleEvent: (event: Event) => any | undefined; } /** - * A controller object that allows you to abort one or more DOM requests as and when desired. + * The **`AbortController`** interface represents a controller object that allows you to abort one or more Web requests as and when desired. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController) */ declare class AbortController { constructor(); /** - * Returns the AbortSignal object associated with this object. + * The **`signal`** read-only property of the AbortController interface returns an AbortSignal object instance, which can be used to communicate with/abort an asynchronous operation as desired. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/signal) */ get signal(): AbortSignal; /** - * Invoking this method will set this object's AbortSignal's aborted flag and signal to any observers that the associated activity is to be aborted. + * The **`abort()`** method of the AbortController interface aborts an asynchronous operation before it has completed. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/abort) */ abort(reason?: any): void; } /** - * A signal object that allows you to communicate with a DOM request (such as a Fetch) and abort it if required via an AbortController object. + * The **`AbortSignal`** interface represents a signal object that allows you to communicate with an asynchronous operation (such as a fetch request) and abort it if required via an AbortController object. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal) */ declare abstract class AbortSignal extends EventTarget { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_static) */ + /** + * The **`AbortSignal.abort()`** static method returns an AbortSignal that is already set as aborted (and which does not trigger an AbortSignal/abort_event event). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_static) + */ static abort(reason?: any): AbortSignal; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/timeout_static) */ + /** + * The **`AbortSignal.timeout()`** static method returns an AbortSignal that will automatically abort after a specified time. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/timeout_static) + */ static timeout(delay: number): AbortSignal; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/any_static) */ + /** + * The **`AbortSignal.any()`** static method takes an iterable of abort signals and returns an AbortSignal. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/any_static) + */ static any(signals: AbortSignal[]): AbortSignal; /** - * Returns true if this AbortSignal's AbortController has signaled to abort, and false otherwise. + * The **`aborted`** read-only property returns a value that indicates whether the asynchronous operations the signal is communicating with are aborted (`true`) or not (`false`). * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/aborted) */ get aborted(): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/reason) */ + /** + * The **`reason`** read-only property returns a JavaScript value that indicates the abort reason. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/reason) + */ get reason(): any; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ get onabort(): any | null; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ set onabort(value: any | null); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/throwIfAborted) */ + /** + * The **`throwIfAborted()`** method throws the signal's abort AbortSignal.reason if the signal has been aborted; otherwise it does nothing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/throwIfAborted) + */ throwIfAborted(): void; } interface Scheduler { @@ -749,19 +846,27 @@ interface SchedulerWaitOptions { signal?: AbortSignal; } /** - * Extends the lifetime of the install and activate events dispatched on the global scope as part of the service worker lifecycle. This ensures that any functional events (like FetchEvent) are not dispatched until it upgrades database schemas and deletes the outdated cache entries. + * The **`ExtendableEvent`** interface extends the lifetime of the `install` and `activate` events dispatched on the global scope as part of the service worker lifecycle. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent) */ declare abstract class ExtendableEvent extends Event { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent/waitUntil) */ + /** + * The **`ExtendableEvent.waitUntil()`** method tells the event dispatcher that work is ongoing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent/waitUntil) + */ waitUntil(promise: Promise): void; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent) */ +/** + * The **`CustomEvent`** interface represents events initialized by an application for any purpose. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent) + */ declare class CustomEvent extends Event { constructor(type: string, init?: CustomEventCustomEventInit); /** - * Returns any custom data event was created with. Typically used for synthetic events. + * The read-only **`detail`** property of the CustomEvent interface returns any data passed when initializing the event. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent/detail) */ @@ -774,40 +879,76 @@ interface CustomEventCustomEventInit { detail?: any; } /** - * A file-like object of immutable, raw data. Blobs represent data that isn't necessarily in a JavaScript-native format. The File interface is based on Blob, inheriting blob functionality and expanding it to support files on the user's system. + * The **`Blob`** interface represents a blob, which is a file-like object of immutable, raw data; they can be read as text or binary data, or converted into a ReadableStream so its methods can be used for processing the data. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob) */ declare class Blob { constructor(type?: ((ArrayBuffer | ArrayBufferView) | string | Blob)[], options?: BlobOptions); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) */ + /** + * The **`size`** read-only property of the Blob interface returns the size of the Blob or File in bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) + */ get size(): number; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) */ + /** + * The **`type`** read-only property of the Blob interface returns the MIME type of the file. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) + */ get type(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) */ + /** + * The **`slice()`** method of the Blob interface creates and returns a new `Blob` object which contains data from a subset of the blob on which it's called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) + */ slice(start?: number, end?: number, type?: string): Blob; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer) */ + /** + * The **`arrayBuffer()`** method of the Blob interface returns a Promise that resolves with the contents of the blob as binary data contained in an ArrayBuffer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer) + */ arrayBuffer(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes) */ + /** + * The **`bytes()`** method of the Blob interface returns a Promise that resolves with a Uint8Array containing the contents of the blob as an array of bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes) + */ bytes(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) */ + /** + * The **`text()`** method of the string containing the contents of the blob, interpreted as UTF-8. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) + */ text(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/stream) */ + /** + * The **`stream()`** method of the Blob interface returns a ReadableStream which upon reading returns the data contained within the `Blob`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/stream) + */ stream(): ReadableStream; } interface BlobOptions { type?: string; } /** - * Provides information about files and allows JavaScript in a web page to access their content. 
+ * The **`File`** interface provides information about files and allows JavaScript in a web page to access their content. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File) */ declare class File extends Blob { constructor(bits: ((ArrayBuffer | ArrayBufferView) | string | Blob)[] | undefined, name: string, options?: FileOptions); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) */ + /** + * The **`name`** read-only property of the File interface returns the name of the file represented by a File object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) + */ get name(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) */ + /** + * The **`lastModified`** read-only property of the File interface provides the last modified date of the file as the number of milliseconds since the Unix epoch (January 1, 1970 at midnight). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) + */ get lastModified(): number; } interface FileOptions { @@ -820,7 +961,11 @@ interface FileOptions { * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) */ declare abstract class CacheStorage { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CacheStorage/open) */ + /** + * The **`open()`** method of the the Cache object matching the `cacheName`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CacheStorage/open) + */ open(cacheName: string): Promise; readonly default: Cache; } @@ -850,14 +995,20 @@ interface CacheQueryOptions { */ declare abstract class Crypto { /** + * The **`Crypto.subtle`** read-only property returns a cryptographic operations. * Available only in secure contexts. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/subtle) */ get subtle(): SubtleCrypto; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) */ + /** + * The **`Crypto.getRandomValues()`** method lets you get cryptographically strong random values. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) + */ getRandomValues(buffer: T): T; /** + * The **`randomUUID()`** method of the Crypto interface is used to generate a v4 UUID using a cryptographically secure random number generator. * Available only in secure contexts. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID) @@ -866,52 +1017,116 @@ declare abstract class Crypto { DigestStream: typeof DigestStream; } /** - * This Web Crypto API interface provides a number of low-level cryptographic functions. It is accessed via the Crypto.subtle properties available in a window context (via Window.crypto). + * The **`SubtleCrypto`** interface of the Web Crypto API provides a number of low-level cryptographic functions. * Available only in secure contexts. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto) */ declare abstract class SubtleCrypto { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/encrypt) */ + /** + * The **`encrypt()`** method of the SubtleCrypto interface encrypts data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/encrypt) + */ encrypt(algorithm: string | SubtleCryptoEncryptAlgorithm, key: CryptoKey, plainText: ArrayBuffer | ArrayBufferView): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/decrypt) */ + /** + * The **`decrypt()`** method of the SubtleCrypto interface decrypts some encrypted data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/decrypt) + */ decrypt(algorithm: string | SubtleCryptoEncryptAlgorithm, key: CryptoKey, cipherText: ArrayBuffer | ArrayBufferView): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/sign) */ + /** + * The **`sign()`** method of the SubtleCrypto interface generates a digital signature. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/sign) + */ sign(algorithm: string | SubtleCryptoSignAlgorithm, key: CryptoKey, data: ArrayBuffer | ArrayBufferView): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/verify) */ + /** + * The **`verify()`** method of the SubtleCrypto interface verifies a digital signature. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/verify) + */ verify(algorithm: string | SubtleCryptoSignAlgorithm, key: CryptoKey, signature: ArrayBuffer | ArrayBufferView, data: ArrayBuffer | ArrayBufferView): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/digest) */ + /** + * The **`digest()`** method of the SubtleCrypto interface generates a _digest_ of the given data, using the specified hash function. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/digest) + */ digest(algorithm: string | SubtleCryptoHashAlgorithm, data: ArrayBuffer | ArrayBufferView): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/generateKey) */ + /** + * The **`generateKey()`** method of the SubtleCrypto interface is used to generate a new key (for symmetric algorithms) or key pair (for public-key algorithms). 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/generateKey) + */ generateKey(algorithm: string | SubtleCryptoGenerateKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveKey) */ + /** + * The **`deriveKey()`** method of the SubtleCrypto interface can be used to derive a secret key from a master key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveKey) + */ deriveKey(algorithm: string | SubtleCryptoDeriveKeyAlgorithm, baseKey: CryptoKey, derivedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveBits) */ + /** + * The **`deriveBits()`** method of the key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveBits) + */ deriveBits(algorithm: string | SubtleCryptoDeriveKeyAlgorithm, baseKey: CryptoKey, length?: number | null): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/importKey) */ + /** + * The **`importKey()`** method of the SubtleCrypto interface imports a key: that is, it takes as input a key in an external, portable format and gives you a CryptoKey object that you can use in the Web Crypto API. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/importKey) + */ importKey(format: string, keyData: (ArrayBuffer | ArrayBufferView) | JsonWebKey, algorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/exportKey) */ + /** + * The **`exportKey()`** method of the SubtleCrypto interface exports a key: that is, it takes as input a CryptoKey object and gives you the key in an external, portable format. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/exportKey) + */ exportKey(format: string, key: CryptoKey): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/wrapKey) */ + /** + * The **`wrapKey()`** method of the SubtleCrypto interface 'wraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/wrapKey) + */ wrapKey(format: string, key: CryptoKey, wrappingKey: CryptoKey, wrapAlgorithm: string | SubtleCryptoEncryptAlgorithm): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/unwrapKey) */ + /** + * The **`unwrapKey()`** method of the SubtleCrypto interface 'unwraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/unwrapKey) + */ unwrapKey(format: string, wrappedKey: ArrayBuffer | ArrayBufferView, unwrappingKey: CryptoKey, unwrapAlgorithm: string | SubtleCryptoEncryptAlgorithm, unwrappedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; timingSafeEqual(a: ArrayBuffer | ArrayBufferView, b: ArrayBuffer | ArrayBufferView): boolean; } /** - * The CryptoKey dictionary of the Web Crypto API represents a cryptographic key. + * The **`CryptoKey`** interface of the Web Crypto API represents a cryptographic key obtained from one of the SubtleCrypto methods SubtleCrypto.generateKey, SubtleCrypto.deriveKey, SubtleCrypto.importKey, or SubtleCrypto.unwrapKey. * Available only in secure contexts. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey) */ declare abstract class CryptoKey { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/type) */ + /** + * The read-only **`type`** property of the CryptoKey interface indicates which kind of key is represented by the object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/type) + */ readonly type: string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/extractable) */ + /** + * The read-only **`extractable`** property of the CryptoKey interface indicates whether or not the key may be extracted using `SubtleCrypto.exportKey()` or `SubtleCrypto.wrapKey()`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/extractable) + */ readonly extractable: boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/algorithm) */ + /** + * The read-only **`algorithm`** property of the CryptoKey interface returns an object describing the algorithm for which this key can be used, and any associated extra parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/algorithm) + */ readonly algorithm: CryptoKeyKeyAlgorithm | CryptoKeyAesKeyAlgorithm | CryptoKeyHmacKeyAlgorithm | CryptoKeyRsaKeyAlgorithm | CryptoKeyEllipticKeyAlgorithm | CryptoKeyArbitraryKeyAlgorithm; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/usages) */ + /** + * The read-only **`usages`** property of the CryptoKey interface indicates what can be done with the key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/usages) + */ readonly usages: string[]; } interface CryptoKeyPair { @@ -1018,24 +1233,14 @@ declare class DigestStream extends WritableStream get bytesWritten(): number | bigint; } /** - * A decoder for a specific method, that is a specific character encoding, like utf-8, iso-8859-2, koi8, cp1261, gbk, etc. A decoder takes a stream of bytes as input and emits a stream of code points. For a more scalable, non-native library, see StringView – a C-like representation of strings based on typed arrays. + * The **`TextDecoder`** interface represents a decoder for a specific text encoding, such as `UTF-8`, `ISO-8859-2`, `KOI8-R`, `GBK`, etc. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder) */ declare class TextDecoder { constructor(label?: string, options?: TextDecoderConstructorOptions); /** - * Returns the result of running encoding's decoder. The method can be invoked zero or more times with options's stream set to true, and then once without options's stream (or set to false), to process a fragmented input. If the invocation without options's stream (or set to false) has no input, it's clearest to omit both arguments. - * - * ``` - * var string = "", decoder = new TextDecoder(encoding), buffer; - * while(buffer = next_chunk()) { - * string += decoder.decode(buffer, {stream:true}); - * } - * string += decoder.decode(); // end-of-queue - * ``` - * - * If the error mode is "fatal" and encoding's decoder returns error, throws a TypeError. + * The **`TextDecoder.decode()`** method returns a string containing text decoded from the buffer passed as a parameter. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder/decode) */ @@ -1045,24 +1250,24 @@ declare class TextDecoder { get ignoreBOM(): boolean; } /** - * TextEncoder takes a stream of code points as input and emits a stream of bytes. For a more scalable, non-native library, see StringView – a C-like representation of strings based on typed arrays. + * The **`TextEncoder`** interface takes a stream of code points as input and emits a stream of UTF-8 bytes. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder) */ declare class TextEncoder { constructor(); /** - * Returns the result of running UTF-8's encoder. + * The **`TextEncoder.encode()`** method takes a string as input, and returns a Global_Objects/Uint8Array containing the text given in parameters encoded with the specific method for that TextEncoder object. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encode) */ encode(input?: string): Uint8Array; /** - * Runs the UTF-8 encoder on source, stores the result of that operation into destination, and returns the progress made as an object wherein read is the number of converted code units of source and written is the number of bytes modified in destination. + * The **`TextEncoder.encodeInto()`** method takes a string to encode and a destination Uint8Array to put resulting UTF-8 encoded text into, and returns a dictionary object indicating the progress of the encoding. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encodeInto) */ - encodeInto(input: string, buffer: ArrayBuffer | ArrayBufferView): TextEncoderEncodeIntoResult; + encodeInto(input: string, buffer: Uint8Array): TextEncoderEncodeIntoResult; get encoding(): string; } interface TextDecoderConstructorOptions { @@ -1077,21 +1282,41 @@ interface TextEncoderEncodeIntoResult { written: number; } /** - * Events providing information related to errors in scripts or in files. + * The **`ErrorEvent`** interface represents events providing information related to errors in scripts or in files. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent) */ declare class ErrorEvent extends Event { constructor(type: string, init?: ErrorEventErrorEventInit); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename) */ + /** + * The **`filename`** read-only property of the ErrorEvent interface returns a string containing the name of the script file in which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename) + */ get filename(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message) */ + /** + * The **`message`** read-only property of the ErrorEvent interface returns a string containing a human-readable error message describing the problem. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message) + */ get message(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno) */ + /** + * The **`lineno`** read-only property of the ErrorEvent interface returns an integer containing the line number of the script file on which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno) + */ get lineno(): number; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno) */ + /** + * The **`colno`** read-only property of the ErrorEvent interface returns an integer containing the column number of the script file on which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno) + */ get colno(): number; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error) */ + /** + * The **`error`** read-only property of the ErrorEvent interface returns a JavaScript value, such as an Error or DOMException, representing the error associated with this event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error) + */ get error(): any; } interface ErrorEventErrorEventInit { @@ -1102,38 +1327,38 @@ interface ErrorEventErrorEventInit { error?: any; } /** - * A message received by a target object. + * The **`MessageEvent`** interface represents a message received by a target object. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent) */ declare class MessageEvent extends Event { constructor(type: string, initializer: MessageEventInit); /** - * Returns the data of the message. + * The **`data`** read-only property of the The data sent by the message emitter; this can be any data type, depending on what originated this event. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data) */ readonly data: any; /** - * Returns the origin of the message, for server-sent events and cross-document messaging. + * The **`origin`** read-only property of the origin of the message emitter. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/origin) */ readonly origin: string | null; /** - * Returns the last event ID string, for server-sent events. + * The **`lastEventId`** read-only property of the unique ID for the event. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/lastEventId) */ readonly lastEventId: string; /** - * Returns the WindowProxy of the source window, for cross-document messaging, and the MessagePort being attached, in the connect event fired at SharedWorkerGlobalScope objects. + * The **`source`** read-only property of the a WindowProxy, MessagePort, or a `MessageEventSource` (which can be a WindowProxy, message emitter. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/source) */ readonly source: MessagePort | null; /** - * Returns the MessagePort array sent with the message, for cross-document messaging and channel messaging. + * The **`ports`** read-only property of the containing all MessagePort objects sent with the message, in order. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/ports) */ @@ -1143,27 +1368,90 @@ interface MessageEventInit { data: ArrayBuffer | string; } /** - * Provides a way to easily construct a set of key/value pairs representing form fields and their values, which can then be easily sent using the XMLHttpRequest.send() method. It uses the same format a form would use if the encoding type were set to "multipart/form-data". + * The **`PromiseRejectionEvent`** interface represents events which are sent to the global script context when JavaScript Promises are rejected. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent) + */ +declare abstract class PromiseRejectionEvent extends Event { + /** + * The PromiseRejectionEvent interface's **`promise`** read-only property indicates the JavaScript rejected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/promise) + */ + readonly promise: Promise; + /** + * The PromiseRejectionEvent **`reason`** read-only property is any JavaScript value or Object which provides the reason passed into Promise.reject(). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/reason) + */ + readonly reason: any; +} +/** + * The **`FormData`** interface provides a way to construct a set of key/value pairs representing form fields and their values, which can be sent using the Window/fetch, XMLHttpRequest.send() or navigator.sendBeacon() methods. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData) */ declare class FormData { constructor(); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) */ + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: string | Blob): void; + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ append(name: string, value: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) */ + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ append(name: string, value: Blob, filename?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/delete) */ + /** + * The **`delete()`** method of the FormData interface deletes a key and its value(s) from a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/delete) + */ delete(name: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/get) */ + /** + * The **`get()`** method of the FormData interface returns the first value associated with a given key from within a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/get) + */ get(name: string): (File | string) | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/getAll) */ + /** + * The **`getAll()`** method of the FormData interface returns all the values associated with a given key from within a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/getAll) + */ getAll(name: string): (File | string)[]; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has) */ + /** + * The **`has()`** method of the FormData interface returns whether a `FormData` object contains a certain key. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has) + */ has(name: string): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) */ + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: string | Blob): void; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ set(name: string, value: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) */ + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ set(name: string, value: Blob, filename?: string): void; /* Returns an array of key, value pairs for every entry in the list. */ entries(): IterableIterator<[ @@ -1251,37 +1539,69 @@ interface DocumentEnd { append(content: string, options?: ContentOptions): DocumentEnd; } /** - * This is the event type for fetch events dispatched on the service worker global scope. It contains information about the fetch, including the request and how the receiver will treat the response. It provides the event.respondWith() method, which allows us to provide a response to this fetch. + * This is the event type for `fetch` events dispatched on the ServiceWorkerGlobalScope. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent) */ declare abstract class FetchEvent extends ExtendableEvent { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/request) */ + /** + * The **`request`** read-only property of the the event handler. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/request) + */ readonly request: Request; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/respondWith) */ + /** + * The **`respondWith()`** method of allows you to provide a promise for a Response yourself. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/respondWith) + */ respondWith(promise: Response | Promise): void; passThroughOnException(): void; } type HeadersInit = Headers | Iterable> | Record; /** - * This Fetch API interface allows you to perform various actions on HTTP request and response headers. These actions include retrieving, setting, adding to, and removing. A Headers object has an associated header list, which is initially empty and consists of zero or more name and value pairs.  You can add to this using methods like append() (see Examples.) In all methods of this interface, header names are matched by case-insensitive byte sequence. + * The **`Headers`** interface of the Fetch API allows you to perform various actions on HTTP request and response headers. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers) */ declare class Headers { constructor(init?: HeadersInit); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/get) */ + /** + * The **`get()`** method of the Headers interface returns a byte string of all the values of a header within a `Headers` object with a given name. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/get) + */ get(name: string): string | null; getAll(name: string): string[]; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/getSetCookie) */ + /** + * The **`getSetCookie()`** method of the Headers interface returns an array containing the values of all Set-Cookie headers associated with a response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/getSetCookie) + */ getSetCookie(): string[]; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/has) */ + /** + * The **`has()`** method of the Headers interface returns a boolean stating whether a `Headers` object contains a certain header. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/has) + */ has(name: string): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/set) */ + /** + * The **`set()`** method of the Headers interface sets a new value for an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/set) + */ set(name: string, value: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/append) */ - append(name: string, value: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/delete) */ + /** + * The **`append()`** method of the Headers interface appends a new value onto an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the Headers interface deletes a header from the current `Headers` object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/delete) + */ delete(name: string): void; forEach(callback: (this: This, value: string, key: string, parent: Headers) => void, thisArg?: This): void; /* Returns an iterator allowing to go through all key/value pairs contained in this object. */ @@ -1318,7 +1638,7 @@ declare abstract class Body { blob(): Promise; } /** - * This Fetch API interface represents the response to a request. + * The **`Response`** interface of the Fetch API represents the response to a request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) */ @@ -1330,28 +1650,60 @@ declare var Response: { json(any: any, maybeInit?: (ResponseInit | Response)): Response; }; /** - * This Fetch API interface represents the response to a request. + * The **`Response`** interface of the Fetch API represents the response to a request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) */ interface Response extends Body { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/clone) */ + /** + * The **`clone()`** method of the Response interface creates a clone of a response object, identical in every way, but stored in a different variable. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/clone) + */ clone(): Response; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/status) */ + /** + * The **`status`** read-only property of the Response interface contains the HTTP status codes of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/status) + */ status: number; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/statusText) */ + /** + * The **`statusText`** read-only property of the Response interface contains the status message corresponding to the HTTP status code in Response.status. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/statusText) + */ statusText: string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/headers) */ + /** + * The **`headers`** read-only property of the with the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/headers) + */ headers: Headers; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/ok) */ + /** + * The **`ok`** read-only property of the Response interface contains a Boolean stating whether the response was successful (status in the range 200-299) or not. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/ok) + */ ok: boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/redirected) */ + /** + * The **`redirected`** read-only property of the Response interface indicates whether or not the response is the result of a request you made which was redirected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/redirected) + */ redirected: boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/url) */ + /** + * The **`url`** read-only property of the Response interface contains the URL of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/url) + */ url: string; webSocket: WebSocket | null; cf: any | undefined; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/type) */ + /** + * The **`type`** read-only property of the Response interface contains the type of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/type) + */ type: "default" | "error"; } interface ResponseInit { @@ -1364,7 +1716,7 @@ interface ResponseInit { } type RequestInfo> = Request | string; /** - * This Fetch API interface represents a resource request. + * The **`Request`** interface of the Fetch API represents a resource request. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) */ @@ -1373,59 +1725,63 @@ declare var Request: { new >(input: RequestInfo | URL, init?: RequestInit): Request; }; /** - * This Fetch API interface represents a resource request. + * The **`Request`** interface of the Fetch API represents a resource request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) */ interface Request> extends Body { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/clone) */ + /** + * The **`clone()`** method of the Request interface creates a copy of the current `Request` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/clone) + */ clone(): Request; /** - * Returns request's HTTP method, which is "GET" by default. + * The **`method`** read-only property of the `POST`, etc.) A String indicating the method of the request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/method) */ method: string; /** - * Returns the URL of request as a string. + * The **`url`** read-only property of the Request interface contains the URL of the request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/url) */ url: string; /** - * Returns a Headers object consisting of the headers associated with request. Note that headers added in the network layer by the user agent will not be accounted for in this object, e.g., the "Host" header. + * The **`headers`** read-only property of the with the request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/headers) */ headers: Headers; /** - * Returns the redirect mode associated with request, which is a string indicating how redirects for the request will be handled during fetching. A request will follow redirects by default. + * The **`redirect`** read-only property of the Request interface contains the mode for how redirects are handled. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/redirect) */ redirect: string; fetcher: Fetcher | null; /** - * Returns the signal associated with request, which is an AbortSignal object indicating whether or not request has been aborted, and its abort event handler. + * The read-only **`signal`** property of the Request interface returns the AbortSignal associated with the request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/signal) */ signal: AbortSignal; - cf: Cf | undefined; + cf?: Cf; /** - * Returns request's subresource integrity metadata, which is a cryptographic hash of the resource being fetched. Its value consists of multiple hashes separated by whitespace. [SRI] + * The **`integrity`** read-only property of the Request interface contains the subresource integrity value of the request. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/integrity) */ integrity: string; /** - * Returns a boolean indicating whether or not request can outlive the global in which it was created. + * The **`keepalive`** read-only property of the Request interface contains the request's `keepalive` setting (`true` or `false`), which indicates whether the browser will keep the associated request alive if the page that initiated it is unloaded before the request is complete. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/keepalive) */ keepalive: boolean; /** - * Returns the cache mode associated with request, which is a string indicating how the request will interact with the browser's cache when fetching. + * The **`cache`** read-only property of the Request interface contains the cache mode of the request. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/cache) */ @@ -1746,6 +2102,8 @@ interface Transformer { expectedLength?: number; } interface StreamPipeOptions { + preventAbort?: boolean; + preventCancel?: boolean; /** * Pipes this readable stream to a given writable stream destination. The way in which the piping process behaves under various error conditions can be customized with a number of passed options. It returns a promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. * @@ -1764,8 +2122,6 @@ interface StreamPipeOptions { * The signal option can be set to an AbortSignal to allow aborting an ongoing pipe operation via the corresponding AbortController. In this case, this source readable stream will be canceled, and destination aborted, unless the respective options preventCancel or preventAbort are set. */ preventClose?: boolean; - preventAbort?: boolean; - preventCancel?: boolean; signal?: AbortSignal; } type ReadableStreamReadResult = { @@ -1776,24 +2132,52 @@ type ReadableStreamReadResult = { value?: undefined; }; /** - * This Streams API interface represents a readable stream of byte data. The Fetch API offers a concrete instance of a ReadableStream through the body property of a Response object. + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) */ interface ReadableStream { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/locked) */ + /** + * The **`locked`** read-only property of the ReadableStream interface returns whether or not the readable stream is locked to a reader. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/locked) + */ get locked(): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/cancel) */ + /** + * The **`cancel()`** method of the ReadableStream interface returns a Promise that resolves when the stream is canceled. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/cancel) + */ cancel(reason?: any): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) */ + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ getReader(): ReadableStreamDefaultReader; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) */ + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ getReader(options: ReadableStreamGetReaderOptions): ReadableStreamBYOBReader; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeThrough) */ + /** + * The **`pipeThrough()`** method of the ReadableStream interface provides a chainable way of piping the current stream through a transform stream or any other writable/readable pair. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeThrough) + */ pipeThrough(transform: ReadableWritablePair, options?: StreamPipeOptions): ReadableStream; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeTo) */ + /** + * The **`pipeTo()`** method of the ReadableStream interface pipes the current `ReadableStream` to a given WritableStream and returns a Promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeTo) + */ pipeTo(destination: WritableStream, options?: StreamPipeOptions): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/tee) */ + /** + * The **`tee()`** method of the two-element array containing the two resulting branches as new ReadableStream instances. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/tee) + */ tee(): [ ReadableStream, ReadableStream @@ -1802,7 +2186,7 @@ interface ReadableStream { [Symbol.asyncIterator](options?: ReadableStreamValuesOptions): AsyncIterableIterator; } /** - * This Streams API interface represents a readable stream of byte data. The Fetch API offers a concrete instance of a ReadableStream through the body property of a Response object. + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) */ @@ -1811,24 +2195,48 @@ declare const ReadableStream: { new (underlyingSource: UnderlyingByteSource, strategy?: QueuingStrategy): ReadableStream; new (underlyingSource?: UnderlyingSource, strategy?: QueuingStrategy): ReadableStream; }; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader) */ +/** + * The **`ReadableStreamDefaultReader`** interface of the Streams API represents a default reader that can be used to read stream data supplied from a network (such as a fetch request). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader) + */ declare class ReadableStreamDefaultReader { constructor(stream: ReadableStream); get closed(): Promise; cancel(reason?: any): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/read) */ + /** + * The **`read()`** method of the ReadableStreamDefaultReader interface returns a Promise providing access to the next chunk in the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/read) + */ read(): Promise>; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/releaseLock) */ + /** + * The **`releaseLock()`** method of the ReadableStreamDefaultReader interface releases the reader's lock on the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/releaseLock) + */ releaseLock(): void; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader) */ +/** + * The `ReadableStreamBYOBReader` interface of the Streams API defines a reader for a ReadableStream that supports zero-copy reading from an underlying byte source. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader) + */ declare class ReadableStreamBYOBReader { constructor(stream: ReadableStream); get closed(): Promise; cancel(reason?: any): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/read) */ + /** + * The **`read()`** method of the ReadableStreamBYOBReader interface is used to read data into a view on a user-supplied buffer from an associated readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/read) + */ read(view: T): Promise>; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/releaseLock) */ + /** + * The **`releaseLock()`** method of the ReadableStreamBYOBReader interface releases the reader's lock on the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/releaseLock) + */ releaseLock(): void; readAtLeast(minElements: number, view: T): Promise>; } @@ -1843,115 +2251,259 @@ interface ReadableStreamGetReaderOptions { */ mode: "byob"; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest) */ +/** + * The **`ReadableStreamBYOBRequest`** interface of the Streams API represents a 'pull request' for data from an underlying source that will made as a zero-copy transfer to a consumer (bypassing the stream's internal queues). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest) + */ declare abstract class ReadableStreamBYOBRequest { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/view) */ + /** + * The **`view`** getter property of the ReadableStreamBYOBRequest interface returns the current view. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/view) + */ get view(): Uint8Array | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respond) */ + /** + * The **`respond()`** method of the ReadableStreamBYOBRequest interface is used to signal to the associated readable byte stream that the specified number of bytes were written into the ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respond) + */ respond(bytesWritten: number): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respondWithNewView) */ + /** + * The **`respondWithNewView()`** method of the ReadableStreamBYOBRequest interface specifies a new view that the consumer of the associated readable byte stream should write to instead of ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respondWithNewView) + */ respondWithNewView(view: ArrayBuffer | ArrayBufferView): void; get atLeast(): number | null; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController) */ +/** + * The **`ReadableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a ReadableStream's state and internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController) + */ declare abstract class ReadableStreamDefaultController { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/desiredSize) */ + /** + * The **`desiredSize`** read-only property of the required to fill the stream's internal queue. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/desiredSize) + */ get desiredSize(): number | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/close) */ + /** + * The **`close()`** method of the ReadableStreamDefaultController interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/close) + */ close(): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/enqueue) */ + /** + * The **`enqueue()`** method of the ```js-nolint enqueue(chunk) ``` - `chunk` - : The chunk to enqueue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/enqueue) + */ enqueue(chunk?: R): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/error) */ + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/error) + */ error(reason: any): void; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController) */ +/** + * The **`ReadableByteStreamController`** interface of the Streams API represents a controller for a readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController) + */ declare abstract class ReadableByteStreamController { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/byobRequest) */ + /** + * The **`byobRequest`** read-only property of the ReadableByteStreamController interface returns the current BYOB request, or `null` if there are no pending requests. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/byobRequest) + */ get byobRequest(): ReadableStreamBYOBRequest | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/desiredSize) */ + /** + * The **`desiredSize`** read-only property of the ReadableByteStreamController interface returns the number of bytes required to fill the stream's internal queue to its 'desired size'. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/desiredSize) + */ get desiredSize(): number | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/close) */ + /** + * The **`close()`** method of the ReadableByteStreamController interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/close) + */ close(): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/enqueue) */ + /** + * The **`enqueue()`** method of the ReadableByteStreamController interface enqueues a given chunk on the associated readable byte stream (the chunk is copied into the stream's internal queues). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/enqueue) + */ enqueue(chunk: ArrayBuffer | ArrayBufferView): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/error) */ + /** + * The **`error()`** method of the ReadableByteStreamController interface causes any future interactions with the associated stream to error with the specified reason. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/error) + */ error(reason: any): void; } /** - * This Streams API interface represents a controller allowing control of a WritableStream's state. 
When constructing a WritableStream, the underlying sink is given a corresponding WritableStreamDefaultController instance to manipulate. + * The **`WritableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a WritableStream's state. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController) */ declare abstract class WritableStreamDefaultController { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/signal) */ + /** + * The read-only **`signal`** property of the WritableStreamDefaultController interface returns the AbortSignal associated with the controller. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/signal) + */ get signal(): AbortSignal; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/error) */ + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/error) + */ error(reason?: any): void; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController) */ +/** + * The **`TransformStreamDefaultController`** interface of the Streams API provides methods to manipulate the associated ReadableStream and WritableStream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController) + */ declare abstract class TransformStreamDefaultController { - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/desiredSize) */ + /** + * The **`desiredSize`** read-only property of the TransformStreamDefaultController interface returns the desired size to fill the queue of the associated ReadableStream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/desiredSize) + */ get desiredSize(): number | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/enqueue) */ + /** + * The **`enqueue()`** method of the TransformStreamDefaultController interface enqueues the given chunk in the readable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/enqueue) + */ enqueue(chunk?: O): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/error) */ + /** + * The **`error()`** method of the TransformStreamDefaultController interface errors both sides of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/error) + */ error(reason: any): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/terminate) */ + /** + * The **`terminate()`** method of the TransformStreamDefaultController interface closes the readable side and errors the writable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/terminate) + */ terminate(): void; } interface ReadableWritablePair { + readable: ReadableStream; /** * Provides a convenient, chainable way of piping this readable stream through a transform stream (or any other { writable, readable } pair). It simply pipes the stream into the writable side of the supplied pair, and returns the readable side for further use. * * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader. */ writable: WritableStream; - readable: ReadableStream; } /** - * This Streams API interface provides a standard abstraction for writing streaming data to a destination, known as a sink. 
This object comes with built-in backpressure and queuing. + * The **`WritableStream`** interface of the Streams API provides a standard abstraction for writing streaming data to a destination, known as a sink. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream) */ declare class WritableStream { constructor(underlyingSink?: UnderlyingSink, queuingStrategy?: QueuingStrategy); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/locked) */ + /** + * The **`locked`** read-only property of the WritableStream interface returns a boolean indicating whether the `WritableStream` is locked to a writer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/locked) + */ get locked(): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/abort) */ + /** + * The **`abort()`** method of the WritableStream interface aborts the stream, signaling that the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/abort) + */ abort(reason?: any): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/close) */ + /** + * The **`close()`** method of the WritableStream interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/close) + */ close(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/getWriter) */ + /** + * The **`getWriter()`** method of the WritableStream interface returns a new instance of WritableStreamDefaultWriter and locks the stream to that instance. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/getWriter) + */ getWriter(): WritableStreamDefaultWriter; } /** - * This Streams API interface is the object returned by WritableStream.getWriter() and once created locks the < writer to the WritableStream ensuring that no other streams can write to the underlying sink. + * The **`WritableStreamDefaultWriter`** interface of the Streams API is the object returned by WritableStream.getWriter() and once created locks the writer to the `WritableStream` ensuring that no other streams can write to the underlying sink. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter) */ declare class WritableStreamDefaultWriter { constructor(stream: WritableStream); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/closed) */ + /** + * The **`closed`** read-only property of the the stream errors or the writer's lock is released. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/closed) + */ get closed(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/ready) */ + /** + * The **`ready`** read-only property of the that resolves when the desired size of the stream's internal queue transitions from non-positive to positive, signaling that it is no longer applying backpressure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/ready) + */ get ready(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/desiredSize) */ + /** + * The **`desiredSize`** read-only property of the to fill the stream's internal queue. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/desiredSize) + */ get desiredSize(): number | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/abort) */ + /** + * The **`abort()`** method of the the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/abort) + */ abort(reason?: any): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/close) */ + /** + * The **`close()`** method of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/close) + */ close(): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/write) */ + /** + * The **`write()`** method of the operation. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/write) + */ write(chunk?: W): Promise; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/releaseLock) */ + /** + * The **`releaseLock()`** method of the corresponding stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/releaseLock) + */ releaseLock(): void; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream) */ +/** + * The **`TransformStream`** interface of the Streams API represents a concrete implementation of the pipe chain _transform stream_ concept. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream) + */ declare class TransformStream { constructor(transformer?: Transformer, writableStrategy?: QueuingStrategy, readableStrategy?: QueuingStrategy); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/readable) */ + /** + * The **`readable`** read-only property of the TransformStream interface returns the ReadableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/readable) + */ get readable(): ReadableStream; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/writable) */ + /** + * The **`writable`** read-only property of the TransformStream interface returns the WritableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/writable) + */ get writable(): WritableStream; } declare class FixedLengthStream extends IdentityTransformStream { @@ -1966,20 +2518,36 @@ interface IdentityTransformStreamQueuingStrategy { interface ReadableStreamValuesOptions { preventCancel?: boolean; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CompressionStream) */ +/** + * The **`CompressionStream`** interface of the Compression Streams API is an API for compressing a stream of data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CompressionStream) + */ declare class CompressionStream extends TransformStream { constructor(format: "gzip" | "deflate" | "deflate-raw"); } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/DecompressionStream) */ +/** + * The **`DecompressionStream`** interface of the Compression Streams API is an API for decompressing a stream of data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DecompressionStream) + */ declare class DecompressionStream extends TransformStream { constructor(format: "gzip" | "deflate" | "deflate-raw"); } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoderStream) */ +/** + * The **`TextEncoderStream`** interface of the Encoding API converts a stream of strings into bytes in the UTF-8 encoding. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoderStream) + */ declare class TextEncoderStream extends TransformStream { constructor(); get encoding(): string; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoderStream) */ +/** + * The **`TextDecoderStream`** interface of the Encoding API converts a stream of text in a binary encoding, such as UTF-8 etc., to a stream of strings. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoderStream) + */ declare class TextDecoderStream extends TransformStream { constructor(label?: string, options?: TextDecoderStreamTextDecoderStreamInit); get encoding(): string; @@ -1991,25 +2559,33 @@ interface TextDecoderStreamTextDecoderStreamInit { ignoreBOM?: boolean; } /** - * This Streams API interface provides a built-in byte length queuing strategy that can be used when constructing streams. + * The **`ByteLengthQueuingStrategy`** interface of the Streams API provides a built-in byte length queuing strategy that can be used when constructing streams. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy) */ declare class ByteLengthQueuingStrategy implements QueuingStrategy { constructor(init: QueuingStrategyInit); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/highWaterMark) */ + /** + * The read-only **`ByteLengthQueuingStrategy.highWaterMark`** property returns the total number of bytes that can be contained in the internal queue before backpressure is applied. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/highWaterMark) + */ get highWaterMark(): number; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/size) */ get size(): (chunk?: any) => number; } /** - * This Streams API interface provides a built-in byte length queuing strategy that can be used when constructing streams. + * The **`CountQueuingStrategy`** interface of the Streams API provides a built-in chunk counting queuing strategy that can be used when constructing streams. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy) */ declare class CountQueuingStrategy implements QueuingStrategy { constructor(init: QueuingStrategyInit); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/highWaterMark) */ + /** + * The read-only **`CountQueuingStrategy.highWaterMark`** property returns the total number of chunks that can be contained in the internal queue before backpressure is applied. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/highWaterMark) + */ get highWaterMark(): number; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/size) */ get size(): (chunk?: any) => number; @@ -2022,6 +2598,11 @@ interface QueuingStrategyInit { */ highWaterMark: number; } +interface TracePreviewInfo { + id: string; + slug: string; + name: string; +} interface ScriptVersion { id?: string; tag?: string; @@ -2032,7 +2613,7 @@ declare abstract class TailEvent extends ExtendableEvent { readonly traces: TraceItem[]; } interface TraceItem { - readonly event: (TraceItemFetchEventInfo | TraceItemJsRpcEventInfo | TraceItemScheduledEventInfo | TraceItemAlarmEventInfo | TraceItemQueueEventInfo | TraceItemEmailEventInfo | TraceItemTailEventInfo | TraceItemCustomEventInfo | TraceItemHibernatableWebSocketEventInfo) | null; + readonly event: (TraceItemFetchEventInfo | TraceItemJsRpcEventInfo | TraceItemConnectEventInfo | TraceItemScheduledEventInfo | TraceItemAlarmEventInfo | TraceItemQueueEventInfo | TraceItemEmailEventInfo | TraceItemTailEventInfo | TraceItemCustomEventInfo | TraceItemHibernatableWebSocketEventInfo) | null; readonly eventTimestamp: number | null; readonly logs: TraceLog[]; readonly exceptions: TraceException[]; @@ -2042,6 +2623,8 @@ interface TraceItem { readonly scriptVersion?: ScriptVersion; readonly dispatchNamespace?: string; readonly scriptTags?: string[]; + readonly tailAttributes?: Record; + readonly preview?: TracePreviewInfo; readonly durableObjectId?: string; readonly outcome: string; readonly executionModel: string; @@ -2052,6 +2635,8 @@ interface TraceItem { interface TraceItemAlarmEventInfo { readonly scheduledTime: Date; } +interface TraceItemConnectEventInfo { +} interface TraceItemCustomEventInfo { } interface TraceItemScheduledEventInfo { @@ -2128,111 +2713,231 @@ interface UnsafeTraceMetrics { fromTrace(item: TraceItem): TraceMetrics; } /** - * The URL interface 
represents an object providing static methods used for creating object URLs. + * The **`URL`** interface is used to parse, construct, normalize, and encode URL. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL) */ declare class URL { constructor(url: string | URL, base?: string | URL); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/origin) */ - get origin(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) */ - get href(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) */ - set href(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) */ - get protocol(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) */ - set protocol(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) */ - get username(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) */ - set username(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) */ - get password(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) */ - set password(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) */ - get host(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) */ - set host(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) */ - get hostname(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) */ - set hostname(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) */ - get port(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) */ - set port(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) */ - get pathname(): string; 
- /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) */ - set pathname(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) */ - get search(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) */ - set search(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) */ - get hash(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) */ - set hash(value: string); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/searchParams) */ - get searchParams(): URLSearchParams; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/toJSON) */ - toJSON(): string; - /*function toString() { [native code] }*/ - toString(): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/canParse_static) */ - static canParse(url: string, base?: string): boolean; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/parse_static) */ - static parse(url: string, base?: string): URL | null; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/createObjectURL_static) */ - static createObjectURL(object: File | Blob): string; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/revokeObjectURL_static) */ - static revokeObjectURL(object_url: string): void; -} -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams) */ -declare class URLSearchParams { - constructor(init?: (Iterable> | Record | string)); - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/size) */ - get size(): number; /** - * Appends a specified key/value pair as a new search parameter. + * The **`origin`** read-only property of the URL interface returns a string containing the Unicode serialization of the origin of the represented URL. 
* - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/append) + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/origin) */ - append(name: string, value: string): void; + get origin(): string; /** - * Deletes the given search parameter, and its associated value, from the list of all search parameters. + * The **`href`** property of the URL interface is a string containing the whole URL. * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/delete) + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) */ - delete(name: string, value?: string): void; + get href(): string; /** - * Returns the first value associated to the given search parameter. + * The **`href`** property of the URL interface is a string containing the whole URL. * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/get) + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) */ - get(name: string): string | null; + set href(value: string); /** - * Returns all the values association with a given search parameter. + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/getAll) + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) */ - getAll(name: string): string[]; + get protocol(): string; /** - * Returns a Boolean indicating if such a search parameter exists. + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. 
* - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/has) + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) + */ + set protocol(value: string); + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + get username(): string; + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + set username(value: string); + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + get password(): string; + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + set password(value: string); + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + get host(): string; + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + set host(value: string); + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + get hostname(): string; + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + set hostname(value: string); + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + get port(): string; + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + set port(value: string); + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + get pathname(): string; + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + set pathname(value: string); + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + get search(): string; + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + set search(value: string); + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + get hash(): string; + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + set hash(value: string); + /** + * The **`searchParams`** read-only property of the access to the [MISSING: httpmethod('GET')] decoded query arguments contained in the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/searchParams) + */ + get searchParams(): URLSearchParams; + /** + * The **`toJSON()`** method of the URL interface returns a string containing a serialized version of the URL, although in practice it seems to have the same effect as ```js-nolint toJSON() ``` None. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/toJSON) + */ + toJSON(): string; + /*function toString() { [native code] }*/ + toString(): string; + /** + * The **`URL.canParse()`** static method of the URL interface returns a boolean indicating whether or not an absolute URL, or a relative URL combined with a base URL, are parsable and valid. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/canParse_static) + */ + static canParse(url: string, base?: string): boolean; + /** + * The **`URL.parse()`** static method of the URL interface returns a newly created URL object representing the URL defined by the parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/parse_static) + */ + static parse(url: string, base?: string): URL | null; + /** + * The **`createObjectURL()`** static method of the URL interface creates a string containing a URL representing the object given in the parameter. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/createObjectURL_static) + */ + static createObjectURL(object: File | Blob): string; + /** + * The **`revokeObjectURL()`** static method of the URL interface releases an existing object URL which was previously created by calling Call this method when you've finished using an object URL to let the browser know not to keep the reference to the file any longer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/revokeObjectURL_static) + */ + static revokeObjectURL(object_url: string): void; +} +/** + * The **`URLSearchParams`** interface defines utility methods to work with the query string of a URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams) + */ +declare class URLSearchParams { + constructor(init?: (Iterable> | Record | string)); + /** + * The **`size`** read-only property of the URLSearchParams interface indicates the total number of search parameter entries. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/size) + */ + get size(): number; + /** + * The **`append()`** method of the URLSearchParams interface appends a specified key/value pair as a new search parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the URLSearchParams interface deletes specified parameters and their associated value(s) from the list of all search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/delete) + */ + delete(name: string, value?: string): void; + /** + * The **`get()`** method of the URLSearchParams interface returns the first value associated to the given search parameter. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/get) + */ + get(name: string): string | null; + /** + * The **`getAll()`** method of the URLSearchParams interface returns all the values associated with a given search parameter as an array. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/getAll) + */ + getAll(name: string): string[]; + /** + * The **`has()`** method of the URLSearchParams interface returns a boolean value that indicates whether the specified parameter is in the search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/has) */ has(name: string, value?: string): boolean; /** - * Sets the value associated to a given search parameter to the given value. If there were several values, delete the others. + * The **`set()`** method of the URLSearchParams interface sets the value associated with a given search parameter to the given value. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/set) */ set(name: string, value: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/sort) */ + /** + * The **`URLSearchParams.sort()`** method sorts all key/value pairs contained in this object in place and returns `undefined`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/sort) + */ sort(): void; /* Returns an array of key, value pairs for every entry in the search params. */ entries(): IterableIterator<[ @@ -2244,7 +2949,7 @@ declare class URLSearchParams { /* Returns a list of values in the search params. */ values(): IterableIterator; forEach(callback: (this: This, value: string, key: string, parent: URLSearchParams) => void, thisArg?: This): void; - /*function toString() { [native code] } Returns a string containing a query string suitable for use in a URL. Does not include the question mark. 
*/ + /*function toString() { [native code] }*/ toString(): string; [Symbol.iterator](): IterableIterator<[ key: string, @@ -2295,26 +3000,26 @@ interface URLPatternOptions { ignoreCase?: boolean; } /** - * A CloseEvent is sent to clients using WebSockets when the connection is closed. This is delivered to the listener indicated by the WebSocket object's onclose attribute. + * A `CloseEvent` is sent to clients using WebSockets when the connection is closed. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent) */ declare class CloseEvent extends Event { constructor(type: string, initializer?: CloseEventInit); /** - * Returns the WebSocket connection close code provided by the server. + * The **`code`** read-only property of the CloseEvent interface returns a WebSocket connection close code indicating the reason the connection was closed. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/code) */ readonly code: number; /** - * Returns the WebSocket connection close reason provided by the server. + * The **`reason`** read-only property of the CloseEvent interface returns the WebSocket connection close reason the server gave for closing the connection; that is, a concise human-readable prose explanation for the closure. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/reason) */ readonly reason: string; /** - * Returns true if the connection closed cleanly; false otherwise. + * The **`wasClean`** read-only property of the CloseEvent interface returns `true` if the connection closed cleanly. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/wasClean) */ @@ -2332,7 +3037,7 @@ type WebSocketEventMap = { error: ErrorEvent; }; /** - * Provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. 
+ * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) */ @@ -2349,20 +3054,20 @@ declare var WebSocket: { readonly CLOSED: number; }; /** - * Provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. + * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) */ interface WebSocket extends EventTarget { - accept(): void; + accept(options?: WebSocketAcceptOptions): void; /** - * Transmits data using the WebSocket connection. data can be a string, a Blob, an ArrayBuffer, or an ArrayBufferView. + * The **`WebSocket.send()`** method enqueues the specified data to be transmitted to the server over the WebSocket connection, increasing the value of `bufferedAmount` by the number of bytes needed to contain the data. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/send) */ send(message: (ArrayBuffer | ArrayBufferView) | string): void; /** - * Closes the WebSocket connection, optionally using code as the the WebSocket connection close code and reason as the the WebSocket connection close reason. + * The **`WebSocket.close()`** method closes the already `CLOSED`, this method does nothing. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/close) */ @@ -2370,29 +3075,45 @@ interface WebSocket extends EventTarget { serializeAttachment(attachment: any): void; deserializeAttachment(): any | null; /** - * Returns the state of the WebSocket object's connection. It can have the values described below. 
+ * The **`WebSocket.readyState`** read-only property returns the current state of the WebSocket connection. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/readyState) */ readyState: number; /** - * Returns the URL that was used to establish the WebSocket connection. + * The **`WebSocket.url`** read-only property returns the absolute URL of the WebSocket as resolved by the constructor. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/url) */ url: string | null; /** - * Returns the subprotocol selected by the server, if any. It can be used in conjunction with the array form of the constructor's second argument to perform subprotocol negotiation. + * The **`WebSocket.protocol`** read-only property returns the name of the sub-protocol the server selected; this will be one of the strings specified in the `protocols` parameter when creating the WebSocket object, or the empty string if no connection is established. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/protocol) */ protocol: string | null; /** - * Returns the extensions selected by the server, if any. + * The **`WebSocket.extensions`** read-only property returns the extensions selected by the server. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/extensions) */ extensions: string | null; + /** + * The **`WebSocket.binaryType`** property controls the type of binary data being received over the WebSocket connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/binaryType) + */ + binaryType: "blob" | "arraybuffer"; +} +interface WebSocketAcceptOptions { + /** + * When set to `true`, receiving a server-initiated WebSocket Close frame will not + * automatically send a reciprocal Close frame, leaving the connection in a half-open + * state. This is useful for proxying scenarios where you need to coordinate closing + * both sides independently. 
Defaults to `false` when the + * `no_web_socket_half_open_by_default` compatibility flag is enabled. + */ + allowHalfOpen?: boolean; } declare const WebSocketPair: { new (): { @@ -2451,29 +3172,33 @@ interface SocketInfo { remoteAddress?: string; localAddress?: string; } -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource) */ +/** + * The **`EventSource`** interface is web content's interface to server-sent events. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource) + */ declare class EventSource extends EventTarget { constructor(url: string, init?: EventSourceEventSourceInit); /** - * Aborts any instances of the fetch algorithm started for this EventSource object, and sets the readyState attribute to CLOSED. + * The **`close()`** method of the EventSource interface closes the connection, if one is made, and sets the ```js-nolint close() ``` None. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/close) */ close(): void; /** - * Returns the URL providing the event stream. + * The **`url`** read-only property of the URL of the source. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/url) */ get url(): string; /** - * Returns true if the credentials mode for connection requests to the URL providing the event stream is set to "include", and false otherwise. + * The **`withCredentials`** read-only property of the the `EventSource` object was instantiated with CORS credentials set. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/withCredentials) */ get withCredentials(): boolean; /** - * Returns the state of this EventSource object's connection. It can have the values described below. + * The **`readyState`** read-only property of the connection. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/readyState) */ @@ -2506,34 +3231,63 @@ interface Container { destroy(error?: any): Promise; signal(signo: number): void; getTcpPort(port: number): Fetcher; + setInactivityTimeout(durationMs: number | bigint): Promise; + interceptOutboundHttp(addr: string, binding: Fetcher): Promise; + interceptAllOutboundHttp(binding: Fetcher): Promise; + snapshotDirectory(options: ContainerDirectorySnapshotOptions): Promise; + snapshotContainer(options: ContainerSnapshotOptions): Promise; + interceptOutboundHttps(addr: string, binding: Fetcher): Promise; +} +interface ContainerDirectorySnapshot { + id: string; + size: number; + dir: string; + name?: string; +} +interface ContainerDirectorySnapshotOptions { + dir: string; + name?: string; +} +interface ContainerDirectorySnapshotRestoreParams { + snapshot: ContainerDirectorySnapshot; + mountPoint?: string; +} +interface ContainerSnapshot { + id: string; + size: number; + name?: string; +} +interface ContainerSnapshotOptions { + name?: string; } interface ContainerStartupOptions { entrypoint?: string[]; enableInternet: boolean; env?: Record; + labels?: Record; + directorySnapshots?: ContainerDirectorySnapshotRestoreParams[]; + containerSnapshot?: ContainerSnapshot; } /** - * This Channel Messaging API interface represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other. + * The **`MessagePort`** interface of the Channel Messaging API represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort) */ declare abstract class MessagePort extends EventTarget { /** - * Posts a message through the channel. Objects listed in transfer are transferred, not just cloned, meaning that they are no longer usable on the sending side. 
- * - * Throws a "DataCloneError" DOMException if transfer contains duplicate objects or port, or if message could not be cloned. + * The **`postMessage()`** method of the transfers ownership of objects to other browsing contexts. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/postMessage) */ postMessage(data?: any, options?: (any[] | MessagePortPostMessageOptions)): void; /** - * Disconnects the port, so that it is no longer active. + * The **`close()`** method of the MessagePort interface disconnects the port, so it is no longer active. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/close) */ close(): void; /** - * Begins dispatching messages received on the port. + * The **`start()`** method of the MessagePort interface starts the sending of messages queued on the port. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/start) */ @@ -2542,20 +3296,20 @@ declare abstract class MessagePort extends EventTarget { set onmessage(value: any | null); } /** - * This Channel Messaging API interface allows us to create a new message channel and send data through it via its two MessagePort properties. + * The **`MessageChannel`** interface of the Channel Messaging API allows us to create a new message channel and send data through it via its two MessagePort properties. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel) */ declare class MessageChannel { constructor(); /** - * Returns the first MessagePort object. + * The **`port1`** read-only property of the the port attached to the context that originated the channel. * * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port1) */ readonly port1: MessagePort; /** - * Returns the second MessagePort object. + * The **`port2`** read-only property of the the port attached to the context at the other end of the channel, which the message is initially sent to. 
* * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port2) */ @@ -2599,7 +3353,8 @@ interface WorkerStubEntrypointOptions { props?: any; } interface WorkerLoader { - get(name: string, getCode: () => WorkerLoaderWorkerCode | Promise): WorkerStub; + get(name: string | null, getCode: () => WorkerLoaderWorkerCode | Promise): WorkerStub; + load(code: WorkerLoaderWorkerCode): WorkerStub; } interface WorkerLoaderModule { js?: string; @@ -2608,6 +3363,7 @@ interface WorkerLoaderModule { data?: ArrayBuffer; json?: any; py?: string; + wasm?: ArrayBuffer; } interface WorkerLoaderWorkerCode { compatibilityDate: string; @@ -2631,92 +3387,541 @@ declare abstract class Performance { get timeOrigin(): number; /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */ now(): number; + /** + * The **`toJSON()`** method of the Performance interface is a Serialization; it returns a JSON representation of the Performance object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/toJSON) + */ + toJSON(): object; } -type AiImageClassificationInput = { - image: number[]; -}; -type AiImageClassificationOutput = { - score?: number; - label?: string; -}[]; -declare abstract class BaseAiImageClassification { - inputs: AiImageClassificationInput; - postProcessedOutputs: AiImageClassificationOutput; +// ============ AI Search Error Interfaces ============ +interface AiSearchInternalError extends Error { } -type AiImageToTextInput = { - image: number[]; - prompt?: string; - max_tokens?: number; - temperature?: number; - top_p?: number; - top_k?: number; - seed?: number; - repetition_penalty?: number; - frequency_penalty?: number; - presence_penalty?: number; - raw?: boolean; - messages?: RoleScopedChatInput[]; +interface AiSearchNotFoundError extends Error { +} +// ============ AI Search Request Types ============ +type AiSearchSearchRequest = { + messages: Array<{ + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; + }>; + ai_search_options?: { + retrieval?: { + retrieval_type?: 'vector' | 'keyword' | 'hybrid'; + /** Match threshold (0-1, default 0.4) */ + match_threshold?: number; + /** Maximum number of results (1-50, default 10) */ + max_num_results?: number; + filters?: VectorizeVectorMetadataFilter; + /** Context expansion (0-3, default 0) */ + context_expansion?: number; + [key: string]: unknown; + }; + query_rewrite?: { + enabled?: boolean; + model?: string; + rewrite_prompt?: string; + [key: string]: unknown; + }; + reranking?: { + enabled?: boolean; + model?: '@cf/baai/bge-reranker-base' | string; + /** Match threshold (0-1, default 0.4) */ + match_threshold?: number; + [key: string]: unknown; + }; + [key: string]: unknown; + }; }; -type AiImageToTextOutput = { - description: string; +type AiSearchChatCompletionsRequest = { + messages: Array<{ + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string 
| null; + [key: string]: unknown; + }>; + model?: string; + stream?: boolean; + ai_search_options?: { + retrieval?: { + retrieval_type?: 'vector' | 'keyword' | 'hybrid'; + match_threshold?: number; + max_num_results?: number; + filters?: VectorizeVectorMetadataFilter; + context_expansion?: number; + [key: string]: unknown; + }; + query_rewrite?: { + enabled?: boolean; + model?: string; + rewrite_prompt?: string; + [key: string]: unknown; + }; + reranking?: { + enabled?: boolean; + model?: '@cf/baai/bge-reranker-base' | string; + match_threshold?: number; + [key: string]: unknown; + }; + [key: string]: unknown; + }; + [key: string]: unknown; }; -declare abstract class BaseAiImageToText { - inputs: AiImageToTextInput; - postProcessedOutputs: AiImageToTextOutput; -} -type AiImageTextToTextInput = { - image: string; - prompt?: string; - max_tokens?: number; - temperature?: number; - ignore_eos?: boolean; - top_p?: number; - top_k?: number; - seed?: number; - repetition_penalty?: number; - frequency_penalty?: number; - presence_penalty?: number; - raw?: boolean; - messages?: RoleScopedChatInput[]; +// ============ AI Search Response Types ============ +type AiSearchSearchResponse = { + search_query: string; + chunks: Array<{ + id: string; + type: string; + /** Match score (0-1) */ + score: number; + text: string; + item: { + timestamp?: number; + key: string; + metadata?: Record; + }; + scoring_details?: { + /** Keyword match score (0-1) */ + keyword_score?: number; + /** Vector similarity score (0-1) */ + vector_score?: number; + [key: string]: unknown; + }; + }>; }; -type AiImageTextToTextOutput = { - description: string; +type AiSearchChatCompletionsResponse = { + id?: string; + object?: string; + model?: string; + choices: Array<{ + index?: number; + message: { + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; + [key: string]: unknown; + }; + [key: string]: unknown; + }>; + chunks: AiSearchSearchResponse['chunks']; + [key: 
string]: unknown; }; -declare abstract class BaseAiImageTextToText { - inputs: AiImageTextToTextInput; - postProcessedOutputs: AiImageTextToTextOutput; -} -type AiMultimodalEmbeddingsInput = { - image: string; - text: string[]; +type AiSearchStatsResponse = { + queued?: number; + running?: number; + completed?: number; + error?: number; + skipped?: number; + outdated?: number; + last_activity?: string; }; -type AiIMultimodalEmbeddingsOutput = { - data: number[][]; - shape: number[]; +// ============ AI Search Instance Info Types ============ +type AiSearchInstanceInfo = { + id: string; + type?: 'r2' | 'web-crawler' | string; + source?: string; + paused?: boolean; + status?: string; + namespace?: string; + created_at?: string; + modified_at?: string; + [key: string]: unknown; }; -declare abstract class BaseAiMultimodalEmbeddings { - inputs: AiImageTextToTextInput; - postProcessedOutputs: AiImageTextToTextOutput; -} -type AiObjectDetectionInput = { - image: number[]; +type AiSearchListResponse = { + result: AiSearchInstanceInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; }; -type AiObjectDetectionOutput = { - score?: number; - label?: string; -}[]; -declare abstract class BaseAiObjectDetection { - inputs: AiObjectDetectionInput; - postProcessedOutputs: AiObjectDetectionOutput; -} -type AiSentenceSimilarityInput = { - source: string; - sentences: string[]; +// ============ AI Search Config Types ============ +type AiSearchConfig = { + /** Instance ID (1-32 chars, pattern: ^[a-z0-9_]+(?:-[a-z0-9_]+)*$) */ + id: string; + /** Instance type. Omit to create with built-in storage. */ + type?: 'r2' | 'web-crawler' | string; + /** Source URL (required for web-crawler type). 
*/ + source?: string; + source_params?: unknown; + /** Token ID (UUID format) */ + token_id?: string; + ai_gateway_id?: string; + /** Enable query rewriting (default false) */ + rewrite_query?: boolean; + /** Enable reranking (default false) */ + reranking?: boolean; + embedding_model?: string; + ai_search_model?: string; + [key: string]: unknown; }; -type AiSentenceSimilarityOutput = number[]; -declare abstract class BaseAiSentenceSimilarity { - inputs: AiSentenceSimilarityInput; - postProcessedOutputs: AiSentenceSimilarityOutput; +// ============ AI Search Item Types ============ +type AiSearchItemInfo = { + id: string; + key: string; + status: 'completed' | 'error' | 'skipped' | 'queued' | 'processing' | 'outdated'; + metadata?: Record; + [key: string]: unknown; +}; +type AiSearchItemContentResult = { + body: ReadableStream; + contentType: string; + filename: string; + size: number; +}; +type AiSearchUploadItemOptions = { + metadata?: Record; +}; +type AiSearchListItemsParams = { + page?: number; + per_page?: number; +}; +type AiSearchListItemsResponse = { + result: AiSearchItemInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +// ============ AI Search Job Types ============ +type AiSearchJobInfo = { + id: string; + source: 'user' | 'schedule'; + description?: string; + last_seen_at?: string; + started_at?: string; + ended_at?: string; + end_reason?: string; +}; +type AiSearchJobLog = { + id: number; + message: string; + message_type: number; + created_at: number; +}; +type AiSearchCreateJobParams = { + description?: string; +}; +type AiSearchListJobsParams = { + page?: number; + per_page?: number; +}; +type AiSearchListJobsResponse = { + result: AiSearchJobInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +type AiSearchJobLogsParams = { + page?: number; + per_page?: number; +}; +type AiSearchJobLogsResponse = { + result: AiSearchJobLog[]; + 
result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +// ============ AI Search Sub-Service Classes ============ +/** + * Single item service for an AI Search instance. + * Provides info, delete, and download operations on a specific item. + */ +declare abstract class AiSearchItem { + /** Get metadata about this item. */ + info(): Promise; + /** + * Download the item's content. + * @returns Object with body stream, content type, filename, and size. + */ + download(): Promise; +} +/** + * Items collection service for an AI Search instance. + * Provides list, upload, and access to individual items. + */ +declare abstract class AiSearchItems { + /** List items in this instance. */ + list(params?: AiSearchListItemsParams): Promise; + /** + * Upload a file as an item. + * @param name Filename for the uploaded item. + * @param content File content as a ReadableStream, ArrayBuffer, or string. + * @param options Optional metadata to attach to the item. + * @returns The created item info. + */ + upload(name: string, content: ReadableStream | ArrayBuffer | string, options?: AiSearchUploadItemOptions): Promise; + /** + * Upload a file and poll until processing completes. + * @param name Filename for the uploaded item. + * @param content File content as a ReadableStream, ArrayBuffer, or string. + * @param options Optional metadata to attach to the item. + * @returns The item info after processing completes (or timeout). + */ + uploadAndPoll(name: string, content: ReadableStream | ArrayBuffer | string, options?: AiSearchUploadItemOptions): Promise; + /** + * Get an item by ID. + * @param itemId The item identifier. + * @returns Item service for info, delete, and download operations. + */ + get(itemId: string): AiSearchItem; + /** Delete this item from the instance. + * @param itemId The item identifier. + */ + delete(itemId: string): Promise; +} +/** + * Single job service for an AI Search instance. 
+ * Provides info and logs for a specific job. + */ +declare abstract class AiSearchJob { + /** Get metadata about this job. */ + info(): Promise; + /** Get logs for this job. */ + logs(params?: AiSearchJobLogsParams): Promise; +} +/** + * Jobs collection service for an AI Search instance. + * Provides list, create, and access to individual jobs. + */ +declare abstract class AiSearchJobs { + /** List jobs for this instance. */ + list(params?: AiSearchListJobsParams): Promise; + /** + * Create a new indexing job. + * @param params Optional job parameters. + * @returns The created job info. + */ + create(params?: AiSearchCreateJobParams): Promise; + /** + * Get a job by ID. + * @param jobId The job identifier. + * @returns Job service for info and logs operations. + */ + get(jobId: string): AiSearchJob; +} +// ============ AI Search Binding Classes ============ +/** + * Instance-level AI Search service. + * + * Used as: + * - The return type of `AiSearchNamespace.get(name)` (namespace binding) + * - The type of `env.BLOG_SEARCH` (single instance binding via `ai_search`) + * + * Provides search, chat, update, stats, items, and jobs operations. + * + * @example + * ```ts + * // Via namespace binding + * const instance = env.AI_SEARCH.get("blog"); + * const results = await instance.search({ + * messages: [{ role: "user", content: "How does caching work?" }], + * }); + * + * // Via single instance binding + * const results = await env.BLOG_SEARCH.search({ + * messages: [{ role: "user", content: "How does caching work?" }], + * }); + * ``` + */ +declare abstract class AiSearchInstance { + /** + * Search the AI Search instance for relevant chunks. + * @param params Search request with messages and optional AI search options. + * @returns Search response with matching chunks and search query. + */ + search(params: AiSearchSearchRequest): Promise; + /** + * Generate chat completions with AI Search context (streaming). 
+ * @param params Chat completions request with stream: true. + * @returns ReadableStream of server-sent events. + */ + chatCompletions(params: AiSearchChatCompletionsRequest & { + stream: true; + }): Promise; + /** + * Generate chat completions with AI Search context. + * @param params Chat completions request. + * @returns Chat completion response with choices and RAG chunks. + */ + chatCompletions(params: AiSearchChatCompletionsRequest): Promise; + /** + * Update the instance configuration. + * @param config Partial configuration to update. + * @returns Updated instance info. + */ + update(config: Partial): Promise; + /** Get metadata about this instance. */ + info(): Promise; + /** + * Get instance statistics (item count, indexing status, etc.). + * @returns Statistics with counts per status and last activity time. + */ + stats(): Promise; + /** Items collection — list, upload, and manage items in this instance. */ + get items(): AiSearchItems; + /** Jobs collection — list, create, and inspect indexing jobs. */ + get jobs(): AiSearchJobs; +} +/** + * Namespace-level AI Search service. + * + * Used as the type of `env.AI_SEARCH` (namespace binding via `ai_search_namespaces`). + * Scoped to a single namespace. Provides dynamic instance access, creation, and deletion. + * + * @example + * ```ts + * // Access an instance within the namespace + * const blog = env.AI_SEARCH.get("blog"); + * const results = await blog.search({ + * messages: [{ role: "user", content: "How does caching work?" 
}], + * }); + * + * // List all instances in the namespace + * const instances = await env.AI_SEARCH.list(); + * + * // Create a new instance with built-in storage + * const tenant = await env.AI_SEARCH.create({ + * id: "tenant-123", + * }); + * + * // Upload items into the instance + * await tenant.items.upload("doc.pdf", fileContent); + * + * // Delete an instance + * await env.AI_SEARCH.delete("tenant-123"); + * ``` + */ +declare abstract class AiSearchNamespace { + /** + * Get an instance by name within the bound namespace. + * @param name Instance name. + * @returns Instance service for search, chat, update, stats, items, and jobs. + */ + get(name: string): AiSearchInstance; + /** + * List all instances in the bound namespace. + * @returns Array of instance metadata. + */ + list(): Promise; + /** + * Create a new instance within the bound namespace. + * @param config Instance configuration. Only `id` is required — omit `type` and `source` to create with built-in storage. + * @returns Instance service for the newly created instance. + * + * @example + * ```ts + * // Create with built-in storage (upload items manually) + * const instance = await env.AI_SEARCH.create({ id: "my-search" }); + * + * // Create with web crawler source + * const instance = await env.AI_SEARCH.create({ + * id: "docs-search", + * type: "web-crawler", + * source: "https://developers.cloudflare.com", + * }); + * ``` + */ + create(config: AiSearchConfig): Promise; + /** + * Delete an instance from the bound namespace. + * @param name Instance name to delete. 
+ */ + delete(name: string): Promise; +} +type AiImageClassificationInput = { + image: number[]; +}; +type AiImageClassificationOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiImageClassification { + inputs: AiImageClassificationInput; + postProcessedOutputs: AiImageClassificationOutput; +} +type AiImageToTextInput = { + image: number[]; + prompt?: string; + max_tokens?: number; + temperature?: number; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageToText { + inputs: AiImageToTextInput; + postProcessedOutputs: AiImageToTextOutput; +} +type AiImageTextToTextInput = { + image: string; + prompt?: string; + max_tokens?: number; + temperature?: number; + ignore_eos?: boolean; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageTextToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageTextToText { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiMultimodalEmbeddingsInput = { + image: string; + text: string[]; +}; +type AiIMultimodalEmbeddingsOutput = { + data: number[][]; + shape: number[]; +}; +declare abstract class BaseAiMultimodalEmbeddings { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiObjectDetectionInput = { + image: number[]; +}; +type AiObjectDetectionOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiObjectDetection { + inputs: AiObjectDetectionInput; + postProcessedOutputs: AiObjectDetectionOutput; +} +type AiSentenceSimilarityInput = { + source: string; + sentences: string[]; 
+}; +type AiSentenceSimilarityOutput = number[]; +declare abstract class BaseAiSentenceSimilarity { + inputs: AiSentenceSimilarityInput; + postProcessedOutputs: AiSentenceSimilarityOutput; } type AiAutomaticSpeechRecognitionInput = { audio: number[]; @@ -2895,193 +4100,2071 @@ declare abstract class BaseAiTranslation { inputs: AiTranslationInput; postProcessedOutputs: AiTranslationOutput; } -type Ai_Cf_Baai_Bge_Base_En_V1_5_Input = { - text: string | string[]; - /** - * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. - */ - pooling?: "mean" | "cls"; -} | { - /** - * Batch of the embeddings requests to run using async-queue - */ - requests: { - text: string | string[]; - /** - * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. - */ - pooling?: "mean" | "cls"; - }[]; +/** + * Workers AI support for OpenAI's Chat Completions API + */ +type ChatCompletionContentPartText = { + type: "text"; + text: string; }; -type Ai_Cf_Baai_Bge_Base_En_V1_5_Output = { - shape?: number[]; - /** - * Embeddings of the requested text values - */ - data?: number[][]; - /** - * The pooling method used in the embedding process. - */ - pooling?: "mean" | "cls"; -} | AsyncResponse; -interface AsyncResponse { - /** - * The async request id that can be used to obtain the results. 
- */ - request_id?: string; -} -declare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 { - inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output; -} -type Ai_Cf_Openai_Whisper_Input = string | { - /** - * An array of integers that represent the audio data constrained to 8-bit unsigned integer values - */ - audio: number[]; +type ChatCompletionContentPartImage = { + type: "image_url"; + image_url: { + url: string; + detail?: "auto" | "low" | "high"; + }; }; -interface Ai_Cf_Openai_Whisper_Output { - /** - * The transcription - */ - text: string; - word_count?: number; - words?: { - word?: string; - /** - * The second this word begins in the recording - */ - start?: number; - /** - * The ending second when the word completes - */ - end?: number; - }[]; - vtt?: string; -} -declare abstract class Base_Ai_Cf_Openai_Whisper { - inputs: Ai_Cf_Openai_Whisper_Input; - postProcessedOutputs: Ai_Cf_Openai_Whisper_Output; -} -type Ai_Cf_Meta_M2M100_1_2B_Input = { - /** - * The text to be translated - */ - text: string; - /** - * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified - */ - source_lang?: string; - /** - * The language code to translate the text into (e.g., 'es' for Spanish) - */ - target_lang: string; +type ChatCompletionContentPartInputAudio = { + type: "input_audio"; + input_audio: { + /** Base64 encoded audio data. */ + data: string; + format: "wav" | "mp3"; + }; +}; +type ChatCompletionContentPartFile = { + type: "file"; + file: { + /** Base64 encoded file data. */ + file_data?: string; + /** The ID of an uploaded file. 
*/ + file_id?: string; + filename?: string; + }; +}; +type ChatCompletionContentPartRefusal = { + type: "refusal"; + refusal: string; +}; +type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage | ChatCompletionContentPartInputAudio | ChatCompletionContentPartFile; +type FunctionDefinition = { + name: string; + description?: string; + parameters?: Record; + strict?: boolean | null; +}; +type ChatCompletionFunctionTool = { + type: "function"; + function: FunctionDefinition; +}; +type ChatCompletionCustomToolGrammarFormat = { + type: "grammar"; + grammar: { + definition: string; + syntax: "lark" | "regex"; + }; +}; +type ChatCompletionCustomToolTextFormat = { + type: "text"; +}; +type ChatCompletionCustomToolFormat = ChatCompletionCustomToolTextFormat | ChatCompletionCustomToolGrammarFormat; +type ChatCompletionCustomTool = { + type: "custom"; + custom: { + name: string; + description?: string; + format?: ChatCompletionCustomToolFormat; + }; +}; +type ChatCompletionTool = ChatCompletionFunctionTool | ChatCompletionCustomTool; +type ChatCompletionMessageFunctionToolCall = { + id: string; + type: "function"; + function: { + name: string; + /** JSON-encoded arguments string. 
*/ + arguments: string; + }; +}; +type ChatCompletionMessageCustomToolCall = { + id: string; + type: "custom"; + custom: { + name: string; + input: string; + }; +}; +type ChatCompletionMessageToolCall = ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall; +type ChatCompletionToolChoiceFunction = { + type: "function"; + function: { + name: string; + }; +}; +type ChatCompletionToolChoiceCustom = { + type: "custom"; + custom: { + name: string; + }; +}; +type ChatCompletionToolChoiceAllowedTools = { + type: "allowed_tools"; + allowed_tools: { + mode: "auto" | "required"; + tools: Array>; + }; +}; +type ChatCompletionToolChoiceOption = "none" | "auto" | "required" | ChatCompletionToolChoiceFunction | ChatCompletionToolChoiceCustom | ChatCompletionToolChoiceAllowedTools; +type DeveloperMessage = { + role: "developer"; + content: string | Array<{ + type: "text"; + text: string; + }>; + name?: string; +}; +type SystemMessage = { + role: "system"; + content: string | Array<{ + type: "text"; + text: string; + }>; + name?: string; +}; +/** + * Permissive merged content part used inside UserMessage arrays. + * + * Cabidela has a limitation where anyOf/oneOf with enum-based discrimination + * inside nested array items does not correctly match different branches for + * different array elements, so the schema uses a single merged object. 
+ */ +type UserMessageContentPart = { + type: "text" | "image_url" | "input_audio" | "file"; + text?: string; + image_url?: { + url?: string; + detail?: "auto" | "low" | "high"; + }; + input_audio?: { + data?: string; + format?: "wav" | "mp3"; + }; + file?: { + file_data?: string; + file_id?: string; + filename?: string; + }; +}; +type UserMessage = { + role: "user"; + content: string | Array; + name?: string; +}; +type AssistantMessageContentPart = { + type: "text" | "refusal"; + text?: string; + refusal?: string; +}; +type AssistantMessage = { + role: "assistant"; + content?: string | null | Array; + refusal?: string | null; + name?: string; + audio?: { + id: string; + }; + tool_calls?: Array; + function_call?: { + name: string; + arguments: string; + }; +}; +type ToolMessage = { + role: "tool"; + content: string | Array<{ + type: "text"; + text: string; + }>; + tool_call_id: string; +}; +type FunctionMessage = { + role: "function"; + content: string; + name: string; +}; +type ChatCompletionMessageParam = DeveloperMessage | SystemMessage | UserMessage | AssistantMessage | ToolMessage | FunctionMessage; +type ChatCompletionsResponseFormatText = { + type: "text"; +}; +type ChatCompletionsResponseFormatJSONObject = { + type: "json_object"; +}; +type ResponseFormatJSONSchema = { + type: "json_schema"; + json_schema: { + name: string; + description?: string; + schema?: Record; + strict?: boolean | null; + }; +}; +type ResponseFormat = ChatCompletionsResponseFormatText | ChatCompletionsResponseFormatJSONObject | ResponseFormatJSONSchema; +type ChatCompletionsStreamOptions = { + include_usage?: boolean; + include_obfuscation?: boolean; +}; +type PredictionContent = { + type: "content"; + content: string | Array<{ + type: "text"; + text: string; + }>; +}; +type AudioParams = { + voice: string | { + id: string; + }; + format: "wav" | "aac" | "mp3" | "flac" | "opus" | "pcm16"; +}; +type WebSearchUserLocation = { + type: "approximate"; + approximate: { + city?: string; + 
country?: string; + region?: string; + timezone?: string; + }; +}; +type WebSearchOptions = { + search_context_size?: "low" | "medium" | "high"; + user_location?: WebSearchUserLocation; +}; +type ChatTemplateKwargs = { + /** Whether to enable reasoning, enabled by default. */ + enable_thinking?: boolean; + /** If false, preserves reasoning context between turns. */ + clear_thinking?: boolean; +}; +/** Shared optional properties used by both Prompt and Messages input branches. */ +type ChatCompletionsCommonOptions = { + model?: string; + audio?: AudioParams; + frequency_penalty?: number | null; + logit_bias?: Record | null; + logprobs?: boolean | null; + top_logprobs?: number | null; + max_tokens?: number | null; + max_completion_tokens?: number | null; + metadata?: Record | null; + modalities?: Array<"text" | "audio"> | null; + n?: number | null; + parallel_tool_calls?: boolean; + prediction?: PredictionContent; + presence_penalty?: number | null; + reasoning_effort?: "low" | "medium" | "high" | null; + chat_template_kwargs?: ChatTemplateKwargs; + response_format?: ResponseFormat; + seed?: number | null; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + stop?: string | Array | null; + store?: boolean | null; + stream?: boolean | null; + stream_options?: ChatCompletionsStreamOptions; + temperature?: number | null; + tool_choice?: ChatCompletionToolChoiceOption; + tools?: Array; + top_p?: number | null; + user?: string; + web_search_options?: WebSearchOptions; + function_call?: "none" | "auto" | { + name: string; + }; + functions?: Array; +}; +type PromptTokensDetails = { + cached_tokens?: number; + audio_tokens?: number; +}; +type CompletionTokensDetails = { + reasoning_tokens?: number; + audio_tokens?: number; + accepted_prediction_tokens?: number; + rejected_prediction_tokens?: number; +}; +type CompletionUsage = { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + prompt_tokens_details?: 
PromptTokensDetails; + completion_tokens_details?: CompletionTokensDetails; +}; +type ChatCompletionTopLogprob = { + token: string; + logprob: number; + bytes: Array | null; +}; +type ChatCompletionTokenLogprob = { + token: string; + logprob: number; + bytes: Array | null; + top_logprobs: Array; +}; +type ChatCompletionAudio = { + id: string; + /** Base64 encoded audio bytes. */ + data: string; + expires_at: number; + transcript: string; +}; +type ChatCompletionUrlCitation = { + type: "url_citation"; + url_citation: { + url: string; + title: string; + start_index: number; + end_index: number; + }; +}; +type ChatCompletionResponseMessage = { + role: "assistant"; + content: string | null; + refusal: string | null; + annotations?: Array; + audio?: ChatCompletionAudio; + tool_calls?: Array; + function_call?: { + name: string; + arguments: string; + } | null; +}; +type ChatCompletionLogprobs = { + content: Array | null; + refusal?: Array | null; +}; +type ChatCompletionChoice = { + index: number; + message: ChatCompletionResponseMessage; + finish_reason: "stop" | "length" | "tool_calls" | "content_filter" | "function_call"; + logprobs: ChatCompletionLogprobs | null; +}; +type ChatCompletionsPromptInput = { + prompt: string; +} & ChatCompletionsCommonOptions; +type ChatCompletionsMessagesInput = { + messages: Array; +} & ChatCompletionsCommonOptions; +type ChatCompletionsOutput = { + id: string; + object: string; + created: number; + model: string; + choices: Array; + usage?: CompletionUsage; + system_fingerprint?: string | null; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; +}; +/** + * Workers AI support for OpenAI's Responses API + * Reference: https://github.com/openai/openai-node/blob/master/src/resources/responses/responses.ts + * + * It's a stripped down version from its source. + * It currently supports basic function calling, json mode and accepts images as input. 
+ * + * It does not include types for WebSearch, CodeInterpreter, FileInputs, MCP, CustomTools. + * We plan to add those incrementally as model + platform capabilities evolve. + */ +type ResponsesInput = { + background?: boolean | null; + conversation?: string | ResponseConversationParam | null; + include?: Array | null; + input?: string | ResponseInput; + instructions?: string | null; + max_output_tokens?: number | null; + parallel_tool_calls?: boolean | null; + previous_response_id?: string | null; + prompt_cache_key?: string; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + stream?: boolean | null; + stream_options?: StreamOptions | null; + temperature?: number | null; + text?: ResponseTextConfig; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + truncation?: "auto" | "disabled" | null; +}; +type ResponsesOutput = { + id?: string; + created_at?: number; + output_text?: string; + error?: ResponseError | null; + incomplete_details?: ResponseIncompleteDetails | null; + instructions?: string | Array | null; + object?: "response"; + output?: Array; + parallel_tool_calls?: boolean; + temperature?: number | null; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + max_output_tokens?: number | null; + previous_response_id?: string | null; + prompt?: ResponsePrompt | null; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + status?: ResponseStatus; + text?: ResponseTextConfig; + truncation?: "auto" | "disabled" | null; + usage?: ResponseUsage; +}; +type EasyInputMessage = { + content: string | ResponseInputMessageContentList; + role: "user" | "assistant" | "system" | "developer"; + type?: "message"; +}; +type ResponsesFunctionTool = { + name: string; + parameters: { + [key: string]: unknown; + } | 
null; + strict: boolean | null; + type: "function"; + description?: string | null; +}; +type ResponseIncompleteDetails = { + reason?: "max_output_tokens" | "content_filter"; +}; +type ResponsePrompt = { + id: string; + variables?: { + [key: string]: string | ResponseInputText | ResponseInputImage; + } | null; + version?: string | null; +}; +type Reasoning = { + effort?: ReasoningEffort | null; + generate_summary?: "auto" | "concise" | "detailed" | null; + summary?: "auto" | "concise" | "detailed" | null; +}; +type ResponseContent = ResponseInputText | ResponseInputImage | ResponseOutputText | ResponseOutputRefusal | ResponseContentReasoningText; +type ResponseContentReasoningText = { + text: string; + type: "reasoning_text"; +}; +type ResponseConversationParam = { + id: string; +}; +type ResponseCreatedEvent = { + response: Response; + sequence_number: number; + type: "response.created"; +}; +type ResponseCustomToolCallOutput = { + call_id: string; + output: string | Array; + type: "custom_tool_call_output"; + id?: string; +}; +type ResponseError = { + code: "server_error" | "rate_limit_exceeded" | "invalid_prompt" | "vector_store_timeout" | "invalid_image" | "invalid_image_format" | "invalid_base64_image" | "invalid_image_url" | "image_too_large" | "image_too_small" | "image_parse_error" | "image_content_policy_violation" | "invalid_image_mode" | "image_file_too_large" | "unsupported_image_media_type" | "empty_image_file" | "failed_to_download_image" | "image_file_not_found"; + message: string; +}; +type ResponseErrorEvent = { + code: string | null; + message: string; + param: string | null; + sequence_number: number; + type: "error"; +}; +type ResponseFailedEvent = { + response: Response; + sequence_number: number; + type: "response.failed"; +}; +type ResponseFormatText = { + type: "text"; +}; +type ResponseFormatJSONObject = { + type: "json_object"; +}; +type ResponseFormatTextConfig = ResponseFormatText | ResponseFormatTextJSONSchemaConfig | 
ResponseFormatJSONObject; +type ResponseFormatTextJSONSchemaConfig = { + name: string; + schema: { + [key: string]: unknown; + }; + type: "json_schema"; + description?: string; + strict?: boolean | null; +}; +type ResponseFunctionCallArgumentsDeltaEvent = { + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.function_call_arguments.delta"; +}; +type ResponseFunctionCallArgumentsDoneEvent = { + arguments: string; + item_id: string; + name: string; + output_index: number; + sequence_number: number; + type: "response.function_call_arguments.done"; +}; +type ResponseFunctionCallOutputItem = ResponseInputTextContent | ResponseInputImageContent; +type ResponseFunctionCallOutputItemList = Array; +type ResponseFunctionToolCall = { + arguments: string; + call_id: string; + name: string; + type: "function_call"; + id?: string; + status?: "in_progress" | "completed" | "incomplete"; +}; +interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + id: string; +} +type ResponseFunctionToolCallOutputItem = { + id: string; + call_id: string; + output: string | Array; + type: "function_call_output"; + status?: "in_progress" | "completed" | "incomplete"; +}; +type ResponseIncludable = "message.input_image.image_url" | "message.output_text.logprobs"; +type ResponseIncompleteEvent = { + response: Response; + sequence_number: number; + type: "response.incomplete"; +}; +type ResponseInput = Array; +type ResponseInputContent = ResponseInputText | ResponseInputImage; +type ResponseInputImage = { + detail: "low" | "high" | "auto"; + type: "input_image"; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputImageContent = { + type: "input_image"; + detail?: "low" | "high" | "auto" | null; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputItem = EasyInputMessage | ResponseInputItemMessage | ResponseOutputMessage | ResponseFunctionToolCall | 
ResponseInputItemFunctionCallOutput | ResponseReasoningItem; +type ResponseInputItemFunctionCallOutput = { + call_id: string; + output: string | ResponseFunctionCallOutputItemList; + type: "function_call_output"; + id?: string | null; + status?: "in_progress" | "completed" | "incomplete" | null; +}; +type ResponseInputItemMessage = { + content: ResponseInputMessageContentList; + role: "user" | "system" | "developer"; + status?: "in_progress" | "completed" | "incomplete"; + type?: "message"; +}; +type ResponseInputMessageContentList = Array; +type ResponseInputMessageItem = { + id: string; + content: ResponseInputMessageContentList; + role: "user" | "system" | "developer"; + status?: "in_progress" | "completed" | "incomplete"; + type?: "message"; +}; +type ResponseInputText = { + text: string; + type: "input_text"; +}; +type ResponseInputTextContent = { + text: string; + type: "input_text"; +}; +type ResponseItem = ResponseInputMessageItem | ResponseOutputMessage | ResponseFunctionToolCallItem | ResponseFunctionToolCallOutputItem; +type ResponseOutputItem = ResponseOutputMessage | ResponseFunctionToolCall | ResponseReasoningItem; +type ResponseOutputItemAddedEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: "response.output_item.added"; +}; +type ResponseOutputItemDoneEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: "response.output_item.done"; +}; +type ResponseOutputMessage = { + id: string; + content: Array; + role: "assistant"; + status: "in_progress" | "completed" | "incomplete"; + type: "message"; +}; +type ResponseOutputRefusal = { + refusal: string; + type: "refusal"; +}; +type ResponseOutputText = { + text: string; + type: "output_text"; + logprobs?: Array; +}; +type ResponseReasoningItem = { + id: string; + summary: Array; + type: "reasoning"; + content?: Array; + encrypted_content?: string | null; + status?: "in_progress" | "completed" | "incomplete"; +}; +type 
ResponseReasoningSummaryItem = { + text: string; + type: "summary_text"; +}; +type ResponseReasoningContentItem = { + text: string; + type: "reasoning_text"; +}; +type ResponseReasoningTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.reasoning_text.delta"; +}; +type ResponseReasoningTextDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + sequence_number: number; + text: string; + type: "response.reasoning_text.done"; +}; +type ResponseRefusalDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.refusal.delta"; +}; +type ResponseRefusalDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + refusal: string; + sequence_number: number; + type: "response.refusal.done"; +}; +type ResponseStatus = "completed" | "failed" | "in_progress" | "cancelled" | "queued" | "incomplete"; +type ResponseStreamEvent = ResponseCompletedEvent | ResponseCreatedEvent | ResponseErrorEvent | ResponseFunctionCallArgumentsDeltaEvent | ResponseFunctionCallArgumentsDoneEvent | ResponseFailedEvent | ResponseIncompleteEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent | ResponseReasoningTextDeltaEvent | ResponseReasoningTextDoneEvent | ResponseRefusalDeltaEvent | ResponseRefusalDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent; +type ResponseCompletedEvent = { + response: Response; + sequence_number: number; + type: "response.completed"; +}; +type ResponseTextConfig = { + format?: ResponseFormatTextConfig; + verbosity?: "low" | "medium" | "high" | null; +}; +type ResponseTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + logprobs: Array; + output_index: number; + sequence_number: number; + type: "response.output_text.delta"; +}; +type ResponseTextDoneEvent = { + content_index: number; + item_id: 
string; + logprobs: Array; + output_index: number; + sequence_number: number; + text: string; + type: "response.output_text.done"; +}; +type Logprob = { + token: string; + logprob: number; + top_logprobs?: Array; +}; +type TopLogprob = { + token?: string; + logprob?: number; +}; +type ResponseUsage = { + input_tokens: number; + output_tokens: number; + total_tokens: number; +}; +type Tool = ResponsesFunctionTool; +type ToolChoiceFunction = { + name: string; + type: "function"; +}; +type ToolChoiceOptions = "none"; +type ReasoningEffort = "minimal" | "low" | "medium" | "high" | null; +type StreamOptions = { + include_obfuscation?: boolean; +}; +/** Marks keys from T that aren't in U as optional never */ +type Without = { + [P in Exclude]?: never; +}; +/** Either T or U, but not both (mutually exclusive) */ +type XOR = (T & Without) | (U & Without); +type Ai_Cf_Baai_Bge_Base_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; } | { /** - * Batch of the embeddings requests to run using async-queue + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. 
+ */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Base_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output; +} +type Ai_Cf_Openai_Whisper_Input = string | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; +}; +interface Ai_Cf_Openai_Whisper_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper { + inputs: Ai_Cf_Openai_Whisper_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Output; +} +type Ai_Cf_Meta_M2M100_1_2B_Input = { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). 
Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; + }[]; +}; +type Ai_Cf_Meta_M2M100_1_2B_Output = { + /** + * The translated text in the target language + */ + translated_text?: string; +} | Ai_Cf_Meta_M2M100_1_2B_AsyncResponse; +interface Ai_Cf_Meta_M2M100_1_2B_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Meta_M2M100_1_2B { + inputs: Ai_Cf_Meta_M2M100_1_2B_Input; + postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output; +} +type Ai_Cf_Baai_Bge_Small_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Small_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. 
+ */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output; +} +type Ai_Cf_Baai_Bge_Large_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Large_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output; +} +type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = string | { + /** + * The input text prompt for the model to generate a response. + */ + prompt?: string; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + image: number[] | (string & NonNullable); + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; +}; +interface Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output { + description?: string; +} +declare abstract class Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M { + inputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input; + postProcessedOutputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output; +} +type Ai_Cf_Openai_Whisper_Tiny_En_Input = string | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; +}; +interface Ai_Cf_Openai_Whisper_Tiny_En_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Tiny_En { + inputs: Ai_Cf_Openai_Whisper_Tiny_En_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Tiny_En_Output; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input { + audio: string | { + body?: object; + contentType?: string; + }; + /** + * Supported tasks are 'translate' or 'transcribe'. + */ + task?: string; + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * Preprocess the audio with a voice activity detection model. + */ + vad_filter?: boolean; + /** + * A text prompt to help provide context to the model on the contents of the audio. + */ + initial_prompt?: string; + /** + * The prefix appended to the beginning of the output of the transcription and can guide the transcription result. + */ + prefix?: string; + /** + * The number of beams to use in beam search decoding. Higher values may improve accuracy at the cost of speed. + */ + beam_size?: number; + /** + * Whether to condition on previous text during transcription. Setting to false may help prevent hallucination loops. 
+ */ + condition_on_previous_text?: boolean; + /** + * Threshold for detecting no-speech segments. Segments with no-speech probability above this value are skipped. + */ + no_speech_threshold?: number; + /** + * Threshold for filtering out segments with high compression ratio, which often indicate repetitive or hallucinated text. + */ + compression_ratio_threshold?: number; + /** + * Threshold for filtering out segments with low average log probability, indicating low confidence. + */ + log_prob_threshold?: number; + /** + * Optional threshold (in seconds) to skip silent periods that may cause hallucinations. + */ + hallucination_silence_threshold?: number; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output { + transcription_info?: { + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * The confidence level or probability of the detected language being accurate, represented as a decimal between 0 and 1. + */ + language_probability?: number; + /** + * The total duration of the original audio file, in seconds. + */ + duration?: number; + /** + * The duration of the audio after applying Voice Activity Detection (VAD) to remove silent or irrelevant sections, in seconds. + */ + duration_after_vad?: number; + }; + /** + * The complete transcription of the audio. + */ + text: string; + /** + * The total number of words in the transcription. + */ + word_count?: number; + segments?: { + /** + * The starting time of the segment within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the segment within the audio, in seconds. + */ + end?: number; + /** + * The transcription of the segment. + */ + text?: string; + /** + * The temperature used in the decoding process, controlling randomness in predictions. Lower values result in more deterministic outputs. 
+ */ + temperature?: number; + /** + * The average log probability of the predictions for the words in this segment, indicating overall confidence. + */ + avg_logprob?: number; + /** + * The compression ratio of the input to the output, measuring how much the text was compressed during the transcription process. + */ + compression_ratio?: number; + /** + * The probability that the segment contains no speech, represented as a decimal between 0 and 1. + */ + no_speech_prob?: number; + words?: { + /** + * The individual word transcribed from the audio. + */ + word?: string; + /** + * The starting time of the word within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the word within the audio, in seconds. + */ + end?: number; + }[]; + }[]; + /** + * The transcription in WebVTT format, which includes timing and text information for use in subtitles. + */ + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo { + inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output; +} +type Ai_Cf_Baai_Bge_M3_Input = Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts | Ai_Cf_Baai_Bge_M3_Input_Embedding | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: (Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 | Ai_Cf_Baai_Bge_M3_Input_Embedding_1)[]; +}; +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? 
+ */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding_1 { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +type Ai_Cf_Baai_Bge_M3_Output = Ai_Cf_Baai_Bge_M3_Output_Query | Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts | Ai_Cf_Baai_Bge_M3_Output_Embedding | Ai_Cf_Baai_Bge_M3_AsyncResponse; +interface Ai_Cf_Baai_Bge_M3_Output_Query { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. + */ + score?: number; + }[]; +} +interface Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts { + response?: number[][]; + shape?: number[]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} +interface Ai_Cf_Baai_Bge_M3_Output_Embedding { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} +interface Ai_Cf_Baai_Bge_M3_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_M3 { + inputs: Ai_Cf_Baai_Bge_M3_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input { + /** + * A text description of the image you want to generate. + */ + prompt: string; + /** + * The number of diffusion steps; higher values can improve quality but take longer. + */ + steps?: number; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output { + /** + * The generated image in Base64 format. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell { + inputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input = Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages; +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + image?: number[] | (string & NonNullable); + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; +} +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + image?: number[] | (string & NonNullable); + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. 
+ */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * If true, the response will be streamed back incrementally. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. 
+ */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { + /** + * The generated text response from the model + */ + response?: string; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct { + inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input = Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. 
+ */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. 
+ */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch { + requests?: { + /** + * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique. + */ + external_reference?: string; + /** + * Prompt for the text generation model + */ + prompt?: string; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. 
+ */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2; + }[]; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output = { + /** + * The generated text response from the model */ - requests: { + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { /** - * The text to be translated + * Total number of tokens in input */ - text: string; + prompt_tokens?: number; /** - * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + * Total number of tokens in output */ - source_lang?: string; + completion_tokens?: number; /** - * The language code to translate the text into (e.g., 'es' for Spanish) + * Total number of input and output tokens */ - target_lang: string; + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; }[]; -}; -type Ai_Cf_Meta_M2M100_1_2B_Output = { +} | string | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse { /** - * The translated text in the target language + * The async request id that can be used to obtain the results. 
*/ - translated_text?: string; -} | AsyncResponse; -declare abstract class Base_Ai_Cf_Meta_M2M100_1_2B { - inputs: Ai_Cf_Meta_M2M100_1_2B_Input; - postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output; + request_id?: string; } -type Ai_Cf_Baai_Bge_Small_En_V1_5_Input = { - text: string | string[]; +declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast { + inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { /** - * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + * An array of message objects representing the conversation history. */ - pooling?: "mean" | "cls"; -} | { + messages: { + /** + * The role of the message sender must alternate between 'user' and 'assistant'. + */ + role: "user" | "assistant"; + /** + * The content of the message as a string. + */ + content: string; + }[]; /** - * Batch of the embeddings requests to run using async-queue + * The maximum number of tokens to generate in the response. */ - requests: { - text: string | string[]; + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Dictate the output format of the generated response. + */ + response_format?: { + /** + * Set to json_object to process and output generated text as JSON. + */ + type?: string; + }; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { + response?: string | { + /** + * Whether the conversation is safe or not. 
+ */ + safe?: boolean; + /** + * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. + */ + categories?: string[]; + }; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { + inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Input { + /** + * A query you wish to perform against the provided contexts. + */ + /** + * Number of returned results starting with the best score. + */ + top_k?: number; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Output { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. + */ + score?: number; + }[]; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { + inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input = Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages; +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. 
+ */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. 
+ */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { /** - * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + * Specifies the type of tool (e.g., 'function'). */ - pooling?: "mean" | "cls"; - }[]; -}; -type Ai_Cf_Baai_Bge_Small_En_V1_5_Output = { - shape?: number[]; - /** - * Embeddings of the requested text values - */ - data?: number[][]; - /** - * The pooling method used in the embedding process. - */ - pooling?: "mean" | "cls"; -} | AsyncResponse; -declare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 { - inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output; -} -type Ai_Cf_Baai_Bge_Large_En_V1_5_Input = { - text: string | string[]; - /** - * The pooling method used in the embedding process. 
`cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. - */ - pooling?: "mean" | "cls"; -} | { - /** - * Batch of the embeddings requests to run using async-queue - */ - requests: { - text: string | string[]; + type: string; /** - * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + * Details of the function tool. */ - pooling?: "mean" | "cls"; - }[]; -}; -type Ai_Cf_Baai_Bge_Large_En_V1_5_Output = { - shape?: number[]; + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1; /** - * Embeddings of the requested text values + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ - data?: number[][]; + raw?: boolean; /** - * The pooling method used in the embedding process. + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ - pooling?: "mean" | "cls"; -} | AsyncResponse; -declare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 { - inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output; -} -type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = string | { + stream?: boolean; /** - * The input text prompt for the model to generate a response. + * The maximum number of tokens to generate in the response. */ - prompt?: string; + max_tokens?: number; /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * Controls the randomness of the output; higher values produce more random results. */ - raw?: boolean; + temperature?: number; /** - * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. */ top_p?: number; /** @@ -3104,275 +6187,327 @@ type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = string | { * Increases the likelihood of the model introducing new topics. */ presence_penalty?: number; - image: number[] | (string & NonNullable); - /** - * The maximum number of tokens to generate in the response. 
- */ - max_tokens?: number; -}; -interface Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output { - description?: string; } -declare abstract class Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M { - inputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input; - postProcessedOutputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output; +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; } -type Ai_Cf_Openai_Whisper_Tiny_En_Input = string | { +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { /** - * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + * The generated text response from the model */ - audio: number[]; -}; -interface Ai_Cf_Openai_Whisper_Tiny_En_Output { + response: string; /** - * The transcription + * Usage statistics for the inference request */ - text: string; - word_count?: number; - words?: { - word?: string; + usage?: { /** - * The second this word begins in the recording + * Total number of tokens in input */ - start?: number; + prompt_tokens?: number; /** - * The ending second when the word completes + * Total number of tokens in output */ - end?: number; + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; }[]; - vtt?: string; -} -declare abstract class Base_Ai_Cf_Openai_Whisper_Tiny_En { - inputs: Ai_Cf_Openai_Whisper_Tiny_En_Input; - postProcessedOutputs: Ai_Cf_Openai_Whisper_Tiny_En_Output; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct { + inputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output; } -interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input { +type 
Ai_Cf_Qwen_Qwq_32B_Input = Ai_Cf_Qwen_Qwq_32B_Prompt | Ai_Cf_Qwen_Qwq_32B_Messages; +interface Ai_Cf_Qwen_Qwq_32B_Prompt { /** - * Base64 encoded value of the audio data. + * The input text prompt for the model to generate a response. */ - audio: string; + prompt: string; /** - * Supported tasks are 'translate' or 'transcribe'. + * JSON schema that should be fulfilled for the response. */ - task?: string; + guided_json?: object; /** - * The language of the audio being transcribed or translated. + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ - language?: string; + raw?: boolean; /** - * Preprocess the audio with a voice activity detection model. + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ - vad_filter?: boolean; + stream?: boolean; /** - * A text prompt to help provide context to the model on the contents of the audio. + * The maximum number of tokens to generate in the response. */ - initial_prompt?: string; + max_tokens?: number; /** - * The prefix it appended the the beginning of the output of the transcription and can guide the transcription result. + * Controls the randomness of the output; higher values produce more random results. */ - prefix?: string; + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. 
+ */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; } -interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output { - transcription_info?: { - /** - * The language of the audio being transcribed or translated. - */ - language?: string; - /** - * The confidence level or probability of the detected language being accurate, represented as a decimal between 0 and 1. - */ - language_probability?: number; +interface Ai_Cf_Qwen_Qwq_32B_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { /** - * The total duration of the original audio file, in seconds. + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - duration?: number; + role?: string; /** - * The duration of the audio after applying Voice Activity Detection (VAD) to remove silent or irrelevant sections, in seconds. + * The tool call id. If you don't know what to put here you can fall back to 000000001 */ - duration_after_vad?: number; - }; - /** - * The complete transcription of the audio. - */ - text: string; + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; /** - * The total number of words in the transcription. + * A list of tools available for the assistant to use. 
*/ - word_count?: number; - segments?: { - /** - * The starting time of the segment within the audio, in seconds. - */ - start?: number; - /** - * The ending time of the segment within the audio, in seconds. - */ - end?: number; + tools?: ({ /** - * The transcription of the segment. + * The name of the tool. More descriptive the better. */ - text?: string; + name: string; /** - * The temperature used in the decoding process, controlling randomness in predictions. Lower values result in more deterministic outputs. + * A brief description of what the tool does. */ - temperature?: number; + description: string; /** - * The average log probability of the predictions for the words in this segment, indicating overall confidence. + * Schema defining the parameters accepted by the tool. */ - avg_logprob?: number; + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { /** - * The compression ratio of the input to the output, measuring how much the text was compressed during the transcription process. + * Specifies the type of tool (e.g., 'function'). */ - compression_ratio?: number; + type: string; /** - * The probability that the segment contains no speech, represented as a decimal between 0 and 1. + * Details of the function tool. */ - no_speech_prob?: number; - words?: { + function: { /** - * The individual word transcribed from the audio. + * The name of the function. */ - word?: string; + name: string; /** - * The starting time of the word within the audio, in seconds. + * A brief description of what the function does. 
*/ - start?: number; + description: string; /** - * The ending time of the word within the audio, in seconds. + * Schema defining the parameters accepted by the function. */ - end?: number; - }[]; - }[]; + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; /** - * The transcription in WebVTT format, which includes timing and text information for use in subtitles. + * JSON schema that should be fufilled for the response. */ - vtt?: string; -} -declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo { - inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input; - postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output; -} -type Ai_Cf_Baai_Bge_M3_Input = BGEM3InputQueryAndContexts | BGEM3InputEmbedding | { + guided_json?: object; /** - * Batch of the embeddings requests to run using async-queue + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ - requests: (BGEM3InputQueryAndContexts1 | BGEM3InputEmbedding1)[]; -}; -interface BGEM3InputQueryAndContexts { + raw?: boolean; /** - * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ - query?: string; + stream?: boolean; /** - * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + * The maximum number of tokens to generate in the response. 
*/ - contexts: { - /** - * One of the provided context content - */ - text?: string; - }[]; + max_tokens?: number; /** - * When provided with too long context should the model error out or truncate the context to fit? + * Controls the randomness of the output; higher values produce more random results. */ - truncate_inputs?: boolean; -} -interface BGEM3InputEmbedding { - text: string | string[]; + temperature?: number; /** - * When provided with too long context should the model error out or truncate the context to fit? + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. */ - truncate_inputs?: boolean; -} -interface BGEM3InputQueryAndContexts1 { + top_p?: number; /** - * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. */ - query?: string; + top_k?: number; /** - * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + * Random seed for reproducibility of the generation. */ - contexts: { - /** - * One of the provided context content - */ - text?: string; - }[]; + seed?: number; /** - * When provided with too long context should the model error out or truncate the context to fit? + * Penalty for repeated tokens; higher values discourage repetition. */ - truncate_inputs?: boolean; -} -interface BGEM3InputEmbedding1 { - text: string | string[]; + repetition_penalty?: number; /** - * When provided with too long context should the model error out or truncate the context to fit? + * Decreases the likelihood of the model repeating the same lines verbatim. 
*/ - truncate_inputs?: boolean; -} -type Ai_Cf_Baai_Bge_M3_Output = BGEM3OuputQuery | BGEM3OutputEmbeddingForContexts | BGEM3OuputEmbedding | AsyncResponse; -interface BGEM3OuputQuery { - response?: { - /** - * Index of the context in the request - */ - id?: number; - /** - * Score of the context under the index. - */ - score?: number; - }[]; -} -interface BGEM3OutputEmbeddingForContexts { - response?: number[][]; - shape?: number[]; + frequency_penalty?: number; /** - * The pooling method used in the embedding process. + * Increases the likelihood of the model introducing new topics. */ - pooling?: "mean" | "cls"; + presence_penalty?: number; } -interface BGEM3OuputEmbedding { - shape?: number[]; - /** - * Embeddings of the requested text values - */ - data?: number[][]; +type Ai_Cf_Qwen_Qwq_32B_Output = { /** - * The pooling method used in the embedding process. + * The generated text response from the model */ - pooling?: "mean" | "cls"; -} -declare abstract class Base_Ai_Cf_Baai_Bge_M3 { - inputs: Ai_Cf_Baai_Bge_M3_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output; -} -interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input { + response: string; /** - * A text description of the image you want to generate. + * Usage statistics for the inference request */ - prompt: string; + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; /** - * The number of diffusion steps; higher values can improve quality but take longer. 
+ * An array of tool calls requests made during the response generation */ - steps?: number; + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwq_32B { + inputs: Ai_Cf_Qwen_Qwq_32B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output; } -interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output { +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input = Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages; +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt { /** - * The generated image in Base64 format. + * The input text prompt for the model to generate a response. */ - image?: string; -} -declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell { - inputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input; - postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output; -} -type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input = Prompt | Messages; -interface Prompt { + prompt: string; /** - * The input text prompt for the model to generate a response. + * JSON schema that should be fulfilled for the response. */ - prompt: string; - image?: number[] | (string & NonNullable); + guided_json?: object; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -3413,12 +6548,8 @@ interface Prompt { * Increases the likelihood of the model introducing new topics. */ presence_penalty?: number; - /** - * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. - */ - lora?: string; } -interface Messages { +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages { /** * An array of message objects representing the conversation history. 
*/ @@ -3457,7 +6588,6 @@ interface Messages { }; }; }[]; - image?: number[] | (string & NonNullable); functions?: { name: string; code: string; @@ -3550,7 +6680,15 @@ interface Messages { }; })[]; /** - * If true, the response will be streamed back incrementally. + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ stream?: boolean; /** @@ -3562,7 +6700,7 @@ interface Messages { */ temperature?: number; /** - * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. 
*/ top_p?: number; /** @@ -3586,11 +6724,28 @@ interface Messages { */ presence_penalty?: number; } -type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { /** * The generated text response from the model */ - response?: string; + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; /** * An array of tool calls requests made during the response generation */ @@ -3605,21 +6760,20 @@ type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { name?: string; }[]; }; -declare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct { - inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output; +declare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct { + inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output; } -type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input = Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt | Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages | AsyncBatch; -interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { +type Ai_Cf_Google_Gemma_3_12B_It_Input = Ai_Cf_Google_Gemma_3_12B_It_Prompt | Ai_Cf_Google_Gemma_3_12B_It_Messages; +interface Ai_Cf_Google_Gemma_3_12B_It_Prompt { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + * JSON schema that should be fufilled for the response. 
*/ - lora?: string; - response_format?: JSONMode; + guided_json?: object; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -3661,11 +6815,7 @@ interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { */ presence_penalty?: number; } -interface JSONMode { - type?: "json_object" | "json_schema"; - json_schema?: unknown; -} -interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { +interface Ai_Cf_Google_Gemma_3_12B_It_Messages { /** * An array of message objects representing the conversation history. */ @@ -3673,11 +6823,20 @@ interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role: string; - /** - * The content of the message as a string. - */ - content: string; + role?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[]; }[]; functions?: { name: string; @@ -3770,7 +6929,10 @@ interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { }; }; })[]; - response_format?: JSONMode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -3812,105 +6974,245 @@ interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { */ presence_penalty?: number; } -interface AsyncBatch { - requests?: { - /** - * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique. 
- */ - external_reference?: string; +type Ai_Cf_Google_Gemma_3_12B_It_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { /** - * Prompt for the text generation model + * Total number of tokens in input */ - prompt?: string; + prompt_tokens?: number; /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Total number of tokens in output */ - stream?: boolean; + completion_tokens?: number; /** - * The maximum number of tokens to generate in the response. + * Total number of input and output tokens */ - max_tokens?: number; + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { /** - * Controls the randomness of the output; higher values produce more random results. + * The arguments passed to be passed to the tool call request */ - temperature?: number; + arguments?: object; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * The name of the tool to be called */ - top_p?: number; + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It { + inputs: Ai_Cf_Google_Gemma_3_12B_It_Input; + postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch; +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { /** - * Random seed for reproducibility of the generation. + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - seed?: number; + role?: string; /** - * Penalty for repeated tokens; higher values discourage repetition. 
+ * The tool call id. If you don't know what to put here you can fall back to 000000001 */ - repetition_penalty?: number; + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * The name of the tool. More descriptive the better. */ - frequency_penalty?: number; + name: string; /** - * Increases the likelihood of the model introducing new topics. + * A brief description of what the tool does. */ - presence_penalty?: number; - response_format?: JSONMode; - }[]; -} -type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output = { - /** - * The generated text response from the model - */ - response: string; - /** - * Usage statistics for the inference request - */ - usage?: { + description: string; /** - * Total number of tokens in input + * Schema defining the parameters accepted by the tool. */ - prompt_tokens?: number; + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. 
+ */ + description: string; + }; + }; + }; + } | { /** - * Total number of tokens in output + * Specifies the type of tool (e.g., 'function'). */ - completion_tokens?: number; + type: string; /** - * Total number of input and output tokens + * Details of the function tool. */ - total_tokens?: number; - }; + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; /** - * An array of tool calls requests made during the response generation + * JSON schema that should be fufilled for the response. */ - tool_calls?: { - /** - * The arguments passed to be passed to the tool call request - */ - arguments?: object; - /** - * The name of the tool to be called - */ - name?: string; - }[]; -} | string | AsyncResponse; -declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast { - inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output; -} -interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { + guided_json?: object; /** - * An array of message objects representing the conversation history. + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ - messages: { - /** - * The role of the message sender must alternate between 'user' and 'assistant'. 
- */ - role: "user" | "assistant"; - /** - * The content of the message as a string. - */ - content: string; - }[]; + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; /** * The maximum number of tokens to generate in the response. */ @@ -3920,93 +7222,43 @@ interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { */ temperature?: number; /** - * Dictate the output format of the generated response. + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. */ - response_format?: { - /** - * Set to json_object to process and output generated text as JSON. - */ - type?: string; - }; -} -interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { - response?: string | { - /** - * Whether the conversation is safe or not. - */ - safe?: boolean; - /** - * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. - */ - categories?: string[]; - }; + top_p?: number; /** - * Usage statistics for the inference request + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. */ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; -} -declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { - inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; -} -interface Ai_Cf_Baai_Bge_Reranker_Base_Input { + top_k?: number; /** - * A query you wish to perform against the provided contexts. + * Random seed for reproducibility of the generation. 
*/ + seed?: number; /** - * Number of returned results starting with the best score. + * Penalty for repeated tokens; higher values discourage repetition. */ - top_k?: number; + repetition_penalty?: number; /** - * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + * Decreases the likelihood of the model repeating the same lines verbatim. */ - contexts: { - /** - * One of the provided context content - */ - text?: string; - }[]; -} -interface Ai_Cf_Baai_Bge_Reranker_Base_Output { - response?: { - /** - * Index of the context in the request - */ - id?: number; - /** - * Score of the context under the index. - */ - score?: number; - }[]; + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; } -declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { - inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch { + requests: (Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner)[]; } -type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input = Qwen2_5_Coder_32B_Instruct_Prompt | Qwen2_5_Coder_32B_Instruct_Messages; -interface Qwen2_5_Coder_32B_Instruct_Prompt { +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + * JSON schema that should be fulfilled for the response. */ - lora?: string; - response_format?: JSONMode; + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ @@ -4048,7 +7300,7 @@ interface Qwen2_5_Coder_32B_Instruct_Prompt { */ presence_penalty?: number; } -interface Qwen2_5_Coder_32B_Instruct_Messages { +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner { /** * An array of message objects representing the conversation history. */ @@ -4056,11 +7308,36 @@ interface Qwen2_5_Coder_32B_Instruct_Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role: string; + role?: string; /** - * The content of the message as a string. + * The tool call id. If you don't know what to put here you can fall back to 000000001 */ - content: string; + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; }[]; functions?: { name: string; @@ -4153,7 +7430,11 @@ interface Qwen2_5_Coder_32B_Instruct_Messages { }; }; })[]; - response_format?: JSONMode; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ @@ -4195,7 +7476,7 @@ interface Qwen2_5_Coder_32B_Instruct_Messages { */ presence_penalty?: number; } -type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { /** * The generated text response from the model */ @@ -4222,29 +7503,43 @@ type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { */ tool_calls?: { /** - * The arguments passed to be passed to the tool call request + * The tool call id. */ - arguments?: object; + id?: string; /** - * The name of the tool to be called + * Specifies the type of tool (e.g., 'function'). */ - name?: string; + type?: string; + /** + * Details of the function tool. + */ + function?: { + /** + * The name of the tool to be called + */ + name?: string; + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + }; }[]; }; -declare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct { - inputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input; - postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output; +declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { + inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; } -type Ai_Cf_Qwen_Qwq_32B_Input = Qwen_Qwq_32B_Prompt | Qwen_Qwq_32B_Messages; -interface Qwen_Qwq_32B_Prompt { +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input = Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * JSON schema that should be fulfilled for the response. + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. 
*/ - guided_json?: object; + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -4286,7 +7581,11 @@ interface Qwen_Qwq_32B_Prompt { */ presence_penalty?: number; } -interface Qwen_Qwq_32B_Messages { +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages { /** * An array of message objects representing the conversation history. */ @@ -4294,36 +7593,17 @@ interface Qwen_Qwq_32B_Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role?: string; - /** - * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 - */ - tool_call_id?: string; - content?: string | { + role: string; + content: string | { /** - * Type of the content provided + * Type of the content (text) */ type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { /** - * Type of the content provided + * Text content */ - type?: string; text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; + }[]; }[]; functions?: { name: string; @@ -4416,10 +7696,7 @@ interface Qwen_Qwq_32B_Messages { }; }; })[]; - /** - * JSON schema that should be fufilled for the response. - */ - guided_json?: object; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ @@ -4461,56 +7738,23 @@ interface Qwen_Qwq_32B_Messages { */ presence_penalty?: number; } -type Ai_Cf_Qwen_Qwq_32B_Output = { - /** - * The generated text response from the model - */ - response: string; - /** - * Usage statistics for the inference request - */ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; - /** - * An array of tool calls requests made during the response generation - */ - tool_calls?: { - /** - * The arguments passed to be passed to the tool call request - */ - arguments?: object; - /** - * The name of the tool to be called - */ - name?: string; - }[]; -}; -declare abstract class Base_Ai_Cf_Qwen_Qwq_32B { - inputs: Ai_Cf_Qwen_Qwq_32B_Input; - postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; } -type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input = Mistral_Small_3_1_24B_Instruct_Prompt | Mistral_Small_3_1_24B_Instruct_Messages; -interface Mistral_Small_3_1_24B_Instruct_Prompt { +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch { + requests: (Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1)[]; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * JSON schema that should be fulfilled for the response. + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. */ - guided_json?: object; + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ @@ -4552,7 +7796,11 @@ interface Mistral_Small_3_1_24B_Instruct_Prompt { */ presence_penalty?: number; } -interface Mistral_Small_3_1_24B_Instruct_Messages { +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1 { /** * An array of message objects representing the conversation history. */ @@ -4560,36 +7808,17 @@ interface Mistral_Small_3_1_24B_Instruct_Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role?: string; - /** - * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 - */ - tool_call_id?: string; - content?: string | { + role: string; + content: string | { /** - * Type of the content provided + * Type of the content (text) */ type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { /** - * Type of the content provided + * Text content */ - type?: string; text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; + }[]; }[]; functions?: { name: string; @@ -4682,10 +7911,7 @@ interface Mistral_Small_3_1_24B_Instruct_Messages { }; }; })[]; - /** - * JSON schema that should be fufilled for the response. - */ - guided_json?: object; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
*/ @@ -4727,11 +7953,89 @@ interface Mistral_Small_3_1_24B_Instruct_Messages { */ presence_penalty?: number; } -type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output = Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response | string | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response { /** - * The generated text response from the model + * Unique identifier for the completion */ - response: string; + id?: string; + /** + * Object type identifier + */ + object?: "chat.completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: "function"; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments for the function + */ + arguments: string; + }; + }[]; + }; + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; /** * Usage statistics for the inference request */ @@ -4750,526 +8054,471 @@ type 
Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { total_tokens?: number; }; /** - * An array of tool calls requests made during the response generation + * Log probabilities for the prompt (if requested) */ - tool_calls?: { + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: "text_completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { /** - * The arguments passed to be passed to the tool call request + * Index of the choice in the list */ - arguments?: object; + index: number; /** - * The name of the tool to be called + * The generated text completion */ - name?: string; + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; }[]; -}; -declare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct { - inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input; - postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; } -type Ai_Cf_Google_Gemma_3_12B_It_Input = Google_Gemma_3_12B_It_Prompt | Google_Gemma_3_12B_It_Messages; -interface Google_Gemma_3_12B_It_Prompt { +declare abstract class Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8 { + inputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output; +} +interface Ai_Cf_Deepgram_Nova_3_Input { + audio: { + body: object; + contentType: string; + }; + /** + * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param. + */ + custom_topic_mode?: "extended" | "strict"; + /** + * Custom topics you want the model to detect within your input audio or text if present Submit up to 100 + */ + custom_topic?: string; + /** + * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param + */ + custom_intent_mode?: "extended" | "strict"; + /** + * Custom intents you want the model to detect within your input audio if present + */ + custom_intent?: string; + /** + * Identifies and extracts key entities from content in submitted audio + */ + detect_entities?: boolean; + /** + * Identifies the dominant language spoken in submitted audio + */ + detect_language?: boolean; + /** + * Recognize speaker changes. 
Each word in the transcript will be assigned a speaker number starting at 0 + */ + diarize?: boolean; + /** + * Identify and extract key entities from content in submitted audio + */ + dictation?: boolean; + /** + * Specify the expected encoding of your submitted audio + */ + encoding?: "linear16" | "flac" | "mulaw" | "amr-nb" | "amr-wb" | "opus" | "speex" | "g729"; + /** + * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing + */ + extra?: string; + /** + * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um' + */ + filler_words?: boolean; + /** + * Key term prompting can boost or suppress specialized terminology and brands. + */ + keyterm?: string; + /** + * Keywords can boost or suppress specialized terminology and brands. + */ + keywords?: string; /** - * The input text prompt for the model to generate a response. + * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available. */ - prompt: string; + language?: string; /** - * JSON schema that should be fufilled for the response. + * Spoken measurements will be converted to their corresponding abbreviations. */ - guided_json?: object; + measurements?: boolean; /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip. */ - raw?: boolean; + mip_opt_out?: boolean; /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio */ - stream?: boolean; + mode?: "general" | "medical" | "finance"; /** - * The maximum number of tokens to generate in the response. 
+ * Transcribe each audio channel independently. */ - max_tokens?: number; + multichannel?: boolean; /** - * Controls the randomness of the output; higher values produce more random results. + * Numerals converts numbers from written format to numerical format. */ - temperature?: number; + numerals?: boolean; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * Splits audio into paragraphs to improve transcript readability. */ - top_p?: number; + paragraphs?: boolean; /** - * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely. */ - top_k?: number; + profanity_filter?: boolean; /** - * Random seed for reproducibility of the generation. + * Add punctuation and capitalization to the transcript. */ - seed?: number; + punctuate?: boolean; /** - * Penalty for repeated tokens; higher values discourage repetition. + * Redaction removes sensitive information from your transcripts. */ - repetition_penalty?: number; + redact?: string; /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * Search for terms or phrases in submitted audio and replaces them. */ - frequency_penalty?: number; + replace?: string; /** - * Increases the likelihood of the model introducing new topics. + * Search for terms or phrases in submitted audio. */ - presence_penalty?: number; -} -interface Google_Gemma_3_12B_It_Messages { + search?: string; /** - * An array of message objects representing the conversation history. + * Recognizes the sentiment throughout a transcript or text. 
*/ - messages: { - /** - * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). - */ - role?: string; - content?: string | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; - }[]; - functions?: { - name: string; - code: string; - }[]; + sentiment?: boolean; /** - * A list of tools available for the assistant to use. + * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability. */ - tools?: ({ - /** - * The name of the tool. More descriptive the better. - */ - name: string; - /** - * A brief description of what the tool does. - */ - description: string; - /** - * Schema defining the parameters accepted by the tool. - */ - parameters: { - /** - * The type of the parameters object (usually 'object'). - */ - type: string; - /** - * List of required parameter names. - */ - required?: string[]; - /** - * Definitions of each parameter. - */ - properties: { - [k: string]: { - /** - * The data type of the parameter. - */ - type: string; - /** - * A description of the expected parameter. - */ - description: string; - }; - }; - }; - } | { - /** - * Specifies the type of tool (e.g., 'function'). - */ - type: string; - /** - * Details of the function tool. - */ - function: { - /** - * The name of the function. - */ - name: string; - /** - * A brief description of what the function does. - */ - description: string; - /** - * Schema defining the parameters accepted by the function. - */ - parameters: { - /** - * The type of the parameters object (usually 'object'). 
- */ - type: string; - /** - * List of required parameter names. - */ - required?: string[]; - /** - * Definitions of each parameter. - */ - properties: { - [k: string]: { - /** - * The data type of the parameter. - */ - type: string; - /** - * A description of the expected parameter. - */ - description: string; - }; - }; - }; - }; - })[]; + smart_format?: boolean; /** - * JSON schema that should be fufilled for the response. + * Detect topics throughout a transcript or text. */ - guided_json?: object; + topics?: boolean; /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * Segments speech into meaningful semantic units. */ - raw?: boolean; + utterances?: boolean; /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Seconds to wait before detecting a pause between words in submitted audio. */ - stream?: boolean; + utt_split?: number; /** - * The maximum number of tokens to generate in the response. + * The number of channels in the submitted audio */ - max_tokens?: number; + channels?: number; /** - * Controls the randomness of the output; higher values produce more random results. + * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets. */ - temperature?: number; + interim_results?: boolean; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. 
When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing */ - top_p?: number; + endpointing?: string; /** - * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets. */ - top_k?: number; + vad_events?: boolean; /** - * Random seed for reproducibility of the generation. + * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets. */ - seed?: number; + utterance_end_ms?: boolean; +} +interface Ai_Cf_Deepgram_Nova_3_Output { + results?: { + channels?: { + alternatives?: { + confidence?: number; + transcript?: string; + words?: { + confidence?: number; + end?: number; + start?: number; + word?: string; + }[]; + }[]; + }[]; + summary?: { + result?: string; + short?: string; + }; + sentiments?: { + segments?: { + text?: string; + start_word?: number; + end_word?: number; + sentiment?: string; + sentiment_score?: number; + }[]; + average?: { + sentiment?: string; + sentiment_score?: number; + }; + }; + }; +} +declare abstract class Base_Ai_Cf_Deepgram_Nova_3 { + inputs: Ai_Cf_Deepgram_Nova_3_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input { + queries?: string | string[]; /** - * Penalty for repeated tokens; higher values discourage repetition. 
+ * Optional instruction for the task + */ + instruction?: string; + documents?: string | string[]; + text?: string | string[]; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output { + data?: number[][]; + shape?: number[]; +} +declare abstract class Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B { + inputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output; +} +type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input = { + /** + * readable stream with audio data and content-type specified for that data */ - repetition_penalty?: number; + audio: { + body: object; + contentType: string; + }; /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * type of data PCM data that's sent to the inference server as raw array */ - frequency_penalty?: number; + dtype?: "uint8" | "float32" | "float64"; +} | { /** - * Increases the likelihood of the model introducing new topics. + * base64 encoded audio data */ - presence_penalty?: number; -} -type Ai_Cf_Google_Gemma_3_12B_It_Output = { + audio: string; /** - * The generated text response from the model + * type of data PCM data that's sent to the inference server as raw array */ - response: string; + dtype?: "uint8" | "float32" | "float64"; +}; +interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output { /** - * Usage statistics for the inference request + * if true, end-of-turn was detected */ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; + is_complete?: boolean; /** - * An array of tool calls requests made during the response generation + * probability of the end-of-turn detection */ - tool_calls?: { - /** - * The arguments passed to be passed to the tool call request - */ - arguments?: object; - /** - * The name of the tool to be called - */ - name?: string; - }[]; -}; -declare 
abstract class Base_Ai_Cf_Google_Gemma_3_12B_It { - inputs: Ai_Cf_Google_Gemma_3_12B_It_Input; - postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output; + probability?: number; +} +declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 { + inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input; + postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B { + inputs: XOR; + postProcessedOutputs: XOR; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B { + inputs: XOR; + postProcessedOutputs: XOR; } -type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = Ai_Cf_Meta_Llama_4_Prompt | Ai_Cf_Meta_Llama_4_Messages | Ai_Cf_Meta_Llama_4_Async_Batch; -interface Ai_Cf_Meta_Llama_4_Prompt { +interface Ai_Cf_Leonardo_Phoenix_1_0_Input { /** - * The input text prompt for the model to generate a response. + * A text description of the image you want to generate. */ prompt: string; /** - * JSON schema that should be fulfilled for the response. - */ - guided_json?: object; - response_format?: JSONMode; - /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt */ - raw?: boolean; + guidance?: number; /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Random seed for reproducibility of the image generation */ - stream?: boolean; + seed?: number; /** - * The maximum number of tokens to generate in the response. + * The height of the generated image in pixels */ - max_tokens?: number; + height?: number; /** - * Controls the randomness of the output; higher values produce more random results. + * The width of the generated image in pixels */ - temperature?: number; + width?: number; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * The number of diffusion steps; higher values can improve quality but take longer */ - top_p?: number; + num_steps?: number; /** - * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + * Specify what to exclude from the generated images */ - top_k?: number; + negative_prompt?: string; +} +/** + * The generated image in JPEG format + */ +type Ai_Cf_Leonardo_Phoenix_1_0_Output = string; +declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 { + inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Input { /** - * Random seed for reproducibility of the generation. + * A text description of the image you want to generate. */ - seed?: number; + prompt: string; /** - * Penalty for repeated tokens; higher values discourage repetition. + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt */ - repetition_penalty?: number; + guidance?: number; /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * Random seed for reproducibility of the image generation */ - frequency_penalty?: number; + seed?: number; /** - * Increases the likelihood of the model introducing new topics. + * The height of the generated image in pixels */ - presence_penalty?: number; -} -interface Ai_Cf_Meta_Llama_4_Messages { + height?: number; /** - * An array of message objects representing the conversation history. + * The width of the generated image in pixels */ - messages: { - /** - * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). - */ - role?: string; - /** - * The tool call id. 
If you don't know what to put here you can fall back to 000000001 - */ - tool_call_id?: string; - content?: string | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; - }[]; - functions?: { - name: string; - code: string; - }[]; + width?: number; /** - * A list of tools available for the assistant to use. + * The number of diffusion steps; higher values can improve quality but take longer */ - tools?: ({ - /** - * The name of the tool. More descriptive the better. - */ - name: string; - /** - * A brief description of what the tool does. - */ - description: string; - /** - * Schema defining the parameters accepted by the tool. - */ - parameters: { - /** - * The type of the parameters object (usually 'object'). - */ - type: string; - /** - * List of required parameter names. - */ - required?: string[]; - /** - * Definitions of each parameter. - */ - properties: { - [k: string]: { - /** - * The data type of the parameter. - */ - type: string; - /** - * A description of the expected parameter. - */ - description: string; - }; - }; - }; - } | { - /** - * Specifies the type of tool (e.g., 'function'). - */ - type: string; - /** - * Details of the function tool. - */ - function: { - /** - * The name of the function. - */ - name: string; - /** - * A brief description of what the function does. - */ - description: string; - /** - * Schema defining the parameters accepted by the function. - */ - parameters: { - /** - * The type of the parameters object (usually 'object'). - */ - type: string; - /** - * List of required parameter names. 
- */ - required?: string[]; - /** - * Definitions of each parameter. - */ - properties: { - [k: string]: { - /** - * The data type of the parameter. - */ - type: string; - /** - * A description of the expected parameter. - */ - description: string; - }; - }; - }; - }; - })[]; - response_format?: JSONMode; + num_steps?: number; /** - * JSON schema that should be fufilled for the response. + * The number of diffusion steps; higher values can improve quality but take longer */ - guided_json?: object; + steps?: number; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Output { /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * The generated image in Base64 format. */ - raw?: boolean; + image?: string; +} +declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin { + inputs: Ai_Cf_Leonardo_Lucid_Origin_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output; +} +interface Ai_Cf_Deepgram_Aura_1_Input { /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Speaker used to produce the audio. */ - stream?: boolean; + speaker?: "angus" | "asteria" | "arcas" | "orion" | "orpheus" | "athena" | "luna" | "zeus" | "perseus" | "helios" | "hera" | "stella"; /** - * The maximum number of tokens to generate in the response. + * Encoding of the output audio. */ - max_tokens?: number; + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; /** - * Controls the randomness of the output; higher values produce more random results. + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. */ - temperature?: number; + container?: "none" | "wav" | "ogg"; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. 
+ * The text content to be converted to speech */ - top_p?: number; + text: string; /** - * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable */ - top_k?: number; + sample_rate?: number; /** - * Random seed for reproducibility of the generation. + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. */ - seed?: number; + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_1_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_1 { + inputs: Ai_Cf_Deepgram_Aura_1_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input { /** - * Penalty for repeated tokens; higher values discourage repetition. + * Input text to translate. Can be a single string or a list of strings. */ - repetition_penalty?: number; + text: string | string[]; /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * Target langauge to translate to */ - frequency_penalty?: number; + target_language: "asm_Beng" | "awa_Deva" | "ben_Beng" | "bho_Deva" | "brx_Deva" | "doi_Deva" | "eng_Latn" | "gom_Deva" | "gon_Deva" | "guj_Gujr" | "hin_Deva" | "hne_Deva" | "kan_Knda" | "kas_Arab" | "kas_Deva" | "kha_Latn" | "lus_Latn" | "mag_Deva" | "mai_Deva" | "mal_Mlym" | "mar_Deva" | "mni_Beng" | "mni_Mtei" | "npi_Deva" | "ory_Orya" | "pan_Guru" | "san_Deva" | "sat_Olck" | "snd_Arab" | "snd_Deva" | "tam_Taml" | "tel_Telu" | "urd_Arab" | "unr_Deva"; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output { /** - * Increases the likelihood of the model introducing new topics. 
+ * Translated texts */ - presence_penalty?: number; + translations: string[]; } -interface Ai_Cf_Meta_Llama_4_Async_Batch { - requests: (Ai_Cf_Meta_Llama_4_Prompt_Inner | Ai_Cf_Meta_Llama_4_Messages_Inner)[]; +declare abstract class Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B { + inputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input; + postProcessedOutputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output; } -interface Ai_Cf_Meta_Llama_4_Prompt_Inner { +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input = Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * JSON schema that should be fulfilled for the response. + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. */ - guided_json?: object; - response_format?: JSONMode; + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -5311,7 +8560,11 @@ interface Ai_Cf_Meta_Llama_4_Prompt_Inner { */ presence_penalty?: number; } -interface Ai_Cf_Meta_Llama_4_Messages_Inner { +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages { /** * An array of message objects representing the conversation history. */ @@ -5319,36 +8572,17 @@ interface Ai_Cf_Meta_Llama_4_Messages_Inner { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role?: string; - /** - * The tool call id. 
If you don't know what to put here you can fall back to 000000001 - */ - tool_call_id?: string; - content?: string | { + role: string; + content: string | { /** - * Type of the content provided + * Type of the content (text) */ type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { /** - * Type of the content provided + * Text content */ - type?: string; text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; + }[]; }[]; functions?: { name: string; @@ -5441,11 +8675,7 @@ interface Ai_Cf_Meta_Llama_4_Messages_Inner { }; }; })[]; - response_format?: JSONMode; - /** - * JSON schema that should be fufilled for the response. - */ - guided_json?: object; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -5487,441 +8717,553 @@ interface Ai_Cf_Meta_Llama_4_Messages_Inner { */ presence_penalty?: number; } -type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { - /** - * The generated text response from the model - */ - response: string; - /** - * Usage statistics for the inference request - */ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; - /** - * An array of tool calls requests made during the response generation - */ - tool_calls?: { - /** - * The tool call id. - */ - id?: string; - /** - * Specifies the type of tool (e.g., 'function'). - */ - type?: string; - /** - * Details of the function tool. 
- */ - function?: { - /** - * The name of the tool to be called - */ - name?: string; - /** - * The arguments passed to be passed to the tool call request - */ - arguments?: object; - }; - }[]; -}; -declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { - inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; } -interface Ai_Cf_Deepgram_Nova_3_Input { - audio: { - body: object; - contentType: string; - }; - /** - * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param. - */ - custom_topic_mode?: "extended" | "strict"; - /** - * Custom topics you want the model to detect within your input audio or text if present Submit up to 100 - */ - custom_topic?: string; - /** - * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. 
When extended, the model will return its own detected intents in addition those submitted using the custom_intents param - */ - custom_intent_mode?: "extended" | "strict"; - /** - * Custom intents you want the model to detect within your input audio if present - */ - custom_intent?: string; - /** - * Identifies and extracts key entities from content in submitted audio - */ - detect_entities?: boolean; - /** - * Identifies the dominant language spoken in submitted audio - */ - detect_language?: boolean; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch { + requests: (Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1)[]; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 { /** - * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + * The input text prompt for the model to generate a response. */ - diarize?: boolean; + prompt: string; /** - * Identify and extract key entities from content in submitted audio + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. */ - dictation?: boolean; + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2; /** - * Specify the expected encoding of your submitted audio + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ - encoding?: "linear16" | "flac" | "mulaw" | "amr-nb" | "amr-wb" | "opus" | "speex" | "g729"; + raw?: boolean; /** - * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ - extra?: string; + stream?: boolean; /** - * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um' + * The maximum number of tokens to generate in the response. 
*/ - filler_words?: boolean; + max_tokens?: number; /** - * Key term prompting can boost or suppress specialized terminology and brands. + * Controls the randomness of the output; higher values produce more random results. */ - keyterm?: string; + temperature?: number; /** - * Keywords can boost or suppress specialized terminology and brands. + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. */ - keywords?: string; + top_p?: number; /** - * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available. + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. */ - language?: string; + top_k?: number; /** - * Spoken measurements will be converted to their corresponding abbreviations. + * Random seed for reproducibility of the generation. */ - measurements?: boolean; + seed?: number; /** - * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip. + * Penalty for repeated tokens; higher values discourage repetition. */ - mip_opt_out?: boolean; + repetition_penalty?: number; /** - * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio + * Decreases the likelihood of the model repeating the same lines verbatim. */ - mode?: "general" | "medical" | "finance"; + frequency_penalty?: number; /** - * Transcribe each audio channel independently. + * Increases the likelihood of the model introducing new topics. 
*/ - multichannel?: boolean; + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1 { /** - * Numerals converts numbers from written format to numerical format. + * An array of message objects representing the conversation history. */ - numerals?: boolean; + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; /** - * Splits audio into paragraphs to improve transcript readability. + * A list of tools available for the assistant to use. */ - paragraphs?: boolean; + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). 
+ */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3; /** - * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely. + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ - profanity_filter?: boolean; + raw?: boolean; /** - * Add punctuation and capitalization to the transcript. + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. */ - punctuate?: boolean; + stream?: boolean; /** - * Redaction removes sensitive information from your transcripts. + * The maximum number of tokens to generate in the response. */ - redact?: string; + max_tokens?: number; /** - * Search for terms or phrases in submitted audio and replaces them. + * Controls the randomness of the output; higher values produce more random results. */ - replace?: string; + temperature?: number; /** - * Search for terms or phrases in submitted audio. + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. */ - search?: string; + top_p?: number; /** - * Recognizes the sentiment throughout a transcript or text. + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. */ - sentiment?: boolean; + top_k?: number; /** - * Apply formatting to transcript output. 
When set to true, additional formatting will be applied to transcripts to improve readability. + * Random seed for reproducibility of the generation. */ - smart_format?: boolean; + seed?: number; /** - * Detect topics throughout a transcript or text. + * Penalty for repeated tokens; higher values discourage repetition. */ - topics?: boolean; + repetition_penalty?: number; /** - * Segments speech into meaningful semantic units. + * Decreases the likelihood of the model repeating the same lines verbatim. */ - utterances?: boolean; + frequency_penalty?: number; /** - * Seconds to wait before detecting a pause between words in submitted audio. + * Increases the likelihood of the model introducing new topics. */ - utt_split?: number; + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output = Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response | string | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response { /** - * The number of channels in the submitted audio + * Unique identifier for the completion */ - channels?: number; + id?: string; /** - * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets. + * Object type identifier */ - interim_results?: boolean; + object?: "chat.completion"; /** - * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. 
When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing + * Unix timestamp of when the completion was created */ - endpointing?: string; + created?: number; /** - * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets. + * Model used for the completion */ - vad_events?: boolean; + model?: string; /** - * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets. + * List of completion choices */ - utterance_end_ms?: boolean; -} -interface Ai_Cf_Deepgram_Nova_3_Output { - results?: { - channels?: { - alternatives?: { - confidence?: number; - transcript?: string; - words?: { - confidence?: number; - end?: number; - start?: number; - word?: string; - }[]; - }[]; - }[]; - summary?: { - result?: string; - short?: string; - }; - sentiments?: { - segments?: { - text?: string; - start_word?: number; - end_word?: number; - sentiment?: string; - sentiment_score?: number; + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: "function"; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments for the function + */ + arguments: string; + }; }[]; - average?: { - sentiment?: string; - sentiment_score?: 
number; - }; }; - }; -} -declare abstract class Base_Ai_Cf_Deepgram_Nova_3 { - inputs: Ai_Cf_Deepgram_Nova_3_Input; - postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output; -} -type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input = { + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; /** - * readable stream with audio data and content-type specified for that data + * Usage statistics for the inference request */ - audio: { - body: object; - contentType: string; + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; }; /** - * type of data PCM data that's sent to the inference server as raw array + * Log probabilities for the prompt (if requested) */ - dtype?: "uint8" | "float32" | "float64"; -} | { + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response { /** - * base64 encoded audio data + * Unique identifier for the completion */ - audio: string; + id?: string; /** - * type of data PCM data that's sent to the inference server as raw array + * Object type identifier */ - dtype?: "uint8" | "float32" | "float64"; -}; -interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output { + object?: "text_completion"; /** - * if true, end-of-turn was detected + * Unix timestamp of when the completion was created */ - is_complete?: boolean; + created?: number; /** - * probability of the end-of-turn detection + * Model used for the completion */ - probability?: number; -} -declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 { - inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input; - postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output; -} -type Ai_Cf_Openai_Gpt_Oss_120B_Input = 
GPT_OSS_120B_Responses | GPT_OSS_120B_Responses_Async; -interface GPT_OSS_120B_Responses { + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index: number; + /** + * The generated text completion + */ + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; + }[]; /** - * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + * Usage statistics for the inference request */ - input: string | unknown[]; - reasoning?: { + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; /** - * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + * Total number of tokens in output */ - effort?: "low" | "medium" | "high"; + completion_tokens?: number; /** - * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. + * Total number of input and output tokens */ - summary?: "auto" | "concise" | "detailed"; + total_tokens?: number; }; } -interface GPT_OSS_120B_Responses_Async { - requests: { - /** - * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types - */ - input: string | unknown[]; - reasoning?: { - /** - * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. 
- */ - effort?: "low" | "medium" | "high"; - /** - * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. - */ - summary?: "auto" | "concise" | "detailed"; - }; - }[]; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; } -type Ai_Cf_Openai_Gpt_Oss_120B_Output = {} | (string & NonNullable); -declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B { - inputs: Ai_Cf_Openai_Gpt_Oss_120B_Input; - postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_120B_Output; +declare abstract class Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It { + inputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input; + postProcessedOutputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output; } -type Ai_Cf_Openai_Gpt_Oss_20B_Input = GPT_OSS_20B_Responses | GPT_OSS_20B_Responses_Async; -interface GPT_OSS_20B_Responses { +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Input { /** - * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + * Input text to embed. Can be a single string or a list of strings. */ - input: string | unknown[]; - reasoning?: { - /** - * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. - */ - effort?: "low" | "medium" | "high"; - /** - * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. - */ - summary?: "auto" | "concise" | "detailed"; - }; + text: string | string[]; } -interface GPT_OSS_20B_Responses_Async { - requests: { - /** - * Responses API Input messages. 
Refer to OpenAI Responses API docs to learn more about supported content types - */ - input: string | unknown[]; - reasoning?: { - /** - * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. - */ - effort?: "low" | "medium" | "high"; - /** - * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. - */ - summary?: "auto" | "concise" | "detailed"; - }; - }[]; +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Output { + /** + * Embedding vectors, where each vector is a list of floats. + */ + data: number[][]; + /** + * Shape of the embedding data as [number_of_embeddings, embedding_dimension]. + * + * @minItems 2 + * @maxItems 2 + */ + shape: [ + number, + number + ]; } -type Ai_Cf_Openai_Gpt_Oss_20B_Output = {} | (string & NonNullable); -declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B { - inputs: Ai_Cf_Openai_Gpt_Oss_20B_Input; - postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_20B_Output; +declare abstract class Base_Ai_Cf_Pfnet_Plamo_Embedding_1B { + inputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Input; + postProcessedOutputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Output; } -interface Ai_Cf_Leonardo_Phoenix_1_0_Input { +interface Ai_Cf_Deepgram_Flux_Input { /** - * A text description of the image you want to generate. + * Encoding of the audio stream. Currently only supports raw signed little-endian 16-bit PCM. */ - prompt: string; + encoding: "linear16"; /** - * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + * Sample rate of the audio stream in Hz. */ - guidance?: number; + sample_rate: string; /** - * Random seed for reproducibility of the image generation + * End-of-turn confidence required to fire an eager end-of-turn event. 
When set, enables EagerEndOfTurn and TurnResumed events. Valid Values 0.3 - 0.9. */ - seed?: number; + eager_eot_threshold?: string; /** - * The height of the generated image in pixels + * End-of-turn confidence required to finish a turn. Valid Values 0.5 - 0.9. */ - height?: number; + eot_threshold?: string; /** - * The width of the generated image in pixels + * A turn will be finished when this much time has passed after speech, regardless of EOT confidence. */ - width?: number; + eot_timeout_ms?: string; /** - * The number of diffusion steps; higher values can improve quality but take longer + * Keyterm prompting can improve recognition of specialized terminology. Pass multiple keyterm query parameters to boost multiple keyterms. */ - num_steps?: number; + keyterm?: string; /** - * Specify what to exclude from the generated images + * Opts out requests from the Deepgram Model Improvement Program. Refer to Deepgram Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip */ - negative_prompt?: string; + mip_opt_out?: "true" | "false"; + /** + * Label your requests for the purpose of identification during usage reporting + */ + tag?: string; } /** - * The generated image in JPEG format + * Output will be returned as websocket messages. */ -type Ai_Cf_Leonardo_Phoenix_1_0_Output = string; -declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 { - inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input; - postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output; -} -interface Ai_Cf_Leonardo_Lucid_Origin_Input { +interface Ai_Cf_Deepgram_Flux_Output { /** - * A text description of the image you want to generate. + * The unique identifier of the request (uuid) */ - prompt: string; + request_id?: string; /** - * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + * Starts at 0 and increments for each message the server sends to the client. 
*/ - guidance?: number; + sequence_id?: number; /** - * Random seed for reproducibility of the image generation + * The type of event being reported. */ - seed?: number; + event?: "Update" | "StartOfTurn" | "EagerEndOfTurn" | "TurnResumed" | "EndOfTurn"; /** - * The height of the generated image in pixels + * The index of the current turn */ - height?: number; + turn_index?: number; /** - * The width of the generated image in pixels + * Start time in seconds of the audio range that was transcribed */ - width?: number; + audio_window_start?: number; /** - * The number of diffusion steps; higher values can improve quality but take longer + * End time in seconds of the audio range that was transcribed */ - num_steps?: number; + audio_window_end?: number; /** - * The number of diffusion steps; higher values can improve quality but take longer + * Text that was said over the course of the current turn */ - steps?: number; + transcript?: string; + /** + * The words in the transcript + */ + words?: { + /** + * The individual punctuated, properly-cased word from the transcript + */ + word: string; + /** + * Confidence that this word was transcribed correctly + */ + confidence: number; + }[]; + /** + * Confidence that no more speech is coming in this turn + */ + end_of_turn_confidence?: number; } -interface Ai_Cf_Leonardo_Lucid_Origin_Output { +declare abstract class Base_Ai_Cf_Deepgram_Flux { + inputs: Ai_Cf_Deepgram_Flux_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Flux_Output; +} +interface Ai_Cf_Deepgram_Aura_2_En_Input { /** - * The generated image in Base64 format. + * Speaker used to produce the audio. 
*/ - image?: string; + speaker?: "amalthea" | "andromeda" | "apollo" | "arcas" | "aries" | "asteria" | "athena" | "atlas" | "aurora" | "callista" | "cora" | "cordelia" | "delia" | "draco" | "electra" | "harmonia" | "helena" | "hera" | "hermes" | "hyperion" | "iris" | "janus" | "juno" | "jupiter" | "luna" | "mars" | "minerva" | "neptune" | "odysseus" | "ophelia" | "orion" | "orpheus" | "pandora" | "phoebe" | "pluto" | "saturn" | "thalia" | "theia" | "vesta" | "zeus"; + /** + * Encoding of the output audio. + */ + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: "none" | "wav" | "ogg"; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; } -declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin { - inputs: Ai_Cf_Leonardo_Lucid_Origin_Input; - postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output; +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_2_En_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_En { + inputs: Ai_Cf_Deepgram_Aura_2_En_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_En_Output; } -interface Ai_Cf_Deepgram_Aura_1_Input { +interface Ai_Cf_Deepgram_Aura_2_Es_Input { /** * Speaker used to produce the audio. 
*/ - speaker?: "angus" | "asteria" | "arcas" | "orion" | "orpheus" | "athena" | "luna" | "zeus" | "perseus" | "helios" | "hera" | "stella"; + speaker?: "sirio" | "nestor" | "carina" | "celeste" | "alvaro" | "diana" | "aquila" | "selena" | "estrella" | "javier"; /** * Encoding of the output audio. */ @@ -5946,10 +9288,70 @@ interface Ai_Cf_Deepgram_Aura_1_Input { /** * The generated audio in MP3 format */ -type Ai_Cf_Deepgram_Aura_1_Output = string; -declare abstract class Base_Ai_Cf_Deepgram_Aura_1 { - inputs: Ai_Cf_Deepgram_Aura_1_Input; - postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output; +type Ai_Cf_Deepgram_Aura_2_Es_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_Es { + inputs: Ai_Cf_Deepgram_Aura_2_Es_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_Es_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Output { + /** + * Generated image as Base64 string. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Dev { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Output { + /** + * Generated image as Base64 string. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Output { + /** + * Generated image as Base64 string. 
+ */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Output; +} +declare abstract class Base_Ai_Cf_Zai_Org_Glm_4_7_Flash { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +declare abstract class Base_Ai_Cf_Moonshotai_Kimi_K2_5 { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +declare abstract class Base_Ai_Cf_Nvidia_Nemotron_3_120B_A12B { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; } interface AiModels { "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification; @@ -5969,7 +9371,6 @@ interface AiModels { "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration; "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration; "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration; - "@hf/thebloke/llamaguard-7b-awq": BaseAiTextGeneration; "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration; "@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration; "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration; @@ -5994,12 +9395,12 @@ interface AiModels { "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration; "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration; "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration; - "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration; "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration; "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration; "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration; "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration; "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration; + "@cf/ibm-granite/granite-4.0-h-micro": BaseAiTextGeneration; "@cf/facebook/bart-large-cnn": BaseAiSummarization; "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText; 
"@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5; @@ -6021,13 +9422,27 @@ interface AiModels { "@cf/mistralai/mistral-small-3.1-24b-instruct": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct; "@cf/google/gemma-3-12b-it": Base_Ai_Cf_Google_Gemma_3_12B_It; "@cf/meta/llama-4-scout-17b-16e-instruct": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct; + "@cf/qwen/qwen3-30b-a3b-fp8": Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8; "@cf/deepgram/nova-3": Base_Ai_Cf_Deepgram_Nova_3; + "@cf/qwen/qwen3-embedding-0.6b": Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B; "@cf/pipecat-ai/smart-turn-v2": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2; "@cf/openai/gpt-oss-120b": Base_Ai_Cf_Openai_Gpt_Oss_120B; "@cf/openai/gpt-oss-20b": Base_Ai_Cf_Openai_Gpt_Oss_20B; "@cf/leonardo/phoenix-1.0": Base_Ai_Cf_Leonardo_Phoenix_1_0; "@cf/leonardo/lucid-origin": Base_Ai_Cf_Leonardo_Lucid_Origin; "@cf/deepgram/aura-1": Base_Ai_Cf_Deepgram_Aura_1; + "@cf/ai4bharat/indictrans2-en-indic-1B": Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B; + "@cf/aisingapore/gemma-sea-lion-v4-27b-it": Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It; + "@cf/pfnet/plamo-embedding-1b": Base_Ai_Cf_Pfnet_Plamo_Embedding_1B; + "@cf/deepgram/flux": Base_Ai_Cf_Deepgram_Flux; + "@cf/deepgram/aura-2-en": Base_Ai_Cf_Deepgram_Aura_2_En; + "@cf/deepgram/aura-2-es": Base_Ai_Cf_Deepgram_Aura_2_Es; + "@cf/black-forest-labs/flux-2-dev": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Dev; + "@cf/black-forest-labs/flux-2-klein-4b": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B; + "@cf/black-forest-labs/flux-2-klein-9b": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B; + "@cf/zai-org/glm-4.7-flash": Base_Ai_Cf_Zai_Org_Glm_4_7_Flash; + "@cf/moonshotai/kimi-k2.5": Base_Ai_Cf_Moonshotai_Kimi_K2_5; + "@cf/nvidia/nemotron-3-120b-a12b": Base_Ai_Cf_Nvidia_Nemotron_3_120B_A12B; } type AiOptions = { /** @@ -6039,17 +9454,21 @@ type AiOptions = { * Establish websocket connections, only works for supported models */ websocket?: boolean; + /** + * Tag your requests to group and 
view them in Cloudflare dashboard. + * + * Rules: + * Tags must only contain letters, numbers, and the symbols: : - . / @ + * Each tag can have maximum 50 characters. + * Maximum 5 tags are allowed each request. + * Duplicate tags will removed. + */ + tags?: string[]; gateway?: GatewayOptions; returnRawResponse?: boolean; prefix?: string; extraHeaders?: object; -}; -type ConversionResponse = { - name: string; - mimeType: string; - format: "markdown"; - tokens: number; - data: string; + signal?: AbortSignal; }; type AiModelsSearchParams = { author?: string; @@ -6076,6 +9495,10 @@ type AiModelsSearchObject = { value: string; }[]; }; +type ChatCompletionsBase = XOR; +type ChatCompletionsInput = XOR; interface InferenceUpstreamError extends Error { } interface AiInternalError extends Error { @@ -6084,6 +9507,18 @@ type AiModelListType = Record; declare abstract class Ai { aiGatewayLogId: string | null; gateway(gatewayId: string): AiGateway; + /** + * @deprecated Use the standalone `ai_search_namespaces` or `ai_search` Workers bindings instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + aiSearch(): AiSearchNamespace; + /** + * @deprecated AutoRAG has been replaced by AI Search. + * Use the standalone `ai_search_namespaces` or `ai_search` Workers bindings instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + * + * @param autoragId Instance ID + */ autorag(autoragId: string): AutoRAG; run(model: Name, inputs: InputOptions, options?: Options): Promise { stream: true; } ? 
ReadableStream : AiModelList[Name]["postProcessedOutputs"]>; models(params?: AiModelsSearchParams): Promise; - toMarkdown(files: { - name: string; - blob: Blob; - }[], options?: { - gateway?: GatewayOptions; - extraHeaders?: object; - }): Promise; - toMarkdown(files: { - name: string; - blob: Blob; - }, options?: { - gateway?: GatewayOptions; - extraHeaders?: object; - }): Promise; + toMarkdown(): ToMarkdownService; + toMarkdown(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + toMarkdown(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; } type GatewayRetries = { maxAttempts?: 1 | 2 | 3 | 4 | 5; @@ -6203,12 +9627,28 @@ declare abstract class AiGateway { }): Promise; getUrl(provider?: AIGatewayProviders | string): Promise; // eslint-disable-line } +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ interface AutoRAGInternalError extends Error { } +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ interface AutoRAGNotFoundError extends Error { } +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ interface AutoRAGUnauthorizedError extends Error { } +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ interface AutoRAGNameNotSetError extends Error { } type ComparisonFilter = { @@ -6220,6 +9660,10 @@ type CompoundFilter = { type: 'and' | 'or'; filters: ComparisonFilter[]; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagSearchRequest = { query: string; filters?: CompoundFilter | ComparisonFilter; @@ -6228,15 +9672,31 @@ type AutoRagSearchRequest = { ranker?: string; score_threshold?: number; }; + reranking?: { + enabled?: boolean; + model?: string; + }; rewrite_query?: boolean; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagAiSearchRequest = AutoRagSearchRequest & { stream?: boolean; system_prompt?: string; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagAiSearchRequestStreaming = Omit & { stream: true; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagSearchResponse = { object: 'vector_store.search_results.page'; search_query: string; @@ -6253,6 +9713,10 @@ type AutoRagSearchResponse = { has_more: boolean; next_page: string | null; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagListResponse = { id: string; enable: boolean; @@ -6262,14 +9726,42 @@ type AutoRagListResponse = { paused: boolean; status: string; }[]; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ type AutoRagAiSearchResponse = AutoRagSearchResponse & { response: string; }; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ declare abstract class AutoRAG { + /** + * @deprecated Use the standalone AI Search Workers binding instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ list(): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ search(params: AutoRagSearchRequest): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ aiSearch(params: AutoRagAiSearchRequestStreaming): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ aiSearch(params: AutoRagAiSearchRequest): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ aiSearch(params: AutoRagAiSearchRequest): Promise; } interface BasicImageTransformations { @@ -6382,6 +9874,41 @@ interface RequestInitCfProperties extends Record { * (e.g. { '200-299': 86400, '404': 1, '500-599': 0 }) */ cacheTtlByStatus?: Record; + /** + * Explicit Cache-Control header value to set on the response stored in cache. + * This gives full control over cache directives (e.g. 'public, max-age=3600, s-maxage=86400'). + * + * Cannot be used together with `cacheTtl` or the `cache` request option (`no-store`/`no-cache`), + * as these are mutually exclusive cache control mechanisms. Setting both will throw a TypeError. + * + * Can be used together with `cacheTtlByStatus`. + */ + cacheControl?: string; + /** + * Whether the response should be eligible for Cache Reserve storage. + */ + cacheReserveEligible?: boolean; + /** + * Whether to respect strong ETags (as opposed to weak ETags) from the origin. + */ + respectStrongEtag?: boolean; + /** + * Whether to strip ETag headers from the origin response before caching. 
+ */ + stripEtags?: boolean; + /** + * Whether to strip Last-Modified headers from the origin response before caching. + */ + stripLastModified?: boolean; + /** + * Whether to enable Cache Deception Armor, which protects against web cache + * deception attacks by verifying the Content-Type matches the URL extension. + */ + cacheDeceptionArmor?: boolean; + /** + * Minimum file size in bytes for a response to be eligible for Cache Reserve storage. + */ + cacheReserveMinimumFileSize?: number; scrapeShield?: boolean; apps?: boolean; image?: RequestInitCfPropertiesImage; @@ -6984,7 +10511,7 @@ interface IncomingRequestCfPropertiesTLSClientAuthPlaceholder { certNotAfter: ""; } /** Possible outcomes of TLS verification */ -declare type CertVerificationStatus = +declare type CertVerificationStatus = /** Authentication succeeded */ "SUCCESS" /** No certificate was presented */ @@ -7020,6 +10547,10 @@ interface D1Meta { * The region of the database instance that executed the query. */ served_by_region?: string; + /** + * The three letters airport code of the colo that executed the query. + */ + served_by_colo?: string; /** * True if-and-only-if the database instance that executed the query was the primary. */ @@ -7048,7 +10579,7 @@ interface D1ExecResult { count: number; duration: number; } -type D1SessionConstraint = +type D1SessionConstraint = // Indicates that the first query should go to the primary, and the rest queries // using the same D1DatabaseSession will go to any replica that is consistent with // the bookmark maintained by the session (returned by the first query). @@ -7108,6 +10639,15 @@ declare abstract class D1PreparedStatement { // ignored when `Disposable` is included in the standard lib. interface Disposable { } +/** + * The returned data after sending an email + */ +interface EmailSendResult { + /** + * The Email Message ID + */ + messageId: string; +} /** * An email message that can be sent from a Worker. 
*/ @@ -7149,24 +10689,55 @@ interface ForwardableEmailMessage extends EmailMessage { * @param headers A [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers). * @returns A promise that resolves when the email message is forwarded. */ - forward(rcptTo: string, headers?: Headers): Promise; + forward(rcptTo: string, headers?: Headers): Promise; /** * Reply to the sender of this email message with a new EmailMessage object. * @param message The reply message. * @returns A promise that resolves when the email message is replied. */ - reply(message: EmailMessage): Promise; + reply(message: EmailMessage): Promise; +} +/** A file attachment for an email message */ +type EmailAttachment = { + disposition: 'inline'; + contentId: string; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; +} | { + disposition: 'attachment'; + contentId?: undefined; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; +}; +/** An Email Address */ +interface EmailAddress { + name: string; + email: string; } /** * A binding that allows a Worker to send email messages. 
*/ interface SendEmail { - send(message: EmailMessage): Promise; + send(message: EmailMessage): Promise; + send(builder: { + from: string | EmailAddress; + to: string | string[]; + subject: string; + replyTo?: string | EmailAddress; + cc?: string | string[]; + bcc?: string | string[]; + headers?: Record; + text?: string; + html?: string; + attachments?: EmailAttachment[]; + }): Promise; } declare abstract class EmailEvent extends ExtendableEvent { readonly message: ForwardableEmailMessage; } -declare type EmailExportedHandler = (message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext) => void | Promise; +declare type EmailExportedHandler = (message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext) => void | Promise; declare module "cloudflare:email" { let _EmailMessage: { prototype: EmailMessage; @@ -7194,7 +10765,7 @@ interface Hyperdrive { /** * Connect directly to Hyperdrive as if it's your database, returning a TCP socket. * - * Calling this method returns an idential socket to if you call + * Calling this method returns an identical socket to if you call * `connect("host:port")` using the `host` and `port` fields from this object. * Pick whichever approach works better with your preferred DB client library. 
* @@ -7307,6 +10878,87 @@ type ImageOutputOptions = { background?: string; anim?: boolean; }; +interface ImageMetadata { + id: string; + filename?: string; + uploaded?: string; + requireSignedURLs: boolean; + meta?: Record; + variants: string[]; + draft?: boolean; + creator?: string; +} +interface ImageUploadOptions { + id?: string; + filename?: string; + requireSignedURLs?: boolean; + metadata?: Record; + creator?: string; + encoding?: 'base64'; +} +interface ImageUpdateOptions { + requireSignedURLs?: boolean; + metadata?: Record; + creator?: string; +} +interface ImageListOptions { + limit?: number; + cursor?: string; + sortOrder?: 'asc' | 'desc'; + creator?: string; +} +interface ImageList { + images: ImageMetadata[]; + cursor?: string; + listComplete: boolean; +} +interface ImageHandle { + /** + * Get metadata for a hosted image + * @returns Image metadata, or null if not found + */ + details(): Promise; + /** + * Get the raw image data for a hosted image + * @returns ReadableStream of image bytes, or null if not found + */ + bytes(): Promise | null>; + /** + * Update hosted image metadata + * @param options Properties to update + * @returns Updated image metadata + * @throws {@link ImagesError} if update fails + */ + update(options: ImageUpdateOptions): Promise; + /** + * Delete a hosted image + * @returns True if deleted, false if not found + */ + delete(): Promise; +} +interface HostedImagesBinding { + /** + * Get a handle for a hosted image + * @param imageId The ID of the image (UUID or custom ID) + * @returns A handle for per-image operations + */ + image(imageId: string): ImageHandle; + /** + * Upload a new hosted image + * @param image The image file to upload + * @param options Upload configuration + * @returns Metadata for the uploaded image + * @throws {@link ImagesError} if upload fails + */ + upload(image: ReadableStream | ArrayBuffer, options?: ImageUploadOptions): Promise; + /** + * List hosted images with pagination + * @param options List 
configuration + * @returns List of images with pagination info + * @throws {@link ImagesError} if list fails + */ + list(options?: ImageListOptions): Promise; +} interface ImagesBinding { /** * Get image metadata (type, width and height) @@ -7320,6 +10972,10 @@ interface ImagesBinding { * @returns A transform handle */ input(stream: ReadableStream, options?: ImageInputOptions): ImageTransformer; + /** + * Access hosted images CRUD operations + */ + readonly hosted: HostedImagesBinding; } interface ImageTransformer { /** @@ -7386,7 +11042,13 @@ interface MediaTransformer { * @param transform - Configuration for how the media should be transformed * @returns A generator for producing the transformed media output */ - transform(transform: MediaTransformationInputOptions): MediaTransformationGenerator; + transform(transform?: MediaTransformationInputOptions): MediaTransformationGenerator; + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; } /** * Generator for producing media transformation results. @@ -7398,7 +11060,7 @@ interface MediaTransformationGenerator { * @param output - Configuration for the output format and parameters * @returns The final transformation result containing the transformed media */ - output(output: MediaTransformationOutputOptions): MediaTransformationResult; + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; } /** * Result of a media transformation operation. @@ -7407,19 +11069,19 @@ interface MediaTransformationGenerator { interface MediaTransformationResult { /** * Returns the transformed media as a readable stream of bytes. 
- * @returns A stream containing the transformed media data + * @returns A promise containing a readable stream with the transformed media */ - media(): ReadableStream; + media(): Promise>; /** * Returns the transformed media as an HTTP response object. - * @returns The transformed media as a Response, ready to store in cache or return to users + * @returns The transformed media as a Promise, ready to store in cache or return to users */ - response(): Response; + response(): Promise; /** * Returns the MIME type of the transformed media. - * @returns The content type string (e.g., 'image/jpeg', 'video/mp4') + * @returns A promise containing the content type string (e.g., 'image/jpeg', 'video/mp4') */ - contentType(): string; + contentType(): Promise; } /** * Configuration options for transforming media input. @@ -7452,6 +11114,10 @@ type MediaTransformationOutputOptions = { * Duration for video clips, audio extraction, and spritesheet generation (e.g. '5s'). */ duration?: string; + /** + * Number of frames in the spritesheet. + */ + imageCount?: number; /** * Output format for the generated media. */ @@ -7466,6 +11132,19 @@ interface MediaError extends Error { readonly message: string; readonly stack?: string; } +declare module 'cloudflare:node' { + interface NodeStyleServer { + listen(...args: unknown[]): this; + address(): { + port?: number | null | undefined; + }; + } + export function httpServerHandler(port: number): ExportedHandler; + export function httpServerHandler(options: { + port: number; + }): ExportedHandler; + export function httpServerHandler(server: NodeStyleServer): ExportedHandler; +} type Params

= Record; type EventContext = { request: Request>; @@ -7510,7 +11189,7 @@ declare module "cloudflare:pipelines" { protected ctx: ExecutionContext; constructor(ctx: ExecutionContext, env: Env); /** - * run recieves an array of PipelineRecord which can be + * run receives an array of PipelineRecord which can be * transformed and returned to the pipeline * @param records Incoming records from the pipeline to be transformed * @param metadata Information about the specific pipeline calling the transformation entrypoint @@ -7612,7 +11291,7 @@ declare namespace Rpc { // The reason for using a generic type here is to build a serializable subset of structured // cloneable composite types. This allows types defined with the "interface" keyword to pass the // serializable check as well. Otherwise, only types defined with the "type" keyword would pass. - type Serializable = + type Serializable = // Structured cloneables BaseType // Structured cloneable composites @@ -7676,9 +11355,9 @@ declare namespace Rpc { // Base type for all other types providing RPC-like interfaces. // Rewrites all methods/properties to be `MethodOrProperty`s, while preserving callable types. // `Reserved` names (e.g. stub method names like `dup()`) and symbols can't be accessed over RPC. - export type Provider = MaybeCallableProvider & { - [K in Exclude>]: MethodOrProperty; - }; + export type Provider = MaybeCallableProvider & Pick<{ + [K in keyof T]: MethodOrProperty; + }, Exclude>>; } declare namespace Cloudflare { // Type of `env`. @@ -7723,108 +11402,895 @@ declare namespace Cloudflare { & (K extends GlobalProp<"durableNamespaces", never> ? MainModule[K] extends new (...args: any[]) => infer DoInstance ? DoInstance extends Rpc.DurableObjectBranded ? 
DurableObjectNamespace : DurableObjectNamespace : DurableObjectNamespace : {}); }; } -declare module 'cloudflare:node' { - export interface DefaultHandler { - fetch?(request: Request): Response | Promise; - tail?(events: TraceItem[]): void | Promise; - trace?(traces: TraceItem[]): void | Promise; - scheduled?(controller: ScheduledController): void | Promise; - queue?(batch: MessageBatch): void | Promise; - test?(controller: TestController): void | Promise; - } - export function httpServerHandler(options: { - port: number; - }, handlers?: Omit): DefaultHandler; +declare namespace CloudflareWorkersModule { + export type RpcStub = Rpc.Stub; + export const RpcStub: { + new (value: T): Rpc.Stub; + }; + export abstract class RpcTarget implements Rpc.RpcTargetBranded { + [Rpc.__RPC_TARGET_BRAND]: never; + } + // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC + export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { + [Rpc.__WORKER_ENTRYPOINT_BRAND]: never; + protected ctx: ExecutionContext; + protected env: Env; + constructor(ctx: ExecutionContext, env: Env); + email?(message: ForwardableEmailMessage): void | Promise; + fetch?(request: Request): Response | Promise; + connect?(socket: Socket): void | Promise; + queue?(batch: MessageBatch): void | Promise; + scheduled?(controller: ScheduledController): void | Promise; + tail?(events: TraceItem[]): void | Promise; + tailStream?(event: TailStream.TailEvent): TailStream.TailEventHandlerType | Promise; + test?(controller: TestController): void | Promise; + trace?(traces: TraceItem[]): void | Promise; + } + export abstract class DurableObject implements Rpc.DurableObjectBranded { + [Rpc.__DURABLE_OBJECT_BRAND]: never; + protected ctx: DurableObjectState; + protected env: Env; + constructor(ctx: DurableObjectState, env: Env); + alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; + fetch?(request: Request): Response | Promise; + connect?(socket: Socket): void | Promise; + 
webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; + webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; + webSocketError?(ws: WebSocket, error: unknown): void | Promise; + } + export type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; + export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; + export type WorkflowDelayDuration = WorkflowSleepDuration; + export type WorkflowTimeoutDuration = WorkflowSleepDuration; + export type WorkflowRetentionDuration = WorkflowSleepDuration; + export type WorkflowBackoff = 'constant' | 'linear' | 'exponential'; + export type WorkflowStepConfig = { + retries?: { + limit: number; + delay: WorkflowDelayDuration | number; + backoff?: WorkflowBackoff; + }; + timeout?: WorkflowTimeoutDuration | number; + }; + export type WorkflowEvent = { + payload: Readonly; + timestamp: Date; + instanceId: string; + }; + export type WorkflowStepEvent = { + payload: Readonly; + timestamp: Date; + type: string; + }; + export type WorkflowStepContext = { + attempt: number; + }; + export abstract class WorkflowStep { + do>(name: string, callback: (ctx: WorkflowStepContext) => Promise): Promise; + do>(name: string, config: WorkflowStepConfig, callback: (ctx: WorkflowStepContext) => Promise): Promise; + sleep: (name: string, duration: WorkflowSleepDuration) => Promise; + sleepUntil: (name: string, timestamp: Date | number) => Promise; + waitForEvent>(name: string, options: { + type: string; + timeout?: WorkflowTimeoutDuration | number; + }): Promise>; + } + export type WorkflowInstanceStatus = 'queued' | 'running' | 'paused' | 'errored' | 'terminated' | 'complete' | 'waiting' | 'waitingForPause' | 'unknown'; + export abstract class WorkflowEntrypoint | unknown = unknown> implements Rpc.WorkflowEntrypointBranded { + [Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never; + protected ctx: ExecutionContext; + protected 
env: Env; + constructor(ctx: ExecutionContext, env: Env); + run(event: Readonly>, step: WorkflowStep): Promise; + } + export function waitUntil(promise: Promise): void; + export function withEnv(newEnv: unknown, fn: () => unknown): unknown; + export function withExports(newExports: unknown, fn: () => unknown): unknown; + export function withEnvAndExports(newEnv: unknown, newExports: unknown, fn: () => unknown): unknown; + export const env: Cloudflare.Env; + export const exports: Cloudflare.Exports; +} +declare module 'cloudflare:workers' { + export = CloudflareWorkersModule; +} +interface SecretsStoreSecret { + /** + * Get a secret from the Secrets Store, returning a string of the secret value + * if it exists, or throws an error if it does not exist + */ + get(): Promise; +} +declare module "cloudflare:sockets" { + function _connect(address: string | SocketAddress, options?: SocketOptions): Socket; + export { _connect as connect }; +} +/** + * Binding entrypoint for Cloudflare Stream. + * + * Usage: + * - Binding-level operations: + * `await env.STREAM.videos.upload` + * `await env.STREAM.videos.createDirectUpload` + * `await env.STREAM.videos.*` + * `await env.STREAM.watermarks.*` + * - Per-video operations: + * `await env.STREAM.video(id).downloads.*` + * `await env.STREAM.video(id).captions.*` + * + * Example usage: + * ```ts + * await env.STREAM.video(id).downloads.generate(); + * + * const video = env.STREAM.video(id) + * const captions = video.captions.list(); + * const videoDetails = video.details() + * ``` + */ +interface StreamBinding { + /** + * Returns a handle scoped to a single video for per-video operations. + * @param id The unique identifier for the video. + * @returns A handle for per-video operations. + */ + video(id: string): StreamVideoHandle; + /** + * Uploads a new video from a provided URL. + * @param url The URL to upload from. + * @param params Optional upload parameters. + * @returns The uploaded video details. 
+ * @throws {BadRequestError} if the upload parameter is invalid or the URL is invalid + * @throws {QuotaReachedError} if the account storage capacity is exceeded + * @throws {MaxFileSizeError} if the file size is too large + * @throws {RateLimitedError} if the server received too many requests + * @throws {AlreadyUploadedError} if a video was already uploaded to this URL + * @throws {InternalError} if an unexpected error occurs + */ + upload(url: string, params?: StreamUrlUploadParams): Promise; + /** + * Creates a direct upload that allows video uploads without an API key. + * @param params Parameters for the direct upload + * @returns The direct upload details. + * @throws {BadRequestError} if the parameters are invalid + * @throws {RateLimitedError} if the server received too many requests + * @throws {InternalError} if an unexpected error occurs + */ + createDirectUpload(params: StreamDirectUploadCreateParams): Promise; + videos: StreamVideos; + watermarks: StreamWatermarks; +} +/** + * Handle for operations scoped to a single Stream video. + */ +interface StreamVideoHandle { + /** + * The unique identifier for the video. + */ + id: string; + /** + * Get a full videos details + * @returns The full video details. + * @throws {NotFoundError} if the video is not found + * @throws {InternalError} if an unexpected error occurs + */ + details(): Promise; + /** + * Update details for a single video. + * @param params The fields to update for the video. + * @returns The updated video details. + * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the parameters are invalid + * @throws {InternalError} if an unexpected error occurs + */ + update(params: StreamUpdateVideoParams): Promise; + /** + * Deletes a video and its copies from Cloudflare Stream. + * @returns A promise that resolves when deletion completes. 
+ * @throws {NotFoundError} if the video is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(): Promise; + /** + * Creates a signed URL token for a video. + * @returns The signed token that was created. + * @throws {InternalError} if the signing key cannot be retrieved or the token cannot be signed + */ + generateToken(): Promise; + downloads: StreamScopedDownloads; + captions: StreamScopedCaptions; +} +interface StreamVideo { + /** + * The unique identifier for the video. + */ + id: string; + /** + * A user-defined identifier for the media creator. + */ + creator: string | null; + /** + * The thumbnail URL for the video. + */ + thumbnail: string; + /** + * The thumbnail timestamp percentage. + */ + thumbnailTimestampPct: number; + /** + * Indicates whether the video is ready to stream. + */ + readyToStream: boolean; + /** + * The date and time the video became ready to stream. + */ + readyToStreamAt: string | null; + /** + * Processing status information. + */ + status: StreamVideoStatus; + /** + * A user modifiable key-value store. + */ + meta: Record; + /** + * The date and time the video was created. + */ + created: string; + /** + * The date and time the video was last modified. + */ + modified: string; + /** + * The date and time at which the video will be deleted. + */ + scheduledDeletion: string | null; + /** + * The size of the video in bytes. + */ + size: number; + /** + * The preview URL for the video. + */ + preview?: string; + /** + * Origins allowed to display the video. + */ + allowedOrigins: Array; + /** + * Indicates whether signed URLs are required. + */ + requireSignedURLs: boolean | null; + /** + * The date and time the video was uploaded. + */ + uploaded: string | null; + /** + * The date and time when the upload URL expires. + */ + uploadExpiry: string | null; + /** + * The maximum size in bytes for direct uploads. + */ + maxSizeBytes: number | null; + /** + * The maximum duration in seconds for direct uploads. 
+ */ + maxDurationSeconds: number | null; + /** + * The video duration in seconds. -1 indicates unknown. + */ + duration: number; + /** + * Input metadata for the original upload. + */ + input: StreamVideoInput; + /** + * Playback URLs for the video. + */ + hlsPlaybackUrl: string; + dashPlaybackUrl: string; + /** + * The watermark applied to the video, if any. + */ + watermark: StreamWatermark | null; + /** + * The live input id associated with the video, if any. + */ + liveInputId?: string | null; + /** + * The source video id if this is a clip. + */ + clippedFromId: string | null; + /** + * Public details associated with the video. + */ + publicDetails: StreamPublicDetails | null; +} +type StreamVideoStatus = { + /** + * The current processing state. + */ + state: string; + /** + * The current processing step. + */ + step?: string; + /** + * The percent complete as a string. + */ + pctComplete?: string; + /** + * An error reason code, if applicable. + */ + errorReasonCode: string; + /** + * An error reason text, if applicable. + */ + errorReasonText: string; +}; +type StreamVideoInput = { + /** + * The input width in pixels. + */ + width: number; + /** + * The input height in pixels. + */ + height: number; +}; +type StreamPublicDetails = { + /** + * The public title for the video. + */ + title: string | null; + /** + * The public share link. + */ + share_link: string | null; + /** + * The public channel link. + */ + channel_link: string | null; + /** + * The public logo URL. + */ + logo: string | null; +}; +type StreamDirectUpload = { + /** + * The URL an unauthenticated upload can use for a single multipart request. + */ + uploadURL: string; + /** + * A Cloudflare-generated unique identifier for a media item. + */ + id: string; + /** + * The watermark profile applied to the upload. + */ + watermark: StreamWatermark | null; + /** + * The scheduled deletion time, if any. 
+ */ + scheduledDeletion: string | null; +}; +type StreamDirectUploadCreateParams = { + /** + * The maximum duration in seconds for a video upload. + */ + maxDurationSeconds: number; + /** + * The date and time after upload when videos will not be accepted. + */ + expiry?: string; + /** + * A user-defined identifier for the media creator. + */ + creator?: string; + /** + * A user modifiable key-value store used to reference other systems of record for + * managing videos. + */ + meta?: Record; + /** + * Lists the origins allowed to display the video. + */ + allowedOrigins?: Array; + /** + * Indicates whether the video can be accessed using the id. When set to `true`, + * a signed token must be generated with a signing key to view the video. + */ + requireSignedURLs?: boolean; + /** + * The thumbnail timestamp percentage. + */ + thumbnailTimestampPct?: number; + /** + * The date and time at which the video will be deleted. Include `null` to remove + * a scheduled deletion. + */ + scheduledDeletion?: string | null; + /** + * The watermark profile to apply. + */ + watermark?: StreamDirectUploadWatermark; +}; +type StreamDirectUploadWatermark = { + /** + * The unique identifier for the watermark profile. + */ + id: string; +}; +type StreamUrlUploadParams = { + /** + * Lists the origins allowed to display the video. Enter allowed origin + * domains in an array and use `*` for wildcard subdomains. Empty arrays allow the + * video to be viewed on any origin. + */ + allowedOrigins?: Array; + /** + * A user-defined identifier for the media creator. + */ + creator?: string; + /** + * A user modifiable key-value store used to reference other systems of + * record for managing videos. + */ + meta?: Record; + /** + * Indicates whether the video can be accessed using the id. When + * set to `true`, a signed token must be generated with a signing key to view the + * video. 
+ */ + requireSignedURLs?: boolean; + /** + * Indicates the date and time at which the video will be deleted. Omit + * the field to indicate no change, or include with a `null` value to remove an + * existing scheduled deletion. If specified, must be at least 30 days from upload + * time. + */ + scheduledDeletion?: string | null; + /** + * The timestamp for a thumbnail image calculated as a percentage value + * of the video's duration. To convert from a second-wise timestamp to a + * percentage, divide the desired timestamp by the total duration of the video. If + * this value is not set, the default thumbnail image is taken from 0s of the + * video. + */ + thumbnailTimestampPct?: number; + /** + * The identifier for the watermark profile + */ + watermarkId?: string; +}; +interface StreamScopedCaptions { + /** + * Uploads the caption or subtitle file to the endpoint for a specific BCP47 language. + * One caption or subtitle file per language is allowed. + * @param language The BCP 47 language tag for the caption or subtitle. + * @param input The caption or subtitle stream to upload. + * @returns The created caption entry. + * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the language or file is invalid + * @throws {InternalError} if an unexpected error occurs + */ + upload(language: string, input: ReadableStream): Promise; + /** + * Generate captions or subtitles for the provided language via AI. + * @param language The BCP 47 language tag to generate. + * @returns The generated caption entry. 
+ * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the language is invalid + * @throws {StreamError} if a generated caption already exists + * @throws {StreamError} if the video duration is too long + * @throws {StreamError} if the video is missing audio + * @throws {StreamError} if the requested language is not supported + * @throws {InternalError} if an unexpected error occurs + */ + generate(language: string): Promise; + /** + * Lists the captions or subtitles. + * Use the language parameter to filter by a specific language. + * @param language The optional BCP 47 language tag to filter by. + * @returns The list of captions or subtitles. + * @throws {NotFoundError} if the video or caption is not found + * @throws {InternalError} if an unexpected error occurs + */ + list(language?: string): Promise; + /** + * Removes the captions or subtitles from a video. + * @param language The BCP 47 language tag to remove. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the video or caption is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(language: string): Promise; +} +interface StreamScopedDownloads { + /** + * Generates a download for a video when a video is ready to view. Available + * types are `default` and `audio`. Defaults to `default` when omitted. + * @param downloadType The download type to create. + * @returns The current downloads for the video. + * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the download type is invalid + * @throws {StreamError} if the video duration is too long to generate a download + * @throws {StreamError} if the video is not ready to stream + * @throws {InternalError} if an unexpected error occurs + */ + generate(downloadType?: StreamDownloadType): Promise; + /** + * Lists the downloads created for a video. + * @returns The current downloads for the video. 
+ * @throws {NotFoundError} if the video or downloads are not found + * @throws {InternalError} if an unexpected error occurs + */ + get(): Promise; + /** + * Delete the downloads for a video. Available types are `default` and `audio`. + * Defaults to `default` when omitted. + * @param downloadType The download type to delete. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the video or downloads are not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(downloadType?: StreamDownloadType): Promise; } -declare namespace CloudflareWorkersModule { - export type RpcStub = Rpc.Stub; - export const RpcStub: { - new (value: T): Rpc.Stub; - }; - export abstract class RpcTarget implements Rpc.RpcTargetBranded { - [Rpc.__RPC_TARGET_BRAND]: never; - } - // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC - export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { - [Rpc.__WORKER_ENTRYPOINT_BRAND]: never; - protected ctx: ExecutionContext; - protected env: Env; - constructor(ctx: ExecutionContext, env: Env); - fetch?(request: Request): Response | Promise; - tail?(events: TraceItem[]): void | Promise; - trace?(traces: TraceItem[]): void | Promise; - scheduled?(controller: ScheduledController): void | Promise; - queue?(batch: MessageBatch): void | Promise; - test?(controller: TestController): void | Promise; - } - export abstract class DurableObject implements Rpc.DurableObjectBranded { - [Rpc.__DURABLE_OBJECT_BRAND]: never; - protected ctx: DurableObjectState; - protected env: Env; - constructor(ctx: DurableObjectState, env: Env); - fetch?(request: Request): Response | Promise; - alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; - webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; - webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; - webSocketError?(ws: WebSocket, error: 
unknown): void | Promise; - } - export type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; - export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; - export type WorkflowDelayDuration = WorkflowSleepDuration; - export type WorkflowTimeoutDuration = WorkflowSleepDuration; - export type WorkflowRetentionDuration = WorkflowSleepDuration; - export type WorkflowBackoff = 'constant' | 'linear' | 'exponential'; - export type WorkflowStepConfig = { - retries?: { - limit: number; - delay: WorkflowDelayDuration | number; - backoff?: WorkflowBackoff; - }; - timeout?: WorkflowTimeoutDuration | number; - }; - export type WorkflowEvent = { - payload: Readonly; - timestamp: Date; - instanceId: string; - }; - export type WorkflowStepEvent = { - payload: Readonly; - timestamp: Date; - type: string; - }; - export abstract class WorkflowStep { - do>(name: string, callback: () => Promise): Promise; - do>(name: string, config: WorkflowStepConfig, callback: () => Promise): Promise; - sleep: (name: string, duration: WorkflowSleepDuration) => Promise; - sleepUntil: (name: string, timestamp: Date | number) => Promise; - waitForEvent>(name: string, options: { - type: string; - timeout?: WorkflowTimeoutDuration | number; - }): Promise>; - } - export abstract class WorkflowEntrypoint | unknown = unknown> implements Rpc.WorkflowEntrypointBranded { - [Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never; - protected ctx: ExecutionContext; - protected env: Env; - constructor(ctx: ExecutionContext, env: Env); - run(event: Readonly>, step: WorkflowStep): Promise; - } - export function waitUntil(promise: Promise): void; - export const env: Cloudflare.Env; +interface StreamVideos { + /** + * Lists all videos in a users account. + * @returns The list of videos. 
+ * @throws {BadRequestError} if the parameters are invalid + * @throws {InternalError} if an unexpected error occurs + */ + list(params?: StreamVideosListParams): Promise; } -declare module 'cloudflare:workers' { - export = CloudflareWorkersModule; +interface StreamWatermarks { + /** + * Generate a new watermark profile + * @param input The image stream to upload + * @param params The watermark creation parameters. + * @returns The created watermark profile. + * @throws {BadRequestError} if the parameters are invalid + * @throws {InvalidURLError} if the URL is invalid + * @throws {TooManyWatermarksError} if the number of allowed watermarks is reached + * @throws {InternalError} if an unexpected error occurs + */ + generate(input: ReadableStream, params: StreamWatermarkCreateParams): Promise; + /** + * Generate a new watermark profile + * @param url The image url to upload + * @param params The watermark creation parameters. + * @returns The created watermark profile. + * @throws {BadRequestError} if the parameters are invalid + * @throws {InvalidURLError} if the URL is invalid + * @throws {TooManyWatermarksError} if the number of allowed watermarks is reached + * @throws {InternalError} if an unexpected error occurs + */ + generate(url: string, params: StreamWatermarkCreateParams): Promise; + /** + * Lists all watermark profiles for an account. + * @returns The list of watermark profiles. + * @throws {InternalError} if an unexpected error occurs + */ + list(): Promise; + /** + * Retrieves details for a single watermark profile. + * @param watermarkId The watermark profile identifier. + * @returns The watermark profile details. + * @throws {NotFoundError} if the watermark is not found + * @throws {InternalError} if an unexpected error occurs + */ + get(watermarkId: string): Promise; + /** + * Deletes a watermark profile. + * @param watermarkId The watermark profile identifier. + * @returns A promise that resolves when deletion completes. 
+ * @throws {NotFoundError} if the watermark is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(watermarkId: string): Promise; } -interface SecretsStoreSecret { +type StreamUpdateVideoParams = { /** - * Get a secret from the Secrets Store, returning a string of the secret value - * if it exists, or throws an error if it does not exist + * Lists the origins allowed to display the video. Enter allowed origin + * domains in an array and use `*` for wildcard subdomains. Empty arrays allow the + * video to be viewed on any origin. */ - get(): Promise; + allowedOrigins?: Array; + /** + * A user-defined identifier for the media creator. + */ + creator?: string; + /** + * The maximum duration in seconds for a video upload. Can be set for a + * video that is not yet uploaded to limit its duration. Uploads that exceed the + * specified duration will fail during processing. A value of `-1` means the value + * is unknown. + */ + maxDurationSeconds?: number; + /** + * A user modifiable key-value store used to reference other systems of + * record for managing videos. + */ + meta?: Record; + /** + * Indicates whether the video can be a accessed using the id. When + * set to `true`, a signed token must be generated with a signing key to view the + * video. + */ + requireSignedURLs?: boolean; + /** + * Indicates the date and time at which the video will be deleted. Omit + * the field to indicate no change, or include with a `null` value to remove an + * existing scheduled deletion. If specified, must be at least 30 days from upload + * time. + */ + scheduledDeletion?: string | null; + /** + * The timestamp for a thumbnail image calculated as a percentage value + * of the video's duration. To convert from a second-wise timestamp to a + * percentage, divide the desired timestamp by the total duration of the video. If + * this value is not set, the default thumbnail image is taken from 0s of the + * video. 
+ */ + thumbnailTimestampPct?: number; +}; +type StreamCaption = { + /** + * Whether the caption was generated via AI. + */ + generated?: boolean; + /** + * The language label displayed in the native language to users. + */ + label: string; + /** + * The language tag in BCP 47 format. + */ + language: string; + /** + * The status of a generated caption. + */ + status?: 'ready' | 'inprogress' | 'error'; +}; +type StreamDownloadStatus = 'ready' | 'inprogress' | 'error'; +type StreamDownloadType = 'default' | 'audio'; +type StreamDownload = { + /** + * Indicates the progress as a percentage between 0 and 100. + */ + percentComplete: number; + /** + * The status of a generated download. + */ + status: StreamDownloadStatus; + /** + * The URL to access the generated download. + */ + url?: string; +}; +/** + * An object with download type keys. Each key is optional and only present if that + * download type has been created. + */ +type StreamDownloadGetResponse = { + /** + * The audio-only download. Only present if this download type has been created. + */ + audio?: StreamDownload; + /** + * The default video download. Only present if this download type has been created. + */ + default?: StreamDownload; +}; +type StreamWatermarkPosition = 'upperRight' | 'upperLeft' | 'lowerLeft' | 'lowerRight' | 'center'; +type StreamWatermark = { + /** + * The unique identifier for a watermark profile. + */ + id: string; + /** + * The size of the image in bytes. + */ + size: number; + /** + * The height of the image in pixels. + */ + height: number; + /** + * The width of the image in pixels. + */ + width: number; + /** + * The date and a time a watermark profile was created. + */ + created: string; + /** + * The source URL for a downloaded image. If the watermark profile was created via + * direct upload, this field is null. + */ + downloadedFrom: string | null; + /** + * A short description of the watermark profile. + */ + name: string; + /** + * The translucency of the image. 
A value of `0.0` makes the image completely + * transparent, and `1.0` makes the image completely opaque. Note that if the image + * is already semi-transparent, setting this to `1.0` will not make the image + * completely opaque. + */ + opacity: number; + /** + * The whitespace between the adjacent edges (determined by position) of the video + * and the image. `0.0` indicates no padding, and `1.0` indicates a fully padded + * video width or length, as determined by the algorithm. + */ + padding: number; + /** + * The size of the image relative to the overall size of the video. This parameter + * will adapt to horizontal and vertical videos automatically. `0.0` indicates no + * scaling (use the size of the image as-is), and `1.0` fills the entire video. + */ + scale: number; + /** + * The location of the image. Valid positions are: `upperRight`, `upperLeft`, + * `lowerLeft`, `lowerRight`, and `center`. Note that `center` ignores the + * `padding` parameter. + */ + position: StreamWatermarkPosition; +}; +type StreamWatermarkCreateParams = { + /** + * A short description of the watermark profile. + */ + name?: string; + /** + * The translucency of the image. A value of `0.0` makes the image completely + * transparent, and `1.0` makes the image completely opaque. Note that if the + * image is already semi-transparent, setting this to `1.0` will not make the + * image completely opaque. + */ + opacity?: number; + /** + * The whitespace between the adjacent edges (determined by position) of the + * video and the image. `0.0` indicates no padding, and `1.0` indicates a fully + * padded video width or length, as determined by the algorithm. + */ + padding?: number; + /** + * The size of the image relative to the overall size of the video. This + * parameter will adapt to horizontal and vertical videos automatically. `0.0` + * indicates no scaling (use the size of the image as-is), and `1.0` fills the + * entire video. 
+ */ + scale?: number; + /** + * The location of the image. + */ + position?: StreamWatermarkPosition; +}; +type StreamVideosListParams = { + /** + * The maximum number of videos to return. + */ + limit?: number; + /** + * Return videos created before this timestamp. + * (RFC3339/RFC3339Nano) + */ + before?: string; + /** + * Comparison operator for the `before` field. + * @default 'lt' + */ + beforeComp?: StreamPaginationComparison; + /** + * Return videos created after this timestamp. + * (RFC3339/RFC3339Nano) + */ + after?: string; + /** + * Comparison operator for the `after` field. + * @default 'gte' + */ + afterComp?: StreamPaginationComparison; +}; +type StreamPaginationComparison = 'eq' | 'gt' | 'gte' | 'lt' | 'lte'; +/** + * Error object for Stream binding operations. + */ +interface StreamError extends Error { + readonly code: number; + readonly statusCode: number; + readonly message: string; + readonly stack?: string; } -declare module "cloudflare:sockets" { - function _connect(address: string | SocketAddress, options?: SocketOptions): Socket; - export { _connect as connect }; +interface InternalError extends StreamError { + name: 'InternalError'; +} +interface BadRequestError extends StreamError { + name: 'BadRequestError'; +} +interface NotFoundError extends StreamError { + name: 'NotFoundError'; +} +interface ForbiddenError extends StreamError { + name: 'ForbiddenError'; +} +interface RateLimitedError extends StreamError { + name: 'RateLimitedError'; +} +interface QuotaReachedError extends StreamError { + name: 'QuotaReachedError'; +} +interface MaxFileSizeError extends StreamError { + name: 'MaxFileSizeError'; +} +interface InvalidURLError extends StreamError { + name: 'InvalidURLError'; +} +interface AlreadyUploadedError extends StreamError { + name: 'AlreadyUploadedError'; +} +interface TooManyWatermarksError extends StreamError { + name: 'TooManyWatermarksError'; +} +type MarkdownDocument = { + name: string; + blob: Blob; +}; +type 
ConversionResponse = { + id: string; + name: string; + mimeType: string; + format: 'markdown'; + tokens: number; + data: string; +} | { + id: string; + name: string; + mimeType: string; + format: 'error'; + error: string; +}; +type ImageConversionOptions = { + descriptionLanguage?: 'en' | 'es' | 'fr' | 'it' | 'pt' | 'de'; +}; +type EmbeddedImageConversionOptions = ImageConversionOptions & { + convert?: boolean; + maxConvertedImages?: number; +}; +type ConversionOptions = { + html?: { + images?: EmbeddedImageConversionOptions & { + convertOGImage?: boolean; + }; + hostname?: string; + cssSelector?: string; + }; + docx?: { + images?: EmbeddedImageConversionOptions; + }; + image?: ImageConversionOptions; + pdf?: { + images?: EmbeddedImageConversionOptions; + metadata?: boolean; + }; +}; +type ConversionRequestOptions = { + gateway?: GatewayOptions; + extraHeaders?: object; + conversionOptions?: ConversionOptions; +}; +type SupportedFileFormat = { + mimeType: string; + extension: string; +}; +declare abstract class ToMarkdownService { + transform(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + transform(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; + supported(): Promise; } declare namespace TailStream { interface Header { @@ -7887,6 +12353,9 @@ declare namespace TailStream { readonly type: "fetch"; readonly statusCode: number; } + interface ConnectEventInfo { + readonly type: "connect"; + } type EventOutcome = "ok" | "canceled" | "exception" | "unknown" | "killSwitch" | "daemonDown" | "exceededCpu" | "exceededMemory" | "loadShed" | "responseStreamDisconnected" | "scriptNotFound"; interface ScriptVersion { readonly id: string; @@ -7904,7 +12373,7 @@ declare namespace TailStream { readonly scriptName?: string; readonly scriptTags?: string[]; readonly scriptVersion?: ScriptVersion; - readonly info: FetchEventInfo | JsRpcEventInfo | ScheduledEventInfo | AlarmEventInfo | QueueEventInfo | EmailEventInfo | TraceEventInfo | 
HibernatableWebSocketEventInfo | CustomEventInfo; + readonly info: FetchEventInfo | ConnectEventInfo | JsRpcEventInfo | ScheduledEventInfo | AlarmEventInfo | QueueEventInfo | EmailEventInfo | TraceEventInfo | HibernatableWebSocketEventInfo | CustomEventInfo; } interface Outcome { readonly type: "outcome"; @@ -7939,6 +12408,15 @@ declare namespace TailStream { readonly level: "debug" | "error" | "info" | "log" | "warn"; readonly message: object; } + interface DroppedEventsDiagnostic { + readonly diagnosticsType: "droppedEvents"; + readonly count: number; + } + interface StreamDiagnostic { + readonly type: 'streamDiagnostic'; + // To add new diagnostic types, define a new interface and add it to this union type. + readonly diagnostic: DroppedEventsDiagnostic; + } // This marks the worker handler return information. // This is separate from Outcome because the worker invocation can live for a long time after // returning. For example - Websockets that return an http upgrade response but then continue @@ -7955,7 +12433,7 @@ declare namespace TailStream { readonly type: "attributes"; readonly info: Attribute[]; } - type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | Return | Attributes; + type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | StreamDiagnostic | Return | Attributes; // Context in which this trace event lives. interface SpanContext { // Single id for the entire top-level invocation @@ -7969,7 +12447,7 @@ declare namespace TailStream { // For Hibernate and Mark this would be the span under which they were emitted. // spanId is not set ONLY if: // 1. This is an Onset event - // 2. We are not inherting any SpanContext. (e.g. this is a cross-account service binding or a new top-level invocation) + // 2. We are not inheriting any SpanContext. (e.g. 
this is a cross-account service binding or a new top-level invocation) readonly spanId?: string; } interface TailEvent { @@ -8016,13 +12494,16 @@ interface VectorizeError { * * This list is expected to grow as support for more operations are released. */ -type VectorizeVectorMetadataFilterOp = "$eq" | "$ne"; +type VectorizeVectorMetadataFilterOp = '$eq' | '$ne' | '$lt' | '$lte' | '$gt' | '$gte'; +type VectorizeVectorMetadataFilterCollectionOp = '$in' | '$nin'; /** * Filter criteria for vector metadata used to limit the retrieved query result set. */ type VectorizeVectorMetadataFilter = { [field: string]: Exclude | null | { [Op in VectorizeVectorMetadataFilterOp]?: Exclude | null; + } | { + [Op in VectorizeVectorMetadataFilterCollectionOp]?: Exclude[]; }; }; /** @@ -8332,8 +12813,11 @@ type InstanceStatus = { | 'complete' | 'waiting' // instance is hibernating and waiting for sleep or event to finish | 'waitingForPause' // instance is finishing the current work to pause | 'unknown'; - error?: string; - output?: object; + error?: { + name: string; + message: string; + }; + output?: unknown; }; interface WorkflowError { code?: number; From 8545841e6844df027a969dbd8d3d2814399c544e Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 15:04:41 +0900 Subject: [PATCH 6/8] feat: implement room seeding logic in DocumentSyncAdapter and enhance SyncClient handling for empty states --- .../plugins/sync/document-sync.ts | 37 +++++++++++++++++++ .../__tests__/client.test.ts | 29 ++++++++++++++- packages/grida-canvas-sync/src/client.ts | 1 + 3 files changed, 66 insertions(+), 1 deletion(-) diff --git a/editor/grida-canvas/plugins/sync/document-sync.ts b/editor/grida-canvas/plugins/sync/document-sync.ts index 4a7d13ef7..bab33bb44 100644 --- a/editor/grida-canvas/plugins/sync/document-sync.ts +++ b/editor/grida-canvas/plugins/sync/document-sync.ts @@ -23,6 +23,7 @@ import { documentToState, stateToDocument } from "./serialize"; export class DocumentSyncAdapter { private 
_unsubscribeEditor: (() => void) | null = null; private _unsubscribeClient: (() => void) | null = null; + private _unsubscribeStatus: (() => void) | null = null; /** * Mutex to prevent feedback loops: @@ -41,6 +42,9 @@ export class DocumentSyncAdapter { */ lastSyncedState: DocumentState; + /** Whether we've already seeded the room. */ + private _seeded = false; + constructor( private readonly _editor: Editor, private readonly _client: SyncClient @@ -49,6 +53,7 @@ export class DocumentSyncAdapter { this._setupEditorToClient(); this._setupClientToEditor(); + this._setupRoomSeed(); } // ----------------------------------------------------------------------- @@ -117,6 +122,36 @@ export class DocumentSyncAdapter { }); } + // ----------------------------------------------------------------------- + // Room seeding (adapter-level policy) + // ----------------------------------------------------------------------- + + /** + * When the client connects to an empty room (serverClock === 0), + * push the editor's local document to seed the room. + * + * This is an editor-level policy decision, not a sync protocol concern. + * The SyncClient is agnostic — it just faithfully applies server state. + * The adapter knows "I have a local document and the room is empty." + */ + private _setupRoomSeed(): void { + this._unsubscribeStatus = this._client.on("statusChange", (status) => { + if (status !== "ready") return; + if (this._seeded) return; + this._seeded = true; + + // If the server clock is 0, the room is empty — seed it. 
+ if (this._client.serverClock === 0) { + const localState = documentToState(this._editor.doc.state.document); + const seedDiff = computeDiff({ nodes: {}, scenes: [] }, localState); + if (seedDiff) { + this._client.pushDiff(seedDiff); + this.lastSyncedState = localState; + } + } + }); + } + // ----------------------------------------------------------------------- // Cleanup // ----------------------------------------------------------------------- @@ -124,7 +159,9 @@ export class DocumentSyncAdapter { destroy(): void { this._unsubscribeEditor?.(); this._unsubscribeClient?.(); + this._unsubscribeStatus?.(); this._unsubscribeEditor = null; this._unsubscribeClient = null; + this._unsubscribeStatus = null; } } diff --git a/packages/grida-canvas-sync/__tests__/client.test.ts b/packages/grida-canvas-sync/__tests__/client.test.ts index 0fd8fcfa5..0eba41581 100644 --- a/packages/grida-canvas-sync/__tests__/client.test.ts +++ b/packages/grida-canvas-sync/__tests__/client.test.ts @@ -112,9 +112,11 @@ function connectClient( scenes: serverState.scenes, }); } else { + // clock: 1 means the server has been initialized (not empty). + // Use clock: 0 only for empty-room seed tests. 
transport.deliver({ type: "connect_ok", - clock: 0, + clock: 1, }); } } @@ -155,6 +157,31 @@ describe("SyncClient", () => { }); }); + it("preserves local state when server responds with no state and no diff", () => { + const initialState: DocumentState = { + nodes: { + n1: makeNode("n1", { width: 100 }), + scene: makeNode("scene", { name: "Main" }, "scene"), + }, + scenes: ["scene"], + }; + const { transport, client } = createClientAndTransport(initialState); + transport.simulateConnected(); + + // Server responds with empty room — no state, no diff + transport.deliver({ + type: "connect_ok", + clock: 0, + }); + + // Client should NOT overwrite its local state with empty + expect(client.state.nodes["n1"]).toEqual(makeNode("n1", { width: 100 })); + expect(client.state.nodes["scene"]).toBeDefined(); + expect(client.state.scenes).toEqual(["scene"]); + expect(client.serverClock).toBe(0); + // Note: SyncClient does NOT seed the room — that's the adapter's responsibility + }); + it("applies server state on connect_ok with full state", () => { const { transport, client } = createClientAndTransport(); const node = makeNode("n1", { width: 100 }); diff --git a/packages/grida-canvas-sync/src/client.ts b/packages/grida-canvas-sync/src/client.ts index bd1106c65..d545762f1 100644 --- a/packages/grida-canvas-sync/src/client.ts +++ b/packages/grida-canvas-sync/src/client.ts @@ -266,6 +266,7 @@ export class SyncClient { this._canonical = { ...this._canonical, scenes: msg.scenes }; } } + // else: no state, no diff — server has nothing new. Canonical stays as-is. // Any speculative diffs from the previous connection are stale — // they were never ack'd, so the server doesn't have them. 
From 5281460682547ef5455c1c0e05cbb3ed9f785941 Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 20:00:23 +0900 Subject: [PATCH 7/8] chore --- .../__tests__/client.test.ts | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/packages/grida-canvas-sync/__tests__/client.test.ts b/packages/grida-canvas-sync/__tests__/client.test.ts index 0eba41581..4f487cbc9 100644 --- a/packages/grida-canvas-sync/__tests__/client.test.ts +++ b/packages/grida-canvas-sync/__tests__/client.test.ts @@ -21,6 +21,9 @@ class MockTransport implements ISyncTransport { private _statusHandlers = new Set<(status: TransportStatus) => void>(); send(message: ClientMessage): void { + if (this.status !== "connected") { + throw new Error("MockTransport: not connected"); + } this.sent.push(message); } @@ -68,9 +71,10 @@ class MockTransport implements ISyncTransport { function makeNode( id: string, - props: Record = {} + props: Record = {}, + type: string = "rectangle" ): SerializedNode { - return { type: "rectangle", id, ...props } as SerializedNode; + return { type, id, ...props } as SerializedNode; } function emptyState(): DocumentState { @@ -453,21 +457,31 @@ describe("SyncClient", () => { const { transport, client } = createClientAndTransport(); connectClient(transport, client); - const handler = vi.fn(); - client.on("stateChange", handler); + const stateHandler = vi.fn(); + const errorHandler = vi.fn(); + client.on("stateChange", stateHandler); + client.on("error", errorHandler); + + // Record call count before destroy + const callsBefore = stateHandler.mock.calls.length; client.destroy(); + expect(client.status).toBe("disconnected"); - // Delivering a message after destroy should not call the handler + // Delivering messages after destroy should not call any handler transport.deliver({ type: "patch", serverClock: 1, diff: { nodes: { n1: { op: "remove" } } }, }); + transport.deliver({ + type: "error", + code: "TEST", + message: "should be ignored", + 
}); - // Handler might have been called during destroy's disconnect, - // but the point is the event system is torn down - expect(client.status).toBe("disconnected"); + expect(stateHandler.mock.calls.length).toBe(callsBefore); + expect(errorHandler).not.toHaveBeenCalled(); }); }); }); From d52384954e05cdbc48750698a6dd0749e6725339 Mon Sep 17 00:00:00 2001 From: Universe Date: Thu, 9 Apr 2026 20:01:32 +0900 Subject: [PATCH 8/8] add tldraw crdt research --- docs/wg/research/crdt/tldraw.md | 499 ++++++++++++++++++++++++++++++++ 1 file changed, 499 insertions(+) create mode 100644 docs/wg/research/crdt/tldraw.md diff --git a/docs/wg/research/crdt/tldraw.md b/docs/wg/research/crdt/tldraw.md new file mode 100644 index 000000000..ca44fb47c --- /dev/null +++ b/docs/wg/research/crdt/tldraw.md @@ -0,0 +1,499 @@ +# TLDraw Sync: Real-Time Collaboration Architecture + +> Research document covering tldraw's sync engine — architecture, data model, +> protocol, and conflict resolution strategy. +> +> **Source repo:** `tldraw/tldraw` (main branch, ~2024-2026) +> **Key packages:** `@tldraw/store`, `@tldraw/sync-core`, `@tldraw/sync` + +--- + +## 1. Architecture Overview + +TLDraw sync uses a **server-authoritative, push/pull/rebase** model — not a +true CRDT. The architecture is closer to a centralized version control system +(git-like optimistic rebase) than to a peer-to-peer CRDT mesh. 
+
+### Package Layering
+
+```
+@tldraw/store     — Generic record store with typed IDs, diffs, and history
+@tldraw/sync-core — Protocol types, TLSyncRoom (server), TLSyncClient, storage interfaces
+@tldraw/sync      — React hook (useSync) that wires TLSyncClient to a TLStore
+@tldraw/tlschema  — Schema definitions, migrations, record types for tldraw shapes
+```
+
+### Topology
+
+```
+┌───────────┐   WebSocket   ┌─────────────┐   WebSocket   ┌───────────┐
+│ Client A  │◄─────────────►│  TLSyncRoom │◄─────────────►│ Client B  │
+│ TLSyncCli-│               │  (server)   │               │ TLSyncCli-│
+│ ent       │               │             │               │ ent       │
+│           │               │  Storage    │               │           │
+│ TLStore   │               │  (InMemory/ │               │ TLStore   │
+│ (local)   │               │  SQLite)    │               │ (local)   │
+└───────────┘               └─────────────┘               └───────────┘
+```
+
+- **One `TLSyncRoom` per document** — this is enforced as a hard invariant.
+  On Cloudflare, Durable Objects guarantee single-instance-per-room.
+- **Server holds authoritative state** in a pluggable `TLSyncStorage` backend.
+- **Clients hold optimistic local state** and rebase against the server.
+
+---
+
+## 2. Data Model: Records and Store
+
+### `@tldraw/store` — The Record Store
+
+Everything in tldraw is a **record** — a flat JSON object identified by a typed
+ID string (e.g. `shape:abc123`, `page:page1`, `instance_presence:xyz`).
+
+```ts
+interface BaseRecord<TypeName extends string, ID extends string = string> {
+  id: ID
+  typeName: TypeName
+}
+
+// Example
+interface TLShape extends BaseRecord<'shape'> {
+  x: number
+  y: number
+  props: { ... }
+}
+```
+
+Key design decisions:
+
+- **Flat record map** — the store is `Map<ID, UnknownRecord>`, not a tree.
+  Parent-child relationships are expressed via fields on the records themselves.
+- **Typed IDs** — IDs carry their record type in the TypeScript type system
+  (`ID` is a branded string like `"shape:abc123"`).
+- **Scoped record types** — each record type has a `scope`:
+  - `'document'` — persisted and synced (shapes, pages, etc.)
+  - `'presence'` — ephemeral, not persisted (cursors, selections)
+- **History tracking** — the store emits `RecordsDiff` on every change,
+  capturing `added`, `updated` (with `[from, to]` pairs), and `removed`.
+- **`mergeRemoteChanges(fn)`** — applies changes from remote without
+  triggering the `'user'` source listener (prevents echo loops).
+
+### `RecordsDiff`
+
+The reversible diff format used internally:
+
+```ts
+interface RecordsDiff<R extends UnknownRecord> {
+  added: Record<IdOf<R>, R>;
+  updated: Record<IdOf<R>, [from: R, to: R]>;
+  removed: Record<IdOf<R>, R>;
+}
+```
+
+This is the **internal** diff — verbose but reversible. It's what the client
+uses for undo/redo and speculative rebase.
+
+---
+
+## 3. Diff & Patch: The Network Format
+
+### `NetworkDiff` — compact, non-reversible
+
+For wire transmission, tldraw converts `RecordsDiff` into a compact
+`NetworkDiff` that doesn't carry the "from" state:
+
+```ts
+interface NetworkDiff<R extends UnknownRecord> {
+  [id: string]: RecordOp<R>;
+}
+
+type RecordOp<R extends UnknownRecord> =
+  | ["put", R] // full record replacement or creation
+  | ["patch", ObjectDiff] // partial property update
+  | ["remove"]; // deletion
+```
+
+### `ObjectDiff` — property-level diffing
+
+```ts
+interface ObjectDiff {
+  [key: string]: ValueOp;
+}
+
+type ValueOp =
+  | ["put", value] // replace value
+  | ["delete"] // remove key
+  | ["patch", ObjectDiff] // nested object diff
+  | ["append", value[] | string, offset]; // append to array/string
+```
+
+Key behaviors in `diffRecord()`:
+
+- **Nested keys** `props` and `meta` are always diff'd recursively (not replaced wholesale).
+- **Arrays**: If same length, patches up to `len/5` elements; if longer, uses `append` op.
+- **Strings**: If `nextValue.startsWith(prevValue)`, emits an `append` op (protocol v8+).
+- **Everything else**: deep equality check → `put` if different.
+
+The `append` op is significant — it allows efficient incremental sync of text
+content and array growth without sending the full value.
+
+---
+
+## 4.
Sync Protocol + +### Protocol Version + +Current: **v8** (`TLSYNC_PROTOCOL_VERSION = 8`). Backward compat is handled +with shims (v5→v6→v7→v8 normalization in `handleConnectRequest`). + +### Message Types + +**Client → Server:** + +| Type | Purpose | +| --------- | -------------------------------------------------------- | +| `connect` | Handshake with schema, protocol version, lastServerClock | +| `push` | Send local changes (document diff + presence op) | +| `ping` | Keep-alive | + +**Server → Client:** + +| Type | Purpose | +| ----------------------- | ----------------------------------------------------------------- | +| `connect` | Handshake response with full/partial diff, schema, serverClock | +| `patch` | Broadcast of changes from other clients | +| `push_result` | Ack for a client's push: `commit`, `discard`, or `rebaseWithDiff` | +| `pong` | Keep-alive response | +| `data` | Batched array of `patch` and `push_result` messages | +| `custom` | Application-defined messages | +| `incompatibility_error` | Legacy error (deprecated, replaced by WS close codes) | + +### Connection Handshake + +``` +Client Server + | | + |--- connect { | + | protocolVersion, | + | schema, | + | lastServerClock, | + | connectRequestId | + | } ─────────────────────► | + | | (validate version, migrate schema) + | | (compute diff since lastServerClock) + | ◄──────────────────────── | + | connect { | + | hydrationType: | + | 'wipe_all' | | + | 'wipe_presence', | + | diff: NetworkDiff, | + | schema, | + | serverClock, | + | isReadonly | + | } | + | | +``` + +- `hydrationType: 'wipe_presence'` — client keeps its document state, server + sends only changes since `lastServerClock`. (Normal reconnect.) +- `hydrationType: 'wipe_all'` — client must discard all local state and + hydrate from scratch. (Happens when tombstone history is too old.) + +### Push/Ack Cycle + +``` +Client Server + | | + |--- push { | + | clientClock: 5, | + | diff: { ... 
}, | + | presence: [op, data] | + | } ─────────────────────► | + | | (validate, migrate up, apply to storage) + | | (broadcast to other clients) + | ◄──────────────────────── | + | push_result { | + | clientClock: 5, | + | serverClock: 42, | + | action: 'commit' | | + | 'discard' | | + | { rebaseWithDiff }| + | } | +``` + +Three possible outcomes: + +- **`commit`** — server accepted the diff exactly as sent. +- **`discard`** — server ignored the diff (no effective changes). +- **`rebaseWithDiff`** — server modified the records (validation, normalization) + and returns the actual diff the client should use instead. + +### Presence + +Presence records (cursors, selections) are: + +- Stored **in-memory only** on the server (`PresenceStore` — not in `TLSyncStorage`). +- Sent as part of `push` messages alongside document diffs. +- **Not persisted** — wiped on reconnect (hence `wipe_presence` hydration type). +- Scoped to a **session** — each session gets a unique `presenceId`. + +### Message Batching + +Server debounces data messages at **60 fps** (`DATA_MESSAGE_DEBOUNCE_INTERVAL = 1000/60`). +Multiple `patch` and `push_result` messages are batched into a single +`{ type: 'data', data: [...] }` frame. + +--- + +## 5. Conflict Resolution: Optimistic Rebase + +TLDraw uses **optimistic concurrency with server-authoritative rebase** — not +CRDTs, not OT, not last-write-wins. + +### Client-Side Mechanics (`TLSyncClient`) + +The client maintains: + +- `speculativeChanges: RecordsDiff` — accumulated unconfirmed local changes +- `pendingPushRequests: TLPushRequest[]` — in-flight pushes awaiting server ack +- `unsentChanges` — buffered changes not yet sent + +**Rebase algorithm** (runs at ~30fps when collaborative, ~1fps when solo): + +``` +1. Flush store history +2. Undo speculative changes (apply reverse diff) +3. 
Apply all incoming server events in order: + - For 'patch': apply the NetworkDiff + - For 'push_result': + - 'commit': apply the original push diff as confirmed + - 'discard': drop the push + - 'rebaseWithDiff': apply the server's corrected diff instead +4. Re-apply remaining pending pushes + unsent changes +5. The resulting delta becomes the new speculativeChanges +``` + +This is essentially the same pattern as `git rebase`: + +- Undo your local commits +- Fast-forward to the server's state +- Re-apply your commits on top + +### Server-Side Conflict Resolution (`TLSyncRoom`) + +The server is the **single source of truth**. When it receives a push: + +1. **Migrate up** — if client is on an older schema version, migrate the + records up to the current server schema. +2. **Validate** — run the record type's `props` validator. +3. **Apply** — write to storage via a transaction. +4. **Diff** — compute the actual diff between old and new state. +5. **Respond** — if the applied diff matches the push exactly → `commit`. + If it differs (server normalized data) → `rebaseWithDiff`. + If no changes resulted → `discard`. +6. **Broadcast** — send the actual diff to all other connected clients, + migrating down to each client's schema version if needed. + +### Schema Migration During Sync + +A critical feature: clients on different schema versions can collaborate. +The server: + +- Migrates incoming records **up** from the client's version. +- Migrates outgoing diffs **down** to each client's version. +- Each session tracks its `serializedSchema` and `requiresDownMigrations` flag. + +--- + +## 6. Storage Layer + +### Interface: `TLSyncStorage` + +```ts +interface TLSyncStorage { + transaction(callback, opts?): TLSyncStorageTransactionResult; + getClock(): number; + onChange(callback): () => void; + getSnapshot?(): RoomSnapshot; +} +``` + +Transactions are **synchronous** — no async allowed. This simplifies +consistency guarantees (no need for distributed locks). 
+
+### Clock System
+
+- **`documentClock`** — monotonically incrementing counter. Bumped on every
+  write transaction. Used for change tracking.
+- Each document record stores its `lastChangedClock`.
+- **Tombstones** — deleted record IDs mapped to their deletion clock.
+  Used to inform reconnecting clients of deletions.
+- **`tombstoneHistoryStartsAtClock`** — pruning boundary. If a client's
+  `lastServerClock` is older than this, they must do a full `wipe_all` resync.
+- Tombstones pruned when count > 5000 (with 1000 buffer).
+
+### Implementations
+
+1. **`InMemorySyncStorage`** — Default. Uses `AtomMap` (reactive maps from
+   `@tldraw/state`). Data lost on process restart. Supports `onChange` callback
+   for external persistence.
+
+2. **`SQLiteSyncStorage`** — Production-recommended. Persists to SQLite.
+   Supports Cloudflare Durable Objects (`DurableObjectSqliteSyncWrapper`)
+   and Node.js (`NodeSqliteWrapper` for `better-sqlite3` or `node:sqlite`).
+
+### `RoomSnapshot`
+
+The serialization format for persisting room state:
+
+```ts
+interface RoomSnapshot {
+  clock?: number;
+  documentClock?: number;
+  documents: Array<{ state: UnknownRecord; lastChangedClock: number }>;
+  tombstones?: Record<string, number>;
+  tombstoneHistoryStartsAtClock?: number;
+  schema?: SerializedSchema;
+}
+```
+
+---
+
+## 7. Server Wrapper: `TLSocketRoom`
+
+`TLSocketRoom` (in `TLSocketRoom.ts`) is the public-facing server class that
+wraps `TLSyncRoom` and handles:
+
+- WebSocket lifecycle
+- Session management
+- Storage configuration
+- Snapshot extraction for persistence
+
+`TLSyncRoom` (internal) handles:
+
+- Connection handshake
+- Push processing
+- Broadcast to connected sessions
+- Session pruning (idle timeout, awaiting removal)
+- Schema migration per-session
+
+### Session States
+
+```
+AwaitingConnectMessage → Connected → AwaitingRemoval → (removed)
+```
+
+- `AwaitingConnectMessage`: socket open but no handshake yet (10s timeout).
+- `Connected`: actively syncing.
+- `AwaitingRemoval`: socket closed, waiting for reconnect (10s grace period). + +--- + +## 8. Client Integration: `useSync` Hook + +The React hook creates and manages: + +1. A `ClientWebSocketAdapter` (reconnecting WebSocket wrapper) +2. A `TLStore` with schema, assets, and user configuration +3. A `TLSyncClient` that bridges the socket ↔ store +4. A presence derivation that reactively computes cursor/selection state + +Returns `RemoteTLStoreWithStatus`: + +- `{ status: 'loading' }` — connecting +- `{ status: 'synced-remote', connectionStatus, store }` — active +- `{ status: 'error', error }` — failed + +--- + +## 9. Pros and Cons + +### Pros + +| Aspect | Detail | +| ------------------------- | ------------------------------------------------------------------------------- | +| **Simplicity** | No CRDT library needed. Record-level granularity is natural for canvas objects. | +| **Server authority** | Single source of truth eliminates divergence. Easy to reason about consistency. | +| **Schema migrations** | Built-in version skew handling — clients on different versions can collaborate. | +| **Efficient diffs** | Property-level patching with append ops minimizes wire traffic. | +| **Predictable conflicts** | Rebase model is well-understood (git analogy). Server always wins. | +| **Low latency** | Optimistic local application + 60fps server batching = responsive UI. | +| **Flexible storage** | Pluggable backend (in-memory, SQLite, custom). | +| **Presence separation** | Ephemeral presence data kept out of persistent storage. | + +### Cons + +| Aspect | Detail | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Server required** | No peer-to-peer or offline-first without a server. Single point of failure per room. | +| **No true CRDT** | Concurrent edits to the same record field → last-write-wins via server. 
No automatic merge of, e.g., concurrent text edits within a single field. | +| **Record-level granularity** | Two users editing different properties of the same shape may conflict. The rebase resolves this, but the loser's change can be lost. | +| **Single-room-single-process** | Must guarantee exactly one `TLSyncRoom` per document globally. Requires Durable Objects or similar coordination. | +| **No partial sync** | Reconnect sends all changes since `lastServerClock` (or full state if too old). No sub-document subscriptions. | +| **Synchronous transactions** | Storage layer must be synchronous (no async DB calls in transactions). | +| **No offline persistence** | Client doesn't persist optimistic state. If browser tab closes during offline, speculative changes are lost. | +| **Tombstone growth** | Deleted records tracked as tombstones with clock values. Requires periodic pruning. | + +--- + +## 10. Key Constants + +| Constant | Value | Purpose | +| -------------------------------- | --------------- | ---------------------------------------------- | +| `TLSYNC_PROTOCOL_VERSION` | 8 | Wire protocol version | +| `DATA_MESSAGE_DEBOUNCE_INTERVAL` | ~16ms (1000/60) | Server message batching | +| `COLLABORATIVE_MODE_FPS` | 30 | Client sync rate with collaborators | +| `SOLO_MODE_FPS` | 1 | Client sync rate when alone | +| `PING_INTERVAL` | 5000ms | Client→server keepalive | +| `SESSION_IDLE_TIMEOUT` | (configurable) | Server prunes idle sessions | +| `SESSION_START_WAIT_TIME` | 10000ms | Time to wait for connect message | +| `SESSION_REMOVAL_WAIT_TIME` | 10000ms | Grace period before removing cancelled session | +| `MAX_TOMBSTONES` | 5000 | Trigger tombstone pruning | +| `TOMBSTONE_PRUNE_BUFFER_SIZE` | 1000 | Extra tombstones pruned beyond threshold | + +--- + +## 11. 
Source References + +| File | Description | +| ------------------------------------------------------ | -------------------------------------------------------------- | +| `packages/store/src/lib/Store.ts` | Core record store with history, diffs, and listeners | +| `packages/sync-core/src/lib/protocol.ts` | Protocol message type definitions | +| `packages/sync-core/src/lib/diff.ts` | `NetworkDiff`, `ObjectDiff`, `diffRecord`, `applyObjectDiff` | +| `packages/sync-core/src/lib/TLSyncRoom.ts` | Server-side room: session management, push handling, broadcast | +| `packages/sync-core/src/lib/TLSyncClient.ts` | Client-side sync: rebase, push queue, presence | +| `packages/sync-core/src/lib/TLSyncStorage.ts` | Storage interface, transaction types, snapshot loading | +| `packages/sync-core/src/lib/InMemorySyncStorage.ts` | In-memory storage with tombstone pruning | +| `packages/sync-core/src/lib/SQLiteSyncStorage.ts` | SQLite-backed persistent storage | +| `packages/sync-core/src/lib/TLSocketRoom.ts` | Public server wrapper class | +| `packages/sync/src/useSync.ts` | React hook for client-side integration | +| `packages/sync-core/src/lib/ClientWebSocketAdapter.ts` | Reconnecting WebSocket with chunking | +| `packages/sync-core/src/lib/RoomSession.ts` | Session state machine and timeouts | + +--- + +## 12. 
Relevance to Grida + +### What could be borrowed + +| TLDraw Concept | Grida Equivalent | Notes | +| --------------------------------------- | --------------------- | ---------------------------------------- | +| Record-based flat store | Grida node store | Natural fit for canvas objects | +| `NetworkDiff` with patch/put/remove ops | Wire diff format | Efficient for scene graph changes | +| Property-level `ObjectDiff` with append | Fine-grained sync | Good for text content in shapes | +| Server-authoritative rebase model | — | Simpler than CRDT for structured records | +| Schema migration during sync | — | Critical for versioned deployments | +| Presence as ephemeral separate scope | Cursor/selection sync | Keeps persistence layer clean | +| Tombstone-based deletion tracking | — | Simple clock-based change detection | +| 60fps server batching | — | Prevents message flood | + +### What would differ + +| Aspect | TLDraw | Grida Consideration | +| ------------------ | ----------------------- | ------------------------------------------------------- | +| Rendering | DOM/SVG (JS) | Skia/Rust (WASM) — store lives in different process | +| Data format | JSON records | FlatBuffers (.grida format) — need serialization bridge | +| Storage | JS in-memory / SQLite | Supabase (PostgreSQL) — async, not synchronous | +| Offline | None | Grida may want offline-first with local persistence | +| Scale | Per-room single process | May need multi-process for large documents | +| Text collaboration | Append ops on strings | May need richer text CRDT (e.g., Yjs for rich text) |