diff --git a/.gitignore b/.gitignore
index 59c47924..b2dec615 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,6 +53,10 @@ screenshots/
# npm lock file (using bun.lock)
package-lock.json
+# Build info
+*.tsbuildinfo
+next-env.d.ts
+
# Examples framework artifacts
examples/*/node_modules/
examples/plugins/*/node_modules/
diff --git a/bun.lock b/bun.lock
index 5cfb497b..b766c7db 100644
--- a/bun.lock
+++ b/bun.lock
@@ -38,14 +38,14 @@
},
"packages/agent-use": {
"name": "@eigenpal/docx-editor-agents",
- "version": "0.0.1",
- "peerDependencies": {
- "@eigenpal/docx-core": ">=0.0.1",
+ "version": "0.0.28",
+ "dependencies": {
+ "@eigenpal/docx-core": "workspace:*",
},
},
"packages/core": {
"name": "@eigenpal/docx-core",
- "version": "0.0.1",
+ "version": "0.0.28",
"bin": {
"docx-editor-mcp": "./dist/mcp-cli.js",
},
@@ -66,7 +66,7 @@
},
"packages/react": {
"name": "@eigenpal/docx-js-editor",
- "version": "0.0.27",
+ "version": "0.0.28",
"dependencies": {
"@radix-ui/react-select": "^2.2.6",
"clsx": "^2.1.0",
diff --git a/examples/agent-chat-demo/.env.example b/examples/agent-chat-demo/.env.example
new file mode 100644
index 00000000..ad2e4794
--- /dev/null
+++ b/examples/agent-chat-demo/.env.example
@@ -0,0 +1,5 @@
+# Get your API key at https://platform.openai.com/api-keys
+OPENAI_API_KEY=sk-...
+
+# Optional: override the model (default: gpt-4o)
+# OPENAI_MODEL=gpt-4o
diff --git a/examples/agent-chat-demo/app/api/chat/route.ts b/examples/agent-chat-demo/app/api/chat/route.ts
new file mode 100644
index 00000000..2db6d804
--- /dev/null
+++ b/examples/agent-chat-demo/app/api/chat/route.ts
@@ -0,0 +1,62 @@
+/**
+ * Chat API route — thin proxy to OpenAI.
+ *
+ * Does NOT touch the document. Tool definitions are passed to OpenAI,
+ * but tool execution happens on the client via the EditorBridge.
+ *
+ * Flow:
+ * 1. Client sends { messages, tools } to this route
+ * 2. Route calls OpenAI with the tools
+ * 3. If OpenAI returns tool_calls, route returns them to the client
+ * 4. Client executes tools via EditorBridge, sends results back
+ * 5. Repeat until OpenAI returns text
+ */
+
+import { NextRequest, NextResponse } from 'next/server';
+import OpenAI from 'openai';
+import type {
+ ChatCompletionMessageParam,
+ ChatCompletionTool,
+} from 'openai/resources/chat/completions';
+
+function getClient() {
+ return new OpenAI();
+}
+const model = process.env.OPENAI_MODEL || 'gpt-4o';
+
+export async function POST(request: NextRequest) {
+ try {
+ const body = await request.json();
+ const { messages, tools } = body as {
+ messages: ChatCompletionMessageParam[];
+ tools: ChatCompletionTool[];
+ };
+
+ if (!messages || messages.length === 0) {
+ return NextResponse.json({ error: 'No messages provided' }, { status: 400 });
+ }
+
+ const openai = getClient();
+ const response = await openai.chat.completions.create({
+ model,
+ messages,
+ tools: tools && tools.length > 0 ? tools : undefined,
+ });
+
+ const choice = response.choices[0];
+ if (!choice) {
+ return NextResponse.json({ error: 'Empty response from AI' }, { status: 502 });
+ }
+
+ return NextResponse.json({
+ message: choice.message,
+ finishReason: choice.finish_reason,
+ });
+ } catch (err) {
+ console.error('Chat API error:', err);
+ return NextResponse.json(
+ { error: err instanceof Error ? err.message : 'Internal error' },
+ { status: 500 }
+ );
+ }
+}
diff --git a/examples/agent-chat-demo/app/globals.css b/examples/agent-chat-demo/app/globals.css
new file mode 100644
index 00000000..83a2032b
--- /dev/null
+++ b/examples/agent-chat-demo/app/globals.css
@@ -0,0 +1,3 @@
+/* Import editor styles (CSS variables, toolbar layout, etc.)
+ In standalone usage: @import '@eigenpal/docx-js-editor/styles.css'; */
+@import '../../../packages/react/src/styles/editor.css';
diff --git a/examples/agent-chat-demo/app/layout.tsx b/examples/agent-chat-demo/app/layout.tsx
new file mode 100644
index 00000000..babb8e1f
--- /dev/null
+++ b/examples/agent-chat-demo/app/layout.tsx
@@ -0,0 +1,17 @@
+import type { Metadata } from 'next';
+import './globals.css';
+
+export const metadata: Metadata = {
+ title: 'Chat with your Doc',
+ description: 'Upload a DOCX and chat with AI — it can add comments and suggest changes live',
+};
+
+export default function RootLayout({ children }: { children: React.ReactNode }) {
+ return (
+    <html lang="en">
+      <body>
+        {children}
+      </body>
+    </html>
+ );
+}
diff --git a/examples/agent-chat-demo/app/page.tsx b/examples/agent-chat-demo/app/page.tsx
new file mode 100644
index 00000000..fb2065c6
--- /dev/null
+++ b/examples/agent-chat-demo/app/page.tsx
@@ -0,0 +1,649 @@
+'use client';
+
+import { useState, useRef, useCallback, useEffect } from 'react';
+import { DocxEditor, type DocxEditorRef } from '@eigenpal/docx-js-editor';
+import { useAgentChat, type EditorRefLike } from '@eigenpal/docx-editor-agents/bridge';
+
+// ── Types ───────────────────────────────────────────────────────────────────
+
+interface ChatMessage {
+ id: string;
+ role: 'user' | 'assistant';
+ content: string;
+ toolCalls?: ToolCallLog[];
+}
+
+interface ToolCallLog {
+ name: string;
+  input: Record<string, unknown>;
+ result: string;
+}
+
+// Full OpenAI message for multi-turn context (keeps tool_calls + tool results)
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+type OpenAIMessage = any;
+
+// ── Helpers ─────────────────────────────────────────────────────────────────
+
+const TOOL_LABELS: Record<string, string> = {
+ read_document: 'Read document',
+ read_comments: 'Read comments',
+ read_changes: 'Read tracked changes',
+ add_comment: 'Add comment',
+ suggest_replacement: 'Suggest change',
+ scroll_to: 'Scroll to',
+};
+
+const SYSTEM_PROMPT = `You are a helpful document assistant. The user has a DOCX document open and is chatting with you about it.
+
+You have tools to:
+- READ the document content (always do this first if you haven't seen the document yet)
+- ADD COMMENTS to specific paragraphs
+- SUGGEST REPLACEMENTS as tracked changes the user can accept/reject
+- SCROLL to specific paragraphs
+
+Guidelines:
+- Always read the document before making changes
+- When adding comments or suggesting changes, reference the paragraph index [N] from read_document
+- Keep comments concise and actionable
+- For replacements, use a short search phrase (3-8 words) that uniquely identifies the text
+- You can make multiple tool calls in a single turn
+- After making changes, briefly tell the user what you did`;
+
+// ── Main Component ──────────────────────────────────────────────────────────
+
+export default function Home() {
+  const [documentBuffer, setDocumentBuffer] = useState<ArrayBuffer | null>(null);
+ const [documentName, setDocumentName] = useState('');
+  const [messages, setMessages] = useState<ChatMessage[]>([]);
+ const [input, setInput] = useState('');
+ const [isLoading, setIsLoading] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+ const [dragOver, setDragOver] = useState(false);
+  const [expandedTools, setExpandedTools] = useState<Set<string>>(new Set());
+
+  const editorRef = useRef<DocxEditorRef>(null);
+  const fileInputRef = useRef<HTMLInputElement>(null);
+  const chatEndRef = useRef<HTMLDivElement>(null);
+  const openaiHistoryRef = useRef<OpenAIMessage[]>([]);
+ const msgIdRef = useRef(0);
+ const nextId = () => `msg-${++msgIdRef.current}`;
+
+ // Hook: wires agent tools to the live editor
+ const { executeToolCall, toolSchemas } = useAgentChat({
+    editorRef: editorRef as React.RefObject<EditorRefLike>,
+ author: 'Assistant',
+ });
+
+ // Auto-scroll chat
+ useEffect(() => {
+ chatEndRef.current?.scrollIntoView({ behavior: 'smooth' });
+ }, [messages, isLoading]);
+
+ // ── File handling ─────────────────────────────────────────────────────────
+
+ const handleFile = useCallback((f: File) => {
+ if (!f.name.endsWith('.docx')) {
+ setError('Please upload a .docx file');
+ return;
+ }
+ setError(null);
+ setDocumentName(f.name);
+ f.arrayBuffer().then((buf) => {
+ setDocumentBuffer(buf);
+ setMessages([]);
+ openaiHistoryRef.current = [];
+ });
+ }, []);
+
+ const handleDrop = useCallback(
+ (e: React.DragEvent) => {
+ e.preventDefault();
+ setDragOver(false);
+ const f = e.dataTransfer.files[0];
+ if (f) handleFile(f);
+ },
+ [handleFile]
+ );
+
+ // ── Chat with client-side tool execution ──────────────────────────────────
+
+ const sendMessage = async () => {
+ const text = input.trim();
+ if (!text || !editorRef.current || isLoading) return;
+
+ const userMsg: ChatMessage = { id: nextId(), role: 'user', content: text };
+ setMessages((prev) => [...prev, userMsg]);
+ setInput('');
+ setIsLoading(true);
+ setError(null);
+
+ try {
+ openaiHistoryRef.current.push({ role: 'user', content: text });
+ const allToolCalls: ToolCallLog[] = [];
+
+ // Tool-use loop — call API, execute tools locally, repeat
+ const MAX_ITERATIONS = 10;
+ for (let i = 0; i < MAX_ITERATIONS; i++) {
+ const response = await fetch('/api/chat', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ messages: [{ role: 'system', content: SYSTEM_PROMPT }, ...openaiHistoryRef.current],
+ tools: toolSchemas,
+ }),
+ });
+
+ if (!response.ok) {
+ const err = await response.json();
+ throw new Error(err.error || 'Request failed');
+ }
+
+ const data = await response.json();
+ const msg = data.message;
+
+ // No tool calls — we're done, show the text response
+ if (!msg.tool_calls || msg.tool_calls.length === 0) {
+ openaiHistoryRef.current.push({ role: 'assistant', content: msg.content || '' });
+ const assistantMsg: ChatMessage = {
+ id: nextId(),
+ role: 'assistant',
+ content: msg.content || '',
+ toolCalls: allToolCalls.length > 0 ? allToolCalls : undefined,
+ };
+ setMessages((prev) => [...prev, assistantMsg]);
+ break;
+ }
+
+ // Execute tool calls on the client via EditorBridge
+ openaiHistoryRef.current.push(msg);
+
+ for (const toolCall of msg.tool_calls) {
+        let args: Record<string, unknown>;
+ try {
+ args = JSON.parse(toolCall.function.arguments);
+ } catch {
+ args = {};
+ }
+ const result = executeToolCall(toolCall.function.name, args);
+
+ const resultStr =
+ typeof result.data === 'string'
+ ? result.data
+ : result.error || JSON.stringify(result.data);
+
+ allToolCalls.push({
+ name: toolCall.function.name,
+            input: args as Record<string, unknown>,
+ result: resultStr,
+ });
+
+ // Append tool result to persistent history
+ openaiHistoryRef.current.push({
+ role: 'tool',
+ tool_call_id: toolCall.id,
+ content: resultStr,
+ });
+ }
+ }
+ } catch (err) {
+ setError(err instanceof Error ? err.message : 'Something went wrong');
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ const handleKeyDown = (e: React.KeyboardEvent) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ sendMessage();
+ }
+ };
+
+ const toggleToolExpand = (id: string) => {
+ setExpandedTools((prev) => {
+ const next = new Set(prev);
+ if (next.has(id)) next.delete(id);
+ else next.add(id);
+ return next;
+ });
+ };
+
+ // ── Upload screen ─────────────────────────────────────────────────────────
+
+ if (!documentBuffer) {
+ return (
+
+
+
💬
+
Chat with your Doc
+
+ Upload a DOCX file and have a conversation with AI about it. The assistant can read your
+ document, add comments, and suggest changes — all live in the editor, no reloads.
+
+ );
+}
+
+// ── Animations ──────────────────────────────────────────────────────────────
+
+const animationCSS = `
+@keyframes dotPulse {
+ 0%, 80%, 100% { opacity: 0.3; transform: scale(0.8); }
+ 40% { opacity: 1; transform: scale(1); }
+}
+`;
+
+// ── Styles ──────────────────────────────────────────────────────────────────
+
+const styles: Record<string, React.CSSProperties> = {
+ fullScreen: {
+ minHeight: '100vh',
+ display: 'flex',
+ alignItems: 'center',
+ justifyContent: 'center',
+ background: 'linear-gradient(135deg, #0f172a 0%, #1e293b 50%, #334155 100%)',
+ padding: 20,
+ },
+ uploadCard: {
+ background: '#fff',
+ borderRadius: 20,
+ padding: '48px 40px',
+ maxWidth: 500,
+ width: '100%',
+ textAlign: 'center' as const,
+ boxShadow: '0 25px 50px -12px rgba(0,0,0,0.25)',
+ },
+ title: { fontSize: 32, fontWeight: 900, margin: '0 0 8px', color: '#0f172a' },
+ subtitle: { fontSize: 15, color: '#64748b', margin: '0 0 32px', lineHeight: 1.6 },
+ dropZone: {
+ border: '2px dashed #cbd5e1',
+ borderRadius: 14,
+ padding: '40px 20px',
+ cursor: 'pointer',
+ transition: 'all 0.2s',
+ marginBottom: 24,
+ },
+ dropZoneActive: { borderColor: '#3b82f6', background: '#eff6ff' },
+ dropText: { fontSize: 16, fontWeight: 600, color: '#334155' },
+ dropHint: { fontSize: 13, color: '#94a3b8', marginTop: 4 },
+ errorBox: {
+ background: '#fef2f2',
+ color: '#dc2626',
+ padding: '12px 16px',
+ borderRadius: 8,
+ fontSize: 14,
+ marginBottom: 20,
+ },
+ footer: { fontSize: 13, color: '#94a3b8' },
+ link: { color: '#3b82f6', textDecoration: 'none' },
+ layout: {
+ display: 'flex',
+ flexDirection: 'column' as const,
+ height: '100vh',
+ overflow: 'hidden',
+ background: '#f8fafc',
+ },
+ header: {
+ display: 'flex',
+ alignItems: 'center',
+ justifyContent: 'space-between',
+ padding: '10px 20px',
+ background: '#fff',
+ borderBottom: '1px solid #e2e8f0',
+ flexShrink: 0,
+ },
+ headerLeft: { display: 'flex', alignItems: 'center', gap: 8 },
+ headerTitle: { fontSize: 15, fontWeight: 700, color: '#0f172a' },
+ newDocBtn: {
+ padding: '6px 14px',
+ fontSize: 13,
+ fontWeight: 600,
+ color: '#334155',
+ background: '#f1f5f9',
+ border: '1px solid #e2e8f0',
+ borderRadius: 8,
+ cursor: 'pointer',
+ },
+ main: { flex: 1, display: 'flex', overflow: 'hidden' },
+ editorPane: { flex: 1, overflow: 'hidden', borderRight: '1px solid #e2e8f0' },
+ chatPane: {
+ width: 400,
+ flexShrink: 0,
+ display: 'flex',
+ flexDirection: 'column' as const,
+ background: '#fff',
+ },
+ messageList: { flex: 1, overflow: 'auto', padding: '16px 16px 8px' },
+ emptyChat: {
+ textAlign: 'center' as const,
+ color: '#64748b',
+ marginTop: 60,
+ fontSize: 15,
+ },
+ messageWrap: { marginBottom: 12 },
+ userBubble: {
+ background: '#3b82f6',
+ color: '#fff',
+ padding: '10px 14px',
+ borderRadius: '16px 16px 4px 16px',
+ fontSize: 14,
+ lineHeight: 1.5,
+ marginLeft: 40,
+ },
+ assistantBubble: {
+ background: '#f1f5f9',
+ color: '#1e293b',
+ padding: '10px 14px',
+ borderRadius: '16px 16px 16px 4px',
+ fontSize: 14,
+ lineHeight: 1.5,
+ marginRight: 40,
+ },
+ messageText: { whiteSpace: 'pre-wrap' as const, wordBreak: 'break-word' as const },
+ errorBubble: {
+ background: '#fef2f2',
+ color: '#dc2626',
+ padding: '10px 14px',
+ borderRadius: 12,
+ fontSize: 13,
+ marginRight: 40,
+ },
+ toolCallsWrap: {
+ marginTop: 6,
+ marginRight: 40,
+ display: 'flex',
+ flexDirection: 'column' as const,
+ gap: 4,
+ },
+ toolCallCard: {
+ border: '1px solid #e2e8f0',
+ borderRadius: 8,
+ overflow: 'hidden',
+ fontSize: 12,
+ },
+ toolCallHeader: {
+ display: 'flex',
+ alignItems: 'center',
+ gap: 6,
+ padding: '6px 10px',
+ background: '#f8fafc',
+ cursor: 'pointer',
+ userSelect: 'none' as const,
+ },
+ toolCallIcon: { fontSize: 12 },
+ toolCallName: { fontWeight: 600, color: '#334155' },
+ toolCallSummary: {
+ color: '#64748b',
+ flex: 1,
+ overflow: 'hidden',
+ textOverflow: 'ellipsis' as const,
+ whiteSpace: 'nowrap' as const,
+ },
+ toolCallChevron: { fontSize: 10, color: '#94a3b8' },
+ toolCallBody: { padding: '8px 10px', borderTop: '1px solid #e2e8f0' },
+ toolCallSection: { marginBottom: 6 },
+ toolCallPre: {
+ margin: '4px 0 0',
+ fontSize: 11,
+ background: '#f1f5f9',
+ padding: 8,
+ borderRadius: 6,
+ overflow: 'auto',
+ maxHeight: 200,
+ whiteSpace: 'pre-wrap' as const,
+ wordBreak: 'break-word' as const,
+ },
+ loadingDots: { display: 'flex', gap: 4, padding: '4px 0' },
+ dot: {
+ width: 8,
+ height: 8,
+ borderRadius: '50%',
+ background: '#94a3b8',
+ animation: 'dotPulse 1.4s ease-in-out infinite',
+ },
+ inputWrap: {
+ display: 'flex',
+ alignItems: 'flex-end',
+ gap: 8,
+ padding: '12px 16px',
+ borderTop: '1px solid #e2e8f0',
+ background: '#fff',
+ },
+ input: {
+ flex: 1,
+ padding: '10px 14px',
+ fontSize: 14,
+ border: '1px solid #e2e8f0',
+ borderRadius: 12,
+ outline: 'none',
+ resize: 'none' as const,
+ fontFamily: 'inherit',
+ lineHeight: 1.5,
+ maxHeight: 120,
+ },
+ sendBtn: {
+ width: 36,
+ height: 36,
+ borderRadius: '50%',
+ border: 'none',
+ background: '#3b82f6',
+ color: '#fff',
+ fontSize: 14,
+ cursor: 'pointer',
+ display: 'flex',
+ alignItems: 'center',
+ justifyContent: 'center',
+ flexShrink: 0,
+ },
+ sendBtnDisabled: { opacity: 0.3, cursor: 'not-allowed' },
+};
diff --git a/examples/agent-chat-demo/next.config.ts b/examples/agent-chat-demo/next.config.ts
new file mode 100644
index 00000000..d537c9c0
--- /dev/null
+++ b/examples/agent-chat-demo/next.config.ts
@@ -0,0 +1,7 @@
+import type { NextConfig } from 'next';
+
+const nextConfig: NextConfig = {
+ transpilePackages: ['@eigenpal/docx-js-editor', '@eigenpal/docx-core'],
+};
+
+export default nextConfig;
diff --git a/examples/agent-chat-demo/package.json b/examples/agent-chat-demo/package.json
new file mode 100644
index 00000000..a6afd473
--- /dev/null
+++ b/examples/agent-chat-demo/package.json
@@ -0,0 +1,24 @@
+{
+ "name": "agent-chat-demo",
+ "private": true,
+ "scripts": {
+ "dev": "next dev --port 3002",
+ "build": "next build",
+ "start": "next start"
+ },
+ "dependencies": {
+ "@eigenpal/docx-core": "workspace:*",
+ "@eigenpal/docx-editor-agents": "workspace:*",
+ "@eigenpal/docx-js-editor": "workspace:*",
+ "next": "^15.3.3",
+ "openai": "^4.78.0",
+ "react": "^19.0.0",
+ "react-dom": "^19.0.0"
+ },
+ "devDependencies": {
+ "@types/node": "^22.0.0",
+ "@types/react": "^19.0.0",
+ "@types/react-dom": "^19.0.0",
+ "typescript": "^5.3.3"
+ }
+}
diff --git a/examples/agent-chat-demo/postcss.config.js b/examples/agent-chat-demo/postcss.config.js
new file mode 100644
index 00000000..9361eff3
--- /dev/null
+++ b/examples/agent-chat-demo/postcss.config.js
@@ -0,0 +1,3 @@
+module.exports = {
+ plugins: {},
+};
diff --git a/examples/agent-chat-demo/tsconfig.json b/examples/agent-chat-demo/tsconfig.json
new file mode 100644
index 00000000..253bbef4
--- /dev/null
+++ b/examples/agent-chat-demo/tsconfig.json
@@ -0,0 +1,27 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./*"]
+ }
+ },
+ "include": ["**/*.ts", "**/*.tsx", "next-env.d.ts", ".next/types/**/*.ts"],
+ "exclude": ["node_modules"]
+}
diff --git a/packages/agent-use/src/__tests__/bridge.test.ts b/packages/agent-use/src/__tests__/bridge.test.ts
new file mode 100644
index 00000000..9111a8fd
--- /dev/null
+++ b/packages/agent-use/src/__tests__/bridge.test.ts
@@ -0,0 +1,234 @@
+import { describe, test, expect } from 'bun:test';
+import type {
+ Paragraph,
+ Run,
+ Table,
+ Document,
+ DocumentBody,
+ ParagraphContent,
+} from '@eigenpal/docx-core/headless';
+import { createEditorBridge, type EditorRefLike } from '../bridge';
+
+// ============================================================================
+// HELPERS
+// ============================================================================
+
+function makeRun(text: string): Run {
+ return { type: 'run', content: [{ type: 'text', text }] } as Run;
+}
+
+function makeParagraph(text: string): Paragraph {
+ return {
+ type: 'paragraph',
+ content: [makeRun(text)] as ParagraphContent[],
+ formatting: {},
+ } as Paragraph;
+}
+
+function makeTable(cells: string[][]): Table {
+ return {
+ type: 'table',
+ rows: cells.map((row) => ({
+ cells: row.map((text) => ({
+ content: [makeParagraph(text)],
+ })),
+ })),
+ } as unknown as Table;
+}
+
+function makeDoc(content: (Paragraph | Table)[]): Document {
+ return {
+ package: {
+ document: { content } as DocumentBody,
+ },
+ } as Document;
+}
+
+function makeMockRef(content: (Paragraph | Table)[]): EditorRefLike {
+ const doc = makeDoc(content);
+ const addedComments: Array<{
+ id: number;
+ author: string;
+ date?: string;
+ parentId?: number;
+ content: unknown[];
+ done?: boolean;
+ }> = [];
+ let replacementCalled = false;
+ let scrolledTo: number | undefined;
+
+ return {
+ getDocument: () => doc,
+ getEditorRef: () => ({ getDocument: () => doc }),
+ addComment: (opts) => {
+ const id = Date.now();
+ addedComments.push({
+ id,
+ author: opts.author,
+ content: [{ content: [{ content: [{ text: opts.text }] }] }],
+ });
+ return id;
+ },
+ replyToComment: (commentId, text, author) => {
+ const id = Date.now() + 1;
+ addedComments.push({
+ id,
+ author,
+ parentId: commentId,
+ content: [{ content: [{ content: [{ text }] }] }],
+ });
+ return id;
+ },
+ resolveComment: () => {},
+ proposeReplacement: () => {
+ replacementCalled = true;
+ return true;
+ },
+ scrollToIndex: (idx) => {
+ scrolledTo = idx;
+ },
+ getComments: () => addedComments,
+ // Expose internal state for assertions
+ get _replacementCalled() {
+ return replacementCalled;
+ },
+ get _scrolledTo() {
+ return scrolledTo;
+ },
+ } as EditorRefLike & { _replacementCalled: boolean; _scrolledTo: number | undefined };
+}
+
+// ============================================================================
+// createEditorBridge
+// ============================================================================
+
+describe('createEditorBridge', () => {
+ test('getContentAsText returns indexed text', () => {
+ const ref = makeMockRef([makeParagraph('Hello'), makeParagraph('World')]);
+ const bridge = createEditorBridge(ref, 'TestAgent');
+
+ const text = bridge.getContentAsText();
+ expect(text).toContain('[0]');
+ expect(text).toContain('Hello');
+ expect(text).toContain('[1]');
+ expect(text).toContain('World');
+ });
+
+ test('getContent returns structured blocks', () => {
+ const ref = makeMockRef([makeParagraph('First'), makeParagraph('Second')]);
+ const bridge = createEditorBridge(ref);
+
+ const blocks = bridge.getContent();
+ expect(blocks).toHaveLength(2);
+ expect(blocks[0].type).toBe('paragraph');
+ expect(blocks[0].index).toBe(0);
+ });
+
+ test('getContent handles tables', () => {
+ const ref = makeMockRef([
+ makeParagraph('Before'),
+ makeTable([['A', 'B']]),
+ makeParagraph('After'),
+ ]);
+ const bridge = createEditorBridge(ref);
+ const blocks = bridge.getContent();
+
+ const types = blocks.map((b) => b.type);
+ expect(types).toContain('paragraph');
+ expect(types).toContain('table');
+ });
+
+ test('addComment calls ref and returns id', () => {
+ const ref = makeMockRef([makeParagraph('Hello')]);
+ const bridge = createEditorBridge(ref, 'Agent');
+
+ const id = bridge.addComment({ paragraphIndex: 0, text: 'Nice paragraph' });
+ expect(id).not.toBeNull();
+ expect(typeof id).toBe('number');
+ });
+
+ test('addComment uses default author', () => {
+ let capturedAuthor = '';
+ const ref = makeMockRef([makeParagraph('Hello')]);
+ const origAdd = ref.addComment.bind(ref);
+ ref.addComment = (opts) => {
+ capturedAuthor = opts.author;
+ return origAdd(opts);
+ };
+
+ const bridge = createEditorBridge(ref, 'Claude');
+ bridge.addComment({ paragraphIndex: 0, text: 'Test' });
+ expect(capturedAuthor).toBe('Claude');
+ });
+
+ test('addComment allows author override', () => {
+ let capturedAuthor = '';
+ const ref = makeMockRef([makeParagraph('Hello')]);
+ ref.addComment = (opts) => {
+ capturedAuthor = opts.author;
+ return 1;
+ };
+
+ const bridge = createEditorBridge(ref, 'DefaultAuthor');
+ bridge.addComment({ paragraphIndex: 0, text: 'Test', author: 'CustomAuthor' });
+ expect(capturedAuthor).toBe('CustomAuthor');
+ });
+
+ test('replace calls proposeReplacement on ref', () => {
+ const ref = makeMockRef([makeParagraph('Hello world')]) as EditorRefLike & {
+ _replacementCalled: boolean;
+ };
+ const bridge = createEditorBridge(ref, 'Agent');
+
+ const ok = bridge.replace({
+ paragraphIndex: 0,
+ search: 'Hello',
+ replaceWith: 'Hi',
+ });
+ expect(ok).toBe(true);
+ expect(ref._replacementCalled).toBe(true);
+ });
+
+ test('scrollTo calls scrollToIndex on ref', () => {
+ const ref = makeMockRef([makeParagraph('Hello')]) as EditorRefLike & {
+ _scrolledTo: number | undefined;
+ };
+ const bridge = createEditorBridge(ref);
+
+ bridge.scrollTo(5);
+ expect(ref._scrolledTo).toBe(5);
+ });
+
+ test('getContentAsText with range', () => {
+ const ref = makeMockRef([
+ makeParagraph('Para 0'),
+ makeParagraph('Para 1'),
+ makeParagraph('Para 2'),
+ ]);
+ const bridge = createEditorBridge(ref);
+
+ const text = bridge.getContentAsText({ fromIndex: 1, toIndex: 1 });
+ expect(text).toContain('Para 1');
+ expect(text).not.toContain('Para 0');
+ expect(text).not.toContain('Para 2');
+ });
+
+ test('returns empty data when ref has no document', () => {
+ const ref: EditorRefLike = {
+ getDocument: () => null,
+ getEditorRef: () => null,
+ addComment: () => null,
+ replyToComment: () => null,
+ resolveComment: () => {},
+ proposeReplacement: () => false,
+ scrollToIndex: () => {},
+ getComments: () => [],
+ };
+ const bridge = createEditorBridge(ref);
+
+ expect(bridge.getContentAsText()).toBe('');
+ expect(bridge.getContent()).toEqual([]);
+ expect(bridge.getComments()).toEqual([]);
+ expect(bridge.getChanges()).toEqual([]);
+ });
+});
diff --git a/packages/agent-use/src/__tests__/tools.test.ts b/packages/agent-use/src/__tests__/tools.test.ts
new file mode 100644
index 00000000..2a04a272
--- /dev/null
+++ b/packages/agent-use/src/__tests__/tools.test.ts
@@ -0,0 +1,289 @@
+import { describe, test, expect } from 'bun:test';
+import type { EditorBridge } from '../bridge';
+import { agentTools, executeToolCall, getToolSchemas } from '../tools';
+import type { ReviewComment, ReviewChange, ContentBlock } from '../types';
+
+// ============================================================================
+// MOCK BRIDGE
+// ============================================================================
+
+function makeBridge(overrides: Partial<EditorBridge> = {}): EditorBridge {
+ return {
+ getContentAsText: () => '[0] Hello world\n[1] Second paragraph',
+ getContent: () =>
+ [
+ { type: 'paragraph', index: 0, text: 'Hello world' },
+ { type: 'paragraph', index: 1, text: 'Second paragraph' },
+ ] as ContentBlock[],
+ getComments: () => [],
+ getChanges: () => [],
+ addComment: () => 42,
+ replyTo: () => 43,
+ resolveComment: () => {},
+ replace: () => true,
+ scrollTo: () => {},
+ ...overrides,
+ };
+}
+
+// ============================================================================
+// TOOL REGISTRY
+// ============================================================================
+
+describe('agentTools', () => {
+ test('has 6 built-in tools', () => {
+ expect(agentTools).toHaveLength(6);
+ });
+
+ test('all tools have name, description, inputSchema, handler', () => {
+ for (const tool of agentTools) {
+ expect(tool.name).toBeTruthy();
+ expect(tool.description).toBeTruthy();
+ expect(tool.inputSchema).toBeDefined();
+ expect(typeof tool.handler).toBe('function');
+ }
+ });
+
+ test('tool names are unique', () => {
+ const names = agentTools.map((t) => t.name);
+ expect(new Set(names).size).toBe(names.length);
+ });
+});
+
+// ============================================================================
+// getToolSchemas (OpenAI format)
+// ============================================================================
+
+describe('getToolSchemas', () => {
+ test('returns OpenAI function calling format', () => {
+ const schemas = getToolSchemas();
+ expect(schemas.length).toBe(6);
+
+ for (const schema of schemas) {
+ expect(schema.type).toBe('function');
+ expect(schema.function.name).toBeTruthy();
+ expect(schema.function.description).toBeTruthy();
+ expect(schema.function.parameters).toBeDefined();
+ }
+ });
+
+ test('includes read_document tool', () => {
+ const schemas = getToolSchemas();
+ const readDoc = schemas.find((s) => s.function.name === 'read_document');
+ expect(readDoc).toBeDefined();
+ expect(readDoc!.function.parameters).toHaveProperty('properties');
+ });
+});
+
+// ============================================================================
+// executeToolCall
+// ============================================================================
+
+describe('executeToolCall', () => {
+ test('returns error for unknown tool', () => {
+ const bridge = makeBridge();
+ const result = executeToolCall('nonexistent_tool', {}, bridge);
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('Unknown tool');
+ });
+
+ test('catches handler exceptions', () => {
+ const bridge = makeBridge({
+ getContentAsText: () => {
+ throw new Error('boom');
+ },
+ });
+ const result = executeToolCall('read_document', {}, bridge);
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('boom');
+ });
+});
+
+// ============================================================================
+// read_document
+// ============================================================================
+
+describe('read_document', () => {
+ test('returns document content as text', () => {
+ const bridge = makeBridge();
+ const result = executeToolCall('read_document', {}, bridge);
+ expect(result.success).toBe(true);
+ expect(result.data).toContain('[0] Hello world');
+ expect(result.data).toContain('[1] Second paragraph');
+ });
+
+ test('passes fromIndex and toIndex', () => {
+ let capturedFrom: number | undefined;
+ let capturedTo: number | undefined;
+ const bridge = makeBridge({
+ getContentAsText: (options) => {
+ capturedFrom = options?.fromIndex;
+ capturedTo = options?.toIndex;
+ return '[5] Some text';
+ },
+ });
+ executeToolCall('read_document', { fromIndex: 5, toIndex: 10 }, bridge);
+ expect(capturedFrom).toBe(5);
+ expect(capturedTo).toBe(10);
+ });
+});
+
+// ============================================================================
+// read_comments
+// ============================================================================
+
+describe('read_comments', () => {
+ test('returns "no comments" when empty', () => {
+ const bridge = makeBridge();
+ const result = executeToolCall('read_comments', {}, bridge);
+ expect(result.success).toBe(true);
+ expect(result.data).toContain('No comments');
+ });
+
+ test('formats comments with id, paragraph, author', () => {
+ const bridge = makeBridge({
+ getComments: () =>
+ [
+ {
+ id: 1,
+ author: 'Alice',
+ date: null,
+ text: 'Fix this',
+ anchoredText: 'hello',
+ paragraphIndex: 3,
+ replies: [],
+ done: false,
+ },
+ ] as ReviewComment[],
+ });
+ const result = executeToolCall('read_comments', {}, bridge);
+ expect(result.success).toBe(true);
+ expect(result.data as string).toContain('Comment #1');
+ expect(result.data as string).toContain('paragraph 3');
+ expect(result.data as string).toContain('Alice');
+ expect(result.data as string).toContain('Fix this');
+ });
+});
+
+// ============================================================================
+// read_changes
+// ============================================================================
+
+describe('read_changes', () => {
+ test('returns "no tracked changes" when empty', () => {
+ const bridge = makeBridge();
+ const result = executeToolCall('read_changes', {}, bridge);
+ expect(result.success).toBe(true);
+ expect(result.data).toContain('No tracked changes');
+ });
+
+ test('formats changes with id, type, author', () => {
+ const bridge = makeBridge({
+ getChanges: () =>
+ [
+ {
+ id: 5,
+ type: 'insertion',
+ author: 'Bob',
+ date: null,
+ text: 'new text',
+ context: '',
+ paragraphIndex: 2,
+ },
+ ] as ReviewChange[],
+ });
+ const result = executeToolCall('read_changes', {}, bridge);
+ expect(result.success).toBe(true);
+ expect(result.data as string).toContain('Change #5');
+ expect(result.data as string).toContain('insertion');
+ expect(result.data as string).toContain('Bob');
+ });
+});
+
+// ============================================================================
+// add_comment
+// ============================================================================
+
+describe('add_comment', () => {
+ test('adds comment and returns success with id', () => {
+ const bridge = makeBridge({ addComment: () => 42 });
+ const result = executeToolCall(
+ 'add_comment',
+ { paragraphIndex: 3, text: 'Needs work' },
+ bridge
+ );
+ expect(result.success).toBe(true);
+ expect(result.data as string).toContain('42');
+ expect(result.data as string).toContain('paragraph 3');
+ });
+
+ test('passes search parameter', () => {
+ let capturedSearch: string | undefined;
+ const bridge = makeBridge({
+ addComment: (opts) => {
+ capturedSearch = opts.search;
+ return 1;
+ },
+ });
+ executeToolCall(
+ 'add_comment',
+ { paragraphIndex: 0, text: 'Fix', search: 'hello world' },
+ bridge
+ );
+ expect(capturedSearch).toBe('hello world');
+ });
+
+ test('returns error when paragraph not found', () => {
+ const bridge = makeBridge({ addComment: () => null });
+ const result = executeToolCall('add_comment', { paragraphIndex: 999, text: 'Nope' }, bridge);
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('not found');
+ });
+});
+
+// ============================================================================
+// suggest_replacement
+// ============================================================================
+
+describe('suggest_replacement', () => {
+ test('creates tracked change and returns success', () => {
+ const bridge = makeBridge({ replace: () => true });
+ const result = executeToolCall(
+ 'suggest_replacement',
+ { paragraphIndex: 1, search: 'old text', replaceWith: 'new text' },
+ bridge
+ );
+ expect(result.success).toBe(true);
+ expect(result.data as string).toContain('old text');
+ expect(result.data as string).toContain('new text');
+ });
+
+ test('returns error when search text not found', () => {
+ const bridge = makeBridge({ replace: () => false });
+ const result = executeToolCall(
+ 'suggest_replacement',
+ { paragraphIndex: 0, search: 'nonexistent', replaceWith: 'replacement' },
+ bridge
+ );
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('not found');
+ });
+});
+
+// ============================================================================
+// scroll_to
+// ============================================================================
+
+describe('scroll_to', () => {
+ test('calls scrollTo and returns success', () => {
+ let scrolledTo: number | undefined;
+ const bridge = makeBridge({
+ scrollTo: (idx) => {
+ scrolledTo = idx;
+ },
+ });
+ const result = executeToolCall('scroll_to', { paragraphIndex: 7 }, bridge);
+ expect(result.success).toBe(true);
+ expect(scrolledTo).toBe(7);
+ });
+});
diff --git a/packages/agent-use/src/bridge.ts b/packages/agent-use/src/bridge.ts
index 63a60f44..0fb0854d 100644
--- a/packages/agent-use/src/bridge.ts
+++ b/packages/agent-use/src/bridge.ts
@@ -1,12 +1,234 @@
/**
- * Editor ref bridge — optional client-side integration.
+ * Editor Bridge — connects agent tools to a live DocxEditor instance.
*
* Separate entry point: import from '@eigenpal/docx-editor-agents/bridge'
- * This file may import React/ProseMirror — NOT included in the main headless bundle.
*
- * TODO: Implement in task 9.1
+ * @example
+ * ```ts
+ * import { createEditorBridge, useAgentChat } from '@eigenpal/docx-editor-agents/bridge';
+ *
+ * // Hook (React) — simplest way
+ * const { executeToolCall, toolSchemas } = useAgentChat({ editorRef, author: 'Assistant' });
+ *
+ * // Manual
+ * const bridge = createEditorBridge(editorRef, 'Assistant');
+ * bridge.addComment({ paragraphIndex: 3, text: 'Fix this.' });
+ * ```
+ */
+
+// Re-export hook and tools for convenience
+export { useAgentChat, type UseAgentChatOptions, type UseAgentChatReturn } from './useAgentChat';
+export { agentTools, executeToolCall, getToolSchemas } from './tools';
+export type { AgentToolDefinition, AgentToolResult } from './tools';
+
+import type {
+ ContentBlock,
+ GetContentOptions,
+ ReviewComment,
+ ReviewChange,
+ ChangeFilter,
+ CommentFilter,
+ AddCommentOptions,
+ ReplyOptions,
+ ProposeReplacementOptions,
+} from './types';
+import { getContent, formatContentForLLM } from './content';
+import { getChanges, getComments } from './discovery';
+
+// ── Types ───────────────────────────────────────────────────────────────────
+
+/**
+ * Minimal DocxEditorRef interface — only the methods the bridge needs.
+ * This avoids importing the full React package at type level.
+ */
+export interface EditorRefLike {
+ getDocument(): unknown | null;
+ getEditorRef(): { getDocument(): unknown | null } | null;
+ addComment(options: {
+ paragraphIndex: number;
+ text: string;
+ author: string;
+ search?: string;
+ }): number | null;
+ replyToComment(commentId: number, text: string, author: string): number | null;
+ resolveComment(commentId: number): void;
+ proposeReplacement(options: {
+ paragraphIndex: number;
+ search: string;
+ replaceWith: string;
+ author: string;
+ }): boolean;
+ scrollToIndex(paragraphIndex: number): void;
+ getComments(): Array<{
+ id: number;
+ author: string;
+ date?: string;
+ parentId?: number;
+ content: unknown[];
+ done?: boolean;
+ }>;
+}
+
+export interface EditorBridge {
+ /** Get document content as indexed text lines for LLM prompts. */
+ getContentAsText(options?: GetContentOptions): string;
+ /** Get document content as structured blocks. */
+ getContent(options?: GetContentOptions): ContentBlock[];
+ /** Get all comments in the document. */
+ getComments(filter?: CommentFilter): ReviewComment[];
+ /** Get all tracked changes in the document. */
+ getChanges(filter?: ChangeFilter): ReviewChange[];
+ /** Add a comment to a paragraph. Returns the comment ID or null on failure. */
+ addComment(options: AddCommentOptions): number | null;
+ /** Reply to an existing comment. Returns the reply ID or null. */
+ replyTo(commentId: number, options: ReplyOptions): number | null;
+ /** Resolve a comment (mark as done). */
+ resolveComment(commentId: number): void;
+ /** Replace text, creating a tracked change. Returns true on success. */
+ replace(options: ProposeReplacementOptions): boolean;
+ /** Scroll to a paragraph by index. */
+ scrollTo(paragraphIndex: number): void;
+}
+
+// ── Implementation ──────────────────────────────────────────────────────────
+
+/** Extract plain text from a Comment's content paragraphs. */
+function getCommentText(content: unknown[]): string {
+ if (!content || content.length === 0) return '';
+ // Comment content is Paragraph[] — each paragraph has runs with text
+ return content
+ .map((para) => {
+ const p = para as { content?: Array<{ content?: Array<{ text?: string }> }> };
+ if (!p?.content) return '';
+ return p.content.map((run) => run.content?.map((t) => t.text || '').join('') || '').join('');
+ })
+ .join('\n');
+}
+
+/**
+ * Get the DocumentBody from the editor ref, using the live PM state.
*/
+function getDocumentBody(
+ editorRef: EditorRefLike
+): import('@eigenpal/docx-core/headless').DocumentBody | null {
+ // Prefer the live PM-based document (reflects user edits)
+ const pagedRef = editorRef.getEditorRef();
+ if (pagedRef) {
+ const doc = pagedRef.getDocument() as import('@eigenpal/docx-core/headless').Document | null;
+ if (doc?.package?.document) return doc.package.document;
+ }
+ // Fallback to the initial document
+ const doc = editorRef.getDocument() as import('@eigenpal/docx-core/headless').Document | null;
+ return doc?.package?.document ?? null;
+}
+
+/**
+ * Create an EditorBridge from a DocxEditorRef.
+ *
+ * @param editorRef - A DocxEditorRef (or anything matching EditorRefLike)
+ * @param author - Default author name for comments and changes. (default: 'AI')
+ */
+export function createEditorBridge(editorRef: EditorRefLike, author = 'AI'): EditorBridge {
+ function resolveAuthor(a?: string): string {
+ return a ?? author;
+ }
+
+ return {
+ getContentAsText(options?: GetContentOptions): string {
+ const body = getDocumentBody(editorRef);
+ if (!body) return '';
+ return formatContentForLLM(getContent(body, options));
+ },
+
+ getContent(options?: GetContentOptions): ContentBlock[] {
+ const body = getDocumentBody(editorRef);
+ if (!body) return [];
+ return getContent(body, options);
+ },
+
+ getComments(filter?: CommentFilter): ReviewComment[] {
+ const body = getDocumentBody(editorRef);
+ if (!body) return [];
+
+ // Prefer doc-level comments (include anchor/paragraph info)
+ const docComments = getComments(body, filter);
+ if (docComments.length > 0) return docComments;
+
+ // Fallback: build from live editor state (for comments added via bridge)
+ const liveComments = editorRef.getComments();
+ if (liveComments.length === 0) return [];
+
+ // Pre-group replies by parentId (O(n) instead of O(n^2))
+ const repliesByParent = new Map<number, typeof liveComments>();
+ const topLevel: typeof liveComments = [];
+ for (const c of liveComments) {
+ if (c.parentId) {
+ const arr = repliesByParent.get(c.parentId);
+ if (arr) arr.push(c);
+ else repliesByParent.set(c.parentId, [c]);
+ } else {
+ topLevel.push(c);
+ }
+ }
+
+ const result: ReviewComment[] = [];
+ for (const c of topLevel) {
+ if (filter?.author && c.author !== filter.author) continue;
+ if (filter?.done !== undefined && (c.done ?? false) !== filter.done) continue;
+ const replies = repliesByParent.get(c.id) ?? [];
+ result.push({
+ id: c.id,
+ author: c.author,
+ date: c.date ?? null,
+ text: getCommentText(c.content),
+ anchoredText: '',
+ paragraphIndex: -1,
+ replies: replies.map((r) => ({
+ id: r.id,
+ author: r.author,
+ date: r.date ?? null,
+ text: getCommentText(r.content),
+ })),
+ done: c.done ?? false,
+ });
+ }
+ return result;
+ },
+
+ getChanges(filter?: ChangeFilter): ReviewChange[] {
+ const body = getDocumentBody(editorRef);
+ if (!body) return [];
+ return getChanges(body, filter);
+ },
+
+ addComment(options: AddCommentOptions): number | null {
+ return editorRef.addComment({
+ paragraphIndex: options.paragraphIndex,
+ text: options.text,
+ author: resolveAuthor(options.author),
+ search: options.search,
+ });
+ },
+
+ replyTo(commentId: number, options: ReplyOptions): number | null {
+ return editorRef.replyToComment(commentId, options.text, resolveAuthor(options.author));
+ },
+
+ resolveComment(commentId: number): void {
+ editorRef.resolveComment(commentId);
+ },
+
+ replace(options: ProposeReplacementOptions): boolean {
+ return editorRef.proposeReplacement({
+ paragraphIndex: options.paragraphIndex,
+ search: options.search,
+ replaceWith: options.replaceWith,
+ author: resolveAuthor(options.author),
+ });
+ },
-export function createReviewBridge(_editorRef: unknown): Record<string, unknown> {
- throw new Error('createReviewBridge is not yet implemented');
+ scrollTo(paragraphIndex: number): void {
+ editorRef.scrollToIndex(paragraphIndex);
+ },
+ };
}
diff --git a/packages/agent-use/src/index.ts b/packages/agent-use/src/index.ts
index c5d4b306..37396cf5 100644
--- a/packages/agent-use/src/index.ts
+++ b/packages/agent-use/src/index.ts
@@ -42,3 +42,7 @@ export type {
} from './types';
export { TextNotFoundError, ChangeNotFoundError, CommentNotFoundError } from './errors';
+
+// Tools — reusable tool definitions for AI agents (OpenAI function calling format)
+export { agentTools, executeToolCall, getToolSchemas } from './tools';
+export type { AgentToolDefinition, AgentToolResult } from './tools';
diff --git a/packages/agent-use/src/tools/index.ts b/packages/agent-use/src/tools/index.ts
new file mode 100644
index 00000000..28bf4853
--- /dev/null
+++ b/packages/agent-use/src/tools/index.ts
@@ -0,0 +1,204 @@
+/**
+ * Agent tool definitions and execution.
+ *
+ * Tools use OpenAI function calling format (the de facto standard).
+ * Works with OpenAI, Anthropic (via adapter), Vercel AI SDK, etc.
+ */
+
+export type { AgentToolDefinition, AgentToolResult } from './types';
+import type { AgentToolDefinition, AgentToolResult } from './types';
+import type { EditorBridge } from '../bridge';
+
+// ── Tool definitions ────────────────────────────────────────────────────────
+
+const readDocument: AgentToolDefinition<{ fromIndex?: number; toIndex?: number }> = {
+ name: 'read_document',
+ description:
+ 'Read the document content. Returns indexed lines like "[0] First paragraph". ' +
+ 'Use fromIndex/toIndex to read a specific range. Always read before commenting or suggesting changes.',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ fromIndex: { type: 'number', description: 'Start paragraph index (inclusive)' },
+ toIndex: { type: 'number', description: 'End paragraph index (inclusive)' },
+ },
+ },
+ handler: (input, bridge) => {
+ const text = bridge.getContentAsText({ fromIndex: input.fromIndex, toIndex: input.toIndex });
+ return { success: true, data: text };
+ },
+};
+
+const readComments: AgentToolDefinition = {
+ name: 'read_comments',
+ description: 'List all comments currently in the document.',
+ inputSchema: { type: 'object', properties: {} },
+ handler: (_input, bridge) => {
+ const comments = bridge.getComments();
+ if (comments.length === 0) return { success: true, data: 'No comments in the document.' };
+ const text = comments
+ .map(
+ (c) =>
+ `[Comment #${c.id} on paragraph ${c.paragraphIndex}] ${c.author}: "${c.text}"` +
+ (c.anchoredText ? ` (anchored to: "${c.anchoredText}")` : '') +
+ (c.replies.length > 0
+ ? '\n' + c.replies.map((r) => ` Reply by ${r.author}: "${r.text}"`).join('\n')
+ : '')
+ )
+ .join('\n');
+ return { success: true, data: text };
+ },
+};
+
+const readChanges: AgentToolDefinition = {
+ name: 'read_changes',
+ description: 'List all tracked changes (insertions/deletions) currently in the document.',
+ inputSchema: { type: 'object', properties: {} },
+ handler: (_input, bridge) => {
+ const changes = bridge.getChanges();
+ if (changes.length === 0) return { success: true, data: 'No tracked changes in the document.' };
+ const text = changes
+ .map(
+ (c) =>
+ `[Change #${c.id} in paragraph ${c.paragraphIndex}] ${c.type} by ${c.author}: "${c.text}"`
+ )
+ .join('\n');
+ return { success: true, data: text };
+ },
+};
+
+const addComment: AgentToolDefinition<{
+ paragraphIndex: number;
+ text: string;
+ search?: string;
+}> = {
+ name: 'add_comment',
+ description:
+ 'Add a comment to a specific paragraph. Optionally anchor it to a specific phrase using "search".',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ paragraphIndex: { type: 'number', description: 'Paragraph index to comment on' },
+ text: { type: 'string', description: 'Comment text' },
+ search: {
+ type: 'string',
+ description: 'Optional: anchor the comment to this specific phrase (3-8 words)',
+ },
+ },
+ required: ['paragraphIndex', 'text'],
+ },
+ handler: (input, bridge) => {
+ const id = bridge.addComment({
+ paragraphIndex: input.paragraphIndex,
+ text: input.text,
+ search: input.search,
+ });
+ if (id === null)
+ return {
+ success: false,
+ error: 'Failed to add comment — paragraph not found or search text not found.',
+ };
+ return {
+ success: true,
+ data: `Comment added (id: ${id}) on paragraph ${input.paragraphIndex}.`,
+ };
+ },
+};
+
+const suggestReplacement: AgentToolDefinition<{
+ paragraphIndex: number;
+ search: string;
+ replaceWith: string;
+}> = {
+ name: 'suggest_replacement',
+ description:
+ 'Suggest replacing text in a paragraph. Creates a tracked change the user can accept or reject.',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ paragraphIndex: { type: 'number', description: 'Paragraph index' },
+ search: { type: 'string', description: 'Short phrase to find (3-8 words)' },
+ replaceWith: { type: 'string', description: 'Replacement text' },
+ },
+ required: ['paragraphIndex', 'search', 'replaceWith'],
+ },
+ handler: (input, bridge) => {
+ const ok = bridge.replace({
+ paragraphIndex: input.paragraphIndex,
+ search: input.search,
+ replaceWith: input.replaceWith,
+ });
+ if (!ok)
+ return {
+ success: false,
+ error: `Text "${input.search}" not found in paragraph ${input.paragraphIndex}.`,
+ };
+ return {
+ success: true,
+ data: `Tracked change created: "${input.search}" → "${input.replaceWith}"`,
+ };
+ },
+};
+
+const scrollTo: AgentToolDefinition<{ paragraphIndex: number }> = {
+ name: 'scroll_to',
+ description: 'Scroll the document to a specific paragraph.',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ paragraphIndex: { type: 'number', description: 'Paragraph index to scroll to' },
+ },
+ required: ['paragraphIndex'],
+ },
+ handler: (input, bridge) => {
+ bridge.scrollTo(input.paragraphIndex);
+ return { success: true, data: `Scrolled to paragraph ${input.paragraphIndex}.` };
+ },
+};
+
+// ── Registry ────────────────────────────────────────────────────────────────
+
+/** All built-in agent tools. */
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+export const agentTools: AgentToolDefinition<any>[] = [
+ readDocument,
+ readComments,
+ readChanges,
+ addComment,
+ suggestReplacement,
+ scrollTo,
+];
+
+/**
+ * Execute a tool call against an EditorBridge.
+ * Returns the result (never throws).
+ */
+export function executeToolCall(
+ toolName: string,
+ input: Record<string, unknown>,
+ bridge: EditorBridge
+): AgentToolResult {
+ const tool = agentTools.find((t) => t.name === toolName);
+ if (!tool) return { success: false, error: `Unknown tool: ${toolName}` };
+ try {
+ return tool.handler(input, bridge);
+ } catch (e) {
+ return { success: false, error: e instanceof Error ? e.message : String(e) };
+ }
+}
+
+/**
+ * Get tool schemas in OpenAI function calling format.
+ * This is the de facto standard — works with OpenAI, Vercel AI SDK,
+ * and most providers (Anthropic adapters accept this format too).
+ */
+export function getToolSchemas() {
+ return agentTools.map((t) => ({
+ type: 'function' as const,
+ function: {
+ name: t.name,
+ description: t.description,
+ parameters: t.inputSchema,
+ },
+ }));
+}
diff --git a/packages/agent-use/src/tools/types.ts b/packages/agent-use/src/tools/types.ts
new file mode 100644
index 00000000..97d7b8a8
--- /dev/null
+++ b/packages/agent-use/src/tools/types.ts
@@ -0,0 +1,22 @@
+/**
+ * Agent tool type definitions.
+ */
+
+import type { EditorBridge } from '../bridge';
+
+export interface AgentToolDefinition<TInput = Record<string, unknown>> {
+ /** Tool name (used in tool_use blocks) */
+ name: string;
+ /** Human-readable description for the LLM */
+ description: string;
+ /** JSON Schema for the input parameters */
+ inputSchema: Record<string, unknown>;
+ /** Handler — receives parsed input + bridge, returns result */
+ handler: (input: TInput, bridge: EditorBridge) => AgentToolResult;
+}
+
+export interface AgentToolResult {
+ success: boolean;
+ data?: unknown;
+ error?: string;
+}
diff --git a/packages/agent-use/src/useAgentChat.ts b/packages/agent-use/src/useAgentChat.ts
new file mode 100644
index 00000000..0730cad7
--- /dev/null
+++ b/packages/agent-use/src/useAgentChat.ts
@@ -0,0 +1,63 @@
+/**
+ * useAgentChat — React hook that wires agent tools to a live DocxEditor.
+ *
+ * @example
+ * ```tsx
+ * import { useAgentChat } from '@eigenpal/docx-editor-agents/bridge';
+ *
+ * const { executeToolCall, toolSchemas } = useAgentChat({ editorRef, author: 'Assistant' });
+ *
+ * // Pass toolSchemas to your AI provider, execute tool calls on the client
+ * const result = executeToolCall('add_comment', { paragraphIndex: 3, text: 'Fix this.' });
+ * ```
+ */
+
+import { useCallback, useMemo } from 'react';
+import { createEditorBridge, type EditorRefLike } from './bridge';
+import { executeToolCall as execTool, getToolSchemas } from './tools';
+import type { AgentToolResult } from './tools';
+
+/** Computed once — tool definitions are static. */
+const TOOL_SCHEMAS = getToolSchemas();
+
+export interface UseAgentChatOptions {
+ /** Reference to the DocxEditor (must match EditorRefLike interface). */
+ editorRef: React.RefObject<EditorRefLike>;
+ /** Default author name for comments and changes. Default: 'AI' */
+ author?: string;
+}
+
+export interface UseAgentChatReturn {
+ /** Execute a tool call through the bridge. */
+ executeToolCall: (toolName: string, input: Record<string, unknown>) => AgentToolResult;
+ /** Tool schemas in OpenAI function calling format. Pass to your AI provider. */
+ toolSchemas: ReturnType;
+}
+
+/**
+ * Hook that creates an EditorBridge and provides tool execution.
+ */
+export function useAgentChat(options: UseAgentChatOptions): UseAgentChatReturn {
+ const { editorRef, author = 'AI' } = options;
+
+ // Bridge is created once per author change and reused across tool calls
+ const bridgeRef = useMemo(() => {
+ return {
+ get: () => (editorRef.current ? createEditorBridge(editorRef.current, author) : null),
+ };
+ }, [editorRef, author]);
+
+ const executeToolCall = useCallback(
+ (toolName: string, input: Record<string, unknown>): AgentToolResult => {
+ const bridge = bridgeRef.get();
+ if (!bridge) return { success: false, error: 'Editor not ready' };
+ return execTool(toolName, input, bridge);
+ },
+ [bridgeRef]
+ );
+
+ return {
+ executeToolCall,
+ toolSchemas: TOOL_SCHEMAS,
+ };
+}
diff --git a/packages/agent-use/tsup.config.ts b/packages/agent-use/tsup.config.ts
index 618b06ea..faa47e07 100644
--- a/packages/agent-use/tsup.config.ts
+++ b/packages/agent-use/tsup.config.ts
@@ -11,5 +11,5 @@ export default defineConfig({
sourcemap: true,
clean: true,
treeshake: true,
- external: ['prosemirror-model', 'prosemirror-state', 'prosemirror-view'],
+ external: ['prosemirror-model', 'prosemirror-state', 'prosemirror-view', 'react'],
});
diff --git a/packages/react/src/components/DocxEditor.tsx b/packages/react/src/components/DocxEditor.tsx
index 6c12cb0d..0d4b9975 100644
--- a/packages/react/src/components/DocxEditor.tsx
+++ b/packages/react/src/components/DocxEditor.tsx
@@ -337,6 +337,28 @@ export interface DocxEditorRef {
loadDocument: (doc: Document) => void;
/** Load a DOCX buffer programmatically (ArrayBuffer, Uint8Array, Blob, or File) */
loadDocumentBuffer: (buffer: DocxInput) => Promise<void>;
+ /** Add a comment programmatically. Returns the comment ID. */
+ addComment: (options: {
+ paragraphIndex: number;
+ text: string;
+ author: string;
+ search?: string;
+ }) => number | null;
+ /** Reply to an existing comment. Returns the reply comment ID. */
+ replyToComment: (commentId: number, text: string, author: string) => number | null;
+ /** Resolve (mark as done) a comment. */
+ resolveComment: (commentId: number) => void;
+ /** Create a tracked change (replacement). */
+ proposeReplacement: (options: {
+ paragraphIndex: number;
+ search: string;
+ replaceWith: string;
+ author: string;
+ }) => boolean;
+ /** Scroll to a paragraph by its document-wide index. */
+ scrollToIndex: (paragraphIndex: number) => void;
+ /** Get all comments. */
+ getComments: () => Comment[];
}
/**
@@ -597,6 +619,89 @@ function createComment(text: string, authorName: string, parentId?: number): Com
};
}
+/**
+ * Find the ProseMirror document position range for a paragraph at the given index.
+ * Counting matches forEachParagraph: paragraphs (including inside tables) and
+ * non-paragraph top-level blocks (sections) each increment the counter.
+ */
+function findParagraphPmRange(
+ doc: import('prosemirror-model').Node,
+ paragraphIndex: number
+): { from: number; to: number } | null {
+ let index = 0;
+ let result: { from: number; to: number } | null = null;
+
+ doc.descendants((node, pos) => {
+ if (result) return false;
+ if (node.type.name === 'paragraph') {
+ if (index === paragraphIndex) {
+ result = { from: pos, to: pos + node.nodeSize };
+ return false;
+ }
+ index++;
+ return false; // don't descend into paragraph children
+ }
+ // Descend into container nodes to find nested paragraphs
+ if (
+ node.type.name === 'table' ||
+ node.type.name === 'table_row' ||
+ node.type.name === 'table_cell' ||
+ node.type.name === 'doc'
+ ) {
+ return true;
+ }
+ // Other block nodes (sections, etc.) count as a paragraph index
+ if (node.isBlock && node.type.name !== 'doc') {
+ index++;
+ }
+ return false;
+ });
+
+ return result;
+}
+
+/**
+ * Find a text string within a ProseMirror paragraph node range and return its positions.
+ */
+function findTextInPmParagraph(
+ doc: import('prosemirror-model').Node,
+ paragraphFrom: number,
+ paragraphTo: number,
+ searchText: string
+): { from: number; to: number } | null {
+ let fullText = '';
+ const textPositions: { pos: number; len: number }[] = [];
+
+ doc.nodesBetween(paragraphFrom, paragraphTo, (node, pos) => {
+ if (node.isText && node.text) {
+ textPositions.push({ pos, len: node.text.length });
+ fullText += node.text;
+ }
+ });
+
+ const matchIndex = fullText.indexOf(searchText);
+ if (matchIndex === -1) return null;
+
+ // Map string offset to PM position
+ let charOffset = 0;
+ let fromPos = paragraphFrom;
+ let toPos = paragraphFrom;
+
+ for (const tp of textPositions) {
+ const segEnd = charOffset + tp.len;
+ if (charOffset <= matchIndex && matchIndex < segEnd) {
+ fromPos = tp.pos + (matchIndex - charOffset);
+ }
+ if (charOffset <= matchIndex + searchText.length && matchIndex + searchText.length <= segEnd) {
+ toPos = tp.pos + (matchIndex + searchText.length - charOffset);
+ break;
+ }
+ charOffset = segEnd;
+ }
+
+ return { from: fromPos, to: toPos };
+}
+
/**
* DocxEditor - Complete DOCX editor component
*/
@@ -2875,6 +2980,98 @@ body { background: white; }
print: handleDirectPrint,
loadDocument: loadParsedDocument,
loadDocumentBuffer: loadBuffer,
+
+ addComment: (options) => {
+ const view = pagedEditorRef.current?.getView();
+ if (!view) return null;
+ const range = findParagraphPmRange(view.state.doc, options.paragraphIndex);
+ if (!range) return null;
+
+ let from = range.from;
+ let to = range.to;
+
+ if (options.search) {
+ const textRange = findTextInPmParagraph(
+ view.state.doc,
+ range.from,
+ range.to,
+ options.search
+ );
+ if (!textRange) return null;
+ from = textRange.from;
+ to = textRange.to;
+ }
+
+ const comment = createComment(options.text, options.author);
+ const commentMark = view.state.schema.marks.comment.create({ commentId: comment.id });
+ view.dispatch(view.state.tr.addMark(from, to, commentMark));
+ setComments((prev) => [...prev, comment]);
+ setShowCommentsSidebar(true);
+ return comment.id;
+ },
+
+ replyToComment: (commentId, text, authorName) => {
+ if (!comments.some((c) => c.id === commentId)) return null;
+ const reply = createComment(text, authorName, commentId);
+ setComments((prev) => [...prev, reply]);
+ return reply.id;
+ },
+
+ resolveComment: (commentId) => {
+ setComments((prev) => prev.map((c) => (c.id === commentId ? { ...c, done: true } : c)));
+ },
+
+ proposeReplacement: (options) => {
+ const view = pagedEditorRef.current?.getView();
+ if (!view) return false;
+ const range = findParagraphPmRange(view.state.doc, options.paragraphIndex);
+ if (!range) return false;
+
+ const textRange = findTextInPmParagraph(
+ view.state.doc,
+ range.from,
+ range.to,
+ options.search
+ );
+ if (!textRange) return false;
+
+ const revisionId = Date.now();
+ const date = new Date().toISOString();
+ const { schema } = view.state;
+
+ const deletionMark = schema.marks.deletion.create({
+ revisionId,
+ author: options.author,
+ date,
+ });
+ const insertionMark = schema.marks.insertion.create({
+ revisionId,
+ author: options.author,
+ date,
+ });
+
+ const insertedNode = schema.text(options.replaceWith, [insertionMark]);
+ const tr = view.state.tr
+ .addMark(textRange.from, textRange.to, deletionMark)
+ .insert(textRange.to, insertedNode);
+ view.dispatch(tr);
+
+ extractTrackedChanges();
+ setShowCommentsSidebar(true);
+ return true;
+ },
+
+ scrollToIndex: (paragraphIndex) => {
+ const view = pagedEditorRef.current?.getView();
+ if (!view) return;
+ const range = findParagraphPmRange(view.state.doc, paragraphIndex);
+ if (!range) return;
+
+ const tr = view.state.tr.setSelection(TextSelection.create(view.state.doc, range.from));
+ view.dispatch(tr.scrollIntoView());
+ },
+
+ getComments: () => comments,
}),
[
history.state,
@@ -2884,6 +3081,8 @@ body { background: white; }
handleDirectPrint,
loadParsedDocument,
loadBuffer,
+ comments,
+ extractTrackedChanges,
]
);
diff --git a/specs/live-agent-chat.md b/specs/live-agent-chat.md
new file mode 100644
index 00000000..13f95987
--- /dev/null
+++ b/specs/live-agent-chat.md
@@ -0,0 +1,619 @@
+# Spec: Live Agent Chat with Document Tools
+
+## Problem
+
+Today `DocxReviewer` operates on a **static `Document` model** — you parse a DOCX, the agent reads/comments/proposes, you serialize back to DOCX. There's no connection to the live editor. The `bridge.ts` placeholder exists but is unimplemented.
+
+The goal: a **chat panel next to the document** where an AI agent can read the document content, add comments, suggest changes, and highlight text — all happening live in the editor UI, not just in a serialized file.
+
+## User Experience
+
+```
+┌────────────────────────────────────────┬─────────────────────────┐
+│ │ │
+│ DOCX Editor │ Agent Chat │
+│ │ │
+│ ┌──────────────────────────────┐ │ User: Review section 3 │
+│ │ Section 3: Payment Terms │ │ for legal issues │
+│ │ │ │ │
+│ │ The buyer shall pay $50k ←──────────── Agent: I found 2 │
+│ │ [💬 Agent: Liability cap...] │ │ issues in section 3: │
+│ │ │ │ │
+│ │ within 30 days of ←─────────────────── 1. Liability cap at │
+│ │ [💬 Agent: No late fee...] │ │ $50k seems low for │
+│ │ │ │ this deal size │
+│ └──────────────────────────────┘ │ │
+│ │ 2. No late payment │
+│ ┌─ Comments Sidebar ──────────┐ │ clause specified │
+│ │ 💬 Agent: Liability cap │ │ │
+│ │ at $50k is low... │ │ I've added comments │
+│ │ │ │ to both paragraphs. │
+│ │ 💬 Agent: No late fee │ │ │
+│ │ clause specified... │ │ [Apply suggested fix] │
+│ └──────────────────────────────┘ │ │
+│ │ User: Fix the first │
+│ │ one, change to $500k │
+│ │ │
+│ │ Agent: Done. Created │
+│ │ a tracked change: │
+│ │ $50k → $500k │
+└────────────────────────────────────────┴─────────────────────────┘
+```
+
+The agent's comments and tracked changes appear **instantly** in the editor — same as if a human collaborator added them. The existing `CommentsSidebar` renders them. The user can accept/reject tracked changes through the normal UI.
+
+## Architecture
+
+### Three layers
+
+```
+┌──────────────────────────────────────────────────────────────┐
+│ 1. CHAT UI (React component) │
+│ - Message list, input box, tool call display │
+│ - Lives in packages/react │
+│ - Pure presentation — no AI logic │
+└────────────┬─────────────────────────────────────────────────┘
+ │ calls
+┌────────────▼─────────────────────────────────────────────────┐
+│ 2. AGENT TOOLS (tool definitions + handlers) │
+│ - Tool schemas the AI can call │
+│ - Handlers that call into EditorBridge │
+│ - Lives in packages/agent-use │
+└────────────┬─────────────────────────────────────────────────┘
+ │ calls
+┌────────────▼─────────────────────────────────────────────────┐
+│ 3. EDITOR BRIDGE (client-side adapter) │
+│ - Connects agent tools → live editor state │
+│ - Reads from ProseMirror doc + Document model │
+│ - Writes comments/changes into editor state │
+│ - Lives in packages/agent-use/bridge + packages/react │
+└──────────────────────────────────────────────────────────────┘
+```
+
+### Key constraint: AI-provider agnostic
+
+The spec defines **tool schemas and a bridge API**. It does NOT include any AI SDK, API calls, or model-specific logic. The consumer (app developer) brings their own AI provider and wires tool calls through the bridge.
+
+This means the chat component receives messages and tool results as props — it doesn't make API calls itself.
+
+---
+
+## Layer 3: Editor Bridge (`packages/agent-use/src/bridge.ts`)
+
+The bridge connects agent tool handlers to the live editor. It wraps a `DocxEditorRef` and exposes the same operations as `DocxReviewer`, but operating on the **live editor state** instead of a static Document.
+
+### Interface
+
+```ts
+// packages/agent-use/src/bridge.ts
+
+import type { DocxEditorRef } from '@eigenpal/docx-js-editor';
+
+export interface EditorBridge {
+ // ── READ ──────────────────────────────────────────────────
+ /** Get document content as indexed text lines (same format as DocxReviewer.getContentAsText) */
+ getContentAsText(options?: GetContentOptions): string;
+
+ /** Get structured content blocks */
+ getContent(options?: GetContentOptions): ContentBlock[];
+
+ /** Get existing comments */
+ getComments(): ReviewComment[];
+
+ /** Get existing tracked changes */
+ getChanges(): ReviewChange[];
+
+ /** Get text around the user's current cursor/selection */
+ getSelectionContext(): SelectionContext | null;
+
+ // ── COMMENT ───────────────────────────────────────────────
+ /** Add a comment anchored to a paragraph (optionally to specific text within it) */
+ addComment(options: AddCommentOptions): number;
+
+ /** Reply to an existing comment */
+ replyTo(commentId: number, options: ReplyOptions): number;
+
+ /** Resolve a comment */
+ resolveComment(commentId: number): void;
+
+ // ── SUGGEST CHANGES ───────────────────────────────────────
+  /** Replace text, creating a tracked change visible in the editor. Returns false if the target text was not found. */
+  proposeReplacement(options: ProposeReplacementOptions): boolean;
+
+ /** Insert text as a tracked change */
+ proposeInsertion(options: ProposeInsertionOptions): void;
+
+ /** Delete text as a tracked change */
+ proposeDeletion(options: ProposeDeletionOptions): void;
+
+ // ── HIGHLIGHT ─────────────────────────────────────────────
+ /** Temporarily highlight a paragraph or text range (visual only, not persisted) */
+ highlight(paragraphIndex: number, options?: HighlightOptions): HighlightHandle;
+
+ // ── NAVIGATE ──────────────────────────────────────────────
+ /** Scroll to and optionally select a paragraph */
+ scrollTo(paragraphIndex: number): void;
+}
+
+export interface SelectionContext {
+ /** Currently selected text (empty string if cursor only) */
+ selectedText: string;
+ /** Paragraph index of the selection start */
+ paragraphIndex: number;
+ /** Full text of the paragraph containing the selection */
+ paragraphText: string;
+ /** Formatting at the selection */
+ formatting: TextFormatting;
+}
+
+export interface HighlightOptions {
+ /** Color of the highlight. Default: 'yellow' */
+ color?: string;
+ /** Optional: highlight only this text within the paragraph */
+ search?: string;
+ /** Auto-remove after N milliseconds. Default: no auto-remove */
+ duration?: number;
+}
+
+export interface HighlightHandle {
+ /** Remove the highlight */
+ remove(): void;
+}
+
+/** Create a bridge from a DocxEditor ref */
+export function createEditorBridge(editorRef: DocxEditorRef, author?: string): EditorBridge;
+```
+
+### Implementation strategy
+
+The bridge reads from the editor's internal state:
+
+- **Read operations**: Extract content from the ProseMirror document (same logic as `DocxReviewer` but reading from `editorRef.getDocument()` or the live PM state)
+- **Comment operations**: Call `editorRef`'s existing comment APIs (already wired in `DocxEditor.tsx` — `setComments`, `addComment` handlers exist)
+- **Change operations**: Dispatch ProseMirror transactions that create tracked changes (insertion/deletion marks with author metadata)
+- **Highlight**: Add a temporary decoration to the ProseMirror view (a `Decoration.inline` or `Decoration.node` — removed when the handle's `remove()` is called)
+- **Navigate**: Use `editorRef.scrollToIndex(paragraphIndex)` or dispatch a selection + scrollIntoView
+
+### What needs to be added to DocxEditorRef
+
+The existing `DocxEditorRef` needs a few new methods:
+
+```ts
+interface DocxEditorRef {
+ // ... existing methods ...
+
+ /** Get the current Document model (already exists as getDocument()) */
+ getDocument(): Document;
+
+ /** Add a comment programmatically (needs to be exposed) */
+ addComment(options: {
+ paragraphIndex: number;
+ text: string;
+ author: string;
+ search?: string;
+ }): number;
+
+ /** Reply to a comment */
+ replyToComment(commentId: number, text: string, author: string): number;
+
+ /** Resolve a comment */
+ resolveComment(commentId: number): void;
+
+ /** Create a tracked change (replacement) */
+ proposeReplacement(options: {
+ paragraphIndex: number;
+ search: string;
+ replaceWith: string;
+ author: string;
+  }): boolean;
+
+ /** Add a temporary highlight decoration */
+ addHighlight(
+ paragraphIndex: number,
+ options?: { search?: string; color?: string }
+ ): { remove(): void };
+
+ /** Scroll to a paragraph index */
+ scrollToIndex(paragraphIndex: number): void;
+}
+```
+
+---
+
+## Layer 2: Agent Tool Definitions (`packages/agent-use/src/tools/`)
+
+Tools are defined as **JSON schemas** (compatible with Anthropic, OpenAI, and Vercel AI SDK tool formats). Each tool has a schema + a handler function that calls into the `EditorBridge`.
+
+### Tool catalog
+
+| Tool Name | Description | Parameters |
+| --------------------- | ------------------------------------------------------ | ------------------------------------------------ |
+| `read_document` | Read document content as indexed text | `{ fromIndex?, toIndex? }` |
+| `read_selection` | Get text/context at the user's current cursor position | `{}` |
+| `read_comments` | List all comments in the document | `{ author? }` |
+| `read_changes` | List all tracked changes | `{ author?, type? }` |
+| `add_comment` | Add a comment on a paragraph | `{ paragraphIndex, text, search? }` |
+| `reply_to_comment` | Reply to an existing comment | `{ commentId, text }` |
+| `resolve_comment` | Mark a comment as resolved | `{ commentId }` |
+| `suggest_replacement` | Replace text (creates tracked change) | `{ paragraphIndex, search, replaceWith }` |
+| `suggest_insertion` | Insert text (creates tracked change) | `{ paragraphIndex, text, position?, search? }` |
+| `suggest_deletion` | Delete text (creates tracked change) | `{ paragraphIndex, search }` |
+| `highlight_text` | Temporarily highlight text to draw user attention | `{ paragraphIndex, search?, color?, duration? }` |
+| `scroll_to` | Scroll document to a paragraph | `{ paragraphIndex }` |
+
+### Tool definition format
+
+```ts
+// packages/agent-use/src/tools/types.ts
+
+export interface AgentToolDefinition<TInput = unknown> {
+  /** Tool name (used in tool_use blocks) */
+  name: string;
+  /** Human-readable description for the LLM */
+  description: string;
+  /** JSON Schema for the input parameters */
+  inputSchema: Record<string, unknown>;
+  /** Handler — receives parsed input + bridge, returns result for the LLM */
+  handler: (input: TInput, bridge: EditorBridge) => AgentToolResult;
+}
+
+export interface AgentToolResult {
+ /** Whether the operation succeeded */
+ success: boolean;
+ /** Data to return to the LLM (will be JSON.stringified) */
+ data?: unknown;
+ /** Error message if failed */
+ error?: string;
+}
+```
+
+### Example tool definition
+
+```ts
+// packages/agent-use/src/tools/readDocument.ts
+
+export const readDocumentTool: AgentToolDefinition<{ fromIndex?: number; toIndex?: number }> = {
+ name: 'read_document',
+ description:
+ 'Read the document content. Returns indexed text lines like "[0] First paragraph", ' +
+ '"[1] Second paragraph". Use fromIndex/toIndex to read a specific range. ' +
+ 'Always read the document before commenting or suggesting changes.',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ fromIndex: {
+ type: 'number',
+ description: 'Start reading from this paragraph index (inclusive). Default: 0',
+ },
+ toIndex: {
+ type: 'number',
+ description: 'Stop reading at this paragraph index (inclusive). Default: end of document',
+ },
+ },
+ },
+ handler: (input, bridge) => {
+ const text = bridge.getContentAsText({
+ fromIndex: input.fromIndex,
+ toIndex: input.toIndex,
+ });
+ return { success: true, data: text };
+ },
+};
+```
+
+### Registry + helpers
+
+```ts
+// packages/agent-use/src/tools/index.ts
+
+/** All built-in tools */
+export const agentTools: AgentToolDefinition[];
+
+/** Get tool schemas in Anthropic format */
+export function getAnthropicTools(): AnthropicToolSchema[];
+
+/** Get tool schemas in OpenAI format */
+export function getOpenAITools(): OpenAIToolSchema[];
+
+/** Execute a tool call against an EditorBridge */
+export function executeToolCall(
+ toolName: string,
+ input: unknown,
+ bridge: EditorBridge
+): AgentToolResult;
+```
+
+---
+
+## Layer 1: Chat UI (`packages/react/src/components/AgentChat/`)
+
+### Components
+
+```
+AgentChat/
+├── AgentChatPanel.tsx — Main panel (message list + input)
+├── ChatMessage.tsx — Single message bubble
+├── ChatToolCall.tsx — Inline tool call display (collapsible)
+├── ChatInput.tsx — Text input + send button
+├── types.ts — Chat message types
+└── useAgentChat.ts — Hook that wires tools to the bridge
+```
+
+### Props — Provider-agnostic
+
+```ts
+// AgentChatPanel.tsx
+
+export interface AgentChatPanelProps {
+ /** Messages to display */
+ messages: ChatMessage[];
+
+ /** Whether the agent is currently generating */
+ isLoading?: boolean;
+
+ /** Called when the user sends a message. The consumer handles AI calls. */
+ onSendMessage: (text: string) => void;
+
+ /** Called when a tool call needs execution. Returns the result. */
+ onToolCall?: (toolName: string, input: unknown) => Promise;
+
+ /** Optional: pre-built bridge for automatic tool execution */
+ bridge?: EditorBridge;
+
+ /** Agent display name. Default: 'Agent' */
+ agentName?: string;
+
+ /** Width of the panel. Default: 360px */
+ width?: number;
+
+ /** Whether the panel is open */
+ isOpen: boolean;
+
+ /** Called when the user closes the panel */
+ onClose: () => void;
+}
+```
+
+### Message types
+
+```ts
+// types.ts
+
+export type ChatMessage = UserMessage | AgentMessage | ToolCallMessage | ToolResultMessage;
+
+export interface UserMessage {
+ role: 'user';
+ id: string;
+ content: string;
+ timestamp: number;
+}
+
+export interface AgentMessage {
+ role: 'agent';
+ id: string;
+ content: string;
+ timestamp: number;
+}
+
+export interface ToolCallMessage {
+ role: 'tool_call';
+ id: string;
+ toolName: string;
+ input: unknown;
+ timestamp: number;
+}
+
+export interface ToolResultMessage {
+ role: 'tool_result';
+ id: string;
+ toolCallId: string;
+ result: AgentToolResult;
+ timestamp: number;
+}
+```
+
+### `useAgentChat` hook
+
+Convenience hook that wires everything together:
+
+```ts
+export function useAgentChat(options: {
+  editorRef: React.RefObject<DocxEditorRef>;
+ author?: string;
+}): {
+ /** The bridge instance (stable ref) */
+ bridge: EditorBridge;
+
+ /** Execute a tool call through the bridge */
+ executeToolCall: (toolName: string, input: unknown) => AgentToolResult;
+
+ /** Get tool schemas for your AI provider */
+ getToolSchemas: () => AgentToolDefinition[];
+
+ /** System prompt snippet describing the document context */
+ getSystemContext: () => string;
+};
+```
+
+### Chat UI behavior
+
+- **Tool calls**: When the agent response includes tool calls, they appear as collapsible cards in the chat. The card shows the tool name, a human-readable summary of what it did, and the result (collapsed by default).
+- **Comments**: When `add_comment` is called, a comment appears instantly in the `CommentsSidebar`. The chat shows "Added comment on paragraph 5" with a clickable link that scrolls to the paragraph.
+- **Changes**: When `suggest_replacement` is called, a tracked change appears in the editor. The chat shows a mini-diff ("$50k → $500k").
+- **Highlights**: When `highlight_text` is called, the paragraph briefly glows in the editor to draw attention.
+
+---
+
+## Integration Example (Consumer Code)
+
+```tsx
+// Example: App using the editor + chat with Anthropic SDK
+
+import { DocxEditor, type DocxEditorRef } from '@eigenpal/docx-js-editor';
+import { AgentChatPanel, useAgentChat } from '@eigenpal/docx-js-editor/ui';
+import Anthropic from '@anthropic-ai/sdk';
+
+function App() {
+  const editorRef = useRef<DocxEditorRef>(null);
+  const [messages, setMessages] = useState<ChatMessage[]>([]);
+ const [isLoading, setIsLoading] = useState(false);
+ const [chatOpen, setChatOpen] = useState(true);
+
+ const { bridge, executeToolCall, getToolSchemas, getSystemContext } = useAgentChat({
+ editorRef,
+ author: 'Claude',
+ });
+
+  const handleSendMessage = async (text: string) => {
+    // Add the user message. Capture the updated array locally — setState is
+    // async, so reading `messages` directly below would miss this message.
+    const userMessage: ChatMessage = { role: 'user', id: nanoid(), content: text, timestamp: Date.now() };
+    const nextMessages = [...messages, userMessage];
+    setMessages(nextMessages);
+    setIsLoading(true);
+
+    // Call your AI provider. tool_call/tool_result entries are chat-UI only
+    // and have no `content` string, so filter them out of the API payload.
+    const client = new Anthropic();
+    let response = await client.messages.create({
+      model: 'claude-sonnet-4-20250514',
+      system: `You are a document review assistant. ${getSystemContext()}`,
+      messages: nextMessages
+        .filter((m) => m.role === 'user' || m.role === 'agent')
+        .map((m) => ({
+          role: m.role === 'user' ? ('user' as const) : ('assistant' as const),
+          content: m.content,
+        })),
+      tools: getToolSchemas(), // ← tools from the bridge
+    });
+
+ // Handle tool calls in a loop
+ while (response.stop_reason === 'tool_use') {
+ for (const block of response.content) {
+ if (block.type === 'tool_use') {
+ const result = executeToolCall(block.name, block.input);
+ setMessages((prev) => [
+ ...prev,
+ {
+ role: 'tool_call',
+ id: block.id,
+ toolName: block.name,
+ input: block.input,
+ timestamp: Date.now(),
+ },
+ {
+ role: 'tool_result',
+ id: nanoid(),
+ toolCallId: block.id,
+ result,
+ timestamp: Date.now(),
+ },
+ ]);
+ }
+ }
+ // Continue the conversation with tool results
+ response = await client.messages.create({
+ /* ... */
+ });
+ }
+
+ // Add final agent message
+ const textBlock = response.content.find((b) => b.type === 'text');
+ if (textBlock) {
+ setMessages((prev) => [
+ ...prev,
+ { role: 'agent', id: nanoid(), content: textBlock.text, timestamp: Date.now() },
+ ]);
+ }
+ setIsLoading(false);
+ };
+
+  return (
+    <div style={{ display: 'flex', height: '100vh' }}>
+      <DocxEditor ref={editorRef} />
+      <AgentChatPanel
+        messages={messages}
+        isLoading={isLoading}
+        isOpen={chatOpen}
+        agentName="Claude"
+        onSendMessage={handleSendMessage}
+        onClose={() => setChatOpen(false)}
+      />
+    </div>
+  );
+}
+```
+
+---
+
+## Implementation Plan
+
+### Phase 1: Editor Bridge (packages/agent-use + packages/react)
+
+**Goal**: Make `createEditorBridge()` work against a live `DocxEditorRef`.
+
+1. **Expose missing methods on `DocxEditorRef`** (packages/react)
+ - `addComment()`, `replyToComment()`, `resolveComment()` — wire existing comment state handlers to the ref
+ - `proposeReplacement()` — dispatch PM transaction with tracked change marks
+ - `addHighlight()` — add/remove ProseMirror `Decoration`
+ - `scrollToIndex()` — scroll to paragraph by index
+
+2. **Implement `createEditorBridge()`** (packages/agent-use/bridge.ts)
+ - Read ops: call `editorRef.getDocument()` → pass body to existing `DocxReviewer` content/discovery functions
+ - Write ops: call the new `DocxEditorRef` methods above
+ - Selection: read from ProseMirror selection state
+
+### Phase 2: Tool Definitions (packages/agent-use)
+
+**Goal**: Define all 12 tools with schemas and handlers.
+
+3. **Create `src/tools/` directory** with one file per tool + index
+4. **Add format helpers** — `getAnthropicTools()`, `getOpenAITools()`
+5. **Add `executeToolCall()` dispatcher**
+6. **Tests** — unit test each tool handler against a mock bridge
+
+### Phase 3: Chat UI (packages/react)
+
+**Goal**: Ship the `AgentChatPanel` component and `useAgentChat` hook.
+
+7. **`useAgentChat` hook** — creates bridge from ref, exposes tool execution
+8. **`AgentChatPanel`** — message list, input, tool call cards
+9. **`ChatMessage` / `ChatToolCall`** — rendering components
+10. **Styling** — scoped within `.ep-root`, consistent with editor design
+
+### Phase 4: Polish
+
+11. **System prompt builder** — `getSystemContext()` generates a prompt snippet with document summary, available tools, and instructions
+12. **Streaming support** — `AgentChatPanel` accepts streaming text via a `streamingContent` prop
+13. **Documentation** — README with integration examples for Anthropic, OpenAI, Vercel AI SDK
+
+---
+
+## Scope Boundaries
+
+### In scope
+
+- EditorBridge API connecting agent tools to live editor
+- Tool definitions (schemas + handlers) — 12 tools
+- Chat UI components (presentation only)
+- `useAgentChat` hook
+- Format helpers for Anthropic/OpenAI tool schemas
+
+### Out of scope
+
+- AI provider integration (consumer brings their own)
+- Authentication / API key management
+- Chat message persistence
+- Multi-user / real-time collaboration
+- Custom tool registration (v2)
+- Voice input
+- File attachment in chat
+
+---
+
+## Open Questions
+
+1. **Should the bridge also support headless mode?** Today `DocxReviewer` is headless-only. The bridge is editor-only. Should there be a unified interface that works in both modes? (Probably yes — `DocxReviewer` could implement `EditorBridge` for headless use, making tools portable.)
+
+2. **Tool granularity**: Is `read_document` sufficient or do we need `read_paragraph(index)` for large documents? (Probably add `fromIndex`/`toIndex` params, which we already have.)
+
+3. **Streaming tool calls**: Some AI providers stream tool calls incrementally. Should the chat UI render tool calls as they stream in, or wait for completion? (Start with wait-for-completion, add streaming later.)
+
+4. **Highlight persistence**: Should highlights survive document edits or be purely ephemeral? (Ephemeral — they're for drawing attention, not annotation.)