From 45d02ae13accb02598c8546a2b3795c0c77e2d7c Mon Sep 17 00:00:00 2001
From: Ashok161
Date: Sun, 22 Mar 2026 14:31:43 +0530
Subject: [PATCH 1/2] feat: wire up tool calling execution in streaming responses

---
 .changeset/streaming-tool-calls.md            |   7 +
 packages/core/src/index.ts                    |   2 +
 packages/core/src/providers/claude.ts         | 174 ++++++++--
 packages/core/src/providers/gemini.ts         | 126 ++++++--
 packages/core/src/providers/openai.ts         | 184 +++++++++--
 packages/core/src/providers/tool-execution.ts |  53 +++
 packages/core/src/types.ts                    |   9 +
 .../core/tests/providers/tool-calling.test.ts | 301 ++++++++++++++++++
 packages/server/src/handler.ts                |  36 +++
 packages/server/tests/handler.test.ts         | 199 +++++++++++-
 packages/widget/src/api/types.ts              |   1 +
 packages/widget/src/index.ts                  |   4 +-
 packages/widget/src/widget.ts                 |   8 +-
 packages/widget/tests/widget.test.ts          |  33 ++
 14 files changed, 1047 insertions(+), 90 deletions(-)
 create mode 100644 .changeset/streaming-tool-calls.md
 create mode 100644 packages/core/src/providers/tool-execution.ts
 create mode 100644 packages/core/tests/providers/tool-calling.test.ts
 create mode 100644 packages/widget/tests/widget.test.ts

diff --git a/.changeset/streaming-tool-calls.md b/.changeset/streaming-tool-calls.md
new file mode 100644
index 0000000..a44b4d3
--- /dev/null
+++ b/.changeset/streaming-tool-calls.md
@@ -0,0 +1,7 @@
+---
+"@chatcops/core": patch
+"@chatcops/server": patch
+"@chatcops/widget": patch
+---
+
+Execute provider tool calls during chat loops so streaming and sync responses can continue after tool use. This also wires successful lead-capture tool executions into the server analytics, webhook flow, and widget lead-captured callbacks/events.
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index ce47d91..3ea3748 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -9,6 +9,8 @@ export type {
   ToolParameter,
   ToolDefinition,
   ToolResult,
+  ProviderToolCall,
+  ProviderToolExecutor,
   WebhookConfig,
   ProviderConfig,
   ProviderChatParams,
diff --git a/packages/core/src/providers/claude.ts b/packages/core/src/providers/claude.ts
index 499b270..e14f6e7 100644
--- a/packages/core/src/providers/claude.ts
+++ b/packages/core/src/providers/claude.ts
@@ -2,9 +2,45 @@ import Anthropic from '@anthropic-ai/sdk';
 import type { ProviderConfig, ProviderChatParams } from '../types.js';
 import type { AIProvider } from './base.js';
 import { toProviderMessages, toClaudeTools } from './base.js';
+import {
+  MAX_TOOL_ROUNDS,
+  executeToolCall,
+  serializeToolResult,
+} from './tool-execution.js';
 
 const DEFAULT_MODEL = 'claude-haiku-4-5-20251001';
 
+function toClaudeMessages(params: ProviderChatParams): Anthropic.Messages.MessageParam[] {
+  return toProviderMessages(params.messages).map((message) => ({
+    role: message.role as 'user' | 'assistant',
+    content: message.content,
+  }));
+}
+
+function toAssistantContentBlocks(
+  content: Anthropic.Messages.Message['content']
+): Anthropic.Messages.ContentBlockParam[] {
+  const blocks: Anthropic.Messages.ContentBlockParam[] = [];
+
+  for (const block of content) {
+    if (block.type === 'text') {
+      blocks.push({ type: 'text', text: block.text });
+      continue;
+    }
+
+    if (block.type === 'tool_use') {
+      blocks.push({
+        type: 'tool_use',
+        id: block.id,
+        name: block.name,
+        input: block.input,
+      });
+    }
+  }
+
+  return blocks;
+}
+
 export class ClaudeProvider implements AIProvider {
   name = 'claude';
   private client: Anthropic;
@@ -16,43 +52,115 @@ export class ClaudeProvider implements AIProvider {
   }
 
   async 
*chat(params: ProviderChatParams): AsyncGenerator { - const messages = toProviderMessages(params.messages).map((m) => ({ - role: m.role as 'user' | 'assistant', - content: m.content, - })); - - const stream = this.client.messages.stream({ - model: this.model, - max_tokens: params.maxTokens ?? 1024, - system: params.systemPrompt, - messages, - ...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}), - ...(params.temperature != null ? { temperature: params.temperature } : {}), - }); - - for await (const event of stream) { - if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') { - yield event.delta.text; + const messages = toClaudeMessages(params); + let toolRounds = 0; + + while (true) { + const stream = this.client.messages.stream({ + model: this.model, + max_tokens: params.maxTokens ?? 1024, + system: params.systemPrompt, + messages, + ...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}), + ...(params.temperature != null ? { temperature: params.temperature } : {}), + }); + + for await (const event of stream) { + if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') { + yield event.delta.text; + } } + + const message = await stream.finalMessage(); + const toolUses = message.content.filter((block) => block.type === 'tool_use'); + + if (message.stop_reason !== 'tool_use' || toolUses.length === 0) { + return; + } + + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); + } + toolRounds += 1; + + messages.push({ + role: 'assistant', + content: toAssistantContentBlocks(message.content), + }); + + const toolResults = await Promise.all(toolUses.map(async (toolUse) => { + const result = await executeToolCall(params, { + id: toolUse.id, + name: toolUse.name, + input: toolUse.input as Record, + }); + + return { + type: 'tool_result' as const, + tool_use_id: toolUse.id, + content: JSON.stringify(serializeToolResult(result)), + is_error: !result.success, + }; + })); + + messages.push({ + role: 'user', + content: toolResults, + }); } } async chatSync(params: ProviderChatParams): Promise { - const messages = toProviderMessages(params.messages).map((m) => ({ - role: m.role as 'user' | 'assistant', - content: m.content, - })); - - const response = await this.client.messages.create({ - model: this.model, - max_tokens: params.maxTokens ?? 1024, - system: params.systemPrompt, - messages, - ...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}), - ...(params.temperature != null ? { temperature: params.temperature } : {}), - }); - - const textBlock = response.content.find((b) => b.type === 'text'); - return textBlock?.type === 'text' ? textBlock.text : ''; + const messages = toClaudeMessages(params); + let toolRounds = 0; + + while (true) { + const response = await this.client.messages.create({ + model: this.model, + max_tokens: params.maxTokens ?? 1024, + system: params.systemPrompt, + messages, + ...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}), + ...(params.temperature != null ? 
{ temperature: params.temperature } : {}), + }); + + const toolUses = response.content.filter((block) => block.type === 'tool_use'); + if (response.stop_reason !== 'tool_use' || toolUses.length === 0) { + return response.content + .filter((block) => block.type === 'text') + .map((block) => block.text) + .join(''); + } + + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); + } + toolRounds += 1; + + messages.push({ + role: 'assistant', + content: toAssistantContentBlocks(response.content), + }); + + const toolResults = await Promise.all(toolUses.map(async (toolUse) => { + const result = await executeToolCall(params, { + id: toolUse.id, + name: toolUse.name, + input: toolUse.input as Record, + }); + + return { + type: 'tool_result' as const, + tool_use_id: toolUse.id, + content: JSON.stringify(serializeToolResult(result)), + is_error: !result.success, + }; + })); + + messages.push({ + role: 'user', + content: toolResults, + }); + } } } diff --git a/packages/core/src/providers/gemini.ts b/packages/core/src/providers/gemini.ts index e9ce804..ee3c4f3 100644 --- a/packages/core/src/providers/gemini.ts +++ b/packages/core/src/providers/gemini.ts @@ -1,9 +1,38 @@ -import { GoogleGenerativeAI } from '@google/generative-ai'; +import { GoogleGenerativeAI, type Content, type Part } from '@google/generative-ai'; import type { ProviderConfig, ProviderChatParams } from '../types.js'; import type { AIProvider } from './base.js'; import { toProviderMessages, toGeminiTools } from './base.js'; +import { + MAX_TOOL_ROUNDS, + executeToolCall, + serializeToolResult, +} from './tool-execution.js'; const DEFAULT_MODEL = 'gemini-2.0-flash'; +let toolCallSequence = 0; + +function createToolCallId() { + toolCallSequence += 1; + return `gemini_tool_call_${toolCallSequence}`; +} + +function toGeminiHistory(params: ProviderChatParams): Content[] { + return toProviderMessages(params.messages) + .slice(0, -1) + .map((message) => ({ + role: message.role === 'assistant' ? 'model' : 'user', + parts: [{ text: message.content }], + })); +} + +function getLastMessage(params: ProviderChatParams): string { + const lastMessage = toProviderMessages(params.messages).at(-1)?.content ?? ''; + if (!lastMessage) { + throw new Error('Cannot send empty message'); + } + + return lastMessage; +} export class GeminiProvider implements AIProvider { name = 'gemini'; @@ -22,29 +51,52 @@ export class GeminiProvider implements AIProvider { ...(params.tools?.length ? { tools: toGeminiTools(params.tools) } : {}), }); - const messages = toProviderMessages(params.messages); - const history = messages.slice(0, -1).map((m) => ({ - role: m.role === 'assistant' ? 'model' : 'user', - parts: [{ text: m.content }], - })); - const chat = model.startChat({ - history, + history: toGeminiHistory(params), generationConfig: { maxOutputTokens: params.maxTokens ?? 1024, ...(params.temperature != null ? { temperature: params.temperature } : {}), }, }); - const lastMessage = messages[messages.length - 1]?.content ?? ''; - if (!lastMessage) throw new Error('Cannot send empty message'); - const result = await chat.sendMessageStream(lastMessage); + let toolRounds = 0; + let request: string | Part[] = getLastMessage(params); + + while (true) { + const result = await chat.sendMessageStream(request); + + for await (const chunk of result.stream) { + const text = chunk.text(); + if (text) { + yield text; + } + } + + const response = await result.response; + const functionCalls = response.functionCalls() ?? 
[]; + if (functionCalls.length === 0) { + return; + } - for await (const chunk of result.stream) { - const text = chunk.text(); - if (text) { - yield text; + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); } + toolRounds += 1; + + request = await Promise.all(functionCalls.map(async (call) => { + const result = await executeToolCall(params, { + id: createToolCallId(), + name: call.name, + input: call.args as Record, + }); + + return { + functionResponse: { + name: call.name, + response: serializeToolResult(result), + }, + }; + })); } } @@ -55,23 +107,45 @@ export class GeminiProvider implements AIProvider { ...(params.tools?.length ? { tools: toGeminiTools(params.tools) } : {}), }); - const messages = toProviderMessages(params.messages); - const history = messages.slice(0, -1).map((m) => ({ - role: m.role === 'assistant' ? 'model' : 'user', - parts: [{ text: m.content }], - })); - const chat = model.startChat({ - history, + history: toGeminiHistory(params), generationConfig: { maxOutputTokens: params.maxTokens ?? 1024, ...(params.temperature != null ? { temperature: params.temperature } : {}), }, }); - const lastMessage = messages[messages.length - 1]?.content ?? ''; - if (!lastMessage) throw new Error('Cannot send empty message'); - const result = await chat.sendMessage(lastMessage); - return result.response.text(); + let toolRounds = 0; + let request: string | Part[] = getLastMessage(params); + + while (true) { + const result = await chat.sendMessage(request); + const response = result.response; + const functionCalls = response.functionCalls() ?? []; + + if (functionCalls.length === 0) { + return response.text(); + } + + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); + } + toolRounds += 1; + + request = await Promise.all(functionCalls.map(async (call) => { + const toolResult = await executeToolCall(params, { + id: createToolCallId(), + name: call.name, + input: call.args as Record, + }); + + return { + functionResponse: { + name: call.name, + response: serializeToolResult(toolResult), + }, + }; + })); + } } } diff --git a/packages/core/src/providers/openai.ts b/packages/core/src/providers/openai.ts index 24ddead..74efce5 100644 --- a/packages/core/src/providers/openai.ts +++ b/packages/core/src/providers/openai.ts @@ -2,8 +2,21 @@ import OpenAI from 'openai'; import type { ProviderConfig, ProviderChatParams } from '../types.js'; import type { AIProvider } from './base.js'; import { toProviderMessages, toOpenAITools } from './base.js'; +import { + MAX_TOOL_ROUNDS, + executeToolCall, + parseToolInput, + serializeToolResult, + toToolFailure, +} from './tool-execution.js'; const DEFAULT_MODEL = 'gpt-4o-mini'; +let toolCallSequence = 0; + +function createToolCallId() { + toolCallSequence += 1; + return `tool_call_${toolCallSequence}`; +} export class OpenAIProvider implements AIProvider { name = 'openai'; @@ -18,25 +31,101 @@ export class OpenAIProvider implements AIProvider { async *chat(params: ProviderChatParams): AsyncGenerator { const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [ { role: 'system', content: params.systemPrompt }, - ...toProviderMessages(params.messages).map((m) => ({ - role: m.role as 'user' | 'assistant', - content: m.content, + ...toProviderMessages(params.messages).map((message) => ({ + role: message.role as 'user' | 'assistant', + content: message.content, })), ]; - const stream = await this.client.chat.completions.create({ - model: 
this.model, - messages, - stream: true, - max_tokens: params.maxTokens ?? 1024, - ...(params.tools?.length ? { tools: toOpenAITools(params.tools) } : {}), - ...(params.temperature != null ? { temperature: params.temperature } : {}), - }); - - for await (const chunk of stream) { - const delta = chunk.choices[0]?.delta?.content; - if (delta) { - yield delta; + let toolRounds = 0; + + while (true) { + const stream = await this.client.chat.completions.create({ + model: this.model, + messages, + stream: true, + max_tokens: params.maxTokens ?? 1024, + ...(params.tools?.length ? { tools: toOpenAITools(params.tools) } : {}), + ...(params.temperature != null ? { temperature: params.temperature } : {}), + }); + + let finishReason: OpenAI.Chat.Completions.ChatCompletion.Choice['finish_reason'] | null = null; + let assistantText = ''; + const toolCalls = new Map(); + + for await (const chunk of stream) { + const choice = chunk.choices[0]; + if (!choice) continue; + + finishReason = choice.finish_reason ?? finishReason; + + const deltaText = choice.delta?.content; + if (deltaText) { + assistantText += deltaText; + yield deltaText; + } + + for (const toolCallDelta of choice.delta?.tool_calls ?? []) { + const existing = toolCalls.get(toolCallDelta.index) ?? { + id: toolCallDelta.id ?? createToolCallId(), + type: 'function' as const, + function: { + name: '', + arguments: '', + }, + }; + + if (toolCallDelta.id) { + existing.id = toolCallDelta.id; + } + if (toolCallDelta.function?.name) { + existing.function.name = toolCallDelta.function.name; + } + if (toolCallDelta.function?.arguments) { + existing.function.arguments += toolCallDelta.function.arguments; + } + + toolCalls.set(toolCallDelta.index, existing); + } + } + + const completedToolCalls = Array.from(toolCalls.entries()) + .sort(([left], [right]) => left - right) + .map(([, toolCall]) => toolCall); + + if (finishReason !== 'tool_calls' || completedToolCalls.length === 0) { + return; + } + + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); + } + toolRounds += 1; + + messages.push({ + role: 'assistant', + content: assistantText || null, + tool_calls: completedToolCalls, + }); + + for (const toolCall of completedToolCalls) { + const result = await (async () => { + try { + return await executeToolCall(params, { + id: toolCall.id, + name: toolCall.function.name, + input: parseToolInput(toolCall.function.arguments), + }); + } catch (error) { + return toToolFailure(error); + } + })(); + + messages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify(serializeToolResult(result)), + }); } } } @@ -44,20 +133,61 @@ export class OpenAIProvider implements AIProvider { async chatSync(params: ProviderChatParams): Promise { const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [ { role: 'system', content: params.systemPrompt }, - ...toProviderMessages(params.messages).map((m) => ({ - role: m.role as 'user' | 'assistant', - content: m.content, + ...toProviderMessages(params.messages).map((message) => ({ + role: message.role as 'user' | 'assistant', + content: message.content, })), ]; - const response = await this.client.chat.completions.create({ - model: this.model, - messages, - max_tokens: params.maxTokens ?? 1024, - ...(params.tools?.length ? { tools: toOpenAITools(params.tools) } : {}), - ...(params.temperature != null ? 
{ temperature: params.temperature } : {}), - }); + let toolRounds = 0; + + while (true) { + const response = await this.client.chat.completions.create({ + model: this.model, + messages, + max_tokens: params.maxTokens ?? 1024, + ...(params.tools?.length ? { tools: toOpenAITools(params.tools) } : {}), + ...(params.temperature != null ? { temperature: params.temperature } : {}), + }); + + const choice = response.choices[0]; + const assistantMessage = choice?.message; + const toolCalls = assistantMessage?.tool_calls ?? []; + + if (choice?.finish_reason !== 'tool_calls' || toolCalls.length === 0) { + return assistantMessage?.content ?? ''; + } + + if (toolRounds >= MAX_TOOL_ROUNDS) { + throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`); + } + toolRounds += 1; + + messages.push({ + role: 'assistant', + content: assistantMessage.content, + tool_calls: toolCalls, + }); - return response.choices[0]?.message?.content ?? ''; + for (const toolCall of toolCalls) { + const result = await (async () => { + try { + return await executeToolCall(params, { + id: toolCall.id, + name: toolCall.function.name, + input: parseToolInput(toolCall.function.arguments), + }); + } catch (error) { + return toToolFailure(error); + } + })(); + + messages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify(serializeToolResult(result)), + }); + } + } } } diff --git a/packages/core/src/providers/tool-execution.ts b/packages/core/src/providers/tool-execution.ts new file mode 100644 index 0000000..a3da7dd --- /dev/null +++ b/packages/core/src/providers/tool-execution.ts @@ -0,0 +1,53 @@ +import type { ProviderChatParams, ProviderToolCall, ToolResult } from '../types.js'; + +export const MAX_TOOL_ROUNDS = 5; + +export function parseToolInput(rawInput: string): Record { + if (!rawInput.trim()) return {}; + + let parsed: unknown; + try { + parsed = JSON.parse(rawInput); + } catch { + throw new Error('Tool arguments must be valid JSON.'); + } + + if (!parsed || Array.isArray(parsed) || typeof parsed !== 'object') { + throw new Error('Tool arguments must be a JSON object.'); + } + + return parsed as Record; +} + +export function serializeToolResult(result: ToolResult): Record { + return { + success: result.success, + ...(result.data !== undefined ? { data: result.data } : {}), + ...(result.message ? { message: result.message } : {}), + }; +} + +export function toToolFailure(error: unknown): ToolResult { + return { + success: false, + message: error instanceof Error ? 
error.message : 'Tool execution failed.',
+  };
+}
+
+export async function executeToolCall(
+  params: ProviderChatParams,
+  call: ProviderToolCall
+): Promise<ToolResult> {
+  if (!params.toolExecutor) {
+    return {
+      success: false,
+      message: `No tool executor is configured for "${call.name}".`,
+    };
+  }
+
+  try {
+    return await params.toolExecutor(call);
+  } catch (error) {
+    return toToolFailure(error);
+  }
+}
diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts
index caa8cda..b5b6e5d 100644
--- a/packages/core/src/types.ts
+++ b/packages/core/src/types.ts
@@ -63,6 +63,14 @@ export interface ToolResult {
   message?: string;
 }
 
+export interface ProviderToolCall {
+  id: string;
+  name: string;
+  input: Record<string, unknown>;
+}
+
+export type ProviderToolExecutor = (call: ProviderToolCall) => Promise<ToolResult>;
+
 export interface WebhookConfig {
   url: string;
   events: string[];
@@ -79,6 +87,7 @@ export interface ProviderChatParams {
   messages: ChatMessage[];
   systemPrompt: string;
   tools?: ToolDefinition[];
+  toolExecutor?: ProviderToolExecutor;
   maxTokens?: number;
   temperature?: number;
 }
diff --git a/packages/core/tests/providers/tool-calling.test.ts b/packages/core/tests/providers/tool-calling.test.ts
new file mode 100644
index 0000000..f8bc0de
--- /dev/null
+++ b/packages/core/tests/providers/tool-calling.test.ts
@@ -0,0 +1,301 @@
+import { describe, expect, it, vi } from 'vitest';
+import { OpenAIProvider } from '../../src/providers/openai.js';
+import { ClaudeProvider } from '../../src/providers/claude.js';
+import { GeminiProvider } from '../../src/providers/gemini.js';
+import type { ProviderChatParams } from '../../src/types.js';
+
+async function collect(stream: AsyncGenerator<string>) {
+  const chunks: string[] = [];
+  for await (const chunk of stream) {
+    chunks.push(chunk);
+  }
+  return chunks;
+}
+
+function createParams(overrides?: Partial<ProviderChatParams>): ProviderChatParams {
+  return {
+    messages: [{ id: '1', role: 'user', content: 'Check order 123', timestamp: Date.now() }],
+    systemPrompt: 'You are helpful.',
+    tools: [{
+      name: 'lookup_order',
+      description: 'Look up an order by ID',
+      parameters: {
+        orderId: { type: 'string', description: 'Order ID' },
+      },
+      required: ['orderId'],
+    }],
+    ...overrides,
+  };
+}
+
+describe('provider tool execution loops', () => {
+  it('OpenAIProvider continues streaming after a tool call', async () => {
+    const provider = new OpenAIProvider({ type: 'openai', apiKey: 'test-key' });
+    const create = vi.fn()
+      .mockResolvedValueOnce({
+        async *[Symbol.asyncIterator]() {
+          yield {
+            choices: [{
+              delta: {
+                tool_calls: [{
+                  index: 0,
+                  id: 'call_1',
+                  function: {
+                    name: 'lookup_order',
+                    arguments: '{"orderId":"123"}',
+                  },
+                }],
+              },
+            }],
+          };
+          yield { choices: [{ delta: {}, finish_reason: 'tool_calls' }] };
+        },
+      })
+      .mockResolvedValueOnce({
+        async *[Symbol.asyncIterator]() {
+          yield { choices: [{ delta: { content: 'Order 123 is shipped.' 
} }] }; + yield { choices: [{ delta: {}, finish_reason: 'stop' }] }; + }, + }); + + (provider as unknown as { client: { chat: { completions: { create: typeof create } } } }).client.chat.completions.create = create; + + const toolExecutor = vi.fn().mockResolvedValue({ + success: true, + data: { status: 'shipped' }, + message: 'Order found', + }); + + const chunks = await collect(provider.chat(createParams({ toolExecutor }))); + + expect(chunks).toEqual(['Order 123 is shipped.']); + expect(toolExecutor).toHaveBeenCalledWith({ + id: 'call_1', + name: 'lookup_order', + input: { orderId: '123' }, + }); + + const secondRequestMessages = create.mock.calls[1][0].messages; + expect(secondRequestMessages).toEqual(expect.arrayContaining([ + expect.objectContaining({ + role: 'assistant', + tool_calls: [expect.objectContaining({ id: 'call_1' })], + }), + expect.objectContaining({ + role: 'tool', + tool_call_id: 'call_1', + }), + ])); + }); + + it('OpenAIProvider converts malformed tool args into a tool failure instead of aborting', async () => { + const provider = new OpenAIProvider({ type: 'openai', apiKey: 'test-key' }); + const create = vi.fn() + .mockResolvedValueOnce({ + async *[Symbol.asyncIterator]() { + yield { + choices: [{ + delta: { + tool_calls: [{ + index: 0, + id: 'call_bad', + function: { + name: 'lookup_order', + arguments: '{"orderId"', + }, + }], + }, + }], + }; + yield { choices: [{ delta: {}, finish_reason: 'tool_calls' }] }; + }, + }) + .mockResolvedValueOnce({ + async *[Symbol.asyncIterator]() { + yield { choices: [{ delta: { content: 'I could not parse the tool input.' } }] }; + yield { choices: [{ delta: {}, finish_reason: 'stop' }] }; + }, + }); + + (provider as unknown as { client: { chat: { completions: { create: typeof create } } } }).client.chat.completions.create = create; + + const toolExecutor = vi.fn(); + const chunks = await collect(provider.chat(createParams({ toolExecutor }))); + + expect(chunks).toEqual(['I could not parse the tool input.']); + expect(toolExecutor).not.toHaveBeenCalled(); + + const toolMessage = create.mock.calls[1][0].messages.find((message: { role: string }) => message.role === 'tool'); + expect(toolMessage.content).toContain('"success":false'); + expect(toolMessage.content).toContain('valid JSON'); + }); + + it('OpenAIProvider chatSync loops through tool calls before returning text', async () => { + const provider = new OpenAIProvider({ type: 'openai', apiKey: 'test-key' }); + const create = vi.fn() + .mockResolvedValueOnce({ + choices: [{ + finish_reason: 'tool_calls', + message: { + content: null, + tool_calls: [{ + id: 'call_sync', + type: 'function', + function: { + name: 'lookup_order', + arguments: '{"orderId":"123"}', + }, + }], + }, + }], + }) + .mockResolvedValueOnce({ + choices: [{ + finish_reason: 'stop', + message: { + content: 'Order 123 is shipped.', + }, + }], + }); + + (provider as unknown as { client: { chat: { completions: { create: typeof create } } } }).client.chat.completions.create = create; + + const toolExecutor = vi.fn().mockResolvedValue({ + success: true, + data: { status: 'shipped' }, + }); + + const response = await provider.chatSync(createParams({ toolExecutor })); + + expect(response).toBe('Order 123 is shipped.'); + expect(toolExecutor).toHaveBeenCalledOnce(); + }); + + it('ClaudeProvider resumes after tool_use blocks', async () => { + const provider = new ClaudeProvider({ type: 'claude', apiKey: 'test-key' }); + const stream = vi.fn() + .mockReturnValueOnce({ + async *[Symbol.asyncIterator]() {}, + finalMessage: 
vi.fn().mockResolvedValue({ + stop_reason: 'tool_use', + content: [{ + type: 'tool_use', + id: 'toolu_1', + name: 'lookup_order', + input: { orderId: '123' }, + }], + }), + }) + .mockReturnValueOnce({ + async *[Symbol.asyncIterator]() { + yield { type: 'content_block_delta', delta: { type: 'text_delta', text: 'Order 123 is shipped.' } }; + }, + finalMessage: vi.fn().mockResolvedValue({ + stop_reason: 'end_turn', + content: [{ type: 'text', text: 'Order 123 is shipped.' }], + }), + }); + + (provider as unknown as { client: { messages: { stream: typeof stream } } }).client.messages.stream = stream; + + const toolExecutor = vi.fn().mockResolvedValue({ + success: true, + data: { status: 'shipped' }, + }); + + const chunks = await collect(provider.chat(createParams({ toolExecutor }))); + + expect(chunks).toEqual(['Order 123 is shipped.']); + expect(toolExecutor).toHaveBeenCalledWith({ + id: 'toolu_1', + name: 'lookup_order', + input: { orderId: '123' }, + }); + + const secondMessages = stream.mock.calls[1][0].messages; + expect(secondMessages).toEqual(expect.arrayContaining([ + expect.objectContaining({ role: 'assistant' }), + expect.objectContaining({ role: 'user' }), + ])); + }); + + it('GeminiProvider sends function responses back into the chat session', async () => { + const provider = new GeminiProvider({ type: 'gemini', apiKey: 'test-key' }); + const sendMessageStream = vi.fn() + .mockResolvedValueOnce({ + stream: (async function* () {})(), + response: Promise.resolve({ + functionCalls: () => [{ name: 'lookup_order', args: { orderId: '123' } }], + text: () => '', + }), + }) + .mockResolvedValueOnce({ + stream: (async function* () { + yield { text: () => 'Order 123 is shipped.' }; + })(), + response: Promise.resolve({ + functionCalls: () => undefined, + text: () => 'Order 123 is shipped.', + }), + }); + + const startChat = vi.fn().mockReturnValue({ sendMessageStream }); + const getGenerativeModel = vi.fn().mockReturnValue({ startChat }); + (provider as unknown as { genAI: { getGenerativeModel: typeof getGenerativeModel } }).genAI.getGenerativeModel = getGenerativeModel; + + const toolExecutor = vi.fn().mockResolvedValue({ + success: true, + data: { status: 'shipped' }, + }); + + const chunks = await collect(provider.chat(createParams({ toolExecutor }))); + + expect(chunks).toEqual(['Order 123 is shipped.']); + expect(toolExecutor).toHaveBeenCalledOnce(); + + const secondRequest = sendMessageStream.mock.calls[1][0]; + expect(secondRequest).toEqual([ + expect.objectContaining({ + functionResponse: { + name: 'lookup_order', + response: expect.objectContaining({ success: true }), + }, + }), + ]); + }); + + it('OpenAIProvider stops after the max tool-call iterations guard', async () => { + const provider = new OpenAIProvider({ type: 'openai', apiKey: 'test-key' }); + const create = vi.fn().mockImplementation(() => ({ + async *[Symbol.asyncIterator]() { + yield { + choices: [{ + delta: { + tool_calls: [{ + index: 0, + id: 'call_loop', + function: { + name: 'lookup_order', + arguments: '{"orderId":"123"}', + }, + }], + }, + }], + }; + yield { choices: [{ delta: {}, finish_reason: 'tool_calls' }] }; + }, + })); + + (provider as unknown as { client: { chat: { completions: { create: typeof create } } } }).client.chat.completions.create = create; + + const toolExecutor = vi.fn().mockResolvedValue({ + success: true, + data: { status: 'still-looping' }, + }); + + await expect(collect(provider.chat(createParams({ toolExecutor })))).rejects.toThrow( + 'Exceeded maximum tool rounds (5).' 
+ ); + expect(toolExecutor).toHaveBeenCalledTimes(5); + }); +}); diff --git a/packages/server/src/handler.ts b/packages/server/src/handler.ts index 14b7bf0..e821cea 100644 --- a/packages/server/src/handler.ts +++ b/packages/server/src/handler.ts @@ -17,6 +17,7 @@ export function createChatHandler(config: ChatCopsServerConfig) { const analytics = config.analytics ? new AnalyticsCollector() : null; const webhooks = config.webhooks?.length ? new WebhookDispatcher(config.webhooks) : null; const toolDefs = config.tools?.map(toolToDefinition) ?? []; + const toolsByName = new Map((config.tools ?? []).map((tool) => [tool.name, tool])); // Periodic cleanup const cleanupInterval = setInterval(() => rateLimiter.cleanup(), 60_000); @@ -94,15 +95,50 @@ export function createChatHandler(config: ChatCopsServerConfig) { // Stream response let fullResponse = ''; + const pendingEvents: string[] = []; try { for await (const chunk of provider.chat({ messages, systemPrompt, tools: toolDefs.length > 0 ? toolDefs : undefined, + toolExecutor: async (toolCall) => { + const tool = toolsByName.get(toolCall.name); + if (!tool) { + return { + success: false, + message: `Unknown tool: ${toolCall.name}`, + }; + } + + const result = await tool.execute(toolCall.input); + + if (tool.name === 'capture_lead' && result.success) { + analytics?.track('lead:captured', { conversationId: req.conversationId }); + pendingEvents.push(JSON.stringify({ + leadCaptured: true, + leadData: result.data, + })); + if (webhooks) { + await webhooks.dispatch('lead:captured', { + lead: result.data, + conversationId: req.conversationId, + }); + } + } + + return result; + }, })) { + while (pendingEvents.length > 0) { + yield pendingEvents.shift()!; + } fullResponse += chunk; yield JSON.stringify({ content: chunk }); } + + while (pendingEvents.length > 0) { + yield pendingEvents.shift()!; + } } catch (err) { console.error('[chatcops] Provider error:', err); yield JSON.stringify({ error: 'provider_error' }); diff --git a/packages/server/tests/handler.test.ts b/packages/server/tests/handler.test.ts index d5a5c0c..5abc337 100644 --- a/packages/server/tests/handler.test.ts +++ b/packages/server/tests/handler.test.ts @@ -1,8 +1,31 @@ -import { describe, it, expect, vi, afterEach } from 'vitest'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +vi.mock('@chatcops/core', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + createProvider: vi.fn(), + }; +}); + +import { createProvider } from '@chatcops/core'; import { chatRequestSchema } from '../src/config.js'; import { createChatHandler } from '../src/handler.js'; +const mockedCreateProvider = vi.mocked(createProvider); + describe('createChatHandler', () => { + beforeEach(() => { + mockedCreateProvider.mockReset(); + mockedCreateProvider.mockResolvedValue({ + name: 'test-provider', + async *chat() {}, + async chatSync() { + return ''; + }, + }); + }); + afterEach(() => { vi.restoreAllMocks(); }); @@ -19,6 +42,180 @@ describe('createChatHandler', () => { cleanup(); expect(clearIntervalSpy).toHaveBeenCalledOnce(); }); + + it('passes a working tool executor into the streaming provider', async () => { + const weatherTool = { + name: 'get_weather', + description: 'Look up the weather', + parameters: { + city: { type: 'string' as const, description: 'City name' }, + }, + required: ['city'], + execute: vi.fn().mockResolvedValue({ + success: true, + data: { forecast: 'sunny' }, + message: 'Sunny in Boston', + }), + }; + + 
mockedCreateProvider.mockResolvedValue({ + name: 'test-provider', + async *chat(params) { + const toolResult = await params.toolExecutor?.({ + id: 'tool-1', + name: 'get_weather', + input: { city: 'Boston' }, + }); + + yield `Forecast: ${(toolResult?.data as { forecast: string }).forecast}`; + }, + async chatSync() { + return ''; + }, + }); + + const { handleChat } = createChatHandler({ + provider: { type: 'openai', apiKey: 'test-key' }, + systemPrompt: 'Test', + cors: '*', + tools: [weatherTool], + }); + + const events: Array> = []; + for await (const chunk of handleChat({ + conversationId: 'conv-1', + message: 'What is the weather in Boston?', + })) { + events.push(JSON.parse(chunk)); + } + + expect(weatherTool.execute).toHaveBeenCalledWith({ city: 'Boston' }); + expect(events).toEqual([ + { content: 'Forecast: sunny' }, + { done: true }, + ]); + }); + + it('tracks lead capture analytics when a tool succeeds', async () => { + const leadTool = { + name: 'capture_lead', + description: 'Capture a lead', + parameters: { + email: { type: 'string' as const, description: 'Email address' }, + }, + required: ['email'], + execute: vi.fn().mockResolvedValue({ + success: true, + data: { email: 'lead@example.com' }, + message: 'Lead stored', + }), + }; + + mockedCreateProvider.mockResolvedValue({ + name: 'test-provider', + async *chat(params) { + await params.toolExecutor?.({ + id: 'tool-1', + name: 'capture_lead', + input: { email: 'lead@example.com' }, + }); + + yield 'Thanks, we will reach out shortly.'; + }, + async chatSync() { + return ''; + }, + }); + + const { handleChat, getAnalytics } = createChatHandler({ + provider: { type: 'claude', apiKey: 'test-key' }, + systemPrompt: 'Test', + cors: '*', + analytics: true, + tools: [leadTool], + }); + + for await (const _chunk of handleChat({ + conversationId: 'conv-1', + message: 'My email is lead@example.com', + })) { + // Drain the stream. + } + + expect(leadTool.execute).toHaveBeenCalledWith({ email: 'lead@example.com' }); + expect(getAnalytics()).toMatchObject({ + totalConversations: 1, + leadsCapture: 1, + }); + }); + + it('emits leadCaptured SSE metadata and dispatches webhook payload with conversation id', async () => { + const leadTool = { + name: 'capture_lead', + description: 'Capture a lead', + parameters: { + email: { type: 'string' as const, description: 'Email address' }, + }, + required: ['email'], + execute: vi.fn().mockResolvedValue({ + success: true, + data: { email: 'lead@example.com' }, + message: 'Lead stored', + }), + }; + + const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValue( + new Response('', { status: 200 }) + ); + + mockedCreateProvider.mockResolvedValue({ + name: 'test-provider', + async *chat(params) { + await params.toolExecutor?.({ + id: 'tool-1', + name: 'capture_lead', + input: { email: 'lead@example.com' }, + }); + + yield 'Thanks, we will reach out shortly.'; + }, + async chatSync() { + return ''; + }, + }); + + const { handleChat } = createChatHandler({ + provider: { type: 'claude', apiKey: 'test-key' }, + systemPrompt: 'Test', + cors: '*', + tools: [leadTool], + webhooks: [{ url: 'https://hooks.test/lead', events: ['lead:captured'] }], + }); + + const events: Array> = []; + for await (const chunk of handleChat({ + conversationId: 'conv-42', + message: 'My email is lead@example.com', + })) { + events.push(JSON.parse(chunk)); + } + + expect(events).toEqual([ + { leadCaptured: true, leadData: { email: 'lead@example.com' } }, + { content: 'Thanks, we will reach out shortly.' 
},
+      { done: true },
+    ]);
+
+    expect(fetchSpy).toHaveBeenCalledOnce();
+    const requestBody = JSON.parse(String(fetchSpy.mock.calls[0]?.[1]?.body));
+    expect(requestBody).toMatchObject({
+      event: 'lead:captured',
+      data: {
+        lead: { email: 'lead@example.com' },
+        conversationId: 'conv-42',
+      },
+    });
+  });
 });
 
 describe('chatRequestSchema', () => {
diff --git a/packages/widget/src/api/types.ts b/packages/widget/src/api/types.ts
index 597ec63..e04ef54 100644
--- a/packages/widget/src/api/types.ts
+++ b/packages/widget/src/api/types.ts
@@ -15,5 +15,6 @@
   done?: boolean;
   error?: string;
   leadCaptured?: boolean;
+  leadData?: Record<string, unknown>;
   suggestedActions?: string[];
 }
diff --git a/packages/widget/src/index.ts b/packages/widget/src/index.ts
index ca6b34e..136ef5b 100644
--- a/packages/widget/src/index.ts
+++ b/packages/widget/src/index.ts
@@ -28,11 +28,11 @@ const ChatCops = {
   },
 
   on(event: string, handler: (...args: unknown[]) => void): void {
-    instance?.on(event as 'open' | 'close' | 'message' | 'error', handler);
+    instance?.on(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured', handler);
   },
 
   off(event: string, handler: (...args: unknown[]) => void): void {
-    instance?.off(event as 'open' | 'close' | 'message' | 'error', handler);
+    instance?.off(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured', handler);
   },
 };
diff --git a/packages/widget/src/widget.ts b/packages/widget/src/widget.ts
index 7e563b9..950e2bc 100644
--- a/packages/widget/src/widget.ts
+++ b/packages/widget/src/widget.ts
@@ -42,9 +42,10 @@ export interface WidgetConfig {
   onClose?: () => void;
   onMessage?: (message: MessageData) => void;
   onError?: (error: Error) => void;
+  onLeadCaptured?: (leadData?: Record<string, unknown>) => void;
 }
 
-type WidgetEventType = 'open' | 'close' | 'message' | 'error';
+type WidgetEventType = 'open' | 'close' | 'message' | 'error' | 'leadCaptured';
 type WidgetEventHandler = (...args: unknown[]) => void;
 
 export class Widget {
@@ -272,6 +273,11 @@ export class Widget {
         if (chunk.done) break;
 
+        if (chunk.leadCaptured) {
+          this.config.onLeadCaptured?.(chunk.leadData);
+          this.emit('leadCaptured', chunk.leadData);
+        }
+
         if (chunk.content) {
           if (firstChunk) {
             this.panel.messages.removeTyping();
diff --git a/packages/widget/tests/widget.test.ts b/packages/widget/tests/widget.test.ts
new file mode 100644
index 0000000..74db6db
--- /dev/null
+++ b/packages/widget/tests/widget.test.ts
@@ -0,0 +1,33 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { Widget } from '../src/widget.js';
+
+describe('Widget lead capture handling', () => {
+  beforeEach(() => {
+    document.body.innerHTML = '';
+  });
+
+  it('fires the leadCaptured callback and event when the stream includes lead data', async () => {
+    const onLeadCaptured = vi.fn();
+    const widget = new Widget({
+      apiUrl: 'https://api.test/chat',
+      onLeadCaptured,
+    });
+    widget.init();
+
+    const eventHandler = vi.fn();
+    widget.on('leadCaptured', eventHandler);
+
+    const sendMessage = vi.fn(async function* () {
+      yield { leadCaptured: true, leadData: { email: 'lead@example.com' } };
+      yield { content: 'Thanks, we will follow up soon.' };
+      yield { done: true };
+    });
+
+    (widget as unknown as { client: { sendMessage: typeof sendMessage } }).client.sendMessage = sendMessage;
+
+    await (widget as unknown as { handleSend(text: string): Promise<void> }).handleSend('Hello');
+
+    expect(onLeadCaptured).toHaveBeenCalledWith({ email: 'lead@example.com' });
+    expect(eventHandler).toHaveBeenCalledWith({ email: 'lead@example.com' });
+  });
+});

From 4d0209d2a128c6a0dc32ab93686d23cfc12680e1 Mon Sep 17 00:00:00 2001
From: Ashok161
Date: Sun, 22 Mar 2026 15:59:57 +0530
Subject: [PATCH 2/2] fix(widget): correct event typings

---
 packages/widget/src/index.ts  |  4 ++--
 packages/widget/src/widget.ts | 11 +++++++----
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/packages/widget/src/index.ts b/packages/widget/src/index.ts
index 136ef5b..ccf71a1 100644
--- a/packages/widget/src/index.ts
+++ b/packages/widget/src/index.ts
@@ -28,11 +28,11 @@ const ChatCops = {
   },
 
   on(event: string, handler: (...args: unknown[]) => void): void {
-    instance?.on(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured', handler);
+    instance?.on(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured' | 'preChatSubmit', handler);
   },
 
   off(event: string, handler: (...args: unknown[]) => void): void {
-    instance?.off(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured', handler);
+    instance?.off(event as 'open' | 'close' | 'message' | 'error' | 'leadCaptured' | 'preChatSubmit', handler);
   },
 };
diff --git a/packages/widget/src/widget.ts b/packages/widget/src/widget.ts
index 13c96eb..bbfafb9 100644
--- a/packages/widget/src/widget.ts
+++ b/packages/widget/src/widget.ts
@@ -62,13 +62,16 @@ export interface WidgetConfig {
   onMessage?: (message: MessageData) => void;
   onError?: (error: Error) => void;
   onLeadCaptured?: (leadData?: Record<string, unknown>) => void;
-}
-
-type WidgetEventType = 'open' | 'close' | 'message' | 'error' | 'leadCaptured';
   onPreChatSubmit?: (data: Record<string, unknown>) => void;
 }
 
-type WidgetEventType = 'open' | 'close' | 'message' | 'error' | 'preChatSubmit';
+type WidgetEventType =
+  | 'open'
+  | 'close'
+  | 'message'
+  | 'error'
+  | 'leadCaptured'
+  | 'preChatSubmit';
 type WidgetEventHandler = (...args: unknown[]) => void;
 
 export class Widget {
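
Consumer-side sketch (not part of the patch) of how the new tool-calling loop is meant to be used: the config and streamed event shapes follow the handler tests above, while the `@chatcops/server` import path, the env-var handling, and the tool body are assumptions for illustration only.

import { createChatHandler } from '@chatcops/server';

const { handleChat } = createChatHandler({
  provider: { type: 'openai', apiKey: process.env.OPENAI_API_KEY ?? '' },
  systemPrompt: 'You are a support agent.',
  cors: '*',
  tools: [{
    name: 'capture_lead',
    description: 'Store a visitor email for follow-up',
    parameters: { email: { type: 'string', description: 'Email address' } },
    required: ['email'],
    // Hypothetical executor: its ToolResult is serialized back to the model by
    // the provider loop; per this patch, a successful capture_lead also feeds
    // analytics, webhooks, and the leadCaptured SSE metadata.
    execute: async (input: Record<string, unknown>) => ({
      success: true,
      data: { email: String(input.email) },
    }),
  }],
});

// Tool rounds run inside the provider, so the stream only surfaces the
// leadCaptured metadata, content chunks, and the final done event.
for await (const chunk of handleChat({ conversationId: 'conv-1', message: 'Reach me at lead@example.com' })) {
  console.log(JSON.parse(chunk));
}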