Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .changeset/streaming-tool-calls.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
"@chatcops/core": patch
"@chatcops/server": patch
"@chatcops/widget": patch
---

Execute provider tool calls during chat loops so streaming and sync responses can continue after tool use. This also wires successful lead-capture tool executions into the server analytics, webhook flow, and widget lead-captured callbacks/events.
2 changes: 2 additions & 0 deletions packages/core/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ export type {
ToolParameter,
ToolDefinition,
ToolResult,
ProviderToolCall,
ProviderToolExecutor,
WebhookConfig,
ProviderConfig,
ProviderChatParams,
Expand Down
174 changes: 141 additions & 33 deletions packages/core/src/providers/claude.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,45 @@ import Anthropic from '@anthropic-ai/sdk';
import type { ProviderConfig, ProviderChatParams } from '../types.js';
import type { AIProvider } from './base.js';
import { toProviderMessages, toClaudeTools } from './base.js';
import {
MAX_TOOL_ROUNDS,
executeToolCall,
serializeToolResult,
} from './tool-execution.js';

const DEFAULT_MODEL = 'claude-haiku-4-5-20251001';

function toClaudeMessages(params: ProviderChatParams): Anthropic.Messages.MessageParam[] {
return toProviderMessages(params.messages).map((message) => ({
role: message.role as 'user' | 'assistant',
content: message.content,
}));
}

/**
 * Re-encode a Claude response's content blocks as request-side content params
 * so the assistant turn can be echoed back into the conversation history.
 * Text and tool_use blocks are preserved; any other block type is dropped.
 */
function toAssistantContentBlocks(
  content: Anthropic.Messages.Message['content']
): Anthropic.Messages.ContentBlockParam[] {
  return content.flatMap((block): Anthropic.Messages.ContentBlockParam[] => {
    switch (block.type) {
      case 'text':
        return [{ type: 'text', text: block.text }];
      case 'tool_use':
        return [{ type: 'tool_use', id: block.id, name: block.name, input: block.input }];
      default:
        // Unknown/unsupported block kinds (e.g. thinking) are intentionally omitted.
        return [];
    }
  });
}

export class ClaudeProvider implements AIProvider {
name = 'claude';
private client: Anthropic;
Expand All @@ -16,43 +52,115 @@ export class ClaudeProvider implements AIProvider {
}

async *chat(params: ProviderChatParams): AsyncGenerator<string> {
const messages = toProviderMessages(params.messages).map((m) => ({
role: m.role as 'user' | 'assistant',
content: m.content,
}));

const stream = this.client.messages.stream({
model: this.model,
max_tokens: params.maxTokens ?? 1024,
system: params.systemPrompt,
messages,
...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}),
...(params.temperature != null ? { temperature: params.temperature } : {}),
});

for await (const event of stream) {
if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
yield event.delta.text;
const messages = toClaudeMessages(params);
let toolRounds = 0;

while (true) {
const stream = this.client.messages.stream({
model: this.model,
max_tokens: params.maxTokens ?? 1024,
system: params.systemPrompt,
messages,
...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}),
...(params.temperature != null ? { temperature: params.temperature } : {}),
});

for await (const event of stream) {
if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
yield event.delta.text;
}
}

const message = await stream.finalMessage();
const toolUses = message.content.filter((block) => block.type === 'tool_use');

if (message.stop_reason !== 'tool_use' || toolUses.length === 0) {
return;
}

if (toolRounds >= MAX_TOOL_ROUNDS) {
throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`);
}
toolRounds += 1;

messages.push({
role: 'assistant',
content: toAssistantContentBlocks(message.content),
});

const toolResults = await Promise.all(toolUses.map(async (toolUse) => {
const result = await executeToolCall(params, {
id: toolUse.id,
name: toolUse.name,
input: toolUse.input as Record<string, unknown>,
});

return {
type: 'tool_result' as const,
tool_use_id: toolUse.id,
content: JSON.stringify(serializeToolResult(result)),
is_error: !result.success,
};
}));

messages.push({
role: 'user',
content: toolResults,
});
}
}

async chatSync(params: ProviderChatParams): Promise<string> {
const messages = toProviderMessages(params.messages).map((m) => ({
role: m.role as 'user' | 'assistant',
content: m.content,
}));

const response = await this.client.messages.create({
model: this.model,
max_tokens: params.maxTokens ?? 1024,
system: params.systemPrompt,
messages,
...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}),
...(params.temperature != null ? { temperature: params.temperature } : {}),
});

const textBlock = response.content.find((b) => b.type === 'text');
return textBlock?.type === 'text' ? textBlock.text : '';
const messages = toClaudeMessages(params);
let toolRounds = 0;

while (true) {
const response = await this.client.messages.create({
model: this.model,
max_tokens: params.maxTokens ?? 1024,
system: params.systemPrompt,
messages,
...(params.tools?.length ? { tools: toClaudeTools(params.tools) } : {}),
...(params.temperature != null ? { temperature: params.temperature } : {}),
});

const toolUses = response.content.filter((block) => block.type === 'tool_use');
if (response.stop_reason !== 'tool_use' || toolUses.length === 0) {
return response.content
.filter((block) => block.type === 'text')
.map((block) => block.text)
.join('');
}

if (toolRounds >= MAX_TOOL_ROUNDS) {
throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`);
}
toolRounds += 1;

messages.push({
role: 'assistant',
content: toAssistantContentBlocks(response.content),
});

const toolResults = await Promise.all(toolUses.map(async (toolUse) => {
const result = await executeToolCall(params, {
id: toolUse.id,
name: toolUse.name,
input: toolUse.input as Record<string, unknown>,
});

return {
type: 'tool_result' as const,
tool_use_id: toolUse.id,
content: JSON.stringify(serializeToolResult(result)),
is_error: !result.success,
};
}));

messages.push({
role: 'user',
content: toolResults,
});
}
}
}
126 changes: 100 additions & 26 deletions packages/core/src/providers/gemini.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,38 @@
import { GoogleGenerativeAI } from '@google/generative-ai';
import { GoogleGenerativeAI, type Content, type Part } from '@google/generative-ai';
import type { ProviderConfig, ProviderChatParams } from '../types.js';
import type { AIProvider } from './base.js';
import { toProviderMessages, toGeminiTools } from './base.js';
import {
MAX_TOOL_ROUNDS,
executeToolCall,
serializeToolResult,
} from './tool-execution.js';

const DEFAULT_MODEL = 'gemini-2.0-flash';
// Monotonic per-process counter backing synthetic tool-call ids. The Gemini
// API does not assign ids to function calls, so we mint our own.
let toolCallSequence = 0;

/** Mint a unique synthetic id for a Gemini function call, e.g. "gemini_tool_call_1". */
function createToolCallId() {
  return `gemini_tool_call_${++toolCallSequence}`;
}

/**
 * Build the Gemini chat history from every message except the last one
 * (the final message is sent separately via sendMessage/sendMessageStream).
 * Assistant turns map to the "model" role; everything else maps to "user".
 */
function toGeminiHistory(params: ProviderChatParams): Content[] {
  const history: Content[] = [];
  const allMessages = toProviderMessages(params.messages);
  for (const msg of allMessages.slice(0, -1)) {
    history.push({
      role: msg.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: msg.content }],
    });
  }
  return history;
}

/**
 * Return the content of the final message in the conversation — the turn that
 * gets sent to Gemini as the active request.
 *
 * @throws Error when the conversation is empty or the last message is blank.
 */
function getLastMessage(params: ProviderChatParams): string {
  const messages = toProviderMessages(params.messages);
  const last = messages.length > 0 ? messages[messages.length - 1].content : '';
  if (last === '') {
    throw new Error('Cannot send empty message');
  }
  return last;
}

export class GeminiProvider implements AIProvider {
name = 'gemini';
Expand All @@ -22,29 +51,52 @@ export class GeminiProvider implements AIProvider {
...(params.tools?.length ? { tools: toGeminiTools(params.tools) } : {}),
});

const messages = toProviderMessages(params.messages);
const history = messages.slice(0, -1).map((m) => ({
role: m.role === 'assistant' ? 'model' : 'user',
parts: [{ text: m.content }],
}));

const chat = model.startChat({
history,
history: toGeminiHistory(params),
generationConfig: {
maxOutputTokens: params.maxTokens ?? 1024,
...(params.temperature != null ? { temperature: params.temperature } : {}),
},
});

const lastMessage = messages[messages.length - 1]?.content ?? '';
if (!lastMessage) throw new Error('Cannot send empty message');
const result = await chat.sendMessageStream(lastMessage);
let toolRounds = 0;
let request: string | Part[] = getLastMessage(params);

while (true) {
const result = await chat.sendMessageStream(request);

for await (const chunk of result.stream) {
const text = chunk.text();
if (text) {
yield text;
}
}

const response = await result.response;
const functionCalls = response.functionCalls() ?? [];
if (functionCalls.length === 0) {
return;
}

for await (const chunk of result.stream) {
const text = chunk.text();
if (text) {
yield text;
if (toolRounds >= MAX_TOOL_ROUNDS) {
throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`);
}
toolRounds += 1;

request = await Promise.all(functionCalls.map(async (call) => {
const result = await executeToolCall(params, {
id: createToolCallId(),
name: call.name,
input: call.args as Record<string, unknown>,
});

return {
functionResponse: {
name: call.name,
response: serializeToolResult(result),
},
};
}));
}
}

Expand All @@ -55,23 +107,45 @@ export class GeminiProvider implements AIProvider {
...(params.tools?.length ? { tools: toGeminiTools(params.tools) } : {}),
});

const messages = toProviderMessages(params.messages);
const history = messages.slice(0, -1).map((m) => ({
role: m.role === 'assistant' ? 'model' : 'user',
parts: [{ text: m.content }],
}));

const chat = model.startChat({
history,
history: toGeminiHistory(params),
generationConfig: {
maxOutputTokens: params.maxTokens ?? 1024,
...(params.temperature != null ? { temperature: params.temperature } : {}),
},
});

const lastMessage = messages[messages.length - 1]?.content ?? '';
if (!lastMessage) throw new Error('Cannot send empty message');
const result = await chat.sendMessage(lastMessage);
return result.response.text();
let toolRounds = 0;
let request: string | Part[] = getLastMessage(params);

while (true) {
const result = await chat.sendMessage(request);
const response = result.response;
const functionCalls = response.functionCalls() ?? [];

if (functionCalls.length === 0) {
return response.text();
}

if (toolRounds >= MAX_TOOL_ROUNDS) {
throw new Error(`Exceeded maximum tool rounds (${MAX_TOOL_ROUNDS}).`);
}
toolRounds += 1;

request = await Promise.all(functionCalls.map(async (call) => {
const toolResult = await executeToolCall(params, {
id: createToolCallId(),
name: call.name,
input: call.args as Record<string, unknown>,
});

return {
functionResponse: {
name: call.name,
response: serializeToolResult(toolResult),
},
};
}));
}
}
}
Loading
Loading