-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathagent.py
More file actions
123 lines (102 loc) · 5.31 KB
/
agent.py
File metadata and controls
123 lines (102 loc) · 5.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
"""
UTCP OpenAI Integration Example
This example demonstrates how to:
1. Initialize a UTCP client with tool providers from a config file
2. For each user request, search for relevant tools.
3. Instruct OpenAI to respond with a JSON for a tool call.
4. Parse the JSON and execute the tool call using the UTCP client.
5. Return the results to OpenAI for a final response.
"""
import json
import re
from typing import Dict, Any, List
import openai
from utcp.client.utcp_client import UtcpClient
from utcp.shared.tool import Tool
class Agent:
    """Conversational agent that pairs an OpenAI chat model with UTCP tools.

    For each user turn it (1) asks the model to restate the current task,
    (2) searches the UTCP client for relevant tools, (3) instructs the model
    to reply with a JSON tool call, and (4) parses and executes that call,
    recursing once more so the model can explain the result in plain text.
    """

    # Maximum recursion depth for follow-up calls to `chat` (tool-result
    # explanations, malformed-response retries). Prevents infinite loops.
    MAX_DEPTH = 2

    def __init__(self, utcp_client: UtcpClient, openai_client: openai.AsyncOpenAI):
        self.utcp_client = utcp_client
        self.openai_client = openai_client
        # Running transcript of {"role": ..., "content": ...} dicts. User
        # entries store the bare prompt, without the appended tool listing.
        self.conversation_history: List[Dict[str, str]] = []

    async def _get_openai_response(self, messages: List[Dict[str, str]]) -> str:
        """Send *messages* to the chat model and return the reply text."""
        response = await self.openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
        )
        return response.choices[0].message.content

    def _format_tools_for_prompt(self, tools: List[Tool]) -> str:
        """Serialize UTCP tools as a pretty-printed JSON array for the prompt."""
        return json.dumps([tool.model_dump() for tool in tools], indent=2)

    async def chat(self, user_prompt: str, depth: int = 0) -> None:
        """Handle one user turn, possibly recursing to explain tool results.

        Args:
            user_prompt: The user's message, or an internally generated
                follow-up prompt (tool output, error report).
            depth: Current recursion depth; the call is a no-op once it
                exceeds ``MAX_DEPTH``.
        """
        if depth > self.MAX_DEPTH:
            return
        # Ask the model to restate the task so the tool search gets a
        # focused query rather than the raw (possibly rambling) prompt.
        current_task = await self._get_openai_response(
            self.conversation_history + [
                {"role": "user", "content": user_prompt},
                {"role": "user", "content": "Based on this message history, what is the current task?\n\n"},
            ]
        )
        print("\nSearching for relevant tools for task: " + current_task)
        relevant_tools = await self.utcp_client.search_tools(current_task, limit=10)
        if relevant_tools:
            print(f"Found {len(relevant_tools)} relevant tools.")
            for tool in relevant_tools:
                print(f"- {tool.name}")
        else:
            print("No relevant tools found.")
        tools_json_string = self._format_tools_for_prompt(relevant_tools)
        answer_tool = json.dumps({'tool_name': 'answer', 'arguments': {'text': 'your answer here'}})
        # NOTE: adjacent literals are concatenated; each sentence ends with a
        # trailing space so the prompt does not contain run-on text.
        system_prompt = (
            "You are a helpful assistant. When you need to use a tool, you MUST respond with a JSON object "
            "with 'tool_name' and 'arguments' keys. Do not add any other text. The arguments must be a JSON object. "
            "For example: {\"tool_name\": \"some_tool.name\", \"arguments\": {\"arg1\": \"value1\"}}. "
            "You will receive a list of the most relevant tools, based on the user query. "
            "If you want to answer the user, you have a special tool called 'answer'. "
            f"To use it, simply write {answer_tool}."
        )
        messages = [
            {"role": "system", "content": system_prompt},
        ]
        messages.extend(self.conversation_history)
        user_prompt_and_tools = user_prompt + f"\n\nRelevant tools:\n{answer_tool}\n{tools_json_string}"
        # History keeps the bare prompt; only the outgoing message carries
        # the (large) tool listing, so the transcript stays compact.
        self.conversation_history.append({"role": "user", "content": user_prompt})
        messages.append({"role": "user", "content": user_prompt_and_tools})
        print("\nSending request to OpenAI...")
        assistant_response = await self._get_openai_response(messages)
        self.conversation_history.append({"role": "assistant", "content": assistant_response})
        # Prefer a fenced ```json block; fall back to the first
        # brace-delimited span anywhere in the reply.
        json_match = re.search(r'```json\n({.*?})\n```', assistant_response, re.DOTALL)
        if not json_match:
            json_match = re.search(r'({.*})', assistant_response, re.DOTALL)
        if not json_match:
            # No JSON at all: tell the model and let it retry.
            await self.chat(f"No json in agents response: {assistant_response}", depth + 1)
            return
        json_string = json_match.group(1)
        try:
            tool_call_data = json.loads(json_string)
        except json.JSONDecodeError:
            await self.chat(f"JSONDecodeError: {json_string}", depth + 1)
            return
        if not ("tool_name" in tool_call_data and "arguments" in tool_call_data):
            await self.chat(f"Wrong tool call format: {tool_call_data}", depth + 1)
            return
        tool_name = tool_call_data["tool_name"]
        arguments = tool_call_data["arguments"]
        if tool_name == "answer":
            # The pseudo-tool 'answer' means "reply directly" — print and stop.
            print(f"\nAssistant: {arguments['text']}")
            return
        print(f"\nExecuting tool call: {tool_name}")
        print(f"Arguments: {json.dumps(arguments, indent=2)}")
        try:
            result = await self.utcp_client.call_tool(tool_name, arguments)
            print(f"Result: {result}")
            tool_output = str(result)
        except Exception as e:
            # Tool failures are fed back to the model rather than raised,
            # so it can report the error to the user.
            error_message = f"Error calling {tool_name}: {str(e)}"
            print(f"Error: {error_message}")
            tool_output = error_message
        # Recurse so the model can explain the tool output in plain language.
        await self.chat(f"Explain the tool results to the user in plain text.\nTool results: {tool_output}", depth + 1)