Skip to content

Commit 0e266f8

Browse files
author
Pierre
committed
feat: Add fetch_completions method to Run class
Added ability to fetch completions for a run via the /v1/_/agents/{agent_id}/runs/{run_id}/completions endpoint. Changes:
- Added CompletionUsage, Message, Completion, and CompletionsResponse models
- Added fetch_completions() method to Run class
- Added comprehensive test suite with success and error cases
- Added detailed test documentation explaining test rationale

The fetch_completions method allows retrieving the full conversation history and token usage information for a completed run.
1 parent 59be121 commit 0e266f8

File tree

2 files changed

+141
-1
lines changed

2 files changed

+141
-1
lines changed

workflowai/core/domain/run.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,45 @@
77
from workflowai import env
88
from workflowai.core import _common_types
99
from workflowai.core.client import _types
10+
from workflowai.core.client._api import APIClient
1011
from workflowai.core.domain.errors import BaseError
1112
from workflowai.core.domain.task import AgentOutput
1213
from workflowai.core.domain.tool_call import ToolCall, ToolCallRequest, ToolCallResult
1314
from workflowai.core.domain.version import Version
1415

1516

17+
class CompletionUsage(BaseModel):
    """Token counts and cost figures reported for a single completion."""

    # Output (completion) side of the exchange.
    completion_token_count: int
    completion_cost_usd: float
    reasoning_token_count: int

    # Input (prompt) side of the exchange, including cached and media tokens.
    prompt_token_count: int
    prompt_token_count_cached: int
    prompt_cost_usd: float
    prompt_audio_token_count: int
    prompt_audio_duration_seconds: float
    prompt_image_count: int

    # Context window size of the model that produced this completion.
    model_context_window_size: int
29+
30+
31+
class Message(BaseModel):
    """One chat message within a completion's conversation."""

    # Speaker of the message — e.g. "system", "user" or "assistant".
    role: str
    # Plain-text content of the message.
    content: str
35+
36+
37+
class Completion(BaseModel):
    """A single model completion: its input messages, raw response and usage."""

    # Conversation history sent to the model for this completion.
    messages: list[Message]
    # Raw text the model returned.
    response: str
    # Token counts and costs for this completion.
    usage: CompletionUsage
42+
43+
44+
class CompletionsResponse(BaseModel):
    """Payload returned by the run completions API endpoint."""

    # All completions recorded for the run, in API order.
    completions: list[Completion]
47+
48+
1649
class Run(BaseModel, Generic[AgentOutput]):
1750
"""
1851
A run is an instance of a agent with a specific input and output.
@@ -125,8 +158,31 @@ def __str__(self) -> str:
125158
def run_url(self):
126159
return f"{env.WORKFLOWAI_APP_URL}/agents/{self.agent_id}/runs/{self.id}"
127160

161+
async def fetch_completions(self) -> CompletionsResponse:
    """Retrieve the completions recorded for this run.

    Both the agent backref and the run id are required: the agent supplies
    the API client and both identifiers appear in the endpoint URL.

    Returns:
        CompletionsResponse: the completions for this run, each carrying its
        messages, raw response and token usage information.

    Raises:
        ValueError: if the agent is not set or if the run id is not set.
    """
    # Fail fast with explicit errors before attempting any network call.
    if not self._agent:
        raise ValueError("Agent is not set")
    if not self.id:
        raise ValueError("Run id is not set")

    # The "_" path segment refers to the currently authenticated tenant's namespace.
    endpoint = f"/v1/_/agents/{self.agent_id}/runs/{self.id}/completions"
    return await self._agent.api.get(endpoint, returns=CompletionsResponse)
181+
128182

129183
class _AgentBase(Protocol, Generic[AgentOutput]):
184+
api: APIClient
185+
130186
async def reply(
131187
self,
132188
run_id: str,

workflowai/core/domain/run_test.py

Lines changed: 85 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33
import pytest
44
from pydantic import BaseModel
55

6-
from workflowai.core.domain.run import Run
6+
from workflowai.core.client._api import APIClient
7+
from workflowai.core.domain.run import Completion, CompletionsResponse, CompletionUsage, Message, Run
78
from workflowai.core.domain.version import Version
89
from workflowai.core.domain.version_properties import VersionProperties
910

@@ -120,3 +121,86 @@ class TestRunURL:
120121
@patch("workflowai.env.WORKFLOWAI_APP_URL", "https://workflowai.hello")
121122
def test_run_url(self, run1: Run[_TestOutput]):
122123
assert run1.run_url == "https://workflowai.hello/agents/agent-1/runs/test-id"
124+
125+
126+
class TestFetchCompletions:
    """Tests for the fetch_completions method of the Run class."""

    # Test the successful case of fetching completions:
    # 1. Verifies that the API client is called with the correct URL and parameters
    # 2. Verifies that the response is properly parsed into CompletionsResponse
    # 3. Checks that all fields (messages, response, usage) are correctly populated
    # 4. Ensures the completion contains the expected conversation history (system, user, assistant)
    async def test_fetch_completions_success(self, run1: Run[_TestOutput]):
        # Create a mock API client.
        # NOTE(review): Mock(spec=...) auto-specs async methods as AsyncMock
        # (Python 3.8+), which is presumably why the awaited `api.get(...)`
        # inside fetch_completions resolves to this return_value — confirm
        # APIClient.get is declared async.
        mock_api = Mock(spec=APIClient)
        mock_api.get.return_value = CompletionsResponse(
            completions=[
                Completion(
                    messages=[
                        Message(role="system", content="You are a helpful assistant"),
                        Message(role="user", content="Hello"),
                        Message(role="assistant", content="Hi there!"),
                    ],
                    response="Hi there!",
                    usage=CompletionUsage(
                        completion_token_count=3,
                        completion_cost_usd=0.001,
                        reasoning_token_count=10,
                        prompt_token_count=20,
                        prompt_token_count_cached=0,
                        prompt_cost_usd=0.002,
                        prompt_audio_token_count=0,
                        prompt_audio_duration_seconds=0,
                        prompt_image_count=0,
                        model_context_window_size=32000,
                    ),
                ),
            ],
        )

        # Create a mock agent with the mock API client, and attach it to the
        # run via the private backref that fetch_completions reads.
        mock_agent = Mock()
        mock_agent.api = mock_api
        run1._agent = mock_agent  # pyright: ignore [reportPrivateUsage]

        # Call fetch_completions
        completions = await run1.fetch_completions()

        # Verify the API was called correctly (run1 fixture has
        # agent_id="agent-1" and id="test-id").
        mock_api.get.assert_called_once_with(
            "/v1/_/agents/agent-1/runs/test-id/completions",
            returns=CompletionsResponse,
        )

        # Verify the response was passed through unmodified.
        assert len(completions.completions) == 1
        completion = completions.completions[0]
        assert len(completion.messages) == 3
        assert completion.messages[0].role == "system"
        assert completion.messages[0].content == "You are a helpful assistant"
        assert completion.response == "Hi there!"
        assert completion.usage.completion_token_count == 3
        assert completion.usage.completion_cost_usd == 0.001

    # Test that fetch_completions fails appropriately when the agent is not set:
    # 1. This is a common error case that occurs when a Run object is created without an agent
    # 2. The method should fail fast with a clear error message before attempting any API calls
    # 3. This protects users from confusing errors that would occur if we tried to use the API client
    async def test_fetch_completions_no_agent(self, run1: Run[_TestOutput]):
        run1._agent = None  # pyright: ignore [reportPrivateUsage]
        with pytest.raises(ValueError, match="Agent is not set"):
            await run1.fetch_completions()

    # Test that fetch_completions fails appropriately when the run ID is not set:
    # 1. The run ID is required to construct the API endpoint URL
    # 2. Without it, we can't make a valid API request
    # 3. This validates that we fail fast with a clear error message
    # 4. This should never happen in practice (as Run objects always have an ID),
    #    but we test it for completeness and to ensure robust error handling
    async def test_fetch_completions_no_id(self, run1: Run[_TestOutput]):
        mock_agent = Mock()
        run1._agent = mock_agent  # pyright: ignore [reportPrivateUsage]
        run1.id = ""  # Empty ID
        with pytest.raises(ValueError, match="Run id is not set"):
            await run1.fetch_completions()

0 commit comments

Comments
 (0)