33import pytest
44from pydantic import BaseModel
55
6- from workflowai .core .domain .run import Run
6+ from workflowai .core .domain .completion import Completion , CompletionUsage , Message
7+ from workflowai .core .domain .run import (
8+ Run ,
9+ _AgentBase , # pyright: ignore [reportPrivateUsage]
10+ )
711from workflowai .core .domain .version import Version
812from workflowai .core .domain .version_properties import VersionProperties
913
@@ -13,8 +17,14 @@ class _TestOutput(BaseModel):
1317
1418
@pytest.fixture
def mock_agent() -> Mock:
    """Return an agent test double that satisfies the _AgentBase interface."""
    return Mock(spec=_AgentBase)
23+
24+
25+ @pytest .fixture
26+ def run1 (mock_agent : Mock ) -> Run [_TestOutput ]:
27+ run = Run [_TestOutput ](
1828 id = "run-id" ,
1929 agent_id = "agent-id" ,
2030 schema_id = 1 ,
@@ -26,6 +36,8 @@ def run1() -> Run[_TestOutput]:
2636 tool_calls = [],
2737 tool_call_requests = [],
2838 )
39+ run ._agent = mock_agent # pyright: ignore [reportPrivateUsage]
40+ return run
2941
3042
3143@pytest .fixture
@@ -128,3 +140,70 @@ class TestRunURL:
128140 @patch ("workflowai.env.WORKFLOWAI_APP_URL" , "https://workflowai.hello" )
129141 def test_run_url (self , run1 : Run [_TestOutput ]):
130142 assert run1 .run_url == "https://workflowai.hello/_/agents/agent-id/runs/run-id"
143+
144+
class TestFetchCompletions:
    """Tests for the fetch_completions method of the Run class."""

    async def test_fetch_completions_success(self, run1: Run[_TestOutput], mock_agent: Mock):
        """fetch_completions delegates to the agent with this run's id and returns its payload."""
        mock_agent.fetch_completions.return_value = [
            Completion(
                messages=[
                    Message(role="system", content="You are a helpful assistant"),
                    Message(role="user", content="Hello"),
                    Message(role="assistant", content="Hi there!"),
                ],
                response="Hi there!",
                usage=CompletionUsage(
                    completion_token_count=3,
                    completion_cost_usd=0.001,
                    reasoning_token_count=10,
                    prompt_token_count=20,
                    prompt_token_count_cached=0,
                    prompt_cost_usd=0.002,
                    prompt_audio_token_count=0,
                    prompt_audio_duration_seconds=0,
                    prompt_image_count=0,
                    model_context_window_size=32000,
                ),
            ),
        ]

        completions = await run1.fetch_completions()

        # The underlying agent must be queried with the id of this run only.
        mock_agent.fetch_completions.assert_called_once_with("run-id")

        # The agent's completions are surfaced unchanged.
        assert len(completions) == 1
        completion = completions[0]
        assert len(completion.messages) == 3
        assert completion.messages[0].role == "system"
        assert completion.messages[0].content == "You are a helpful assistant"
        assert completion.response == "Hi there!"
        assert completion.usage.completion_token_count == 3
        assert completion.usage.completion_cost_usd == 0.001

    async def test_fetch_completions_no_agent(self, run1: Run[_TestOutput]):
        """fetch_completions fails fast when no agent is attached.

        This happens when a Run object is created without an agent; the method
        should raise a clear error before attempting any API call, rather than
        surfacing a confusing failure from a missing API client.
        """
        run1._agent = None  # pyright: ignore [reportPrivateUsage]
        with pytest.raises(ValueError, match="Agent is not set"):
            await run1.fetch_completions()

    async def test_fetch_completions_no_id(self, run1: Run[_TestOutput]):
        """fetch_completions fails fast when the run id is empty.

        The run id is required to build the API endpoint URL. This should never
        happen in practice (Run objects always carry an id) but is tested for
        robust error handling. The run1 fixture already attaches a spec'd mock
        agent, so no extra (un-specced) Mock needs to be wired here.
        """
        run1.id = ""  # empty id must be rejected before any agent call
        with pytest.raises(ValueError, match="Run id is not set"):
            await run1.fetch_completions()