|
3 | 3 | import pytest |
4 | 4 | from pydantic import BaseModel |
5 | 5 |
|
6 | | -from workflowai.core.domain.run import Run |
| 6 | +from workflowai.core.client._api import APIClient |
| 7 | +from workflowai.core.domain.run import Completion, CompletionsResponse, CompletionUsage, Message, Run |
7 | 8 | from workflowai.core.domain.version import Version |
8 | 9 | from workflowai.core.domain.version_properties import VersionProperties |
9 | 10 |
|
@@ -120,3 +121,86 @@ class TestRunURL: |
120 | 121 | @patch("workflowai.env.WORKFLOWAI_APP_URL", "https://workflowai.hello") |
121 | 122 | def test_run_url(self, run1: Run[_TestOutput]): |
122 | 123 | assert run1.run_url == "https://workflowai.hello/agents/agent-1/runs/test-id" |
| 124 | + |
| 125 | + |
class TestFetchCompletions:
    """Tests for the fetch_completions method of the Run class."""

    async def test_fetch_completions_success(self, run1: Run[_TestOutput]):
        """Happy path for fetching completions.

        Verifies that:
        - the API client is called with the correct URL and parameters
        - the response is properly parsed into a CompletionsResponse
        - all fields (messages, response, usage) are correctly populated
        - the completion holds the expected system/user/assistant history
        """
        # Build the canned payload the mocked API client will return.
        usage = CompletionUsage(
            completion_token_count=3,
            completion_cost_usd=0.001,
            reasoning_token_count=10,
            prompt_token_count=20,
            prompt_token_count_cached=0,
            prompt_cost_usd=0.002,
            prompt_audio_token_count=0,
            prompt_audio_duration_seconds=0,
            prompt_image_count=0,
            model_context_window_size=32000,
        )
        history = [
            Message(role="system", content="You are a helpful assistant"),
            Message(role="user", content="Hello"),
            Message(role="assistant", content="Hi there!"),
        ]
        api_client = Mock(spec=APIClient)
        api_client.get.return_value = CompletionsResponse(
            completions=[Completion(messages=history, response="Hi there!", usage=usage)],
        )

        # Attach a stub agent that exposes the mocked API client.
        agent = Mock()
        agent.api = api_client
        run1._agent = agent  # pyright: ignore [reportPrivateUsage]

        result = await run1.fetch_completions()

        # The endpoint URL and the expected return type were passed through.
        api_client.get.assert_called_once_with(
            "/v1/_/agents/agent-1/runs/test-id/completions",
            returns=CompletionsResponse,
        )

        # The parsed response came back unchanged.
        assert len(result.completions) == 1
        completion = result.completions[0]
        assert len(completion.messages) == 3
        assert completion.messages[0].role == "system"
        assert completion.messages[0].content == "You are a helpful assistant"
        assert completion.response == "Hi there!"
        assert completion.usage.completion_token_count == 3
        assert completion.usage.completion_cost_usd == 0.001

    async def test_fetch_completions_no_agent(self, run1: Run[_TestOutput]):
        """A Run without an agent must fail fast with a clear error.

        This is a common error case when a Run object is created without an
        agent; failing before any API call protects users from the confusing
        errors that would otherwise surface from the missing API client.
        """
        run1._agent = None  # pyright: ignore [reportPrivateUsage]
        with pytest.raises(ValueError, match="Agent is not set"):
            await run1.fetch_completions()

    async def test_fetch_completions_no_id(self, run1: Run[_TestOutput]):
        """A Run with an empty id must be rejected before any API request.

        The run id is required to build the endpoint URL, so without it no
        valid request can be made. In practice Run objects always have an id;
        this is covered for completeness and robust error handling.
        """
        run1._agent = Mock()  # pyright: ignore [reportPrivateUsage]
        run1.id = ""  # Empty ID
        with pytest.raises(ValueError, match="Run id is not set"):
            await run1.fetch_completions()
0 commit comments