diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c58a8e4..8e8c806 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,7 +19,7 @@ jobs:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/runwayml-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- uses: actions/checkout@v6
@@ -38,7 +38,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
timeout-minutes: 10
name: build
permissions:
diff --git a/.gitignore b/.gitignore
index 95ceb18..3824f4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
.prism.log
+.stdy.log
_dev
__pycache__
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 90eeef6..6b46767 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.8.0"
+ ".": "4.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index c1bbd75..257316b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 32
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runwayml%2Frunwayml-618b98a98e5da6561ad0483de2b77dd74712efbb9104b81c48d66942ce03c387.yml
-openapi_spec_hash: d2d504e004bd96cea746569a487446b2
-config_hash: e8ee175cde2895c3897be991e3bb3ecb
+configured_endpoints: 36
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runwayml%2Frunwayml-80f24c85d40f5f07c19ce2f7bda8e87218a30efaf6611262d490d431b1efcfbb.yml
+openapi_spec_hash: 8aa96c2e29a8dece4f244069ebbcea76
+config_hash: 8e05a8613b4c0e602d485566da4e5264
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 59f37e9..3cf6528 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,35 @@
# Changelog
+## 4.9.0 (2026-03-25)
+
+Full Changelog: [v4.8.0...v4.9.0](https://github.com/runwayml/sdk-python/compare/v4.8.0...v4.9.0)
+
+### Features
+
+* **api:** add workflow invocation polling support ([6eb6fc5](https://github.com/runwayml/sdk-python/commit/6eb6fc507c6aae61ef3bcb5cceaee1b5049c63f3))
+* **api:** add workflows and workflow invocations resources ([35d8573](https://github.com/runwayml/sdk-python/commit/35d857393bdac4aa3862dd5f0b91f23ad217fce5))
+
+
+### Bug Fixes
+
+* sanitize endpoint path params ([84efc12](https://github.com/runwayml/sdk-python/commit/84efc1283d565f857572bb99a0458bd3c4e78e2b))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([1899411](https://github.com/runwayml/sdk-python/commit/1899411b75920727e70ab42e77b3c5cb11a86fab))
+* **internal:** update gitignore ([8bdac54](https://github.com/runwayml/sdk-python/commit/8bdac549eec0ce17ca630553f1ca956614d076d0))
+* **tests:** bump steady to v0.19.4 ([8841072](https://github.com/runwayml/sdk-python/commit/884107238b269129227807a141ef05dc5b0ca994))
+* **tests:** bump steady to v0.19.5 ([5adc696](https://github.com/runwayml/sdk-python/commit/5adc696396fe8031562bc7ff4a57dae92edf5d9c))
+* **tests:** bump steady to v0.19.6 ([135a06b](https://github.com/runwayml/sdk-python/commit/135a06b19e761316826b966f9c977268b7058b28))
+* **tests:** bump steady to v0.19.7 ([b7eeb81](https://github.com/runwayml/sdk-python/commit/b7eeb8168582d3b3682d6b61fa5aa5adc377c656))
+
+
+### Refactors
+
+* introduce dedicated error types for workflow invocation polling ([420f91c](https://github.com/runwayml/sdk-python/commit/420f91cda06553beba5d075aa951b2f505f68b9c))
+* **tests:** switch from prism to steady ([0a1bb5a](https://github.com/runwayml/sdk-python/commit/0a1bb5ae959de737e80775a09e45c24081729bf2))
+
## 4.8.0 (2026-03-18)
Full Changelog: [v4.7.1...v4.8.0](https://github.com/runwayml/sdk-python/compare/v4.7.1...v4.8.0)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2fb1fdf..773a99e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.
```sh
$ ./scripts/mock
diff --git a/api.md b/api.md
index 46ed157..0697df7 100644
--- a/api.md
+++ b/api.md
@@ -215,3 +215,29 @@ Methods:
- client.voices.list(\*\*params) -> SyncCursorPage[VoiceListResponse]
- client.voices.delete(id) -> None
- client.voices.preview(\*\*params) -> VoicePreviewResponse
+
+# Workflows
+
+Types:
+
+```python
+from runwayml.types import WorkflowRetrieveResponse, WorkflowListResponse, WorkflowRunResponse
+```
+
+Methods:
+
+- client.workflows.retrieve(id) -> WorkflowRetrieveResponse
+- client.workflows.list() -> WorkflowListResponse
+- client.workflows.run(id, \*\*params) -> WorkflowRunResponse
+
+# WorkflowInvocations
+
+Types:
+
+```python
+from runwayml.types import WorkflowInvocationRetrieveResponse
+```
+
+Methods:
+
+- client.workflow_invocations.retrieve(id) -> WorkflowInvocationRetrieveResponse
diff --git a/pyproject.toml b/pyproject.toml
index f544444..249574e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "runwayml"
-version = "4.8.0"
+version = "4.9.0"
description = "The official Python library for the runwayml API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/scripts/mock b/scripts/mock
index bcf3b39..09eb49f 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -19,34 +19,34 @@ fi
echo "==> Starting mock server with URL ${URL}"
-# Run prism mock on the given spec
+# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism --version
+ npm exec --package=@stdy/cli@0.19.7 -- steady --version
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
- # Wait for server to come online (max 30s)
+ # Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
attempts=0
- while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ while ! curl --silent --fail "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1; do
+ if ! kill -0 $! 2>/dev/null; then
+ echo
+ cat .stdy.log
+ exit 1
+ fi
attempts=$((attempts + 1))
if [ "$attempts" -ge 300 ]; then
echo
- echo "Timed out waiting for Prism server to start"
- cat .prism.log
+ echo "Timed out waiting for Steady server to start"
+ cat .stdy.log
exit 1
fi
echo -n "."
sleep 0.1
done
- if grep -q "✖ fatal" ".prism.log"; then
- cat .prism.log
- exit 1
- fi
-
echo
else
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index dbeda2d..e46b9b5 100755
--- a/scripts/test
+++ b/scripts/test
@@ -9,8 +9,8 @@ GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
+function steady_is_running() {
+ curl --silent "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1
}
kill_server_on_port() {
@@ -25,7 +25,7 @@ function is_overriding_api_base_url() {
[ -n "$TEST_API_BASE_URL" ]
}
-if ! is_overriding_api_base_url && ! prism_is_running ; then
+if ! is_overriding_api_base_url && ! steady_is_running ; then
# When we exit this script, make sure to kill the background mock server process
trap 'kill_server_on_port 4010' EXIT
@@ -36,19 +36,19 @@ fi
if is_overriding_api_base_url ; then
echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
echo
-elif ! prism_is_running ; then
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+elif ! steady_is_running ; then
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Steady server"
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
+ echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}"
echo
fi
diff --git a/src/runwayml/__init__.py b/src/runwayml/__init__.py
index 6fd5319..80eec83 100644
--- a/src/runwayml/__init__.py
+++ b/src/runwayml/__init__.py
@@ -36,7 +36,12 @@
UnprocessableEntityError,
APIResponseValidationError,
)
-from .lib.polling import TaskFailedError, TaskTimeoutError
+from .lib.polling import (
+ TaskFailedError,
+ TaskTimeoutError,
+ WorkflowInvocationFailedError,
+ WorkflowInvocationTimeoutError,
+)
from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging
@@ -84,6 +89,8 @@
"DefaultAioHttpClient",
"TaskFailedError",
"TaskTimeoutError",
+ "WorkflowInvocationFailedError",
+ "WorkflowInvocationTimeoutError",
]
if not _t.TYPE_CHECKING:
diff --git a/src/runwayml/_client.py b/src/runwayml/_client.py
index 4bee591..898ef6e 100644
--- a/src/runwayml/_client.py
+++ b/src/runwayml/_client.py
@@ -37,6 +37,7 @@
avatars,
uploads,
documents,
+ workflows,
organization,
sound_effect,
text_to_image,
@@ -48,6 +49,7 @@
voice_isolation,
speech_to_speech,
realtime_sessions,
+ workflow_invocations,
character_performance,
)
from .resources.tasks import TasksResource, AsyncTasksResource
@@ -55,6 +57,7 @@
from .resources.avatars import AvatarsResource, AsyncAvatarsResource
from .resources.uploads import UploadsResource, AsyncUploadsResource
from .resources.documents import DocumentsResource, AsyncDocumentsResource
+ from .resources.workflows import WorkflowsResource, AsyncWorkflowsResource
from .resources.organization import OrganizationResource, AsyncOrganizationResource
from .resources.sound_effect import SoundEffectResource, AsyncSoundEffectResource
from .resources.text_to_image import TextToImageResource, AsyncTextToImageResource
@@ -66,6 +69,7 @@
from .resources.voice_isolation import VoiceIsolationResource, AsyncVoiceIsolationResource
from .resources.speech_to_speech import SpeechToSpeechResource, AsyncSpeechToSpeechResource
from .resources.realtime_sessions import RealtimeSessionsResource, AsyncRealtimeSessionsResource
+ from .resources.workflow_invocations import WorkflowInvocationsResource, AsyncWorkflowInvocationsResource
from .resources.character_performance import CharacterPerformanceResource, AsyncCharacterPerformanceResource
__all__ = [
@@ -254,6 +258,18 @@ def voices(self) -> VoicesResource:
return VoicesResource(self)
+ @cached_property
+ def workflows(self) -> WorkflowsResource:
+ from .resources.workflows import WorkflowsResource
+
+ return WorkflowsResource(self)
+
+ @cached_property
+ def workflow_invocations(self) -> WorkflowInvocationsResource:
+ from .resources.workflow_invocations import WorkflowInvocationsResource
+
+ return WorkflowInvocationsResource(self)
+
@cached_property
def with_raw_response(self) -> RunwayMLWithRawResponse:
return RunwayMLWithRawResponse(self)
@@ -544,6 +560,18 @@ def voices(self) -> AsyncVoicesResource:
return AsyncVoicesResource(self)
+ @cached_property
+ def workflows(self) -> AsyncWorkflowsResource:
+ from .resources.workflows import AsyncWorkflowsResource
+
+ return AsyncWorkflowsResource(self)
+
+ @cached_property
+ def workflow_invocations(self) -> AsyncWorkflowInvocationsResource:
+ from .resources.workflow_invocations import AsyncWorkflowInvocationsResource
+
+ return AsyncWorkflowInvocationsResource(self)
+
@cached_property
def with_raw_response(self) -> AsyncRunwayMLWithRawResponse:
return AsyncRunwayMLWithRawResponse(self)
@@ -779,6 +807,18 @@ def voices(self) -> voices.VoicesResourceWithRawResponse:
return VoicesResourceWithRawResponse(self._client.voices)
+ @cached_property
+ def workflows(self) -> workflows.WorkflowsResourceWithRawResponse:
+ from .resources.workflows import WorkflowsResourceWithRawResponse
+
+ return WorkflowsResourceWithRawResponse(self._client.workflows)
+
+ @cached_property
+ def workflow_invocations(self) -> workflow_invocations.WorkflowInvocationsResourceWithRawResponse:
+ from .resources.workflow_invocations import WorkflowInvocationsResourceWithRawResponse
+
+ return WorkflowInvocationsResourceWithRawResponse(self._client.workflow_invocations)
+
class AsyncRunwayMLWithRawResponse:
_client: AsyncRunwayML
@@ -899,6 +939,18 @@ def voices(self) -> voices.AsyncVoicesResourceWithRawResponse:
return AsyncVoicesResourceWithRawResponse(self._client.voices)
+ @cached_property
+ def workflows(self) -> workflows.AsyncWorkflowsResourceWithRawResponse:
+ from .resources.workflows import AsyncWorkflowsResourceWithRawResponse
+
+ return AsyncWorkflowsResourceWithRawResponse(self._client.workflows)
+
+ @cached_property
+ def workflow_invocations(self) -> workflow_invocations.AsyncWorkflowInvocationsResourceWithRawResponse:
+ from .resources.workflow_invocations import AsyncWorkflowInvocationsResourceWithRawResponse
+
+ return AsyncWorkflowInvocationsResourceWithRawResponse(self._client.workflow_invocations)
+
class RunwayMLWithStreamedResponse:
_client: RunwayML
@@ -1019,6 +1071,18 @@ def voices(self) -> voices.VoicesResourceWithStreamingResponse:
return VoicesResourceWithStreamingResponse(self._client.voices)
+ @cached_property
+ def workflows(self) -> workflows.WorkflowsResourceWithStreamingResponse:
+ from .resources.workflows import WorkflowsResourceWithStreamingResponse
+
+ return WorkflowsResourceWithStreamingResponse(self._client.workflows)
+
+ @cached_property
+ def workflow_invocations(self) -> workflow_invocations.WorkflowInvocationsResourceWithStreamingResponse:
+ from .resources.workflow_invocations import WorkflowInvocationsResourceWithStreamingResponse
+
+ return WorkflowInvocationsResourceWithStreamingResponse(self._client.workflow_invocations)
+
class AsyncRunwayMLWithStreamedResponse:
_client: AsyncRunwayML
@@ -1139,6 +1203,18 @@ def voices(self) -> voices.AsyncVoicesResourceWithStreamingResponse:
return AsyncVoicesResourceWithStreamingResponse(self._client.voices)
+ @cached_property
+ def workflows(self) -> workflows.AsyncWorkflowsResourceWithStreamingResponse:
+ from .resources.workflows import AsyncWorkflowsResourceWithStreamingResponse
+
+ return AsyncWorkflowsResourceWithStreamingResponse(self._client.workflows)
+
+ @cached_property
+ def workflow_invocations(self) -> workflow_invocations.AsyncWorkflowInvocationsResourceWithStreamingResponse:
+ from .resources.workflow_invocations import AsyncWorkflowInvocationsResourceWithStreamingResponse
+
+ return AsyncWorkflowInvocationsResourceWithStreamingResponse(self._client.workflow_invocations)
+
Client = RunwayML
diff --git a/src/runwayml/_utils/__init__.py b/src/runwayml/_utils/__init__.py
index dc64e29..10cb66d 100644
--- a/src/runwayml/_utils/__init__.py
+++ b/src/runwayml/_utils/__init__.py
@@ -1,3 +1,4 @@
+from ._path import path_template as path_template
from ._sync import asyncify as asyncify
from ._proxy import LazyProxy as LazyProxy
from ._utils import (
diff --git a/src/runwayml/_utils/_path.py b/src/runwayml/_utils/_path.py
new file mode 100644
index 0000000..4d6e1e4
--- /dev/null
+++ b/src/runwayml/_utils/_path.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+ RFC 3986 §3.4 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/src/runwayml/_version.py b/src/runwayml/_version.py
index 641fea6..4e4cfdc 100644
--- a/src/runwayml/_version.py
+++ b/src/runwayml/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "runwayml"
-__version__ = "4.8.0" # x-release-please-version
+__version__ = "4.9.0" # x-release-please-version
diff --git a/src/runwayml/lib/polling.py b/src/runwayml/lib/polling.py
index b3b8750..4c1f737 100644
--- a/src/runwayml/lib/polling.py
+++ b/src/runwayml/lib/polling.py
@@ -17,6 +17,15 @@
Throttled,
TaskRetrieveResponse,
)
+from ..types.workflow_invocation_retrieve_response import (
+ Failed as WIFailed,
+ Pending as WIPending,
+ Running as WIRunning,
+ Cancelled as WICancelled,
+ Succeeded as WISucceeded,
+ Throttled as WIThrottled,
+ WorkflowInvocationRetrieveResponse,
+)
if TYPE_CHECKING:
from .._client import RunwayML, AsyncRunwayML
@@ -244,3 +253,186 @@ def inject_async_wait_method(client: "AsyncRunwayML", response: T) -> T:
response.wait_for_task_output = types.MethodType(_make_async_wait_for_task_output(client), response) # type: ignore[attr-defined]
return response
+
+
+# ---------------------------------------------------------------------------
+# Workflow Invocation polling
+# ---------------------------------------------------------------------------
+
+
+class AwaitableWorkflowInvocationResponseMixin:
+ def wait_for_task_output(self, timeout: Union[float, None] = 60 * 10) -> WorkflowInvocationRetrieveResponse: # type: ignore[empty-body]
+ """
+ When called, this will block until the workflow invocation is complete.
+
+ If the invocation fails or is cancelled, a `WorkflowInvocationFailedError`
+ will be raised.
+
+ Args:
+ timeout: The maximum amount of time to wait in seconds. If not specified,
+ the default timeout is 10 minutes. Will raise a
+ `WorkflowInvocationTimeoutError` if the invocation does not complete
+ within the timeout.
+
+ Returns:
+ The workflow invocation details, equivalent to calling
+ `client.workflow_invocations.retrieve(id)`.
+ """
+ ...
+
+
+class AsyncAwaitableWorkflowInvocationResponseMixin:
+ async def wait_for_task_output(self, timeout: Union[float, None] = 60 * 10) -> WorkflowInvocationRetrieveResponse: # type: ignore[empty-body]
+ """
+ When called, this will wait until the workflow invocation is complete.
+
+ If the invocation fails or is cancelled, a `WorkflowInvocationFailedError`
+ will be raised.
+
+ Args:
+ timeout: The maximum amount of time to wait in seconds. If not specified,
+ the default timeout is 10 minutes. Will raise a
+ `WorkflowInvocationTimeoutError` if the invocation does not complete
+ within the timeout. Setting this to `None` will wait indefinitely
+ (disabling the timeout).
+
+ Returns:
+ The workflow invocation details, equivalent to awaiting
+ `client.workflow_invocations.retrieve(id)`.
+ """
+ ...
+
+
+class AwaitableWIPending(AwaitableWorkflowInvocationResponseMixin, WIPending): ...
+
+
+class AwaitableWIThrottled(AwaitableWorkflowInvocationResponseMixin, WIThrottled): ...
+
+
+class AwaitableWICancelled(AwaitableWorkflowInvocationResponseMixin, WICancelled): ...
+
+
+class AwaitableWIRunning(AwaitableWorkflowInvocationResponseMixin, WIRunning): ...
+
+
+class AwaitableWIFailed(AwaitableWorkflowInvocationResponseMixin, WIFailed): ...
+
+
+class AwaitableWISucceeded(AwaitableWorkflowInvocationResponseMixin, WISucceeded): ...
+
+
+AwaitableWorkflowInvocationRetrieveResponse: TypeAlias = Annotated[
+ Union[
+ AwaitableWIPending,
+ AwaitableWIThrottled,
+ AwaitableWICancelled,
+ AwaitableWIRunning,
+ AwaitableWIFailed,
+ AwaitableWISucceeded,
+ ],
+ PropertyInfo(discriminator="status"),
+]
+
+
+class AsyncAwaitableWIPending(AsyncAwaitableWorkflowInvocationResponseMixin, WIPending): ...
+
+
+class AsyncAwaitableWIThrottled(AsyncAwaitableWorkflowInvocationResponseMixin, WIThrottled): ...
+
+
+class AsyncAwaitableWICancelled(AsyncAwaitableWorkflowInvocationResponseMixin, WICancelled): ...
+
+
+class AsyncAwaitableWIRunning(AsyncAwaitableWorkflowInvocationResponseMixin, WIRunning): ...
+
+
+class AsyncAwaitableWIFailed(AsyncAwaitableWorkflowInvocationResponseMixin, WIFailed): ...
+
+
+class AsyncAwaitableWISucceeded(AsyncAwaitableWorkflowInvocationResponseMixin, WISucceeded): ...
+
+
+AsyncAwaitableWorkflowInvocationRetrieveResponse: TypeAlias = Annotated[
+ Union[
+ AsyncAwaitableWIPending,
+ AsyncAwaitableWIThrottled,
+ AsyncAwaitableWICancelled,
+ AsyncAwaitableWIRunning,
+ AsyncAwaitableWIFailed,
+ AsyncAwaitableWISucceeded,
+ ],
+ PropertyInfo(discriminator="status"),
+]
+
+
+class WorkflowInvocationFailedError(Exception):
+ def __init__(self, invocation_details: WorkflowInvocationRetrieveResponse):
+ self.invocation_details = invocation_details
+ super().__init__("Workflow invocation failed")
+
+
+class WorkflowInvocationTimeoutError(Exception):
+ def __init__(self, invocation_details: WorkflowInvocationRetrieveResponse):
+ self.invocation_details = invocation_details
+ super().__init__("Workflow invocation timed out")
+
+
+def _make_sync_wait_for_workflow_invocation_output(
+ client: "RunwayML",
+) -> Callable[["AwaitableWorkflowInvocationResponseMixin", Union[float, None]], WorkflowInvocationRetrieveResponse]:
+ def wait_for_task_output(
+ self: "AwaitableWorkflowInvocationResponseMixin", timeout: Union[float, None] = 60 * 10
+ ) -> WorkflowInvocationRetrieveResponse:
+ start_time = time.time()
+ while True:
+ time.sleep(POLL_TIME + random.random() * POLL_JITTER - POLL_JITTER / 2)
+ details = client.workflow_invocations.retrieve(self.id) # type: ignore[attr-defined]
+ if details.status == "SUCCEEDED":
+ return details
+ if details.status == "FAILED" or details.status == "CANCELLED":
+ raise WorkflowInvocationFailedError(details)
+ if timeout is not None and time.time() - start_time > timeout:
+ raise WorkflowInvocationTimeoutError(details)
+
+ return wait_for_task_output
+
+
+def inject_sync_workflow_invocation_wait_method(client: "RunwayML", response: T) -> T:
+ import types
+
+ response.wait_for_task_output = types.MethodType( # type: ignore[attr-defined]
+ _make_sync_wait_for_workflow_invocation_output(client), response
+ )
+ return response
+
+
+def _make_async_wait_for_workflow_invocation_output(
+ client: "AsyncRunwayML",
+) -> Callable[
+ ["AsyncAwaitableWorkflowInvocationResponseMixin", Union[float, None]],
+ Coroutine[None, None, WorkflowInvocationRetrieveResponse],
+]:
+ async def wait_for_task_output(
+ self: "AsyncAwaitableWorkflowInvocationResponseMixin", timeout: Union[float, None] = 60 * 10
+ ) -> WorkflowInvocationRetrieveResponse:
+ start_time = anyio.current_time()
+ while True:
+ await anyio.sleep(POLL_TIME + random.random() * POLL_JITTER - POLL_JITTER / 2)
+ details = await client.workflow_invocations.retrieve(self.id) # type: ignore[attr-defined]
+ if details.status == "SUCCEEDED":
+ return details
+ if details.status == "FAILED" or details.status == "CANCELLED":
+ raise WorkflowInvocationFailedError(details)
+ if timeout is not None and anyio.current_time() - start_time > timeout:
+ raise WorkflowInvocationTimeoutError(details)
+
+ return wait_for_task_output
+
+
+def inject_async_workflow_invocation_wait_method(client: "AsyncRunwayML", response: T) -> T:
+ import types
+
+ response.wait_for_task_output = types.MethodType( # type: ignore[attr-defined]
+ _make_async_wait_for_workflow_invocation_output(client), response
+ )
+ return response
diff --git a/src/runwayml/resources/__init__.py b/src/runwayml/resources/__init__.py
index cd9219d..ff71c4e 100644
--- a/src/runwayml/resources/__init__.py
+++ b/src/runwayml/resources/__init__.py
@@ -8,6 +8,14 @@
TasksResourceWithStreamingResponse,
AsyncTasksResourceWithStreamingResponse,
)
+from .voices import (
+ VoicesResource,
+ AsyncVoicesResource,
+ VoicesResourceWithRawResponse,
+ AsyncVoicesResourceWithRawResponse,
+ VoicesResourceWithStreamingResponse,
+ AsyncVoicesResourceWithStreamingResponse,
+)
from .uploads import (
UploadsResource,
AsyncUploadsResource,
@@ -16,6 +24,14 @@
UploadsResourceWithStreamingResponse,
AsyncUploadsResourceWithStreamingResponse,
)
+from .workflows import (
+ WorkflowsResource,
+ AsyncWorkflowsResource,
+ WorkflowsResourceWithRawResponse,
+ AsyncWorkflowsResourceWithRawResponse,
+ WorkflowsResourceWithStreamingResponse,
+ AsyncWorkflowsResourceWithStreamingResponse,
+)
from .organization import (
OrganizationResource,
AsyncOrganizationResource,
@@ -104,6 +120,14 @@
RealtimeSessionsResourceWithStreamingResponse,
AsyncRealtimeSessionsResourceWithStreamingResponse,
)
+from .workflow_invocations import (
+ WorkflowInvocationsResource,
+ AsyncWorkflowInvocationsResource,
+ WorkflowInvocationsResourceWithRawResponse,
+ AsyncWorkflowInvocationsResourceWithRawResponse,
+ WorkflowInvocationsResourceWithStreamingResponse,
+ AsyncWorkflowInvocationsResourceWithStreamingResponse,
+)
from .character_performance import (
CharacterPerformanceResource,
AsyncCharacterPerformanceResource,
@@ -198,4 +222,22 @@
"AsyncRealtimeSessionsResourceWithRawResponse",
"RealtimeSessionsResourceWithStreamingResponse",
"AsyncRealtimeSessionsResourceWithStreamingResponse",
+ "VoicesResource",
+ "AsyncVoicesResource",
+ "VoicesResourceWithRawResponse",
+ "AsyncVoicesResourceWithRawResponse",
+ "VoicesResourceWithStreamingResponse",
+ "AsyncVoicesResourceWithStreamingResponse",
+ "WorkflowsResource",
+ "AsyncWorkflowsResource",
+ "WorkflowsResourceWithRawResponse",
+ "AsyncWorkflowsResourceWithRawResponse",
+ "WorkflowsResourceWithStreamingResponse",
+ "AsyncWorkflowsResourceWithStreamingResponse",
+ "WorkflowInvocationsResource",
+ "AsyncWorkflowInvocationsResource",
+ "WorkflowInvocationsResourceWithRawResponse",
+ "AsyncWorkflowInvocationsResourceWithRawResponse",
+ "WorkflowInvocationsResourceWithStreamingResponse",
+ "AsyncWorkflowInvocationsResourceWithStreamingResponse",
]
diff --git a/src/runwayml/resources/avatars.py b/src/runwayml/resources/avatars.py
index 4556c2c..fc00ef6 100644
--- a/src/runwayml/resources/avatars.py
+++ b/src/runwayml/resources/avatars.py
@@ -9,7 +9,7 @@
from ..types import avatar_list_params, avatar_create_params, avatar_update_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -146,7 +146,7 @@ def retrieve(
return cast(
AvatarRetrieveResponse,
self._get(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -209,7 +209,7 @@ def update(
return cast(
AvatarUpdateResponse,
self._patch(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
body=maybe_transform(
{
"document_ids": document_ids,
@@ -305,7 +305,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -431,7 +431,7 @@ async def retrieve(
return cast(
AvatarRetrieveResponse,
await self._get(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -494,7 +494,7 @@ async def update(
return cast(
AvatarUpdateResponse,
await self._patch(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
body=await async_maybe_transform(
{
"document_ids": document_ids,
@@ -590,7 +590,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/avatars/{id}",
+ path_template("/v1/avatars/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/runwayml/resources/documents.py b/src/runwayml/resources/documents.py
index c72ee67..93d1746 100644
--- a/src/runwayml/resources/documents.py
+++ b/src/runwayml/resources/documents.py
@@ -6,7 +6,7 @@
from ..types import document_list_params, document_create_params, document_update_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -115,7 +115,7 @@ def retrieve(
if not id:
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
return self._get(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -157,7 +157,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._patch(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
body=maybe_transform(
{
"content": content,
@@ -248,7 +248,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -347,7 +347,7 @@ async def retrieve(
if not id:
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
return await self._get(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -389,7 +389,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._patch(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
body=await async_maybe_transform(
{
"content": content,
@@ -480,7 +480,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/documents/{id}",
+ path_template("/v1/documents/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/runwayml/resources/realtime_sessions.py b/src/runwayml/resources/realtime_sessions.py
index 13214ac..23dbfbf 100644
--- a/src/runwayml/resources/realtime_sessions.py
+++ b/src/runwayml/resources/realtime_sessions.py
@@ -9,7 +9,7 @@
from ..types import realtime_session_create_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -130,7 +130,7 @@ def retrieve(
return cast(
RealtimeSessionRetrieveResponse,
self._get(
- f"/v1/realtime_sessions/{id}",
+ path_template("/v1/realtime_sessions/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -167,7 +167,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/realtime_sessions/{id}",
+ path_template("/v1/realtime_sessions/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -280,7 +280,7 @@ async def retrieve(
return cast(
RealtimeSessionRetrieveResponse,
await self._get(
- f"/v1/realtime_sessions/{id}",
+ path_template("/v1/realtime_sessions/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -317,7 +317,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/realtime_sessions/{id}",
+ path_template("/v1/realtime_sessions/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/runwayml/resources/tasks.py b/src/runwayml/resources/tasks.py
index 6c1a637..22ca86a 100644
--- a/src/runwayml/resources/tasks.py
+++ b/src/runwayml/resources/tasks.py
@@ -7,6 +7,7 @@
import httpx
from .._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from .._utils import path_template
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -118,7 +119,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/tasks/{id}",
+ path_template("/v1/tasks/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -218,7 +219,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/tasks/{id}",
+ path_template("/v1/tasks/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/runwayml/resources/voices.py b/src/runwayml/resources/voices.py
index 3636ad9..7ec7166 100644
--- a/src/runwayml/resources/voices.py
+++ b/src/runwayml/resources/voices.py
@@ -9,7 +9,7 @@
from ..types import voice_list_params, voice_create_params, voice_preview_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -123,7 +123,7 @@ def retrieve(
return cast(
VoiceRetrieveResponse,
self._get(
- f"/v1/voices/{id}",
+ path_template("/v1/voices/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -208,7 +208,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/voices/{id}",
+ path_template("/v1/voices/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -357,7 +357,7 @@ async def retrieve(
return cast(
VoiceRetrieveResponse,
await self._get(
- f"/v1/voices/{id}",
+ path_template("/v1/voices/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -442,7 +442,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/voices/{id}",
+ path_template("/v1/voices/{id}", id=id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/runwayml/resources/workflow_invocations.py b/src/runwayml/resources/workflow_invocations.py
new file mode 100644
index 0000000..7a599a9
--- /dev/null
+++ b/src/runwayml/resources/workflow_invocations.py
@@ -0,0 +1,189 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, cast
+
+import httpx
+
+from .._types import Body, Query, Headers, NotGiven, not_given
+from .._utils import path_template
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..lib.polling import (
+ AwaitableWorkflowInvocationRetrieveResponse,
+ AsyncAwaitableWorkflowInvocationRetrieveResponse,
+ inject_sync_workflow_invocation_wait_method,
+ inject_async_workflow_invocation_wait_method,
+)
+from .._base_client import make_request_options
+
+__all__ = ["WorkflowInvocationsResource", "AsyncWorkflowInvocationsResource"]
+
+
+class WorkflowInvocationsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> WorkflowInvocationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return WorkflowInvocationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> WorkflowInvocationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+ """
+ return WorkflowInvocationsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AwaitableWorkflowInvocationRetrieveResponse:
+ """Return details about a workflow invocation.
+
+ Consumers of this API should not
+ expect updates more frequent than once every five seconds for a given workflow
+ invocation.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ response = self._get(
+ path_template("/v1/workflow_invocations/{id}", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(
+ Any, AwaitableWorkflowInvocationRetrieveResponse
+ ), # Union types cannot be passed in as arguments in the type system
+ )
+ return cast(
+ AwaitableWorkflowInvocationRetrieveResponse,
+ inject_sync_workflow_invocation_wait_method(self._client, response),
+ )
+
+
+class AsyncWorkflowInvocationsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncWorkflowInvocationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncWorkflowInvocationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncWorkflowInvocationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+ """
+ return AsyncWorkflowInvocationsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncAwaitableWorkflowInvocationRetrieveResponse:
+ """Return details about a workflow invocation.
+
+ Consumers of this API should not
+ expect updates more frequent than once every five seconds for a given workflow
+ invocation.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ response = await self._get(
+ path_template("/v1/workflow_invocations/{id}", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(
+ Any, AsyncAwaitableWorkflowInvocationRetrieveResponse
+ ), # Union types cannot be passed in as arguments in the type system
+ )
+ return cast(
+ AsyncAwaitableWorkflowInvocationRetrieveResponse,
+ inject_async_workflow_invocation_wait_method(self._client, response),
+ )
+
+
+class WorkflowInvocationsResourceWithRawResponse:
+ def __init__(self, workflow_invocations: WorkflowInvocationsResource) -> None:
+ self._workflow_invocations = workflow_invocations
+
+ self.retrieve = to_raw_response_wrapper(
+ workflow_invocations.retrieve,
+ )
+
+
+class AsyncWorkflowInvocationsResourceWithRawResponse:
+ def __init__(self, workflow_invocations: AsyncWorkflowInvocationsResource) -> None:
+ self._workflow_invocations = workflow_invocations
+
+ self.retrieve = async_to_raw_response_wrapper(
+ workflow_invocations.retrieve,
+ )
+
+
+class WorkflowInvocationsResourceWithStreamingResponse:
+ def __init__(self, workflow_invocations: WorkflowInvocationsResource) -> None:
+ self._workflow_invocations = workflow_invocations
+
+ self.retrieve = to_streamed_response_wrapper(
+ workflow_invocations.retrieve,
+ )
+
+
+class AsyncWorkflowInvocationsResourceWithStreamingResponse:
+ def __init__(self, workflow_invocations: AsyncWorkflowInvocationsResource) -> None:
+ self._workflow_invocations = workflow_invocations
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ workflow_invocations.retrieve,
+ )
diff --git a/src/runwayml/resources/workflows.py b/src/runwayml/resources/workflows.py
new file mode 100644
index 0000000..a7038eb
--- /dev/null
+++ b/src/runwayml/resources/workflows.py
@@ -0,0 +1,319 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+
+import httpx
+
+from ..types import workflow_run_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import path_template, maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.workflow_run_response import WorkflowRunResponse
+from ..types.workflow_list_response import WorkflowListResponse
+from ..types.workflow_retrieve_response import WorkflowRetrieveResponse
+
+__all__ = ["WorkflowsResource", "AsyncWorkflowsResource"]
+
+
+class WorkflowsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> WorkflowsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return WorkflowsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> WorkflowsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+ """
+ return WorkflowsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowRetrieveResponse:
+ """
+ Returns details about a specific published workflow, including its graph schema.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ path_template("/v1/workflows/{id}", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowListResponse:
+ """
+ Returns a list of all published workflows for the authenticated user, grouped by
+ source workflow with their published versions.
+ """
+ return self._get(
+ "/v1/workflows",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowListResponse,
+ )
+
+ def run(
+ self,
+ id: str,
+ *,
+ node_outputs: Dict[str, Dict[str, workflow_run_params.NodeOutputsNodeOutputsItem]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowRunResponse:
+ """Start a new task to execute a published workflow.
+
+ You can optionally provide
+ custom input values via `nodeOutputs` to override the defaults defined in the
+ workflow graph.
+
+ Args:
+ node_outputs: Optional node outputs to override default values. Keys are node IDs from the
+ workflow graph, values are objects mapping output keys to typed values.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._post(
+ path_template("/v1/workflows/{id}", id=id),
+ body=maybe_transform({"node_outputs": node_outputs}, workflow_run_params.WorkflowRunParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowRunResponse,
+ )
+
+
+class AsyncWorkflowsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncWorkflowsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncWorkflowsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncWorkflowsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runwayml/sdk-python#with_streaming_response
+ """
+ return AsyncWorkflowsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowRetrieveResponse:
+ """
+ Returns details about a specific published workflow, including its graph schema.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ path_template("/v1/workflows/{id}", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowListResponse:
+ """
+ Returns a list of all published workflows for the authenticated user, grouped by
+ source workflow with their published versions.
+ """
+ return await self._get(
+ "/v1/workflows",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowListResponse,
+ )
+
+ async def run(
+ self,
+ id: str,
+ *,
+ node_outputs: Dict[str, Dict[str, workflow_run_params.NodeOutputsNodeOutputsItem]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkflowRunResponse:
+ """Start a new task to execute a published workflow.
+
+ You can optionally provide
+ custom input values via `nodeOutputs` to override the defaults defined in the
+ workflow graph.
+
+ Args:
+ node_outputs: Optional node outputs to override default values. Keys are node IDs from the
+ workflow graph, values are objects mapping output keys to typed values.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._post(
+ path_template("/v1/workflows/{id}", id=id),
+ body=await async_maybe_transform({"node_outputs": node_outputs}, workflow_run_params.WorkflowRunParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkflowRunResponse,
+ )
+
+
+class WorkflowsResourceWithRawResponse:
+ def __init__(self, workflows: WorkflowsResource) -> None:
+ self._workflows = workflows
+
+ self.retrieve = to_raw_response_wrapper(
+ workflows.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ workflows.list,
+ )
+ self.run = to_raw_response_wrapper(
+ workflows.run,
+ )
+
+
+class AsyncWorkflowsResourceWithRawResponse:
+ def __init__(self, workflows: AsyncWorkflowsResource) -> None:
+ self._workflows = workflows
+
+ self.retrieve = async_to_raw_response_wrapper(
+ workflows.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ workflows.list,
+ )
+ self.run = async_to_raw_response_wrapper(
+ workflows.run,
+ )
+
+
+class WorkflowsResourceWithStreamingResponse:
+ def __init__(self, workflows: WorkflowsResource) -> None:
+ self._workflows = workflows
+
+ self.retrieve = to_streamed_response_wrapper(
+ workflows.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ workflows.list,
+ )
+ self.run = to_streamed_response_wrapper(
+ workflows.run,
+ )
+
+
+class AsyncWorkflowsResourceWithStreamingResponse:
+ def __init__(self, workflows: AsyncWorkflowsResource) -> None:
+ self._workflows = workflows
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ workflows.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ workflows.list,
+ )
+ self.run = async_to_streamed_response_wrapper(
+ workflows.run,
+ )
diff --git a/src/runwayml/types/__init__.py b/src/runwayml/types/__init__.py
index f7a7212..a3e8611 100644
--- a/src/runwayml/types/__init__.py
+++ b/src/runwayml/types/__init__.py
@@ -6,12 +6,14 @@
from .avatar_list_params import AvatarListParams as AvatarListParams
from .voice_create_params import VoiceCreateParams as VoiceCreateParams
from .voice_list_response import VoiceListResponse as VoiceListResponse
+from .workflow_run_params import WorkflowRunParams as WorkflowRunParams
from .avatar_create_params import AvatarCreateParams as AvatarCreateParams
from .avatar_list_response import AvatarListResponse as AvatarListResponse
from .avatar_update_params import AvatarUpdateParams as AvatarUpdateParams
from .document_list_params import DocumentListParams as DocumentListParams
from .voice_preview_params import VoicePreviewParams as VoicePreviewParams
from .voice_create_response import VoiceCreateResponse as VoiceCreateResponse
+from .workflow_run_response import WorkflowRunResponse as WorkflowRunResponse
from .avatar_create_response import AvatarCreateResponse as AvatarCreateResponse
from .avatar_update_response import AvatarUpdateResponse as AvatarUpdateResponse
from .document_create_params import DocumentCreateParams as DocumentCreateParams
@@ -19,11 +21,13 @@
from .document_update_params import DocumentUpdateParams as DocumentUpdateParams
from .task_retrieve_response import TaskRetrieveResponse as TaskRetrieveResponse
from .voice_preview_response import VoicePreviewResponse as VoicePreviewResponse
+from .workflow_list_response import WorkflowListResponse as WorkflowListResponse
from .voice_retrieve_response import VoiceRetrieveResponse as VoiceRetrieveResponse
from .avatar_retrieve_response import AvatarRetrieveResponse as AvatarRetrieveResponse
from .document_create_response import DocumentCreateResponse as DocumentCreateResponse
from .document_retrieve_response import DocumentRetrieveResponse as DocumentRetrieveResponse
from .sound_effect_create_params import SoundEffectCreateParams as SoundEffectCreateParams
+from .workflow_retrieve_response import WorkflowRetrieveResponse as WorkflowRetrieveResponse
from .text_to_image_create_params import TextToImageCreateParams as TextToImageCreateParams
from .text_to_video_create_params import TextToVideoCreateParams as TextToVideoCreateParams
from .voice_dubbing_create_params import VoiceDubbingCreateParams as VoiceDubbingCreateParams
@@ -51,3 +55,6 @@
from .character_performance_create_response import (
CharacterPerformanceCreateResponse as CharacterPerformanceCreateResponse,
)
+from .workflow_invocation_retrieve_response import (
+ WorkflowInvocationRetrieveResponse as WorkflowInvocationRetrieveResponse,
+)
diff --git a/src/runwayml/types/workflow_invocation_retrieve_response.py b/src/runwayml/types/workflow_invocation_retrieve_response.py
new file mode 100644
index 0000000..2e79b0f
--- /dev/null
+++ b/src/runwayml/types/workflow_invocation_retrieve_response.py
@@ -0,0 +1,174 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from datetime import datetime
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from pydantic import Field as FieldInfo
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+ "WorkflowInvocationRetrieveResponse",
+ "Pending",
+ "Throttled",
+ "Cancelled",
+ "Running",
+ "RunningNodeErrors",
+ "Failed",
+ "FailedNodeErrors",
+ "Succeeded",
+ "SucceededNodeErrors",
+]
+
+
+class Pending(BaseModel):
+ """A pending workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ status: Literal["PENDING"]
+
+
+class Throttled(BaseModel):
+ """A throttled workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ status: Literal["THROTTLED"]
+
+
+class Cancelled(BaseModel):
+ """A cancelled or deleted workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ status: Literal["CANCELLED"]
+
+
+class RunningNodeErrors(BaseModel):
+ message: str
+ """A human-readable description of the node error."""
+
+ node_name: Optional[str] = FieldInfo(alias="nodeName", default=None)
+ """The human-readable name of the node that errored."""
+
+
+class Running(BaseModel):
+ """A running workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ output: Dict[str, List[str]]
+ """
+ A record mapping workflow node IDs to arrays of output URLs for nodes that have
+ already completed. This allows streaming partial results while the workflow is
+ still running.
+ """
+
+ progress: float
+ """A number between 0 and 1 representing the overall workflow execution progress."""
+
+ status: Literal["RUNNING"]
+
+ node_errors: Optional[Dict[str, RunningNodeErrors]] = FieldInfo(alias="nodeErrors", default=None)
+ """A record mapping workflow node IDs to their error details.
+
+ Only present when one or more nodes have errored.
+ """
+
+
+class FailedNodeErrors(BaseModel):
+ message: str
+ """A human-readable description of the node error."""
+
+ node_name: Optional[str] = FieldInfo(alias="nodeName", default=None)
+ """The human-readable name of the node that errored."""
+
+
+class Failed(BaseModel):
+ """A failed workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ failure: str
+ """A human-friendly reason for the failure.
+
+ We do not recommend returning this to users directly without adding context.
+ """
+
+ status: Literal["FAILED"]
+
+ failure_code: Optional[str] = FieldInfo(alias="failureCode", default=None)
+ """A machine-readable error code for the failure.
+
+ See https://docs.dev.runwayml.com/errors/task-failures/ for more information.
+ """
+
+ node_errors: Optional[Dict[str, FailedNodeErrors]] = FieldInfo(alias="nodeErrors", default=None)
+ """A record mapping workflow node IDs to their error details.
+
+ Only present when one or more nodes have errored.
+ """
+
+
+class SucceededNodeErrors(BaseModel):
+ message: str
+ """A human-readable description of the node error."""
+
+ node_name: Optional[str] = FieldInfo(alias="nodeName", default=None)
+ """The human-readable name of the node that errored."""
+
+
+class Succeeded(BaseModel):
+ """A succeeded workflow invocation"""
+
+ id: str
+ """The ID of the workflow invocation being returned."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """The timestamp that the workflow invocation was submitted at."""
+
+ output: Dict[str, List[str]]
+ """A record mapping workflow node IDs to arrays of output URLs.
+
+ Each key is the UUID of a workflow node that produced output, and each value is
+ an array of URLs for that node's artifacts. These URLs will expire within 24-48
+ hours; fetch the invocation again to get fresh URLs. It is expected that you
+ download the assets at these URLs and store them in your own storage system.
+ """
+
+ status: Literal["SUCCEEDED"]
+
+ node_errors: Optional[Dict[str, SucceededNodeErrors]] = FieldInfo(alias="nodeErrors", default=None)
+ """A record mapping workflow node IDs to their error details.
+
+ Even when the overall workflow succeeds, individual nodes may have encountered
+ non-fatal errors. Only present when one or more nodes have errored.
+ """
+
+
+WorkflowInvocationRetrieveResponse: TypeAlias = Annotated[
+ Union[Pending, Throttled, Cancelled, Running, Failed, Succeeded], PropertyInfo(discriminator="status")
+]
diff --git a/src/runwayml/types/workflow_list_response.py b/src/runwayml/types/workflow_list_response.py
new file mode 100644
index 0000000..77bdef3
--- /dev/null
+++ b/src/runwayml/types/workflow_list_response.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from datetime import datetime
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["WorkflowListResponse", "Data", "DataVersion"]
+
+
+class DataVersion(BaseModel):
+ """A specific published version of a workflow."""
+
+ id: str
+ """The globally unique ID of this published workflow version."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """When this version was published"""
+
+ version: int
+ """A monotonically increasing version number.
+
+ Each workflow version for the same published workflow has a unique version
+ number.
+ """
+
+
+class Data(BaseModel):
+ """A published workflow with all its available versions."""
+
+ name: str
+ """The name of the published workflow."""
+
+ versions: List[DataVersion]
+ """The published versions of this workflow, newest first."""
+
+
+class WorkflowListResponse(BaseModel):
+ data: List[Data]
+ """A list of published workflows grouped by source workflow."""
diff --git a/src/runwayml/types/workflow_retrieve_response.py b/src/runwayml/types/workflow_retrieve_response.py
new file mode 100644
index 0000000..d541e0e
--- /dev/null
+++ b/src/runwayml/types/workflow_retrieve_response.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["WorkflowRetrieveResponse", "Graph"]
+
+
+class Graph(BaseModel):
+ """The workflow graph definition."""
+
+ edges: List[object]
+ """The list of edges connecting nodes in the workflow graph."""
+
+ nodes: List[object]
+ """The list of nodes in the workflow graph."""
+
+ version: int
+ """The schema version of the workflow graph format."""
+
+
+class WorkflowRetrieveResponse(BaseModel):
+ id: str
+ """The globally unique ID of the published workflow."""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """When this version was published"""
+
+ description: Optional[str] = None
+ """The description of the published workflow."""
+
+ graph: Graph
+ """The workflow graph definition."""
+
+ name: str
+ """The name of the published workflow."""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """When this version was last updated"""
+
+ version: int
+ """A monotonically increasing version number.
+
+ Each workflow version for the same published workflow has a unique version
+ number.
+ """
diff --git a/src/runwayml/types/workflow_run_params.py b/src/runwayml/types/workflow_run_params.py
new file mode 100644
index 0000000..dd8ea73
--- /dev/null
+++ b/src/runwayml/types/workflow_run_params.py
@@ -0,0 +1,69 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union
+from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = [
+ "WorkflowRunParams",
+ "NodeOutputsNodeOutputsItem",
+ "NodeOutputsNodeOutputsItemPrimitive",
+ "NodeOutputsNodeOutputsItemImage",
+ "NodeOutputsNodeOutputsItemVideo",
+ "NodeOutputsNodeOutputsItemAudio",
+]
+
+
+class WorkflowRunParams(TypedDict, total=False):
+ node_outputs: Annotated[Dict[str, Dict[str, NodeOutputsNodeOutputsItem]], PropertyInfo(alias="nodeOutputs")]
+ """Optional node outputs to override default values.
+
+ Keys are node IDs from the workflow graph, values are objects mapping output
+ keys to typed values.
+ """
+
+
+class NodeOutputsNodeOutputsItemPrimitive(TypedDict, total=False):
+ """A primitive value (string, number, or boolean)"""
+
+ type: Required[Literal["primitive"]]
+
+ value: Required[Union[str, float, bool]]
+
+
+class NodeOutputsNodeOutputsItemImage(TypedDict, total=False):
+ """An image asset"""
+
+ type: Required[Literal["image"]]
+
+ uri: Required[str]
+ """A HTTPS URL."""
+
+
+class NodeOutputsNodeOutputsItemVideo(TypedDict, total=False):
+ """A video asset"""
+
+ type: Required[Literal["video"]]
+
+ uri: Required[str]
+ """A HTTPS URL."""
+
+
+class NodeOutputsNodeOutputsItemAudio(TypedDict, total=False):
+ """An audio asset"""
+
+ type: Required[Literal["audio"]]
+
+ uri: Required[str]
+ """A HTTPS URL."""
+
+
+NodeOutputsNodeOutputsItem: TypeAlias = Union[
+ NodeOutputsNodeOutputsItemPrimitive,
+ NodeOutputsNodeOutputsItemImage,
+ NodeOutputsNodeOutputsItemVideo,
+ NodeOutputsNodeOutputsItemAudio,
+]
diff --git a/src/runwayml/types/workflow_run_response.py b/src/runwayml/types/workflow_run_response.py
new file mode 100644
index 0000000..91b5297
--- /dev/null
+++ b/src/runwayml/types/workflow_run_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["WorkflowRunResponse"]
+
+
+class WorkflowRunResponse(BaseModel):
+ id: str
+ """The ID of the workflow invocation that was created."""
diff --git a/tests/api_resources/test_workflow_invocations.py b/tests/api_resources/test_workflow_invocations.py
new file mode 100644
index 0000000..6cbf1d2
--- /dev/null
+++ b/tests/api_resources/test_workflow_invocations.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from runwayml import RunwayML, AsyncRunwayML
+from tests.utils import assert_matches_type
+from runwayml.types import WorkflowInvocationRetrieveResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestWorkflowInvocations:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: RunwayML) -> None:
+ workflow_invocation = client.workflow_invocations.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: RunwayML) -> None:
+ response = client.workflow_invocations.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow_invocation = response.parse()
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: RunwayML) -> None:
+ with client.workflow_invocations.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow_invocation = response.parse()
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: RunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.workflow_invocations.with_raw_response.retrieve(
+ "",
+ )
+
+
+class TestAsyncWorkflowInvocations:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunwayML) -> None:
+ workflow_invocation = await async_client.workflow_invocations.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunwayML) -> None:
+ response = await async_client.workflow_invocations.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow_invocation = await response.parse()
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunwayML) -> None:
+ async with async_client.workflow_invocations.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow_invocation = await response.parse()
+ assert_matches_type(WorkflowInvocationRetrieveResponse, workflow_invocation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.workflow_invocations.with_raw_response.retrieve(
+ "",
+ )
diff --git a/tests/api_resources/test_workflows.py b/tests/api_resources/test_workflows.py
new file mode 100644
index 0000000..43f9487
--- /dev/null
+++ b/tests/api_resources/test_workflows.py
@@ -0,0 +1,256 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from runwayml import RunwayML, AsyncRunwayML
+from tests.utils import assert_matches_type
+from runwayml.types import WorkflowRunResponse, WorkflowListResponse, WorkflowRetrieveResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestWorkflows:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: RunwayML) -> None:
+ workflow = client.workflows.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: RunwayML) -> None:
+ response = client.workflows.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = response.parse()
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: RunwayML) -> None:
+ with client.workflows.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = response.parse()
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: RunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.workflows.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: RunwayML) -> None:
+ workflow = client.workflows.list()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: RunwayML) -> None:
+ response = client.workflows.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = response.parse()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: RunwayML) -> None:
+ with client.workflows.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = response.parse()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_run(self, client: RunwayML) -> None:
+ workflow = client.workflows.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_method_run_with_all_params(self, client: RunwayML) -> None:
+ workflow = client.workflows.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ node_outputs={
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e": {
+ "foo": {
+ "type": "primitive",
+ "value": "string",
+ }
+ }
+ },
+ )
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_raw_response_run(self, client: RunwayML) -> None:
+ response = client.workflows.with_raw_response.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = response.parse()
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ def test_streaming_response_run(self, client: RunwayML) -> None:
+ with client.workflows.with_streaming_response.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = response.parse()
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_run(self, client: RunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.workflows.with_raw_response.run(
+ id="",
+ )
+
+
+class TestAsyncWorkflows:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunwayML) -> None:
+ workflow = await async_client.workflows.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunwayML) -> None:
+ response = await async_client.workflows.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = await response.parse()
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunwayML) -> None:
+ async with async_client.workflows.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = await response.parse()
+ assert_matches_type(WorkflowRetrieveResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.workflows.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncRunwayML) -> None:
+ workflow = await async_client.workflows.list()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncRunwayML) -> None:
+ response = await async_client.workflows.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = await response.parse()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncRunwayML) -> None:
+ async with async_client.workflows.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = await response.parse()
+ assert_matches_type(WorkflowListResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_run(self, async_client: AsyncRunwayML) -> None:
+ workflow = await async_client.workflows.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_method_run_with_all_params(self, async_client: AsyncRunwayML) -> None:
+ workflow = await async_client.workflows.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ node_outputs={
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e": {
+ "foo": {
+ "type": "primitive",
+ "value": "string",
+ }
+ }
+ },
+ )
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_raw_response_run(self, async_client: AsyncRunwayML) -> None:
+ response = await async_client.workflows.with_raw_response.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workflow = await response.parse()
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_run(self, async_client: AsyncRunwayML) -> None:
+ async with async_client.workflows.with_streaming_response.run(
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workflow = await response.parse()
+ assert_matches_type(WorkflowRunResponse, workflow, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_run(self, async_client: AsyncRunwayML) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.workflows.with_raw_response.run(
+ id="",
+ )
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
new file mode 100644
index 0000000..0621849
--- /dev/null
+++ b/tests/test_utils/test_path.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from runwayml._utils._path import path_template
+
+
+@pytest.mark.parametrize(
+ "template, kwargs, expected",
+ [
+ ("/v1/{id}", dict(id="abc"), "/v1/abc"),
+ ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"),
+ ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"),
+ ("/{w}/{w}", dict(w="echo"), "/echo/echo"),
+ ("/v1/static", {}, "/v1/static"),
+ ("", {}, ""),
+ ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"),
+ ("/v1/{v}", dict(v=None), "/v1/null"),
+ ("/v1/{v}", dict(v=True), "/v1/true"),
+ ("/v1/{v}", dict(v=False), "/v1/false"),
+ ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"), # dot prefix ok
+ ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"), # dot in middle ok
+ ("/v1/{v}", dict(v="..."), "/v1/..."), # triple dot ok
+ ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"), # dot var combining with adjacent to be ok
+ ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."), # dots in query/fragment are fine
+ (
+ "/v1/{a}?query={b}",
+ dict(a="../../other/endpoint", b="a&bad=true"),
+ "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue",
+ ),
+ ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"),
+ ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"),
+ ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"),
+ ("/v1/{val}", dict(val="%20"), "/v1/%2520"), # escapes escape sequences in input
+ # Query: slash and ? are safe, # is not
+ ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"),
+ ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"),
+ ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"),
+ ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"),
+ # Fragment: slash and ? are safe
+ ("/docs#{v}", dict(v="a/b"), "/docs#a/b"),
+ ("/docs#{v}", dict(v="a?b"), "/docs#a?b"),
+ # Path: slash, ? and # are all encoded
+ ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"),
+ ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"),
+ ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"),
+ # same var encoded differently by component
+ (
+ "/v1/{v}?q={v}#{v}",
+ dict(v="a/b?c#d"),
+ "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d",
+ ),
+ ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"), # query injection
+ ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"), # fragment injection
+ ],
+)
+def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None:
+ assert path_template(template, **kwargs) == expected
+
+
+def test_missing_kwarg_raises_key_error() -> None:
+ with pytest.raises(KeyError, match="org_id"):
+ path_template("/v1/{org_id}")
+
+
+@pytest.mark.parametrize(
+ "template, kwargs",
+ [
+ ("{a}/path", dict(a=".")),
+ ("{a}/path", dict(a="..")),
+ ("/v1/{a}", dict(a=".")),
+ ("/v1/{a}", dict(a="..")),
+ ("/v1/{a}/path", dict(a=".")),
+ ("/v1/{a}/path", dict(a="..")),
+ ("/v1/{a}{b}", dict(a=".", b=".")), # adjacent vars → ".."
+ ("/v1/{a}.", dict(a=".")), # var + static → ".."
+ ("/v1/{a}{b}", dict(a="", b=".")), # empty + dot → "."
+ ("/v1/%2e/{x}", dict(x="ok")), # encoded dot in static text
+ ("/v1/%2e./{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/.%2E/{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/{v}?q=1", dict(v="..")),
+ ("/v1/{v}#frag", dict(v="..")),
+ ],
+)
+def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None:
+ with pytest.raises(ValueError, match="dot-segment"):
+ path_template(template, **kwargs)
diff --git a/tests/test_workflow_invocation_polling.py b/tests/test_workflow_invocation_polling.py
new file mode 100644
index 0000000..9290cae
--- /dev/null
+++ b/tests/test_workflow_invocation_polling.py
@@ -0,0 +1,209 @@
+from __future__ import annotations
+
+from typing import Any
+from datetime import datetime, timezone
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from runwayml import RunwayML, AsyncRunwayML, WorkflowInvocationFailedError, WorkflowInvocationTimeoutError
+from runwayml.types.workflow_invocation_retrieve_response import (
+ Failed,
+ Pending,
+ Cancelled,
+ Succeeded,
+)
+
+
+def _make_pending(id: str = "inv-1") -> Pending:
+ return Pending(id=id, createdAt=datetime.now(tz=timezone.utc), status="PENDING")
+
+
+def _make_succeeded(id: str = "inv-1") -> Succeeded:
+ return Succeeded(
+ id=id,
+ createdAt=datetime.now(tz=timezone.utc),
+ status="SUCCEEDED",
+ output={"node-1": ["https://example.com/output.mp4"]},
+ )
+
+
+def _make_failed(id: str = "inv-1") -> Failed:
+ return Failed(
+ id=id,
+ createdAt=datetime.now(tz=timezone.utc),
+ status="FAILED",
+ failure="Generation failed",
+ )
+
+
+def _make_cancelled(id: str = "inv-1") -> Cancelled:
+ return Cancelled(id=id, createdAt=datetime.now(tz=timezone.utc), status="CANCELLED")
+
+
+class TestSyncWorkflowInvocationPolling:
+ @patch("runwayml.lib.polling.time.sleep", return_value=None)
+ def test_resolves_on_succeeded(self, _mock_sleep: MagicMock) -> None:
+ client = MagicMock(spec=RunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = MagicMock(return_value=_make_succeeded())
+
+ from runwayml.lib.polling import inject_sync_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_sync_workflow_invocation_wait_method(client, response)
+ result = patched.wait_for_task_output()
+
+ assert result.status == "SUCCEEDED"
+ client.workflow_invocations.retrieve.assert_called_once_with(response.id)
+
+ @patch("runwayml.lib.polling.time.sleep", return_value=None)
+ def test_polls_until_succeeded(self, _mock_sleep: MagicMock) -> None:
+ client = MagicMock(spec=RunwayML)
+ client.workflow_invocations = MagicMock()
+ responses = [_make_pending(), _make_pending(), _make_succeeded()]
+ client.workflow_invocations.retrieve = MagicMock(side_effect=responses)
+
+ from runwayml.lib.polling import inject_sync_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_sync_workflow_invocation_wait_method(client, response)
+ result = patched.wait_for_task_output()
+
+ assert result.status == "SUCCEEDED"
+ assert client.workflow_invocations.retrieve.call_count == 3
+
+ @patch("runwayml.lib.polling.time.sleep", return_value=None)
+ def test_raises_on_failed(self, _mock_sleep: MagicMock) -> None:
+ client = MagicMock(spec=RunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = MagicMock(return_value=_make_failed())
+
+ from runwayml.lib.polling import inject_sync_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_sync_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationFailedError) as exc_info:
+ patched.wait_for_task_output()
+
+ assert exc_info.value.invocation_details.status == "FAILED"
+
+ @patch("runwayml.lib.polling.time.sleep", return_value=None)
+ def test_raises_on_cancelled(self, _mock_sleep: MagicMock) -> None:
+ client = MagicMock(spec=RunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = MagicMock(return_value=_make_cancelled())
+
+ from runwayml.lib.polling import inject_sync_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_sync_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationFailedError) as exc_info:
+ patched.wait_for_task_output()
+
+ assert exc_info.value.invocation_details.status == "CANCELLED"
+
+ @patch("runwayml.lib.polling.time.time")
+ @patch("runwayml.lib.polling.time.sleep", return_value=None)
+ def test_raises_on_timeout(self, _mock_sleep: MagicMock, mock_time: MagicMock) -> None:
+ mock_time.side_effect = [0.0, 700.0]
+
+ client = MagicMock(spec=RunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = MagicMock(return_value=_make_pending())
+
+ from runwayml.lib.polling import inject_sync_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_sync_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationTimeoutError):
+ patched.wait_for_task_output(timeout=600)
+
+
+class TestAsyncWorkflowInvocationPolling:
+ @patch("runwayml.lib.polling.anyio.sleep", new_callable=AsyncMock)
+ @patch("runwayml.lib.polling.anyio.current_time", return_value=0.0)
+ async def test_resolves_on_succeeded(self, _mock_time: MagicMock, _mock_sleep: AsyncMock) -> None:
+ client = MagicMock(spec=AsyncRunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = AsyncMock(return_value=_make_succeeded())
+
+ from runwayml.lib.polling import inject_async_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_async_workflow_invocation_wait_method(client, response)
+ result = await patched.wait_for_task_output()
+
+ assert result.status == "SUCCEEDED"
+ client.workflow_invocations.retrieve.assert_called_once_with(response.id)
+
+ @patch("runwayml.lib.polling.anyio.sleep", new_callable=AsyncMock)
+ @patch("runwayml.lib.polling.anyio.current_time", return_value=0.0)
+ async def test_polls_until_succeeded(self, _mock_time: MagicMock, _mock_sleep: AsyncMock) -> None:
+ client = MagicMock(spec=AsyncRunwayML)
+ client.workflow_invocations = MagicMock()
+ responses = [_make_pending(), _make_pending(), _make_succeeded()]
+ client.workflow_invocations.retrieve = AsyncMock(side_effect=responses)
+
+ from runwayml.lib.polling import inject_async_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_async_workflow_invocation_wait_method(client, response)
+ result = await patched.wait_for_task_output()
+
+ assert result.status == "SUCCEEDED"
+ assert client.workflow_invocations.retrieve.call_count == 3
+
+ @patch("runwayml.lib.polling.anyio.sleep", new_callable=AsyncMock)
+ @patch("runwayml.lib.polling.anyio.current_time", return_value=0.0)
+ async def test_raises_on_failed(self, _mock_time: MagicMock, _mock_sleep: AsyncMock) -> None:
+ client = MagicMock(spec=AsyncRunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = AsyncMock(return_value=_make_failed())
+
+ from runwayml.lib.polling import inject_async_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_async_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationFailedError) as exc_info:
+ await patched.wait_for_task_output()
+
+ assert exc_info.value.invocation_details.status == "FAILED"
+
+ @patch("runwayml.lib.polling.anyio.sleep", new_callable=AsyncMock)
+ @patch("runwayml.lib.polling.anyio.current_time", return_value=0.0)
+ async def test_raises_on_cancelled(self, _mock_time: MagicMock, _mock_sleep: AsyncMock) -> None:
+ client = MagicMock(spec=AsyncRunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = AsyncMock(return_value=_make_cancelled())
+
+ from runwayml.lib.polling import inject_async_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_async_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationFailedError) as exc_info:
+ await patched.wait_for_task_output()
+
+ assert exc_info.value.invocation_details.status == "CANCELLED"
+
+ @patch("runwayml.lib.polling.anyio.sleep", new_callable=AsyncMock)
+ @patch("runwayml.lib.polling.anyio.current_time")
+ async def test_raises_on_timeout(self, mock_time: MagicMock, _mock_sleep: AsyncMock) -> None:
+ mock_time.side_effect = [0.0, 700.0]
+
+ client = MagicMock(spec=AsyncRunwayML)
+ client.workflow_invocations = MagicMock()
+ client.workflow_invocations.retrieve = AsyncMock(return_value=_make_pending())
+
+ from runwayml.lib.polling import inject_async_workflow_invocation_wait_method
+
+ response = _make_pending()
+ patched: Any = inject_async_workflow_invocation_wait_method(client, response)
+
+ with pytest.raises(WorkflowInvocationTimeoutError):
+ await patched.wait_for_task_output(timeout=600)