diff --git a/examples/hello_world_portkey_adk.py b/examples/hello_world_portkey_adk.py
index 1ebe6fea..5a7b95bf 100644
--- a/examples/hello_world_portkey_adk.py
+++ b/examples/hello_world_portkey_adk.py
@@ -43,9 +43,10 @@ async def main() -> None:
final_text: List[str] = []
async for resp in llm.generate_content_async(req, stream=False):
if resp.content and getattr(resp.content, "parts", None):
- for p in resp.content.parts:
- if getattr(p, "text", None):
- final_text.append(p.text)
+ for p in resp.content.parts or []:
+ text = getattr(p, "text", None)
+ if text:
+ final_text.append(text)
print("".join(final_text))
diff --git a/examples/hello_world_portkey_strands.py b/examples/hello_world_portkey_strands.py
index a3453000..edef0425 100644
--- a/examples/hello_world_portkey_strands.py
+++ b/examples/hello_world_portkey_strands.py
@@ -31,7 +31,7 @@ async def main() -> None:
]
print(f"Streaming with model: {model_id}")
- async for event in model.stream(messages=messages):
+ async for event in model.stream(messages=messages): # type: ignore[arg-type]
# Events follow the Strands stream event shape produced by our adapter.
if isinstance(event, dict) and "contentBlockDelta" in event:
delta = event["contentBlockDelta"].get("delta", {})
diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py
index 8f785a81..31735830 100644
--- a/portkey_ai/__init__.py
+++ b/portkey_ai/__init__.py
@@ -147,6 +147,14 @@
AsyncConversationsItems,
Videos,
AsyncVideos,
+ Skills,
+ AsyncSkills,
+ SkillsContent,
+ AsyncSkillsContent,
+ SkillsVersions,
+ AsyncSkillsVersions,
+ SkillsVersionsContent,
+ AsyncSkillsVersionsContent,
ChatKit,
AsyncChatKit,
ChatKitSessions,
@@ -347,6 +355,14 @@
"AsyncConversationsItems",
"Videos",
"AsyncVideos",
+ "Skills",
+ "AsyncSkills",
+ "SkillsContent",
+ "AsyncSkillsContent",
+ "SkillsVersions",
+ "AsyncSkillsVersions",
+ "SkillsVersionsContent",
+ "AsyncSkillsVersionsContent",
"ChatKit",
"AsyncChatKit",
"ChatKitSessions",
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/INSTALLER b/portkey_ai/_vendor/openai-2.30.0.dist-info/INSTALLER
similarity index 100%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/INSTALLER
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/INSTALLER
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA b/portkey_ai/_vendor/openai-2.30.0.dist-info/METADATA
similarity index 99%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/METADATA
index 69197f70..6b3f09ae 100644
--- a/portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA
+++ b/portkey_ai/_vendor/openai-2.30.0.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.3
Name: openai
-Version: 2.16.0
+Version: 2.30.0
Summary: The official Python library for the openai API
Project-URL: Homepage, https://github.com/openai/openai-python
Project-URL: Repository, https://github.com/openai/openai-python
@@ -29,7 +29,7 @@ Requires-Dist: jiter<1,>=0.10.0
Requires-Dist: pydantic<3,>=1.9.0
Requires-Dist: sniffio
Requires-Dist: tqdm>4
-Requires-Dist: typing-extensions<5,>=4.11
+Requires-Dist: typing-extensions<5,>=4.14
Provides-Extra: aiohttp
Requires-Dist: aiohttp; extra == 'aiohttp'
Requires-Dist: httpx-aiohttp>=0.1.9; extra == 'aiohttp'
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD b/portkey_ai/_vendor/openai-2.30.0.dist-info/RECORD
similarity index 67%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/RECORD
index 22342e7e..6fb6d4aa 100644
--- a/portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD
+++ b/portkey_ai/_vendor/openai-2.30.0.dist-info/RECORD
@@ -1,72 +1,76 @@
-../../bin/openai,sha256=5Jct5kiiHC4vRDm7YaFyG9z68HSoNekUZLWDLWR-2pk,266
-openai-2.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-openai-2.16.0.dist-info/METADATA,sha256=ysvsnWaf9QhIY9i9Bpvw_Te-aRUR3ZmSwCyFrvUl0bs,29167
-openai-2.16.0.dist-info/RECORD,,
-openai-2.16.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-openai-2.16.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-openai-2.16.0.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
-openai-2.16.0.dist-info/licenses/LICENSE,sha256=Y263152pu21RWks_1BeqJmees88WOW3atLxV-nTmFuQ,11336
-openai/__init__.py,sha256=Fvc0dwOoaIZDN_s3iV62jlxeU5d7qn-Q8eQIaPIdD8g,11196
+../../bin/openai,sha256=cjWmuDARcocjwKDAUknB51A6sDRrmhrt1TqB6iZzkC0,203
+openai-2.30.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+openai-2.30.0.dist-info/METADATA,sha256=JdY3AlShidRD7M5Wi6XRFGtg8hwSkQR-RJaK1UacY18,29209
+openai-2.30.0.dist-info/RECORD,,
+openai-2.30.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+openai-2.30.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+openai-2.30.0.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
+openai-2.30.0.dist-info/licenses/LICENSE,sha256=Y263152pu21RWks_1BeqJmees88WOW3atLxV-nTmFuQ,11336
+openai/__init__.py,sha256=f8_rebSbi5YEvA0jN-ijE6kVjokQP-6-SIsr_rAoa7I,11218
openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
-openai/__pycache__/__init__.cpython-310.pyc,,
-openai/__pycache__/__main__.cpython-310.pyc,,
-openai/__pycache__/_base_client.cpython-310.pyc,,
-openai/__pycache__/_client.cpython-310.pyc,,
-openai/__pycache__/_compat.cpython-310.pyc,,
-openai/__pycache__/_constants.cpython-310.pyc,,
-openai/__pycache__/_exceptions.cpython-310.pyc,,
-openai/__pycache__/_files.cpython-310.pyc,,
-openai/__pycache__/_legacy_response.cpython-310.pyc,,
-openai/__pycache__/_models.cpython-310.pyc,,
-openai/__pycache__/_module_client.cpython-310.pyc,,
-openai/__pycache__/_qs.cpython-310.pyc,,
-openai/__pycache__/_resource.cpython-310.pyc,,
-openai/__pycache__/_response.cpython-310.pyc,,
-openai/__pycache__/_streaming.cpython-310.pyc,,
-openai/__pycache__/_types.cpython-310.pyc,,
-openai/__pycache__/_version.cpython-310.pyc,,
-openai/__pycache__/pagination.cpython-310.pyc,,
-openai/__pycache__/version.cpython-310.pyc,,
-openai/_base_client.py,sha256=oaTAUxbeNjdxieHoF3bw5pcXYM_OTZZrkbqNxZPCWxI,74599
-openai/_client.py,sha256=uZlue35miO_zv84wqaCRvqchb6WH3UaKPbZGIvIs0Gw,44645
-openai/_compat.py,sha256=k2XpUhYfgp5ZXkZkQAftJHt_UWFjUct1Sm2ye2kPBXo,6964
+openai/__pycache__/__init__.cpython-314.pyc,,
+openai/__pycache__/__main__.cpython-314.pyc,,
+openai/__pycache__/_base_client.cpython-314.pyc,,
+openai/__pycache__/_client.cpython-314.pyc,,
+openai/__pycache__/_compat.cpython-314.pyc,,
+openai/__pycache__/_constants.cpython-314.pyc,,
+openai/__pycache__/_exceptions.cpython-314.pyc,,
+openai/__pycache__/_files.cpython-314.pyc,,
+openai/__pycache__/_legacy_response.cpython-314.pyc,,
+openai/__pycache__/_models.cpython-314.pyc,,
+openai/__pycache__/_module_client.cpython-314.pyc,,
+openai/__pycache__/_qs.cpython-314.pyc,,
+openai/__pycache__/_resource.cpython-314.pyc,,
+openai/__pycache__/_response.cpython-314.pyc,,
+openai/__pycache__/_streaming.cpython-314.pyc,,
+openai/__pycache__/_types.cpython-314.pyc,,
+openai/__pycache__/_version.cpython-314.pyc,,
+openai/__pycache__/pagination.cpython-314.pyc,,
+openai/__pycache__/version.cpython-314.pyc,,
+openai/_base_client.py,sha256=ODsaz53lakFTPKf9GjmDb2ijlvISuTipx19opxyJU0c,75019
+openai/_client.py,sha256=VCLRKREmm73FvLy79mlaFtr1TVsdZK3SCIPoUrY1C4U,51842
+openai/_compat.py,sha256=yntbvsd6TcP1hjcLEpqhozb1JW5pZWy81caHiE8rv50,7213
openai/_constants.py,sha256=WmCwgT4tGmFsSrltb26f3bM8ftUyFYkzh32Ny5yl-So,467
openai/_exceptions.py,sha256=TYcCxnfT7fln5duvVnCVJ0znuUHXSAbCT5sAMnaeKjU,5008
openai/_extras/__init__.py,sha256=sainrYWujCxIyL24wNpKfMVr-ZyBPlnSZfqXcg2S6Xg,165
-openai/_extras/__pycache__/__init__.cpython-310.pyc,,
-openai/_extras/__pycache__/_common.cpython-310.pyc,,
-openai/_extras/__pycache__/numpy_proxy.cpython-310.pyc,,
-openai/_extras/__pycache__/pandas_proxy.cpython-310.pyc,,
-openai/_extras/__pycache__/sounddevice_proxy.cpython-310.pyc,,
+openai/_extras/__pycache__/__init__.cpython-314.pyc,,
+openai/_extras/__pycache__/_common.cpython-314.pyc,,
+openai/_extras/__pycache__/numpy_proxy.cpython-314.pyc,,
+openai/_extras/__pycache__/pandas_proxy.cpython-314.pyc,,
+openai/_extras/__pycache__/sounddevice_proxy.cpython-314.pyc,,
openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364
openai/_extras/numpy_proxy.py,sha256=LyTZkKDdnjz0h1SKLsphrhmXyUsJ_xEUhTFMrCf7k7g,805
openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637
openai/_extras/sounddevice_proxy.py,sha256=xDoE21YGu13dSAJJkiOM9Qdb7uOIv5zskaJRX6xciEg,725
openai/_files.py,sha256=cQOoF0UFpnyH5JMIdu_EvGpj_dGzH1ojtJvyX7Xwqn0,3612
-openai/_legacy_response.py,sha256=fx9I0IInZY1zr2bUmpqW2ZUcL9JW2xS6S4NqFuwhdPM,16237
-openai/_models.py,sha256=mJ9fhqRLDypXY73XoPy5CpCZa5zwce6N3-OV9nX-GzI,33856
-openai/_module_client.py,sha256=33fORSMWHuxqpvlROvYVMEIvaWUishUpSeaqpsOjWuI,5033
+openai/_legacy_response.py,sha256=PosLYYYw_SAIfQEJUDP9V1HuFkgyXk0bsqi2NMzU0e4,16374
+openai/_models.py,sha256=Y41b0440TPhSXXfwFpT9UwFLUof0CEMTvKnssA1SwOY,33945
+openai/_module_client.py,sha256=oX583D833ihXIYGRqWAZtIn3gdgCxMVSmwIy7c2Qu88,5265
openai/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828
openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100
-openai/_response.py,sha256=zLVaMPYE1o2Tz1eS5_bnJNGMikRN1byMpMcVpW1tgIU,29510
-openai/_streaming.py,sha256=dPO6F5Klse8Nax72QfB_R3VGLoPzJwsw1Yrj0sgEo2Y,13769
-openai/_types.py,sha256=EBBMTWsJ2GtJsDhUFLrZhXHF22zmwLOJ70ncG--ODO8,7722
-openai/_utils/__init__.py,sha256=qiOG_n0G-sP5r5jNvD4OUaeaVLFEw5s-h7h7b0nD7Nk,2465
-openai/_utils/__pycache__/__init__.cpython-310.pyc,,
-openai/_utils/__pycache__/_compat.cpython-310.pyc,,
-openai/_utils/__pycache__/_datetime_parse.cpython-310.pyc,,
-openai/_utils/__pycache__/_logs.cpython-310.pyc,,
-openai/_utils/__pycache__/_proxy.cpython-310.pyc,,
-openai/_utils/__pycache__/_reflection.cpython-310.pyc,,
-openai/_utils/__pycache__/_resources_proxy.cpython-310.pyc,,
-openai/_utils/__pycache__/_streams.cpython-310.pyc,,
-openai/_utils/__pycache__/_sync.cpython-310.pyc,,
-openai/_utils/__pycache__/_transform.cpython-310.pyc,,
-openai/_utils/__pycache__/_typing.cpython-310.pyc,,
-openai/_utils/__pycache__/_utils.cpython-310.pyc,,
-openai/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
+openai/_response.py,sha256=bwodmRyBD5CiLox8d9fjZM3rAgzHBBOxqTuRJnARTUU,29647
+openai/_streaming.py,sha256=MXjrtyH0_zzabUeVILor3c3HCW4qmYJ8agvBUP5EU0o,14608
+openai/_types.py,sha256=J0P9Obqh4MHmHN-YmDKeVow_Wc2pEOCBgMSElDHOG_w,7758
+openai/_utils/__init__.py,sha256=vVCHF-1fxQQuBKVH6_ymxdk4b1UX8V8bScc8XriXxoY,2515
+openai/_utils/__pycache__/__init__.cpython-314.pyc,,
+openai/_utils/__pycache__/_compat.cpython-314.pyc,,
+openai/_utils/__pycache__/_datetime_parse.cpython-314.pyc,,
+openai/_utils/__pycache__/_json.cpython-314.pyc,,
+openai/_utils/__pycache__/_logs.cpython-314.pyc,,
+openai/_utils/__pycache__/_path.cpython-314.pyc,,
+openai/_utils/__pycache__/_proxy.cpython-314.pyc,,
+openai/_utils/__pycache__/_reflection.cpython-314.pyc,,
+openai/_utils/__pycache__/_resources_proxy.cpython-314.pyc,,
+openai/_utils/__pycache__/_streams.cpython-314.pyc,,
+openai/_utils/__pycache__/_sync.cpython-314.pyc,,
+openai/_utils/__pycache__/_transform.cpython-314.pyc,,
+openai/_utils/__pycache__/_typing.cpython-314.pyc,,
+openai/_utils/__pycache__/_utils.cpython-314.pyc,,
+openai/_utils/_compat.py,sha256=33246eDcl3pwL6kWsEhVuT4Akrd8gZEW9LPTm465ohk,1231
openai/_utils/_datetime_parse.py,sha256=bABTs0Bc6rabdFvnIwXjEhWL15TcRgWZ_6XGTqN8xUk,4204
+openai/_utils/_json.py,sha256=bl95uuIWwgSfXX-gP1trK_lDAPwJujYfJ05Cxo2SEC4,962
openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351
+openai/_utils/_path.py,sha256=Dk294levuJXP5e4m20YRDDZFPl7zegXvc9Kb3dUCwJI,4701
openai/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975
openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426
openai/_utils/_resources_proxy.py,sha256=AHHZCOgv-2CRqB4B52dB7ySlE5q6QCWj0bsTqNmzikw,589
@@ -75,33 +79,33 @@ openai/_utils/_sync.py,sha256=HBnZkkBnzxtwOZe0212C4EyoRvxhTVtTrLFDz2_xVCg,1589
openai/_utils/_transform.py,sha256=hzILp2ijV9J7D-uoEDmadtyCmzMK6DprJP8IlwEg0ZY,15999
openai/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786
openai/_utils/_utils.py,sha256=Z2y9rNbK-worRedH9Ub9tO_FSIjl0SH2AV9Tdgz9LUA,12667
-openai/_version.py,sha256=bJPiIwvwNSWU56_IZn2UmZYbi97vB6FNxxSZpgEx2I8,159
+openai/_version.py,sha256=DFPGpyiLJN7psBxmmdiQ6IyhCurlVnR7i5__d325q0g,159
openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31
-openai/cli/__pycache__/__init__.cpython-310.pyc,,
-openai/cli/__pycache__/_cli.cpython-310.pyc,,
-openai/cli/__pycache__/_errors.cpython-310.pyc,,
-openai/cli/__pycache__/_models.cpython-310.pyc,,
-openai/cli/__pycache__/_progress.cpython-310.pyc,,
-openai/cli/__pycache__/_utils.cpython-310.pyc,,
+openai/cli/__pycache__/__init__.cpython-314.pyc,,
+openai/cli/__pycache__/_cli.cpython-314.pyc,,
+openai/cli/__pycache__/_errors.cpython-314.pyc,,
+openai/cli/__pycache__/_models.cpython-314.pyc,,
+openai/cli/__pycache__/_progress.cpython-314.pyc,,
+openai/cli/__pycache__/_utils.cpython-314.pyc,,
openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58
-openai/cli/_api/__pycache__/__init__.cpython-310.pyc,,
-openai/cli/_api/__pycache__/_main.cpython-310.pyc,,
-openai/cli/_api/__pycache__/audio.cpython-310.pyc,,
-openai/cli/_api/__pycache__/completions.cpython-310.pyc,,
-openai/cli/_api/__pycache__/files.cpython-310.pyc,,
-openai/cli/_api/__pycache__/image.cpython-310.pyc,,
-openai/cli/_api/__pycache__/models.cpython-310.pyc,,
+openai/cli/_api/__pycache__/__init__.cpython-314.pyc,,
+openai/cli/_api/__pycache__/_main.cpython-314.pyc,,
+openai/cli/_api/__pycache__/audio.cpython-314.pyc,,
+openai/cli/_api/__pycache__/completions.cpython-314.pyc,,
+openai/cli/_api/__pycache__/files.cpython-314.pyc,,
+openai/cli/_api/__pycache__/image.cpython-314.pyc,,
+openai/cli/_api/__pycache__/models.cpython-314.pyc,,
openai/cli/_api/_main.py,sha256=3xVyycq-4HEYMBdMDJFk893PTXpr8yvkGL3eCiuSx8E,501
openai/cli/_api/audio.py,sha256=0GU49a-XurLlyVEy2V9IZ_pDmjL1XEBI7Jp7fQfJ5Sk,3757
openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300
-openai/cli/_api/chat/__pycache__/__init__.cpython-310.pyc,,
-openai/cli/_api/chat/__pycache__/completions.cpython-310.pyc,,
+openai/cli/_api/chat/__pycache__/__init__.cpython-314.pyc,,
+openai/cli/_api/chat/__pycache__/completions.cpython-314.pyc,,
openai/cli/_api/chat/completions.py,sha256=GyfAo3B2w2ySV0dK9D2IIVA4fOb0zqJZadQ-Yc8a_yU,5536
openai/cli/_api/completions.py,sha256=Jy1rlQqw__12ZfbTrnZJgoGBbDKJ58kOUAT-vkLr5kE,6334
openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345
openai/cli/_api/fine_tuning/__init__.py,sha256=hZeWhTZtIRAl1xgSbznjpCYy9lnUUXngh8uEIbVn__Y,286
-openai/cli/_api/fine_tuning/__pycache__/__init__.cpython-310.pyc,,
-openai/cli/_api/fine_tuning/__pycache__/jobs.cpython-310.pyc,,
+openai/cli/_api/fine_tuning/__pycache__/__init__.cpython-314.pyc,,
+openai/cli/_api/fine_tuning/__pycache__/jobs.cpython-314.pyc,,
openai/cli/_api/fine_tuning/jobs.py,sha256=4wj9DPfw3343fJQW9j52Q-ga4jYa1haOTn4yYsH_zqk,5311
openai/cli/_api/image.py,sha256=3UDZ1R8SjYh4IOhhdJqf20FPqPgPdhpRxqu3eo5BKhU,5014
openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295
@@ -110,330 +114,360 @@ openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471
openai/cli/_models.py,sha256=_budygMbXh3Fv-w-TDfWecZNiKfox6f0lliCUytxE1Q,491
openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406
openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58
-openai/cli/_tools/__pycache__/__init__.cpython-310.pyc,,
-openai/cli/_tools/__pycache__/_main.cpython-310.pyc,,
-openai/cli/_tools/__pycache__/fine_tunes.cpython-310.pyc,,
-openai/cli/_tools/__pycache__/migrate.cpython-310.pyc,,
+openai/cli/_tools/__pycache__/__init__.cpython-314.pyc,,
+openai/cli/_tools/__pycache__/_main.cpython-314.pyc,,
+openai/cli/_tools/__pycache__/fine_tunes.cpython-314.pyc,,
+openai/cli/_tools/__pycache__/migrate.cpython-314.pyc,,
openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467
openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543
openai/cli/_tools/migrate.py,sha256=o-iomzhtC6N6X5H5GDlgQ_QOaIovE2YA9oHc_tIAUj8,4497
openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848
openai/helpers/__init__.py,sha256=F0x_Pguq1XC2KXZYbfxUG-G_FxJ3mlsi7HaFZ1x-g9A,130
-openai/helpers/__pycache__/__init__.cpython-310.pyc,,
-openai/helpers/__pycache__/local_audio_player.cpython-310.pyc,,
-openai/helpers/__pycache__/microphone.cpython-310.pyc,,
+openai/helpers/__pycache__/__init__.cpython-314.pyc,,
+openai/helpers/__pycache__/local_audio_player.cpython-314.pyc,,
+openai/helpers/__pycache__/microphone.cpython-314.pyc,,
openai/helpers/local_audio_player.py,sha256=7MWwt1BYEh579z1brnQ2mUEB0Ble4UoGMHDKusOfZJQ,5852
openai/helpers/microphone.py,sha256=6tIHWZGpRA5XvUoer-nPBvHbrmxK7CWx3_Ta-qp1H54,3341
openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126
-openai/lib/__pycache__/__init__.cpython-310.pyc,,
-openai/lib/__pycache__/_old_api.cpython-310.pyc,,
-openai/lib/__pycache__/_pydantic.cpython-310.pyc,,
-openai/lib/__pycache__/_realtime.cpython-310.pyc,,
-openai/lib/__pycache__/_tools.cpython-310.pyc,,
-openai/lib/__pycache__/_validators.cpython-310.pyc,,
-openai/lib/__pycache__/azure.cpython-310.pyc,,
+openai/lib/__pycache__/__init__.cpython-314.pyc,,
+openai/lib/__pycache__/_old_api.cpython-314.pyc,,
+openai/lib/__pycache__/_pydantic.cpython-314.pyc,,
+openai/lib/__pycache__/_realtime.cpython-314.pyc,,
+openai/lib/__pycache__/_tools.cpython-314.pyc,,
+openai/lib/__pycache__/_validators.cpython-314.pyc,,
+openai/lib/__pycache__/azure.cpython-314.pyc,,
openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947
-openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539
-openai/lib/_parsing/__pycache__/__init__.cpython-310.pyc,,
-openai/lib/_parsing/__pycache__/_completions.cpython-310.pyc,,
-openai/lib/_parsing/__pycache__/_responses.cpython-310.pyc,,
-openai/lib/_parsing/_completions.py,sha256=3vihFrFWJIrToaWYjJMqn42gTyNmrQhXvi2vr5Wduo8,10629
-openai/lib/_parsing/_responses.py,sha256=g47-6Vbw4cAjkUrHRAG_PAeJzJlwSxngiezog5UUYwI,6246
+openai/lib/_parsing/__init__.py,sha256=_tOYASbvaNQLj6jf6knPP3_mRFRr1vGJXudxf0npVFo,483
+openai/lib/_parsing/__pycache__/__init__.cpython-314.pyc,,
+openai/lib/_parsing/__pycache__/_completions.cpython-314.pyc,,
+openai/lib/_parsing/__pycache__/_responses.cpython-314.pyc,,
+openai/lib/_parsing/_completions.py,sha256=2Rj_Kujo39aOrE36vV1QmRTjAQpLQFr5lhasYdmTo58,10000
+openai/lib/_parsing/_responses.py,sha256=VMqXRy2COxrSUq28FTWMEIE1b7EmWT8kn3Au1xPa-7k,6430
openai/lib/_pydantic.py,sha256=Cf0vGwuWdNEuIUg8WNREjWRGApMObgl8DjdLU4f5jAc,5623
openai/lib/_realtime.py,sha256=4ani2j6lt21SXrC6Ep_GQnLA7eEOo4UoZ8I1JAuqtn0,3980
openai/lib/_tools.py,sha256=Dc4U2TXKvfAvVUvDS30SDeftrwgGM2vZ85t5ojLHiEg,1969
openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288
openai/lib/azure.py,sha256=dLzUXTXUOnfarLdDyO6dVzp8wY2vTMFFHUJZLuFznWY,26537
openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379
-openai/lib/streaming/__pycache__/__init__.cpython-310.pyc,,
-openai/lib/streaming/__pycache__/_assistants.cpython-310.pyc,,
-openai/lib/streaming/__pycache__/_deltas.cpython-310.pyc,,
+openai/lib/streaming/__pycache__/__init__.cpython-314.pyc,,
+openai/lib/streaming/__pycache__/_assistants.cpython-314.pyc,,
+openai/lib/streaming/__pycache__/_deltas.cpython-314.pyc,,
openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692
openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502
openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305
-openai/lib/streaming/chat/__pycache__/__init__.cpython-310.pyc,,
-openai/lib/streaming/chat/__pycache__/_completions.cpython-310.pyc,,
-openai/lib/streaming/chat/__pycache__/_events.cpython-310.pyc,,
-openai/lib/streaming/chat/__pycache__/_types.cpython-310.pyc,,
-openai/lib/streaming/chat/_completions.py,sha256=4PDLu_1-wQOrAwHY-Gz8NIQ8UnJ9gshwrmxuMDesFp8,30775
+openai/lib/streaming/chat/__pycache__/__init__.cpython-314.pyc,,
+openai/lib/streaming/chat/__pycache__/_completions.cpython-314.pyc,,
+openai/lib/streaming/chat/__pycache__/_events.cpython-314.pyc,,
+openai/lib/streaming/chat/__pycache__/_types.cpython-314.pyc,,
+openai/lib/streaming/chat/_completions.py,sha256=3xwZSiM7uA37GqxpSgTNWQbe_inCecJkHYRt4qqNKqQ,30704
openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618
openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739
openai/lib/streaming/responses/__init__.py,sha256=MwE1Oc3OIiXjtuRFsuP_k5Ra8pNiqKpc1GZum-8ZRJM,543
-openai/lib/streaming/responses/__pycache__/__init__.cpython-310.pyc,,
-openai/lib/streaming/responses/__pycache__/_events.cpython-310.pyc,,
-openai/lib/streaming/responses/__pycache__/_responses.cpython-310.pyc,,
-openai/lib/streaming/responses/__pycache__/_types.cpython-310.pyc,,
+openai/lib/streaming/responses/__pycache__/__init__.cpython-314.pyc,,
+openai/lib/streaming/responses/__pycache__/_events.cpython-314.pyc,,
+openai/lib/streaming/responses/__pycache__/_responses.cpython-314.pyc,,
+openai/lib/streaming/responses/__pycache__/_types.cpython-314.pyc,,
openai/lib/streaming/responses/_events.py,sha256=3UWmeYgg23E3XTkYVlrpXJPnhBM2kmQFoXh3WiT9CrE,5576
openai/lib/streaming/responses/_responses.py,sha256=Myeo4so-aMFrzEyNCjX0ypYWTWvY5uDelhe2ygC93lY,13614
openai/lib/streaming/responses/_types.py,sha256=msq1KWj3e3BLn7NKu5j2kzHgj9kShuoitgXEyTmQxus,276
openai/pagination.py,sha256=dtPji3wApb_0rkvYDwh50rl8cjxT3i6EUS6PfTXwhQI,4770
openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-openai/resources/__init__.py,sha256=YDrG7nC0vTf4xk-JCSs0132OA5XWmqAMtjWu4wypnB4,6067
-openai/resources/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/__pycache__/batches.cpython-310.pyc,,
-openai/resources/__pycache__/completions.cpython-310.pyc,,
-openai/resources/__pycache__/embeddings.cpython-310.pyc,,
-openai/resources/__pycache__/files.cpython-310.pyc,,
-openai/resources/__pycache__/images.cpython-310.pyc,,
-openai/resources/__pycache__/models.cpython-310.pyc,,
-openai/resources/__pycache__/moderations.cpython-310.pyc,,
-openai/resources/__pycache__/videos.cpython-310.pyc,,
-openai/resources/__pycache__/webhooks.cpython-310.pyc,,
+openai/resources/__init__.py,sha256=9z6J2zeOeo2FDsWZ4AiIhEjDjMNyYrifAnvcM29-zL8,6421
+openai/resources/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/__pycache__/batches.cpython-314.pyc,,
+openai/resources/__pycache__/completions.cpython-314.pyc,,
+openai/resources/__pycache__/embeddings.cpython-314.pyc,,
+openai/resources/__pycache__/files.cpython-314.pyc,,
+openai/resources/__pycache__/images.cpython-314.pyc,,
+openai/resources/__pycache__/models.cpython-314.pyc,,
+openai/resources/__pycache__/moderations.cpython-314.pyc,,
+openai/resources/__pycache__/videos.cpython-314.pyc,,
openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687
-openai/resources/audio/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/audio/__pycache__/audio.cpython-310.pyc,,
-openai/resources/audio/__pycache__/speech.cpython-310.pyc,,
-openai/resources/audio/__pycache__/transcriptions.cpython-310.pyc,,
-openai/resources/audio/__pycache__/translations.cpython-310.pyc,,
-openai/resources/audio/audio.py,sha256=nEIB4q7a1MSYdQkcYH2O6jB-_rNCMDCBJyUuqOL67CI,5491
-openai/resources/audio/speech.py,sha256=OT6cTwSeQWx_9zBu4eeCGnJ2KoBcK82WoNXM2d7dsXQ,10316
-openai/resources/audio/transcriptions.py,sha256=SHB5wio25FMIxL12_ErjuGCtuQ3fRpkT2Zo0aDFthAY,52213
-openai/resources/audio/translations.py,sha256=IsPiYZtr9BLS7pgAWAneU7yNq1E9igDCa-QXN12PhZM,15505
-openai/resources/batches.py,sha256=kdQbfxnHMZ7sB8Z5ZQriwxYE2u9_6_Z4LM4wxulYwjA,21002
+openai/resources/audio/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/audio/__pycache__/audio.cpython-314.pyc,,
+openai/resources/audio/__pycache__/speech.cpython-314.pyc,,
+openai/resources/audio/__pycache__/transcriptions.cpython-314.pyc,,
+openai/resources/audio/__pycache__/translations.cpython-314.pyc,,
+openai/resources/audio/audio.py,sha256=_UsPa7eB21y0wAyxtgrsisEVP7xKDCh_sFtYtHO7Wfo,6481
+openai/resources/audio/speech.py,sha256=DWaCXcKFqZcXbaWVzLkhL5SmVdUIRYkT4bT115UU7jg,10570
+openai/resources/audio/transcriptions.py,sha256=WTj2lIGcKjgHQOacu6LuW-dVInuNU4Gi5pjel8O-FLY,53133
+openai/resources/audio/translations.py,sha256=zd3ZpiIO28oCJcw-C99xDg8CNzVyoT-tmz9iA3QRXhA,15609
+openai/resources/batches.py,sha256=LFA3fK0O4LUmblTDIfLa-CKHdAjIIWg14NXu-JBwniY,21729
openai/resources/beta/__init__.py,sha256=chKjkpkqNxO1Dbl9OsCJNXVC1AbDcvTrvfvvAIh5B5I,1570
-openai/resources/beta/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/beta/__pycache__/assistants.cpython-310.pyc,,
-openai/resources/beta/__pycache__/beta.cpython-310.pyc,,
-openai/resources/beta/assistants.py,sha256=UUUeaqFksHnx8lRgxlvkv6gNtTv4VSgxrs8LomgSc-0,50594
-openai/resources/beta/beta.py,sha256=Lrsu8f9haXb4bZphmw9wgHzn8uZUBUUC11mZa3XRbr0,5725
+openai/resources/beta/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/beta/__pycache__/assistants.cpython-314.pyc,,
+openai/resources/beta/__pycache__/beta.cpython-314.pyc,,
+openai/resources/beta/assistants.py,sha256=KgEboAjYjRagOlF3apcEcjb2hb5GG0Q4pFRmxPyYM68,50983
+openai/resources/beta/beta.py,sha256=r1556zGWoMhrap6FSh1biLDhw70cy5xw5uuhrTn7FrQ,6529
openai/resources/beta/chatkit/__init__.py,sha256=lJAQpi-JogtnSAlOegSae6WfCfgRLMd8rpPBuT9_2FE,1216
-openai/resources/beta/chatkit/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/beta/chatkit/__pycache__/chatkit.cpython-310.pyc,,
-openai/resources/beta/chatkit/__pycache__/sessions.cpython-310.pyc,,
-openai/resources/beta/chatkit/__pycache__/threads.cpython-310.pyc,,
+openai/resources/beta/chatkit/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/beta/chatkit/__pycache__/chatkit.cpython-314.pyc,,
+openai/resources/beta/chatkit/__pycache__/sessions.cpython-314.pyc,,
+openai/resources/beta/chatkit/__pycache__/threads.cpython-314.pyc,,
openai/resources/beta/chatkit/chatkit.py,sha256=CleguF_80H_gAsyX_tLoGxFTD7YItZPshRAZ2QAqfzY,4333
-openai/resources/beta/chatkit/sessions.py,sha256=H8oIyd8V553LruOilYuTnXwU8Mh_z5xOjuu7GunaGIc,11837
-openai/resources/beta/chatkit/threads.py,sha256=kqngETyzx9uRBKtgfq9r9WrtOXpNfnHej4PkrVfnklo,20077
+openai/resources/beta/chatkit/sessions.py,sha256=11C4qy5aVEmyAztX6Q3nTBQtsXZdU6YT9Rk4gJNJges,12176
+openai/resources/beta/chatkit/threads.py,sha256=FBPRkDx9KiEl5MTAx-IpYQRQhjf0n2zPbft9ZpcRZno,20552
openai/resources/beta/realtime/__init__.py,sha256=dOXRjPiDqRJXIFoGKSVjzKh3IwSXnLbwHx4ND5OdnVs,1412
-openai/resources/beta/realtime/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/beta/realtime/__pycache__/realtime.cpython-310.pyc,,
-openai/resources/beta/realtime/__pycache__/sessions.cpython-310.pyc,,
-openai/resources/beta/realtime/__pycache__/transcription_sessions.cpython-310.pyc,,
+openai/resources/beta/realtime/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/beta/realtime/__pycache__/realtime.cpython-314.pyc,,
+openai/resources/beta/realtime/__pycache__/sessions.cpython-314.pyc,,
+openai/resources/beta/realtime/__pycache__/transcription_sessions.cpython-314.pyc,,
openai/resources/beta/realtime/realtime.py,sha256=tuiq_0PdFmC2p-LNOfQNrVuDEMlLAHKEgeAsPsHLUHU,43694
openai/resources/beta/realtime/sessions.py,sha256=EQva_qI71CgS35qkK9TGxuibviHwUQ6VzErIzunP4gU,22098
openai/resources/beta/realtime/transcription_sessions.py,sha256=uTDGEat50lojdD0N8slnZu2RVzMP96rlicpDp4tpl34,14124
openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177
-openai/resources/beta/threads/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/beta/threads/__pycache__/messages.cpython-310.pyc,,
-openai/resources/beta/threads/__pycache__/threads.cpython-310.pyc,,
-openai/resources/beta/threads/messages.py,sha256=a8HEG-QKIgG8r4XtE0M7ixRBikAmdQEUDWUDf1gkaSg,30794
+openai/resources/beta/threads/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/beta/threads/__pycache__/messages.cpython-314.pyc,,
+openai/resources/beta/threads/__pycache__/threads.cpython-314.pyc,,
+openai/resources/beta/threads/messages.py,sha256=w-48lHdCg5wCP1dzl9UDdmzv9z6AXWu00ZgIcsPwly4,31425
openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771
-openai/resources/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/beta/threads/runs/__pycache__/runs.cpython-310.pyc,,
-openai/resources/beta/threads/runs/__pycache__/steps.cpython-310.pyc,,
-openai/resources/beta/threads/runs/runs.py,sha256=JQ5LaI33KcXLumleh-TxyTTJWYkbeQhTh8FxhHNmYzg,155523
-openai/resources/beta/threads/runs/steps.py,sha256=YkoPMeMXEzoL09AWF7Eh1lNaJocykV1igmcsZpXKw5Y,16981
-openai/resources/beta/threads/threads.py,sha256=3C3OzlgL0S1mDdnRBowU14Di8W7T81C2BEGFm5Mx42Y,97651
+openai/resources/beta/threads/runs/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/beta/threads/runs/__pycache__/runs.cpython-314.pyc,,
+openai/resources/beta/threads/runs/__pycache__/steps.cpython-314.pyc,,
+openai/resources/beta/threads/runs/runs.py,sha256=uswVS3ij8MwpRACofwQUzwarzSbxXgDpunvdxuQqe3U,157142
+openai/resources/beta/threads/runs/steps.py,sha256=j6foQlOVOwf-3pYoroSAn2N6Q9lg5HQzry5Hoq85Hok,17516
+openai/resources/beta/threads/threads.py,sha256=e8jY5_I8zxzAjNXJNEmMsoB_Cx75K0eqZJnDI-jXIGA,99002
openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849
-openai/resources/chat/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/chat/__pycache__/chat.cpython-310.pyc,,
-openai/resources/chat/chat.py,sha256=HjcasSCmt-g3-J-RkZQ9HRj_-hPfImakFxdUvvk5mCg,3364
+openai/resources/chat/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/chat/__pycache__/chat.cpython-314.pyc,,
+openai/resources/chat/chat.py,sha256=NdZ-a8Y_lVk7rCBkgGGpWK6JybfpyYTFyJWpcBO9zSE,4072
openai/resources/chat/completions/__init__.py,sha256=KOi8blzNyHWD7nKgcoW3CxZ4428IcNVP0gCU74HySf8,901
-openai/resources/chat/completions/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/chat/completions/__pycache__/completions.cpython-310.pyc,,
-openai/resources/chat/completions/__pycache__/messages.cpython-310.pyc,,
-openai/resources/chat/completions/completions.py,sha256=r5M4ICb-GHJUsbrokxXvQizh_iBsygiRX-kho_X78JQ,164608
-openai/resources/chat/completions/messages.py,sha256=AYVwQ24jPQGs2Y-vE6Yjl5nbCECtuw-HpcBEEpCgC-0,8010
-openai/resources/completions.py,sha256=wO39_sLxmSzTI6Mp13KzjqaxMgFZw4l-t0_9xxDbX_4,59201
+openai/resources/chat/completions/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/chat/completions/__pycache__/completions.cpython-314.pyc,,
+openai/resources/chat/completions/__pycache__/messages.cpython-314.pyc,,
+openai/resources/chat/completions/completions.py,sha256=l2pPeNnYOt-hxi35s9Dkb-Ey6xILKhHqGFLI1QacqzI,166461
+openai/resources/chat/completions/messages.py,sha256=tNqnOam17jodvnu2j4UEfItFlg9_MVCVv33ECF1DbR8,8325
+openai/resources/completions.py,sha256=6Vz1yGDy8qHsE8gQr0TkXWpoIPJxmbmB2DBs5aqjofc,60205
openai/resources/containers/__init__.py,sha256=7VzY-TFwG3x5D_kUCs_iAQaaCKAswt1Jk70KpmnU8Do,849
-openai/resources/containers/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/containers/__pycache__/containers.cpython-310.pyc,,
-openai/resources/containers/containers.py,sha256=IjkEbXITLjCMt0rpZpvocfBro_M0ZEB4rF5fYEfLWs8,19650
+openai/resources/containers/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/containers/__pycache__/containers.cpython-314.pyc,,
+openai/resources/containers/containers.py,sha256=RQMB9804eMOnO1II5WTLnm0Df0SNwT1OWnoGJeAmYcs,20875
openai/resources/containers/files/__init__.py,sha256=nDhg0wY7eHRMO-xOErno0mV0Ya_ynlmKAp-4a3nj-us,810
-openai/resources/containers/files/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/containers/files/__pycache__/content.cpython-310.pyc,,
-openai/resources/containers/files/__pycache__/files.cpython-310.pyc,,
-openai/resources/containers/files/content.py,sha256=-jupriq97X2kq_yCdYihZ1h2qCx-IMbaaR10M4lz6TA,6491
-openai/resources/containers/files/files.py,sha256=jjiRGS489CzoOXb3nvsD-i3qTSINE9CrAo2jZPWxyLI,21042
+openai/resources/containers/files/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/containers/files/__pycache__/content.cpython-314.pyc,,
+openai/resources/containers/files/__pycache__/files.cpython-314.pyc,,
+openai/resources/containers/files/content.py,sha256=ZzBefQwKNLx0HhqYjLiKiZ05foq28U38gYMZcl7pV2I,6704
+openai/resources/containers/files/files.py,sha256=DyfkQCJ0EtVRioL_L4DEc0Xk1Rd-ZMXtSkQ0XdRouz0,21521
openai/resources/conversations/__init__.py,sha256=Uslb4pakT8pQJGQ29CvoiN-SvN2AgMum-TeIDyYTzQE,888
-openai/resources/conversations/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/conversations/__pycache__/conversations.cpython-310.pyc,,
-openai/resources/conversations/__pycache__/items.cpython-310.pyc,,
-openai/resources/conversations/conversations.py,sha256=IjnSvilsJG_yK4IoRP86R6_5MFlHSpZt6lWxgpbGP-Y,19151
-openai/resources/conversations/items.py,sha256=q3XbPsh09Gb9qYisb6BEa9BExX4HF5oMu-Z0khdAFlY,23969
-openai/resources/embeddings.py,sha256=GYA_sI2h5auPwyHKm44-brPxRxqvcQaH0JQMZW13bMA,12374
+openai/resources/conversations/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/conversations/__pycache__/conversations.cpython-314.pyc,,
+openai/resources/conversations/__pycache__/items.cpython-314.pyc,,
+openai/resources/conversations/api.md,sha256=aja7OOHpcYKeuoZV_lBWBT950-4tdVVOf_M2GwRXv3k,3047
+openai/resources/conversations/conversations.py,sha256=-g37CLsQKKaD8Fu1fH9GTu-j5xWgEDPLBSDuqfxOw3A,19914
+openai/resources/conversations/items.py,sha256=6IrWLOdFLDZnWnF5T810y4Yf1V-t430rFfQLCgkt99c,24676
+openai/resources/embeddings.py,sha256=qbwgBd5PtAPdL0yDRLJq2vv33u-CwS3IsSOABFCsPoM,12648
openai/resources/evals/__init__.py,sha256=DXhYb6mCKKY2bDdS3s4raH1SvwPUyaBFvdHgPEbwRWY,771
-openai/resources/evals/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/evals/__pycache__/evals.cpython-310.pyc,,
-openai/resources/evals/evals.py,sha256=goQ9ek2_xI34SG7GkwpqKhXO2hZouq5bxS26EejY-cI,25904
+openai/resources/evals/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/evals/__pycache__/evals.cpython-314.pyc,,
+openai/resources/evals/evals.py,sha256=4NfCTgLa1uGrEQSCk2fwxACou5bSr40lUKHHxOsgmEE,26571
openai/resources/evals/runs/__init__.py,sha256=7EtKZ43tGlmAOYyDdyFXy80tk2X8AmXb5taTWRRXBXE,850
-openai/resources/evals/runs/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/evals/runs/__pycache__/output_items.cpython-310.pyc,,
-openai/resources/evals/runs/__pycache__/runs.cpython-310.pyc,,
-openai/resources/evals/runs/output_items.py,sha256=7pcGpGc61Df4jQIgxRYLX-27wz_8qc0Ux-ni_EfVvwA,12530
-openai/resources/evals/runs/runs.py,sha256=228Vf9S8_dz0tZAWCh2ehECQYg_Z4JXNV5MRuvUtDh4,24359
-openai/resources/files.py,sha256=kCQlINr3EBeknrv9tc_SluYWhlRHx5W1oDh4bH8XMqc,30610
+openai/resources/evals/runs/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/evals/runs/__pycache__/output_items.cpython-314.pyc,,
+openai/resources/evals/runs/__pycache__/runs.cpython-314.pyc,,
+openai/resources/evals/runs/output_items.py,sha256=Bom9UpNSimUc17yGyDZv8lhYOxjVGiFePCYjJ9b9IM0,13061
+openai/resources/evals/runs/runs.py,sha256=TjiZLPwIUpp7rzSXJxIXYGHrYyqlWNmxPtMHgq1aOac,25240
+openai/resources/files.py,sha256=M5r6k5baiyfu3G8tC6MCeCjCWcW56zUuVEwz7uWlD1o,31113
openai/resources/fine_tuning/__init__.py,sha256=RQPC5QfqE-ByhRQbJK-j7ooUrkBO9s9bKt5xkzOL8ls,1597
-openai/resources/fine_tuning/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-310.pyc,,
+openai/resources/fine_tuning/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-314.pyc,,
openai/resources/fine_tuning/alpha/__init__.py,sha256=QKAYZscx1Fw3GLD8cVdZAYG9L_i6MnPGeifn8GgcztU,810
-openai/resources/fine_tuning/alpha/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/fine_tuning/alpha/__pycache__/alpha.cpython-310.pyc,,
-openai/resources/fine_tuning/alpha/__pycache__/graders.cpython-310.pyc,,
-openai/resources/fine_tuning/alpha/alpha.py,sha256=P-zLOHpI-Aa0jUUWspkanL7WpUtfjwIGDH8KTGDNeHY,3274
-openai/resources/fine_tuning/alpha/graders.py,sha256=TA39PsdXWjxsts6p_UjPhyTwE4a1O7nQOkUC0V2ZHbU,10758
+openai/resources/fine_tuning/alpha/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/fine_tuning/alpha/__pycache__/alpha.cpython-314.pyc,,
+openai/resources/fine_tuning/alpha/__pycache__/graders.cpython-314.pyc,,
+openai/resources/fine_tuning/alpha/alpha.py,sha256=V_dcJVRnCQllhZTHf62OchMII2lUiuHMy11Ie6ll_ls,3802
+openai/resources/fine_tuning/alpha/graders.py,sha256=IgsyDfOBOG-qK1TkJ5Zjroh4myggXXKDYTCWM5FxBj0,10928
openai/resources/fine_tuning/checkpoints/__init__.py,sha256=rvsbut5FCQNAr-VjvL-14GFT3Tld49FlFuBJDpfxBug,940
-openai/resources/fine_tuning/checkpoints/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/fine_tuning/checkpoints/__pycache__/checkpoints.cpython-310.pyc,,
-openai/resources/fine_tuning/checkpoints/__pycache__/permissions.cpython-310.pyc,,
-openai/resources/fine_tuning/checkpoints/checkpoints.py,sha256=njpz496JifeZ8RXjoYUb1Tj9tBItuXRxGJHW2jrrfwo,3606
-openai/resources/fine_tuning/checkpoints/permissions.py,sha256=A9SfSQk7o0gbqhu2NMZTW53Tq5c3zbBDSgL_0K0t1WQ,17103
-openai/resources/fine_tuning/fine_tuning.py,sha256=UL4MXoUqEnbSZ5e4dnbUPTtd4tE-1p2L7Hh_0CQ_0s0,5410
+openai/resources/fine_tuning/checkpoints/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/fine_tuning/checkpoints/__pycache__/checkpoints.cpython-314.pyc,,
+openai/resources/fine_tuning/checkpoints/__pycache__/permissions.cpython-314.pyc,,
+openai/resources/fine_tuning/checkpoints/checkpoints.py,sha256=v6s1-CcegIffX3KL2hY44iTzQR_xVAvwysqd1Xd2kYo,4134
+openai/resources/fine_tuning/checkpoints/permissions.py,sha256=232H7x9binOq579MjNMypA5ZTYDrZRQiwDtXFc1AUN8,24721
+openai/resources/fine_tuning/fine_tuning.py,sha256=7uHBPvrGerktNx4578OwE2e-XkeFlGzWvSu1MlfThQs,5938
openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849
-openai/resources/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/fine_tuning/jobs/__pycache__/checkpoints.cpython-310.pyc,,
-openai/resources/fine_tuning/jobs/__pycache__/jobs.cpython-310.pyc,,
-openai/resources/fine_tuning/jobs/checkpoints.py,sha256=-QQNOZJplnCJyHCFTFO-DMN-AWc1Dp8p9Hifffgz5a0,7442
-openai/resources/fine_tuning/jobs/jobs.py,sha256=jIXuCijf7v9ufH3SqgWBrQAFg5uqPKAuyXgNDmLEXK4,37033
-openai/resources/images.py,sha256=CbfPekwgHdib4TZH7Wj3nKd_JaUggcX4ot9wjVjrLKI,97665
-openai/resources/models.py,sha256=1PDMpmdtaGiNHZNWPL-sI_I-SDOjuK-yfm2oq7mKiGI,11232
-openai/resources/moderations.py,sha256=8BWoTw8QHsSUbgByBlLxHHYEeeozFsY8n3j-ah13YdI,7808
+openai/resources/fine_tuning/jobs/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/fine_tuning/jobs/__pycache__/checkpoints.cpython-314.pyc,,
+openai/resources/fine_tuning/jobs/__pycache__/jobs.cpython-314.pyc,,
+openai/resources/fine_tuning/jobs/checkpoints.py,sha256=l4AtFgCez2MpZyXCUaQmTv0VOC8X0Cy6aSRH02tHlvQ,7733
+openai/resources/fine_tuning/jobs/jobs.py,sha256=6BK9ajcq8kkNad4xLMswFnk4vrhry2raWadG76jA6xo,38276
+openai/resources/images.py,sha256=nutRWNXXmDLhLb_k_iFQI99cxV9uj94r5F_fD45CRtg,97447
+openai/resources/models.py,sha256=yyn7S0RApgi4937OZLgcIhYYXLt4RKDgPeJ_PoVzjOs,11515
+openai/resources/moderations.py,sha256=LClm7t2Yvv9KrZNap8Oh3dluSjcCxdCYrJ2CBnx9f4Q,8018
openai/resources/realtime/__init__.py,sha256=5v7pt2NQKz1j-X1z4bTqupmE3G8O5_G4PYCyw3F3-eo,1269
-openai/resources/realtime/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/realtime/__pycache__/calls.cpython-310.pyc,,
-openai/resources/realtime/__pycache__/client_secrets.cpython-310.pyc,,
-openai/resources/realtime/__pycache__/realtime.cpython-310.pyc,,
-openai/resources/realtime/calls.py,sha256=DIwWlEkd_6IYYnXptYeiuBpEJpP1cDrGoSwXc-G-s9A,33417
-openai/resources/realtime/client_secrets.py,sha256=Z8NmSg2GGN3we3w89Un26jWp5OO9lxOi8oS4lSYMrUg,7700
-openai/resources/realtime/realtime.py,sha256=ISKvDwqzbHeBM8OHlOW3L7oO1NwyrUM5joj_g9EH2aY,44876
+openai/resources/realtime/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/realtime/__pycache__/calls.cpython-314.pyc,,
+openai/resources/realtime/__pycache__/client_secrets.cpython-314.pyc,,
+openai/resources/realtime/__pycache__/realtime.cpython-314.pyc,,
+openai/resources/realtime/api.md,sha256=xbP01YUJLOVUDqHXAIJiDuo7NTLsjpn8dqCltFf6zB4,5162
+openai/resources/realtime/calls.py,sha256=e-eg6WEz6VVa-tEJsEw1BUsdQD6W4cUI7D1l2h7cgh4,33818
+openai/resources/realtime/client_secrets.py,sha256=T0DI0ydTy136wN3yRB4JqPwfPD95Sh2_SuMdBDgSFAc,9262
+openai/resources/realtime/realtime.py,sha256=Gdpco58Ieg44OpYIY1pQtiX9r7zpnGB_YniUjyERZVc,45108
openai/resources/responses/__init__.py,sha256=9LkjQomOIh6B5Qg1HbdCgjMRoCzIBzyRaYNyt3moA38,1322
-openai/resources/responses/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/responses/__pycache__/input_items.cpython-310.pyc,,
-openai/resources/responses/__pycache__/input_tokens.cpython-310.pyc,,
-openai/resources/responses/__pycache__/responses.cpython-310.pyc,,
-openai/resources/responses/input_items.py,sha256=tzg31yUowcCMqU32TBHI18YzRjqNs_EGwKdpSU8bSTs,8774
-openai/resources/responses/input_tokens.py,sha256=cQvZuYjdhAf6fcmXsDavpuhA-LBjmpQkm2KgaOD5iSk,14208
-openai/resources/responses/responses.py,sha256=2O-bCqpqw952DXPcngyh11C8f-B9BmpVAQpxe45YXGQ,173163
+openai/resources/responses/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/responses/__pycache__/input_items.cpython-314.pyc,,
+openai/resources/responses/__pycache__/input_tokens.cpython-314.pyc,,
+openai/resources/responses/__pycache__/responses.cpython-314.pyc,,
+openai/resources/responses/api.md,sha256=4LSDAG15GoAUaSDgw7JzxPR15dDRC3zn7skIfLGFZ64,6744
+openai/resources/responses/input_items.py,sha256=BQG1coS3jCfOwTJlxuwp-YDKwsKvRupGWGDdLoJeMcU,8867
+openai/resources/responses/input_tokens.py,sha256=-WrtgqtgssS6aN5gC4M48HxWubvlohqs-otfgihPO4Y,14466
+openai/resources/responses/responses.py,sha256=bugWEOEcQXLWbxNyy1MfMFWbyNqJHIbyS366gtMoaLc,199013
+openai/resources/skills/__init__.py,sha256=O6Jcz0qgE-Eyn0cH2qF2y1uSHK7Wne3N2v8UR30aB7Y,1203
+openai/resources/skills/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/skills/__pycache__/content.cpython-314.pyc,,
+openai/resources/skills/__pycache__/skills.cpython-314.pyc,,
+openai/resources/skills/content.py,sha256=GSdRhJ3qPfvFYDDnNCgVyxV9pRZw2Z9QnkiwcuDGCMM,6231
+openai/resources/skills/skills.py,sha256=ABfQTRhgck9ioALp_N30HDEMU72TDfiO_mKS43udDJg,22851
+openai/resources/skills/versions/__init__.py,sha256=22fGWbCZg6G2wQzG9qdq0h2wWccFCurCYdSMnXhYWBg,849
+openai/resources/skills/versions/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/skills/versions/__pycache__/content.cpython-314.pyc,,
+openai/resources/skills/versions/__pycache__/versions.cpython-314.pyc,,
+openai/resources/skills/versions/content.py,sha256=rbg8jBbe0DbJi4J1RqqRRsWOoB4gjA4ZCKOuqO5WhWU,6688
+openai/resources/skills/versions/versions.py,sha256=kWgrnoaX28vnbQWNezEtZOmSNa6F800yfXkDasY_ILc,20611
openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810
-openai/resources/uploads/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/uploads/__pycache__/parts.cpython-310.pyc,,
-openai/resources/uploads/__pycache__/uploads.cpython-310.pyc,,
-openai/resources/uploads/parts.py,sha256=2Vov0reg5wdOSGSJ7hhs9pqsIofkhqjoUoE_AgXHLZM,8121
-openai/resources/uploads/uploads.py,sha256=OeCCAEK1W1ICTfraOBbYRrBclnzroEOaAOpuT05Fyvg,25443
+openai/resources/uploads/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/uploads/__pycache__/parts.cpython-314.pyc,,
+openai/resources/uploads/__pycache__/uploads.cpython-314.pyc,,
+openai/resources/uploads/parts.py,sha256=5JAamxcjxsCvWlurOdp1rCBSTWNXVxIfOjpAMce-Ko8,8334
+openai/resources/uploads/uploads.py,sha256=ymtTB_fcJ3QgxQrjpIEVUbAxOnkLibpdMUJ57tqFuzI,26664
openai/resources/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296
-openai/resources/vector_stores/__pycache__/__init__.cpython-310.pyc,,
-openai/resources/vector_stores/__pycache__/file_batches.cpython-310.pyc,,
-openai/resources/vector_stores/__pycache__/files.cpython-310.pyc,,
-openai/resources/vector_stores/__pycache__/vector_stores.cpython-310.pyc,,
-openai/resources/vector_stores/file_batches.py,sha256=eSrMBeNf-bKsHu40HHyKkejo8BNoFAE32LnG119FxIs,34196
-openai/resources/vector_stores/files.py,sha256=xJStwcbKIzVzqIXK7G-Mfll61wbt154SObua945XXEI,39703
-openai/resources/vector_stores/vector_stores.py,sha256=L1vifq5tiw7EnBuxYREA_VPMzyRcePiQG4QLQL5vd18,35451
-openai/resources/videos.py,sha256=FGyEsn-h57V6guzhUEZuls5JWx0RzFXXRn74-Nvlx88,32017
-openai/resources/webhooks.py,sha256=wz3filqxxUEhhW5RSa-1LiN10MzafKXJPl5-Wb1mCew,7820
-openai/types/__init__.py,sha256=S82oA2tM4b7YnQ7R8xFqxViIUOy5bFzoRUAFA2efN2I,7630
-openai/types/__pycache__/__init__.cpython-310.pyc,,
-openai/types/__pycache__/audio_model.cpython-310.pyc,,
-openai/types/__pycache__/audio_response_format.cpython-310.pyc,,
-openai/types/__pycache__/auto_file_chunking_strategy_param.cpython-310.pyc,,
-openai/types/__pycache__/batch.cpython-310.pyc,,
-openai/types/__pycache__/batch_create_params.cpython-310.pyc,,
-openai/types/__pycache__/batch_error.cpython-310.pyc,,
-openai/types/__pycache__/batch_list_params.cpython-310.pyc,,
-openai/types/__pycache__/batch_request_counts.cpython-310.pyc,,
-openai/types/__pycache__/batch_usage.cpython-310.pyc,,
-openai/types/__pycache__/chat_model.cpython-310.pyc,,
-openai/types/__pycache__/completion.cpython-310.pyc,,
-openai/types/__pycache__/completion_choice.cpython-310.pyc,,
-openai/types/__pycache__/completion_create_params.cpython-310.pyc,,
-openai/types/__pycache__/completion_usage.cpython-310.pyc,,
-openai/types/__pycache__/container_create_params.cpython-310.pyc,,
-openai/types/__pycache__/container_create_response.cpython-310.pyc,,
-openai/types/__pycache__/container_list_params.cpython-310.pyc,,
-openai/types/__pycache__/container_list_response.cpython-310.pyc,,
-openai/types/__pycache__/container_retrieve_response.cpython-310.pyc,,
-openai/types/__pycache__/create_embedding_response.cpython-310.pyc,,
-openai/types/__pycache__/embedding.cpython-310.pyc,,
-openai/types/__pycache__/embedding_create_params.cpython-310.pyc,,
-openai/types/__pycache__/embedding_model.cpython-310.pyc,,
-openai/types/__pycache__/eval_create_params.cpython-310.pyc,,
-openai/types/__pycache__/eval_create_response.cpython-310.pyc,,
-openai/types/__pycache__/eval_custom_data_source_config.cpython-310.pyc,,
-openai/types/__pycache__/eval_delete_response.cpython-310.pyc,,
-openai/types/__pycache__/eval_list_params.cpython-310.pyc,,
-openai/types/__pycache__/eval_list_response.cpython-310.pyc,,
-openai/types/__pycache__/eval_retrieve_response.cpython-310.pyc,,
-openai/types/__pycache__/eval_stored_completions_data_source_config.cpython-310.pyc,,
-openai/types/__pycache__/eval_update_params.cpython-310.pyc,,
-openai/types/__pycache__/eval_update_response.cpython-310.pyc,,
-openai/types/__pycache__/file_chunking_strategy.cpython-310.pyc,,
-openai/types/__pycache__/file_chunking_strategy_param.cpython-310.pyc,,
-openai/types/__pycache__/file_content.cpython-310.pyc,,
-openai/types/__pycache__/file_create_params.cpython-310.pyc,,
-openai/types/__pycache__/file_deleted.cpython-310.pyc,,
-openai/types/__pycache__/file_list_params.cpython-310.pyc,,
-openai/types/__pycache__/file_object.cpython-310.pyc,,
-openai/types/__pycache__/file_purpose.cpython-310.pyc,,
-openai/types/__pycache__/image.cpython-310.pyc,,
-openai/types/__pycache__/image_create_variation_params.cpython-310.pyc,,
-openai/types/__pycache__/image_edit_completed_event.cpython-310.pyc,,
-openai/types/__pycache__/image_edit_params.cpython-310.pyc,,
-openai/types/__pycache__/image_edit_partial_image_event.cpython-310.pyc,,
-openai/types/__pycache__/image_edit_stream_event.cpython-310.pyc,,
-openai/types/__pycache__/image_gen_completed_event.cpython-310.pyc,,
-openai/types/__pycache__/image_gen_partial_image_event.cpython-310.pyc,,
-openai/types/__pycache__/image_gen_stream_event.cpython-310.pyc,,
-openai/types/__pycache__/image_generate_params.cpython-310.pyc,,
-openai/types/__pycache__/image_model.cpython-310.pyc,,
-openai/types/__pycache__/images_response.cpython-310.pyc,,
-openai/types/__pycache__/model.cpython-310.pyc,,
-openai/types/__pycache__/model_deleted.cpython-310.pyc,,
-openai/types/__pycache__/moderation.cpython-310.pyc,,
-openai/types/__pycache__/moderation_create_params.cpython-310.pyc,,
-openai/types/__pycache__/moderation_create_response.cpython-310.pyc,,
-openai/types/__pycache__/moderation_image_url_input_param.cpython-310.pyc,,
-openai/types/__pycache__/moderation_model.cpython-310.pyc,,
-openai/types/__pycache__/moderation_multi_modal_input_param.cpython-310.pyc,,
-openai/types/__pycache__/moderation_text_input_param.cpython-310.pyc,,
-openai/types/__pycache__/other_file_chunking_strategy_object.cpython-310.pyc,,
-openai/types/__pycache__/static_file_chunking_strategy.cpython-310.pyc,,
-openai/types/__pycache__/static_file_chunking_strategy_object.cpython-310.pyc,,
-openai/types/__pycache__/static_file_chunking_strategy_object_param.cpython-310.pyc,,
-openai/types/__pycache__/static_file_chunking_strategy_param.cpython-310.pyc,,
-openai/types/__pycache__/upload.cpython-310.pyc,,
-openai/types/__pycache__/upload_complete_params.cpython-310.pyc,,
-openai/types/__pycache__/upload_create_params.cpython-310.pyc,,
-openai/types/__pycache__/vector_store.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_create_params.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_deleted.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_list_params.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_search_params.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_search_response.cpython-310.pyc,,
-openai/types/__pycache__/vector_store_update_params.cpython-310.pyc,,
-openai/types/__pycache__/video.cpython-310.pyc,,
-openai/types/__pycache__/video_create_error.cpython-310.pyc,,
-openai/types/__pycache__/video_create_params.cpython-310.pyc,,
-openai/types/__pycache__/video_delete_response.cpython-310.pyc,,
-openai/types/__pycache__/video_download_content_params.cpython-310.pyc,,
-openai/types/__pycache__/video_list_params.cpython-310.pyc,,
-openai/types/__pycache__/video_model.cpython-310.pyc,,
-openai/types/__pycache__/video_model_param.cpython-310.pyc,,
-openai/types/__pycache__/video_remix_params.cpython-310.pyc,,
-openai/types/__pycache__/video_seconds.cpython-310.pyc,,
-openai/types/__pycache__/video_size.cpython-310.pyc,,
-openai/types/__pycache__/websocket_connection_options.cpython-310.pyc,,
+openai/resources/vector_stores/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/vector_stores/__pycache__/file_batches.cpython-314.pyc,,
+openai/resources/vector_stores/__pycache__/files.cpython-314.pyc,,
+openai/resources/vector_stores/__pycache__/vector_stores.cpython-314.pyc,,
+openai/resources/vector_stores/file_batches.py,sha256=P9I8PEoqaQAjS9CNBW2Mq9VkhakbMEGELUGR3BtM5hU,36893
+openai/resources/vector_stores/files.py,sha256=qJArcuouqR5hBBOM_db7BqVgpILSYJTJ_5CENZDoA4o,41956
+openai/resources/vector_stores/vector_stores.py,sha256=G3AfkVRoxM4rCZ3jWlBInIm28U0VyG5mPM3qQS4mvnc,35842
+openai/resources/videos.py,sha256=Ae1drtJKDiXJ1K9AeVz7TZFgxsp11FAXmQhqiHsopo0,49967
+openai/resources/webhooks/__init__.py,sha256=B6Ku4PhIQmf4VSVnpeTqqPXdrB0bgL5UklqrJVcPyj4,292
+openai/resources/webhooks/__pycache__/__init__.cpython-314.pyc,,
+openai/resources/webhooks/__pycache__/webhooks.cpython-314.pyc,,
+openai/resources/webhooks/api.md,sha256=QBUWdJuTiC_NNhbr7U4Zmvc09x6CX5PEiyhTizX2yQU,610
+openai/resources/webhooks/webhooks.py,sha256=-KsfTOl5QrwLfwzwTmLQOg6DKSzbh9sVdaSyVgYYoNA,7826
+openai/types/__init__.py,sha256=trmo4jyPKTH49WURi0llaKiKxUZqzOddlCT5Od-5Hp8,8583
+openai/types/__pycache__/__init__.cpython-314.pyc,,
+openai/types/__pycache__/audio_model.cpython-314.pyc,,
+openai/types/__pycache__/audio_response_format.cpython-314.pyc,,
+openai/types/__pycache__/auto_file_chunking_strategy_param.cpython-314.pyc,,
+openai/types/__pycache__/batch.cpython-314.pyc,,
+openai/types/__pycache__/batch_create_params.cpython-314.pyc,,
+openai/types/__pycache__/batch_error.cpython-314.pyc,,
+openai/types/__pycache__/batch_list_params.cpython-314.pyc,,
+openai/types/__pycache__/batch_request_counts.cpython-314.pyc,,
+openai/types/__pycache__/batch_usage.cpython-314.pyc,,
+openai/types/__pycache__/chat_model.cpython-314.pyc,,
+openai/types/__pycache__/completion.cpython-314.pyc,,
+openai/types/__pycache__/completion_choice.cpython-314.pyc,,
+openai/types/__pycache__/completion_create_params.cpython-314.pyc,,
+openai/types/__pycache__/completion_usage.cpython-314.pyc,,
+openai/types/__pycache__/container_create_params.cpython-314.pyc,,
+openai/types/__pycache__/container_create_response.cpython-314.pyc,,
+openai/types/__pycache__/container_list_params.cpython-314.pyc,,
+openai/types/__pycache__/container_list_response.cpython-314.pyc,,
+openai/types/__pycache__/container_retrieve_response.cpython-314.pyc,,
+openai/types/__pycache__/create_embedding_response.cpython-314.pyc,,
+openai/types/__pycache__/deleted_skill.cpython-314.pyc,,
+openai/types/__pycache__/embedding.cpython-314.pyc,,
+openai/types/__pycache__/embedding_create_params.cpython-314.pyc,,
+openai/types/__pycache__/embedding_model.cpython-314.pyc,,
+openai/types/__pycache__/eval_create_params.cpython-314.pyc,,
+openai/types/__pycache__/eval_create_response.cpython-314.pyc,,
+openai/types/__pycache__/eval_custom_data_source_config.cpython-314.pyc,,
+openai/types/__pycache__/eval_delete_response.cpython-314.pyc,,
+openai/types/__pycache__/eval_list_params.cpython-314.pyc,,
+openai/types/__pycache__/eval_list_response.cpython-314.pyc,,
+openai/types/__pycache__/eval_retrieve_response.cpython-314.pyc,,
+openai/types/__pycache__/eval_stored_completions_data_source_config.cpython-314.pyc,,
+openai/types/__pycache__/eval_update_params.cpython-314.pyc,,
+openai/types/__pycache__/eval_update_response.cpython-314.pyc,,
+openai/types/__pycache__/file_chunking_strategy.cpython-314.pyc,,
+openai/types/__pycache__/file_chunking_strategy_param.cpython-314.pyc,,
+openai/types/__pycache__/file_content.cpython-314.pyc,,
+openai/types/__pycache__/file_create_params.cpython-314.pyc,,
+openai/types/__pycache__/file_deleted.cpython-314.pyc,,
+openai/types/__pycache__/file_list_params.cpython-314.pyc,,
+openai/types/__pycache__/file_object.cpython-314.pyc,,
+openai/types/__pycache__/file_purpose.cpython-314.pyc,,
+openai/types/__pycache__/image.cpython-314.pyc,,
+openai/types/__pycache__/image_create_variation_params.cpython-314.pyc,,
+openai/types/__pycache__/image_edit_completed_event.cpython-314.pyc,,
+openai/types/__pycache__/image_edit_params.cpython-314.pyc,,
+openai/types/__pycache__/image_edit_partial_image_event.cpython-314.pyc,,
+openai/types/__pycache__/image_edit_stream_event.cpython-314.pyc,,
+openai/types/__pycache__/image_gen_completed_event.cpython-314.pyc,,
+openai/types/__pycache__/image_gen_partial_image_event.cpython-314.pyc,,
+openai/types/__pycache__/image_gen_stream_event.cpython-314.pyc,,
+openai/types/__pycache__/image_generate_params.cpython-314.pyc,,
+openai/types/__pycache__/image_input_reference_param.cpython-314.pyc,,
+openai/types/__pycache__/image_model.cpython-314.pyc,,
+openai/types/__pycache__/images_response.cpython-314.pyc,,
+openai/types/__pycache__/model.cpython-314.pyc,,
+openai/types/__pycache__/model_deleted.cpython-314.pyc,,
+openai/types/__pycache__/moderation.cpython-314.pyc,,
+openai/types/__pycache__/moderation_create_params.cpython-314.pyc,,
+openai/types/__pycache__/moderation_create_response.cpython-314.pyc,,
+openai/types/__pycache__/moderation_image_url_input_param.cpython-314.pyc,,
+openai/types/__pycache__/moderation_model.cpython-314.pyc,,
+openai/types/__pycache__/moderation_multi_modal_input_param.cpython-314.pyc,,
+openai/types/__pycache__/moderation_text_input_param.cpython-314.pyc,,
+openai/types/__pycache__/other_file_chunking_strategy_object.cpython-314.pyc,,
+openai/types/__pycache__/skill.cpython-314.pyc,,
+openai/types/__pycache__/skill_create_params.cpython-314.pyc,,
+openai/types/__pycache__/skill_list.cpython-314.pyc,,
+openai/types/__pycache__/skill_list_params.cpython-314.pyc,,
+openai/types/__pycache__/skill_update_params.cpython-314.pyc,,
+openai/types/__pycache__/static_file_chunking_strategy.cpython-314.pyc,,
+openai/types/__pycache__/static_file_chunking_strategy_object.cpython-314.pyc,,
+openai/types/__pycache__/static_file_chunking_strategy_object_param.cpython-314.pyc,,
+openai/types/__pycache__/static_file_chunking_strategy_param.cpython-314.pyc,,
+openai/types/__pycache__/upload.cpython-314.pyc,,
+openai/types/__pycache__/upload_complete_params.cpython-314.pyc,,
+openai/types/__pycache__/upload_create_params.cpython-314.pyc,,
+openai/types/__pycache__/vector_store.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_create_params.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_deleted.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_list_params.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_search_params.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_search_response.cpython-314.pyc,,
+openai/types/__pycache__/vector_store_update_params.cpython-314.pyc,,
+openai/types/__pycache__/video.cpython-314.pyc,,
+openai/types/__pycache__/video_create_character_params.cpython-314.pyc,,
+openai/types/__pycache__/video_create_character_response.cpython-314.pyc,,
+openai/types/__pycache__/video_create_error.cpython-314.pyc,,
+openai/types/__pycache__/video_create_params.cpython-314.pyc,,
+openai/types/__pycache__/video_delete_response.cpython-314.pyc,,
+openai/types/__pycache__/video_download_content_params.cpython-314.pyc,,
+openai/types/__pycache__/video_edit_params.cpython-314.pyc,,
+openai/types/__pycache__/video_extend_params.cpython-314.pyc,,
+openai/types/__pycache__/video_get_character_response.cpython-314.pyc,,
+openai/types/__pycache__/video_list_params.cpython-314.pyc,,
+openai/types/__pycache__/video_model.cpython-314.pyc,,
+openai/types/__pycache__/video_model_param.cpython-314.pyc,,
+openai/types/__pycache__/video_remix_params.cpython-314.pyc,,
+openai/types/__pycache__/video_seconds.cpython-314.pyc,,
+openai/types/__pycache__/video_size.cpython-314.pyc,,
+openai/types/__pycache__/websocket_connection_options.cpython-314.pyc,,
openai/types/audio/__init__.py,sha256=8DwArrrSRwIemWLhWLItaV3F_EgXgtVPSu4yUIf8iyM,1723
-openai/types/audio/__pycache__/__init__.cpython-310.pyc,,
-openai/types/audio/__pycache__/speech_create_params.cpython-310.pyc,,
-openai/types/audio/__pycache__/speech_model.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_create_params.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_create_response.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_diarized.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_diarized_segment.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_include.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_segment.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_stream_event.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_text_delta_event.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_text_done_event.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_text_segment_event.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_verbose.cpython-310.pyc,,
-openai/types/audio/__pycache__/transcription_word.cpython-310.pyc,,
-openai/types/audio/__pycache__/translation.cpython-310.pyc,,
-openai/types/audio/__pycache__/translation_create_params.cpython-310.pyc,,
-openai/types/audio/__pycache__/translation_create_response.cpython-310.pyc,,
-openai/types/audio/__pycache__/translation_verbose.cpython-310.pyc,,
-openai/types/audio/speech_create_params.py,sha256=HReviZr1BsD038PCmbw_NlhJqb-5V7IN4ezr1iPfqnw,1838
+openai/types/audio/__pycache__/__init__.cpython-314.pyc,,
+openai/types/audio/__pycache__/speech_create_params.cpython-314.pyc,,
+openai/types/audio/__pycache__/speech_model.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_create_params.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_create_response.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_diarized.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_diarized_segment.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_include.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_segment.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_stream_event.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_text_delta_event.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_text_done_event.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_text_segment_event.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_verbose.cpython-314.pyc,,
+openai/types/audio/__pycache__/transcription_word.cpython-314.pyc,,
+openai/types/audio/__pycache__/translation.cpython-314.pyc,,
+openai/types/audio/__pycache__/translation_create_params.cpython-314.pyc,,
+openai/types/audio/__pycache__/translation_create_response.cpython-314.pyc,,
+openai/types/audio/__pycache__/translation_verbose.cpython-314.pyc,,
+openai/types/audio/speech_create_params.py,sha256=4e8Zvljk1x5XTFVUpJOlB4S6Gt9fjDzFgjPcVYqlkvs,2144
openai/types/audio/speech_model.py,sha256=swuN1lLQxGSuYj_X_OeQuUx5abIUpEfZZqNjDES7kU0,267
openai/types/audio/transcription.py,sha256=7q7nfUTe-GDMpicrWOugFU4bSBIyqhQkndnKCTD1X2M,2407
openai/types/audio/transcription_create_params.py,sha256=Mxl-LoHCiEkO41fGUcsSmN35fOtNWXYDgMa-nP90DNg,6977
@@ -456,43 +490,43 @@ openai/types/audio_model.py,sha256=U5nv4NKBd3A5k3mDw0BmK0PiSc0VQADOkirXnKsC-eo,3
openai/types/audio_response_format.py,sha256=67QSPDpT9_yYhxFYYd15N3nukwKrHJ7f8pvVQiVOQuk,276
openai/types/auto_file_chunking_strategy_param.py,sha256=wvFMNI7RvIPLBoGZpdRMgVa-VlQkovurGi1aypefqwg,495
openai/types/batch.py,sha256=o8ADxSZQe7F_1VTGSC5_RDUajU03SbWvN1wPiH98dVQ,3517
-openai/types/batch_create_params.py,sha256=XDHXPpI1PFDpTr3HXYecAgYPA8XAckyBY0xbMKnb3jo,2627
+openai/types/batch_create_params.py,sha256=4BruiTfBF9RfDIVK4gTfZcDDoUI4G7FxUusINvWrv70,2858
openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622
openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705
openai/types/batch_request_counts.py,sha256=iIPVKk4s5FcHlfLvLYetuXb_RxGPUvCGhRGYTryszV8,479
openai/types/batch_usage.py,sha256=myAsCyOT7xWiZ_GDznGptUHeJVYx_Mg7c1cEtBMKmEc,1260
openai/types/beta/__init__.py,sha256=kl4wEKnYF_urBLZV6wZ6ZCTwaLhlNYSOfFR64jO-Adc,2393
-openai/types/beta/__pycache__/__init__.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_create_params.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_deleted.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_list_params.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_response_format_option.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_response_format_option_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_stream_event.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice_function.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice_function_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice_option.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice_option_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_choice_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_tool_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/assistant_update_params.cpython-310.pyc,,
-openai/types/beta/__pycache__/chatkit_workflow.cpython-310.pyc,,
-openai/types/beta/__pycache__/code_interpreter_tool.cpython-310.pyc,,
-openai/types/beta/__pycache__/code_interpreter_tool_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/file_search_tool.cpython-310.pyc,,
-openai/types/beta/__pycache__/file_search_tool_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/function_tool.cpython-310.pyc,,
-openai/types/beta/__pycache__/function_tool_param.cpython-310.pyc,,
-openai/types/beta/__pycache__/thread.cpython-310.pyc,,
-openai/types/beta/__pycache__/thread_create_and_run_params.cpython-310.pyc,,
-openai/types/beta/__pycache__/thread_create_params.cpython-310.pyc,,
-openai/types/beta/__pycache__/thread_deleted.cpython-310.pyc,,
-openai/types/beta/__pycache__/thread_update_params.cpython-310.pyc,,
+openai/types/beta/__pycache__/__init__.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_create_params.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_deleted.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_list_params.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_response_format_option.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_response_format_option_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_stream_event.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice_function.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice_function_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice_option.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice_option_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_choice_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_tool_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/assistant_update_params.cpython-314.pyc,,
+openai/types/beta/__pycache__/chatkit_workflow.cpython-314.pyc,,
+openai/types/beta/__pycache__/code_interpreter_tool.cpython-314.pyc,,
+openai/types/beta/__pycache__/code_interpreter_tool_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/file_search_tool.cpython-314.pyc,,
+openai/types/beta/__pycache__/file_search_tool_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/function_tool.cpython-314.pyc,,
+openai/types/beta/__pycache__/function_tool_param.cpython-314.pyc,,
+openai/types/beta/__pycache__/thread.cpython-314.pyc,,
+openai/types/beta/__pycache__/thread_create_and_run_params.cpython-314.pyc,,
+openai/types/beta/__pycache__/thread_create_params.cpython-314.pyc,,
+openai/types/beta/__pycache__/thread_deleted.cpython-314.pyc,,
+openai/types/beta/__pycache__/thread_update_params.cpython-314.pyc,,
openai/types/beta/assistant.py,sha256=K97cr0lg4fiZuLO6zNqIZVuUBjMFxtRtoszjyNI70DA,5394
-openai/types/beta/assistant_create_params.py,sha256=3q29vKotDs9f_oGs5HDmfBVBUYAW8dZqevw9GKaVH2g,8863
+openai/types/beta/assistant_create_params.py,sha256=0TrKE4rMWP9y2sE1VL9T9vSyDv9Ty6fvuo3OnzHiyLo,8991
openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301
openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222
openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561
@@ -508,31 +542,31 @@ openai/types/beta/assistant_tool_choice_param.py,sha256=vB7bLafOjDZ4Ww3GMT_CXzcp
openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501
openai/types/beta/assistant_update_params.py,sha256=sveL8Z489CV1SemVlQzGlNPjDy6JINfDLYoGgCFdrsg,7417
openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
-openai/types/beta/chat/__pycache__/__init__.cpython-310.pyc,,
+openai/types/beta/chat/__pycache__/__init__.cpython-314.pyc,,
openai/types/beta/chatkit/__init__.py,sha256=K4Q3JL1OSz8tRSJyUoReRkBKsksw5QZdBy4HBvuBjZ4,2116
-openai/types/beta/chatkit/__pycache__/__init__.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_automatic_thread_titling.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration_param.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_expires_after_param.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_file_upload.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_history.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_rate_limits.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_rate_limits_param.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_status.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chat_session_workflow_param.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_attachment.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_response_output_text.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_thread.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_thread_assistant_message_item.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_thread_item_list.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_thread_user_message_item.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/chatkit_widget_item.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/session_create_params.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/thread_delete_response.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/thread_list_items_params.cpython-310.pyc,,
-openai/types/beta/chatkit/__pycache__/thread_list_params.cpython-310.pyc,,
+openai/types/beta/chatkit/__pycache__/__init__.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_automatic_thread_titling.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration_param.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_expires_after_param.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_file_upload.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_history.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_rate_limits.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_rate_limits_param.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_status.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chat_session_workflow_param.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_attachment.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_response_output_text.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_thread.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_thread_assistant_message_item.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_thread_item_list.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_thread_user_message_item.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/chatkit_widget_item.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/session_create_params.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/thread_delete_response.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/thread_list_items_params.cpython-314.pyc,,
+openai/types/beta/chatkit/__pycache__/thread_list_params.cpython-314.pyc,,
openai/types/beta/chatkit/chat_session.py,sha256=NjVT5SQUWNroXynit10bwKsj8mntUSLouST46C9sBhM,1401
openai/types/beta/chatkit/chat_session_automatic_thread_titling.py,sha256=R0PpWfBT5VMZp--Xxn082UjWbha7U5K9lJeSa4Cbhuk,360
openai/types/beta/chatkit/chat_session_chatkit_configuration.py,sha256=-ailkIhSj6W2rHS3xPNc7XJ0duVG0fTmJaaVqqoE9RE,739
@@ -563,77 +597,77 @@ openai/types/beta/file_search_tool_param.py,sha256=LZYlCZoHFOzqQnE7y55Oq5k20R9S0
openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397
openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471
openai/types/beta/realtime/__init__.py,sha256=trJb-lqh3vHHMYdohrgiU2cHwReFZyw4cXM-Xj8Dwq8,7364
-openai/types/beta/realtime/__pycache__/__init__.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_created_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_content.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_content_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_create_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_create_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_created_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_delete_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_delete_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_deleted_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_truncate_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_truncate_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_truncated_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_with_reference.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/conversation_item_with_reference_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/error_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_committed_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/rate_limits_updated_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_client_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_client_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_connect_params.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_response.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_response_status.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_response_usage.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/realtime_server_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_audio_delta_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_audio_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_cancel_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_cancel_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_content_part_added_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_content_part_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_create_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_create_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_created_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_output_item_added_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_output_item_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_text_delta_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/response_text_done_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_create_params.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_create_response.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_created_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_update_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_update_event_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/session_updated_event.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/transcription_session.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/transcription_session_create_params.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/transcription_session_update.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/transcription_session_update_param.cpython-310.pyc,,
-openai/types/beta/realtime/__pycache__/transcription_session_updated_event.cpython-310.pyc,,
+openai/types/beta/realtime/__pycache__/__init__.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_created_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_content.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_content_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_create_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_create_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_created_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_delete_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_delete_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_deleted_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_truncate_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_truncate_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_truncated_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_with_reference.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/conversation_item_with_reference_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/error_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_committed_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/rate_limits_updated_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_client_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_client_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_connect_params.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_response.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_response_status.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_response_usage.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/realtime_server_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_audio_delta_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_audio_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_audio_transcript_delta_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_audio_transcript_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_cancel_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_cancel_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_content_part_added_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_content_part_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_create_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_create_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_created_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_function_call_arguments_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_output_item_added_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_output_item_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_text_delta_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/response_text_done_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_create_params.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_create_response.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_created_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_update_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_update_event_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/session_updated_event.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/transcription_session.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/transcription_session_create_params.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/transcription_session_update.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/transcription_session_update_param.cpython-314.pyc,,
+openai/types/beta/realtime/__pycache__/transcription_session_updated_event.cpython-314.pyc,,
openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752
openai/types/beta/realtime/conversation_item.py,sha256=eIFg9zl3qzEijcQZvCnkvVLpSZgvEdubasgxGsQuFM4,2327
openai/types/beta/realtime/conversation_item_content.py,sha256=KWZY8EUkjAi6K_IkWVjjrNZLG3KK2vGCy2_O30CEhzY,1050
@@ -705,54 +739,54 @@ openai/types/beta/realtime/transcription_session_update.py,sha256=YMP9OB9P5FaSwa
openai/types/beta/realtime/transcription_session_update_param.py,sha256=b99v4yKnB2lC_cnYGiaxKnQuHB4eUW-v3eKT2UDsamk,6453
openai/types/beta/realtime/transcription_session_updated_event.py,sha256=CKAS98QL7CuOVEWF6qGcC9qhTktdG2CPPJXbrW75GIM,833
openai/types/beta/thread.py,sha256=bVlpfXpyA_tApIDBNAWJCgusLEiE-7xR-ptcIq1obg4,2545
-openai/types/beta/thread_create_and_run_params.py,sha256=PYD9furmj02jdHoET7O9D4QZn9DArGzR0L_YZGFGmiA,15853
-openai/types/beta/thread_create_params.py,sha256=BKzfwUsYNcGuIZluOAdqpUqFvoQ2tAkAEpGC-zD9EsA,6994
+openai/types/beta/thread_create_and_run_params.py,sha256=4JQRzg8p4dNpsbGt2P479Uz_X_Q5O2-PZ2iDBxS03uM,15981
+openai/types/beta/thread_create_params.py,sha256=wfpOgxuEZhE81n2nWBU5uH5-kZscsMVqVuzwI0A6YYY,7122
openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292
openai/types/beta/thread_update_params.py,sha256=oPdEz_th3PNmWB9jMA3ZdkOPalaCfMEVminirac7Cas,2217
openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066
-openai/types/beta/threads/__pycache__/__init__.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/annotation.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/annotation_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/file_citation_annotation.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/file_citation_delta_annotation.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/file_path_annotation.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/file_path_delta_annotation.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file_content_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file_content_block_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file_delta_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_file_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url_content_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url_content_block_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url_delta_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/image_url_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_content.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_content_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_content_part_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_create_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_deleted.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_delta_event.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_list_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/message_update_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/refusal_content_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/refusal_delta_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run_create_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run_list_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run_status.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run_submit_tool_outputs_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/run_update_params.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/text.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/text_content_block.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/text_content_block_param.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/text_delta.cpython-310.pyc,,
-openai/types/beta/threads/__pycache__/text_delta_block.cpython-310.pyc,,
+openai/types/beta/threads/__pycache__/__init__.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/annotation.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/annotation_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/file_citation_annotation.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/file_citation_delta_annotation.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/file_path_annotation.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/file_path_delta_annotation.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file_content_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file_content_block_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file_delta_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_file_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url_content_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url_content_block_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url_delta_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/image_url_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_content.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_content_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_content_part_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_create_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_deleted.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_delta_event.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_list_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/message_update_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/refusal_content_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/refusal_delta_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run_create_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run_list_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run_status.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run_submit_tool_outputs_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/run_update_params.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/text.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/text_content_block.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/text_content_block_param.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/text_delta.cpython-314.pyc,,
+openai/types/beta/threads/__pycache__/text_delta_block.cpython-314.pyc,,
openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462
openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510
openai/types/beta/threads/file_citation_annotation.py,sha256=BNpkIVngBZKLi8Db-yrJP52Kz4Q3z4Zv4JPjGsyxHqo,819
@@ -791,27 +825,27 @@ openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4t
openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643
openai/types/beta/threads/run_update_params.py,sha256=sVjkl6ayjU75Tk8t69r6xgIg80OlTikyRdS0sa2Gavg,749
openai/types/beta/threads/runs/__init__.py,sha256=mg_roY9yL1bClJ8isizkQgHOAkN17iSdVr2m65iyBrs,1653
-openai/types/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/code_interpreter_logs.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/code_interpreter_output_image.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/file_search_tool_call.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/file_search_tool_call_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/function_tool_call.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/function_tool_call_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/message_creation_step_details.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/run_step.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/run_step_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/run_step_delta_event.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/run_step_delta_message_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/run_step_include.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/step_list_params.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/step_retrieve_params.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/tool_call.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/tool_call_delta.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/tool_call_delta_object.cpython-310.pyc,,
-openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-310.pyc,,
+openai/types/beta/threads/runs/__pycache__/__init__.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/code_interpreter_logs.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/code_interpreter_output_image.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/file_search_tool_call.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/file_search_tool_call_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/function_tool_call.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/function_tool_call_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/message_creation_step_details.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/run_step.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/run_step_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/run_step_delta_event.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/run_step_delta_message_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/run_step_include.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/step_list_params.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/step_retrieve_params.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/tool_call.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/tool_call_delta.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/tool_call_delta_object.cpython-314.pyc,,
+openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-314.pyc,,
openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=UhfSrLKN3NSIOTYGCZhJCv1s_d_8_gTdppFIbBPdgSk,564
openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613
openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=u8htH0HfF8Yxc4cBOr6QgBSS1Q465rKz5z0L0bYBz-s,2029
@@ -838,63 +872,63 @@ openai/types/beta/threads/text_content_block_param.py,sha256=PQ8yKhUp2REWA5opa-8
openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389
openai/types/beta/threads/text_delta_block.py,sha256=cyGoOBssDwxWn0YDKx8p665-v4AB9M3TWZUfgXgT_MM,503
openai/types/chat/__init__.py,sha256=wyA0EWb0utj19dX0tCeGh4Jg5GrO3TGjmfQkR9HVxxE,6102
-openai/types/chat/__pycache__/__init__.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_allowed_tool_choice_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_allowed_tools_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_audio.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_audio_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_chunk.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_image.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_text.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_custom_tool_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_deleted.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_developer_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_function_tool.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_function_tool_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_custom_tool_call.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_custom_tool_call_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_function_tool_call.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_function_tool_call_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_tool_call.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_tool_call_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_message_tool_call_union_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_modality.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_named_tool_choice_custom_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_named_tool_choice_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_prediction_content_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_role.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_store_message.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_stream_options_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_token_logprob.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_tool_choice_option_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_tool_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_tool_union_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-310.pyc,,
-openai/types/chat/__pycache__/completion_create_params.cpython-310.pyc,,
-openai/types/chat/__pycache__/completion_list_params.cpython-310.pyc,,
-openai/types/chat/__pycache__/completion_update_params.cpython-310.pyc,,
-openai/types/chat/__pycache__/parsed_chat_completion.cpython-310.pyc,,
-openai/types/chat/__pycache__/parsed_function_tool_call.cpython-310.pyc,,
+openai/types/chat/__pycache__/__init__.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_allowed_tool_choice_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_allowed_tools_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_audio.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_audio_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_chunk.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_image.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_text.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_custom_tool_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_deleted.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_developer_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_function_tool.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_function_tool_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_custom_tool_call.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_custom_tool_call_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_function_tool_call.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_function_tool_call_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_tool_call.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_tool_call_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_message_tool_call_union_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_modality.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_named_tool_choice_custom_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_named_tool_choice_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_prediction_content_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_role.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_store_message.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_stream_options_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_token_logprob.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_tool_choice_option_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_tool_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_tool_union_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-314.pyc,,
+openai/types/chat/__pycache__/completion_create_params.cpython-314.pyc,,
+openai/types/chat/__pycache__/completion_list_params.cpython-314.pyc,,
+openai/types/chat/__pycache__/completion_update_params.cpython-314.pyc,,
+openai/types/chat/__pycache__/parsed_chat_completion.cpython-314.pyc,,
+openai/types/chat/__pycache__/parsed_function_tool_call.cpython-314.pyc,,
openai/types/chat/chat_completion.py,sha256=rPTJBRPwYAvISXFRhupNZjBZtr6UEHRgIWBJkgJQj3o,3650
openai/types/chat/chat_completion_allowed_tool_choice_param.py,sha256=iBicIuMGQ8vj-LNdat4huVSoalJuWfG3d4qzFyomOK8,713
openai/types/chat/chat_completion_allowed_tools_param.py,sha256=Ya6LVup7cF69D0_vs-Xfk74L40i6_mz_0JSxVNxPpF0,1087
openai/types/chat/chat_completion_assistant_message_param.py,sha256=TxOgyqhTYK5Q5pusSKqgEE1qqjkprbUZb9zuzHBASI0,2797
openai/types/chat/chat_completion_audio.py,sha256=wOZxqzRU4G8TEm5e1syagWtILAEvgIY_Zyo-7MEktGY,851
-openai/types/chat/chat_completion_audio_param.py,sha256=0fxKAeYxliX8N7wTsJw895qYhlwU9TPlbHQ3NYFMs6s,1023
+openai/types/chat/chat_completion_audio_param.py,sha256=EiYmoyjugmbx7zvN1QNFLvh5UxGPzLuIHnOXUfcm0Yo,1333
openai/types/chat/chat_completion_chunk.py,sha256=rq8dljAql3jIRC10Qc9h0kBuKhb_Ju7DXBw6CWM3onA,6498
openai/types/chat/chat_completion_content_part_image.py,sha256=9OD-MwO3od5471z5Il_w6EEunuuOAkKQQo25438KKyU,840
openai/types/chat/chat_completion_content_part_image_param.py,sha256=U_HLJUD9WlpBuoX6Jg3YmS6MXdQG4ZWAbSonTNYLidM,884
@@ -934,12 +968,12 @@ openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTF
openai/types/chat/chat_completion_tool_param.py,sha256=5hFt0Izat_o50JMJzspCYeB0gubilRDB3a6yIfGHoN8,431
openai/types/chat/chat_completion_tool_union_param.py,sha256=smpIoekwuuXKQx9jRRB2cqc3L7_fmN5lB4IIJHlKhys,504
openai/types/chat/chat_completion_user_message_param.py,sha256=znqcR_CPZX8YxQpFq1xNQISVR3dM8hj5rjN33b00KHo,901
-openai/types/chat/completion_create_params.py,sha256=67j_HV9PRg6d5MQetiHAyivSozbdnUX92hLVRuvAKfY,18433
-openai/types/chat/completion_list_params.py,sha256=jOAiZ6vYSrxyD-3qzIXvXofJkejl6bet9_yNsC9p5ws,1154
+openai/types/chat/completion_create_params.py,sha256=fPMvQpraGlIEo94M6XfVoAtbAj2ueSX8HOep31wV_cE,18477
+openai/types/chat/completion_list_params.py,sha256=QBKLa941_4fU2PAT2uLImYIfPZj-WdTqqpsy0vQ1b0c,931
openai/types/chat/completion_update_params.py,sha256=VRDF28qoonjrveHhw8BT4Yo_NlLsV2Qzd_KUUQ6AEG8,742
openai/types/chat/completions/__init__.py,sha256=nmKlohYbZmr7Pzv1qCDMSDbthcH6ySPFIgvXpHZtxK8,195
-openai/types/chat/completions/__pycache__/__init__.cpython-310.pyc,,
-openai/types/chat/completions/__pycache__/message_list_params.cpython-310.pyc,,
+openai/types/chat/completions/__pycache__/__init__.cpython-314.pyc,,
+openai/types/chat/completions/__pycache__/message_list_params.cpython-314.pyc,,
openai/types/chat/completions/message_list_params.py,sha256=IArlye40xGlMVIDHxsK9RX_5usPL71wXPMgdwI7_wYU,583
openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437
openai/types/chat/parsed_function_tool_call.py,sha256=JDWYo1XhTDQ8CxssbgjpzBhUw8jeXAmEd5Tr_CqFrVA,945
@@ -948,55 +982,55 @@ openai/types/completion.py,sha256=mwIFVtTYVKOmvIQJz6M6jQS1r48_rvbVvOztDp0C9Wo,13
openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965
openai/types/completion_create_params.py,sha256=UqgYjUpYbQYPdYETVxhkwgbGRKTQCBoyeSFtrB8iuAo,7652
openai/types/completion_usage.py,sha256=siq8f0jlCP3GYFHQr8Zzflf-BYxOggE_OjtsGs9ur4A,1895
-openai/types/container_create_params.py,sha256=AhtgxFOFr8vIayvK8pTJq0G9j_Mgnze1UlhIGb4P6ik,1015
-openai/types/container_create_response.py,sha256=4yCPrrUA9tIvgU64kNwVEtLrSBGwLYee1Uo1HYCbxis,1589
-openai/types/container_list_params.py,sha256=7RiUMBOEJj9QH9LYtPiwUrIufx8czF6kk2JcfO_LP_s,893
-openai/types/container_list_response.py,sha256=jnoI1Fyem9m8D7eVhDAhir1R6P3kWOO8SfhJmxYrHAA,1585
-openai/types/container_retrieve_response.py,sha256=wFYvtDPJzAxCZNAZFo82WOX7M6NEirorHJUH38i-ugE,1593
+openai/types/container_create_params.py,sha256=hKLUqHR3O8P3ovPRAHSf8F9ksdg8mzzyYKxX4SDalSw,1771
+openai/types/container_create_response.py,sha256=u6CGmXJGKQN2dL7m2aBjSxf5u3E2qTI8JQAd-3CGMu4,1991
+openai/types/container_list_params.py,sha256=nT4iLO9FdooggoR867JNy0ycb0RnUvwhz-giYo9mos8,952
+openai/types/container_list_response.py,sha256=m5lCUUSEArG02a7ex8xW1AZTgljTAjkEGHz54cX31IY,1987
+openai/types/container_retrieve_response.py,sha256=zSR77D4CoWLSGpbRecWxSIcjctW0fjB-51J4KdZC2pU,1995
openai/types/containers/__init__.py,sha256=SCdMa4GNxw-I23CwW03iVOoHRfDybyKEMmpDkdVuUcI,480
-openai/types/containers/__pycache__/__init__.cpython-310.pyc,,
-openai/types/containers/__pycache__/file_create_params.cpython-310.pyc,,
-openai/types/containers/__pycache__/file_create_response.cpython-310.pyc,,
-openai/types/containers/__pycache__/file_list_params.cpython-310.pyc,,
-openai/types/containers/__pycache__/file_list_response.cpython-310.pyc,,
-openai/types/containers/__pycache__/file_retrieve_response.cpython-310.pyc,,
+openai/types/containers/__pycache__/__init__.cpython-314.pyc,,
+openai/types/containers/__pycache__/file_create_params.cpython-314.pyc,,
+openai/types/containers/__pycache__/file_create_response.cpython-314.pyc,,
+openai/types/containers/__pycache__/file_list_params.cpython-314.pyc,,
+openai/types/containers/__pycache__/file_list_response.cpython-314.pyc,,
+openai/types/containers/__pycache__/file_retrieve_response.cpython-314.pyc,,
openai/types/containers/file_create_params.py,sha256=KXoZNG4DpiD7NDeQixdKJsuOv-iCZAlSN4sz7AQm49k,412
openai/types/containers/file_create_response.py,sha256=Dh1OWf86XNMfmvVwfRGezfihdDuuAcdiQxT_3iefBzw,722
openai/types/containers/file_list_params.py,sha256=9bU7uKeXPk7adFzwvKHFitFOV4phnIbbfFx5u6n1OFY,883
openai/types/containers/file_list_response.py,sha256=xwvdMIUafkHSXJGQT1_mxt6T_8nJo-isp9M_5YTq-J8,718
openai/types/containers/file_retrieve_response.py,sha256=wGPU9o5SKkg8s4aUJXhwC38u8KfTFKmIUk1ItUdYxJg,726
openai/types/containers/files/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
-openai/types/containers/files/__pycache__/__init__.cpython-310.pyc,,
+openai/types/containers/files/__pycache__/__init__.cpython-314.pyc,,
openai/types/conversations/__init__.py,sha256=N7GRumNq1KeGR4X9STSKWLM1axUntyaMI_OwPihZmjI,1854
-openai/types/conversations/__pycache__/__init__.cpython-310.pyc,,
-openai/types/conversations/__pycache__/computer_screenshot_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation_create_params.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation_deleted_resource.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation_item.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation_item_list.cpython-310.pyc,,
-openai/types/conversations/__pycache__/conversation_update_params.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_file_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_file_content_param.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_image_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_image_content_param.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_text_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/input_text_content_param.cpython-310.pyc,,
-openai/types/conversations/__pycache__/item_create_params.cpython-310.pyc,,
-openai/types/conversations/__pycache__/item_list_params.cpython-310.pyc,,
-openai/types/conversations/__pycache__/item_retrieve_params.cpython-310.pyc,,
-openai/types/conversations/__pycache__/message.cpython-310.pyc,,
-openai/types/conversations/__pycache__/output_text_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/output_text_content_param.cpython-310.pyc,,
-openai/types/conversations/__pycache__/refusal_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/refusal_content_param.cpython-310.pyc,,
-openai/types/conversations/__pycache__/summary_text_content.cpython-310.pyc,,
-openai/types/conversations/__pycache__/text_content.cpython-310.pyc,,
-openai/types/conversations/computer_screenshot_content.py,sha256=Q-YXldRA9F_NiDRpDEu7w8IDI86HvUCLc9EDH9ElS-c,671
+openai/types/conversations/__pycache__/__init__.cpython-314.pyc,,
+openai/types/conversations/__pycache__/computer_screenshot_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation_create_params.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation_deleted_resource.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation_item.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation_item_list.cpython-314.pyc,,
+openai/types/conversations/__pycache__/conversation_update_params.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_file_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_file_content_param.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_image_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_image_content_param.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_text_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/input_text_content_param.cpython-314.pyc,,
+openai/types/conversations/__pycache__/item_create_params.cpython-314.pyc,,
+openai/types/conversations/__pycache__/item_list_params.cpython-314.pyc,,
+openai/types/conversations/__pycache__/item_retrieve_params.cpython-314.pyc,,
+openai/types/conversations/__pycache__/message.cpython-314.pyc,,
+openai/types/conversations/__pycache__/output_text_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/output_text_content_param.cpython-314.pyc,,
+openai/types/conversations/__pycache__/refusal_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/refusal_content_param.cpython-314.pyc,,
+openai/types/conversations/__pycache__/summary_text_content.cpython-314.pyc,,
+openai/types/conversations/__pycache__/text_content.cpython-314.pyc,,
+openai/types/conversations/computer_screenshot_content.py,sha256=hhFabvZFNyIiTvAnoxPMVmI6Y1TBHhzN_4miuIgieK8,878
openai/types/conversations/conversation.py,sha256=BVpec4hLHle_8iRf6v5y4CPYHtMhEntP0m8PDG_5GSY,886
openai/types/conversations/conversation_create_params.py,sha256=dtgIXlZj1yKP3oJUYdFCb2MKIk6BH8e4QbKIPPGjHf8,976
openai/types/conversations/conversation_deleted_resource.py,sha256=HagMTsOrDL7QYQSeZqMbBMfRzhWAgnrxtinGT5uhog4,326
-openai/types/conversations/conversation_item.py,sha256=BfsUtqxwdYKTyi2eL-gpSgHiE1iduphQbxAv7gNoMQc,7672
+openai/types/conversations/conversation_item.py,sha256=LlKb4MEg1hlnCu_bxM7SQpR9L9DVrg_u91XJk7acpAI,8005
openai/types/conversations/conversation_item_list.py,sha256=bFXSZFmB1H9-IwjDRTCGtszxt57B3RAbHGZaL08gcYA,708
openai/types/conversations/conversation_update_params.py,sha256=YMyThjw2ObnqY-dhI4iy2pqf0cZW7rNV0TcxpBMs1bs,746
openai/types/conversations/input_file_content.py,sha256=xxG8_PMhnjH1F6jXs6vZyj_T1HdO--48fTYFrvWCPzk,219
@@ -1016,6 +1050,7 @@ openai/types/conversations/refusal_content_param.py,sha256=hWb2AoU0oTKCNLRZs5kzx
openai/types/conversations/summary_text_content.py,sha256=TuGepAPiMlauu9CdEwkQdkLwErBjx6kNXlIG-CSb-4g,447
openai/types/conversations/text_content.py,sha256=eya3kB6nXl0KEGlvpH_LlE8CIPzD5GVg5r851-oWR0g,286
openai/types/create_embedding_response.py,sha256=S_HDPpkr_2us12j1M8NsXTrTg6RJT2rgI3zAsZpMSZg,848
+openai/types/deleted_skill.py,sha256=S2SZKHQIla9-2lwbtYtVao5eIeH9936pn-PDHXBGrGw,288
openai/types/embedding.py,sha256=PDZAZRp7mzlvz5R2FMMf50vRIphHNCgyST2gbo2NdA4,711
openai/types/embedding_create_params.py,sha256=asahWWNcMvXGDfbTMz4uDy7DU9g6OJ9wowqZByghzw8,2039
openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281
@@ -1030,19 +1065,19 @@ openai/types/eval_stored_completions_data_source_config.py,sha256=OGMNipFZes_Eae
openai/types/eval_update_params.py,sha256=Wooz-3SDznbC3ihrhOs-10y9cxpTKGQgobDLfZ-23c0,757
openai/types/eval_update_response.py,sha256=ZH82tSoBv64c_4VUXKOcGBLJQ6xSersZz8gnGzbvjw8,4468
openai/types/evals/__init__.py,sha256=wiXRqdkT-SkjE0Sgv6MixeECZjF0xaoCPdSGFEh0rEs,1193
-openai/types/evals/__pycache__/__init__.cpython-310.pyc,,
-openai/types/evals/__pycache__/create_eval_completions_run_data_source.cpython-310.pyc,,
-openai/types/evals/__pycache__/create_eval_completions_run_data_source_param.cpython-310.pyc,,
-openai/types/evals/__pycache__/create_eval_jsonl_run_data_source.cpython-310.pyc,,
-openai/types/evals/__pycache__/create_eval_jsonl_run_data_source_param.cpython-310.pyc,,
-openai/types/evals/__pycache__/eval_api_error.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_cancel_response.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_create_params.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_create_response.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_delete_response.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_list_params.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_list_response.cpython-310.pyc,,
-openai/types/evals/__pycache__/run_retrieve_response.cpython-310.pyc,,
+openai/types/evals/__pycache__/__init__.cpython-314.pyc,,
+openai/types/evals/__pycache__/create_eval_completions_run_data_source.cpython-314.pyc,,
+openai/types/evals/__pycache__/create_eval_completions_run_data_source_param.cpython-314.pyc,,
+openai/types/evals/__pycache__/create_eval_jsonl_run_data_source.cpython-314.pyc,,
+openai/types/evals/__pycache__/create_eval_jsonl_run_data_source_param.cpython-314.pyc,,
+openai/types/evals/__pycache__/eval_api_error.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_cancel_response.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_create_params.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_create_response.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_delete_response.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_list_params.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_list_response.cpython-314.pyc,,
+openai/types/evals/__pycache__/run_retrieve_response.cpython-314.pyc,,
openai/types/evals/create_eval_completions_run_data_source.py,sha256=91mh923L-jJ2AyqzA3dh87hJR0KPDvaKaKr3OuH-4yY,9127
openai/types/evals/create_eval_completions_run_data_source_param.py,sha256=Qa7TlKjpsJlUkVsxuI8q4pyBgyI-XWQKvQVqOnUIo6c,9210
openai/types/evals/create_eval_jsonl_run_data_source.py,sha256=4BsR_n4iiFNoQ2-_Y8X0O2BONcSqCNY4Vav4RQPHX78,1323
@@ -1056,10 +1091,10 @@ openai/types/evals/run_list_params.py,sha256=vgbJMYybzCep7e9rxUVHlWy_o4GNy4tJyGT
openai/types/evals/run_list_response.py,sha256=VOCF7XSR3bloPGQR1froYWqmcJpEPb6vjmfzkQsyJ2g,15901
openai/types/evals/run_retrieve_response.py,sha256=ImzKvC_ZMA_YaesVYfuHm1TZnWQ6-3OkpWZIJFX7qug,15909
openai/types/evals/runs/__init__.py,sha256=sltNV1VwseIVr09gQ5E4IKbRKJuWJSLY1xUvAuC97Ec,393
-openai/types/evals/runs/__pycache__/__init__.cpython-310.pyc,,
-openai/types/evals/runs/__pycache__/output_item_list_params.cpython-310.pyc,,
-openai/types/evals/runs/__pycache__/output_item_list_response.cpython-310.pyc,,
-openai/types/evals/runs/__pycache__/output_item_retrieve_response.cpython-310.pyc,,
+openai/types/evals/runs/__pycache__/__init__.cpython-314.pyc,,
+openai/types/evals/runs/__pycache__/output_item_list_params.cpython-314.pyc,,
+openai/types/evals/runs/__pycache__/output_item_list_response.cpython-314.pyc,,
+openai/types/evals/runs/__pycache__/output_item_retrieve_response.cpython-314.pyc,,
openai/types/evals/runs/output_item_list_params.py,sha256=Lp1OQV1qXeEUwMS90_-BpOnO1jICwJOo9QgNC9OGJ2U,821
openai/types/evals/runs/output_item_list_response.py,sha256=YWkB3RtLae4hl0xs6gQvllYOcTwViR806LA_W3n9Kyg,4120
openai/types/evals/runs/output_item_retrieve_response.py,sha256=CG3oTPKn9OOyHMLa3A-EAmo09mb_j2mJz6zw9hJ2C00,4128
@@ -1072,47 +1107,51 @@ openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhu
openai/types/file_object.py,sha256=s0hqehIWHSMHbiIRTrURTi4iiCu2PXmEGrd4tUB8lW8,1589
openai/types/file_purpose.py,sha256=aNd8G-GC1UVCL9bvTgtL4kfkiF0uEjfiimRS-eh8VrY,265
openai/types/fine_tuning/__init__.py,sha256=f8GH2rKGcIU1Kjrfjw5J0QoqlsC4jRmH96bU6axGD64,1832
-openai/types/fine_tuning/__pycache__/__init__.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/dpo_hyperparameters.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/dpo_hyperparameters_param.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/dpo_method.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/dpo_method_param.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/fine_tuning_job_event.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration_object.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/job_create_params.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/job_list_params.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters_param.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/reinforcement_method.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/reinforcement_method_param.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/supervised_hyperparameters.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/supervised_hyperparameters_param.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/supervised_method.cpython-310.pyc,,
-openai/types/fine_tuning/__pycache__/supervised_method_param.cpython-310.pyc,,
+openai/types/fine_tuning/__pycache__/__init__.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/dpo_hyperparameters.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/dpo_hyperparameters_param.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/dpo_method.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/dpo_method_param.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/fine_tuning_job_event.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration_object.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/job_create_params.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/job_list_params.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters_param.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/reinforcement_method.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/reinforcement_method_param.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/supervised_hyperparameters.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/supervised_hyperparameters_param.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/supervised_method.cpython-314.pyc,,
+openai/types/fine_tuning/__pycache__/supervised_method_param.cpython-314.pyc,,
openai/types/fine_tuning/alpha/__init__.py,sha256=e_Evj3xLs7o_SONlqoXDM75oZMbxuGWhxBW-azsXD_w,429
-openai/types/fine_tuning/alpha/__pycache__/__init__.cpython-310.pyc,,
-openai/types/fine_tuning/alpha/__pycache__/grader_run_params.cpython-310.pyc,,
-openai/types/fine_tuning/alpha/__pycache__/grader_run_response.cpython-310.pyc,,
-openai/types/fine_tuning/alpha/__pycache__/grader_validate_params.cpython-310.pyc,,
-openai/types/fine_tuning/alpha/__pycache__/grader_validate_response.cpython-310.pyc,,
+openai/types/fine_tuning/alpha/__pycache__/__init__.cpython-314.pyc,,
+openai/types/fine_tuning/alpha/__pycache__/grader_run_params.cpython-314.pyc,,
+openai/types/fine_tuning/alpha/__pycache__/grader_run_response.cpython-314.pyc,,
+openai/types/fine_tuning/alpha/__pycache__/grader_validate_params.cpython-314.pyc,,
+openai/types/fine_tuning/alpha/__pycache__/grader_validate_response.cpython-314.pyc,,
openai/types/fine_tuning/alpha/grader_run_params.py,sha256=ECVczgghTZ8J9xfqAbNc_VvAHfhOpkaVzQw_wUmE4r8,1414
openai/types/fine_tuning/alpha/grader_run_response.py,sha256=So-fvQMRvpccsSYb0jfKGQ_MNWdqqS71OcE9GbeLASs,1556
openai/types/fine_tuning/alpha/grader_validate_params.py,sha256=Jd6m3DjIZAUNY-PlLUWDbH3ojm8ztnfjHmPjKw2DrLM,875
openai/types/fine_tuning/alpha/grader_validate_response.py,sha256=nLldMLyNG-ICS3HwykDWdKuAPKu4gR2A2I0C79C4khs,773
-openai/types/fine_tuning/checkpoints/__init__.py,sha256=xA69SYwf79pe8QIq9u9vXPjjCw7lf3ZW2arzg9c_bus,588
-openai/types/fine_tuning/checkpoints/__pycache__/__init__.cpython-310.pyc,,
-openai/types/fine_tuning/checkpoints/__pycache__/permission_create_params.cpython-310.pyc,,
-openai/types/fine_tuning/checkpoints/__pycache__/permission_create_response.cpython-310.pyc,,
-openai/types/fine_tuning/checkpoints/__pycache__/permission_delete_response.cpython-310.pyc,,
-openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_params.cpython-310.pyc,,
-openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_response.cpython-310.pyc,,
+openai/types/fine_tuning/checkpoints/__init__.py,sha256=7I1e_nAa8YIuKob-VPOLtlRyUYuPynpUrbGGRN6TUQU,756
+openai/types/fine_tuning/checkpoints/__pycache__/__init__.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_create_params.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_create_response.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_delete_response.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_list_params.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_list_response.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_params.cpython-314.pyc,,
+openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_response.cpython-314.pyc,,
openai/types/fine_tuning/checkpoints/permission_create_params.py,sha256=TI90xY-4dv8vDKKZ0FBdbly9JTCrw4FgXkcXz_gTUlk,407
openai/types/fine_tuning/checkpoints/permission_create_response.py,sha256=ATIeO4fFBTtaylBYdC6Id-wvirln4lKh2tRLMaJW01Y,751
openai/types/fine_tuning/checkpoints/permission_delete_response.py,sha256=X_RuOvxa6i3wGLP5joHixv4tNLUpuK-2umiUf6P7Ha8,558
+openai/types/fine_tuning/checkpoints/permission_list_params.py,sha256=5BYaVDcV8LaBk9yBueZD60h1mi1xCmf7WwLZEHh36i4,602
+openai/types/fine_tuning/checkpoints/permission_list_response.py,sha256=ufUBrKKR15VJmz4HSK6m4KSDFqxjASV7USqSxkj0P1Q,747
openai/types/fine_tuning/checkpoints/permission_retrieve_params.py,sha256=3zVCOq1676MizKhKSba2OLmBSPlBx6Az2ZdxyVl580o,610
openai/types/fine_tuning/checkpoints/permission_retrieve_response.py,sha256=o8wXyRWH80oA8o80crPkaGDyPIwuQZysRK1ic6mPqj8,963
openai/types/fine_tuning/dpo_hyperparameters.py,sha256=Kkylxhw94kImWo6-SS_7Jq66nftP0Hy0bbRxtcrChXM,1129
@@ -1128,9 +1167,9 @@ openai/types/fine_tuning/job_create_params.py,sha256=-79Le_1QQI0c8W5M1NWLKlmRs9W
openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400
openai/types/fine_tuning/job_list_params.py,sha256=wUGXsQ4UDCKvAjHDZAZ-JDU6XAouiTGThb0Jo_9XX08,623
openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295
-openai/types/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,,
-openai/types/fine_tuning/jobs/__pycache__/checkpoint_list_params.cpython-310.pyc,,
-openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-310.pyc,,
+openai/types/fine_tuning/jobs/__pycache__/__init__.cpython-314.pyc,,
+openai/types/fine_tuning/jobs/__pycache__/checkpoint_list_params.cpython-314.pyc,,
+openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-314.pyc,,
openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415
openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=UG9keznPmpRFAM4ivkMJ8KlBbojdF3m2bSmiftBCnT8,1548
openai/types/fine_tuning/reinforcement_hyperparameters.py,sha256=DwLBbYDa4QDJQup7aeg_KUu5wwC4tSLDpp60axNsmC0,1501
@@ -1142,21 +1181,21 @@ openai/types/fine_tuning/supervised_hyperparameters_param.py,sha256=9nnnvgvHbT-x
openai/types/fine_tuning/supervised_method.py,sha256=sGXoTMI_r3BN53I7FA45dz_JU9d_7eQlPXHvRSswgfY,472
openai/types/fine_tuning/supervised_method_param.py,sha256=GKw3VgMuZ6V0B0NKwHh_lrHRSM4gYp0dPBPoamYPze8,509
openai/types/graders/__init__.py,sha256=y-002SPDfVfefLY1hTugtFHv74beH51tCTXi6dZrCDk,1147
-openai/types/graders/__pycache__/__init__.cpython-310.pyc,,
-openai/types/graders/__pycache__/grader_inputs.cpython-310.pyc,,
-openai/types/graders/__pycache__/grader_inputs_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/label_model_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/label_model_grader_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/multi_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/multi_grader_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/python_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/python_grader_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/score_model_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/score_model_grader_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/string_check_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/string_check_grader_param.cpython-310.pyc,,
-openai/types/graders/__pycache__/text_similarity_grader.cpython-310.pyc,,
-openai/types/graders/__pycache__/text_similarity_grader_param.cpython-310.pyc,,
+openai/types/graders/__pycache__/__init__.cpython-314.pyc,,
+openai/types/graders/__pycache__/grader_inputs.cpython-314.pyc,,
+openai/types/graders/__pycache__/grader_inputs_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/label_model_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/label_model_grader_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/multi_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/multi_grader_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/python_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/python_grader_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/score_model_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/score_model_grader_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/string_check_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/string_check_grader_param.cpython-314.pyc,,
+openai/types/graders/__pycache__/text_similarity_grader.cpython-314.pyc,,
+openai/types/graders/__pycache__/text_similarity_grader_param.cpython-314.pyc,,
openai/types/graders/grader_inputs.py,sha256=rboj5sT1i5dwbXH3gGw6NgACSchjECaFeNg9a4Iq4dc,1299
openai/types/graders/grader_inputs_param.py,sha256=-kXE-BoLSannXeqP0SSdcU9x6ssRu0xxMdgnmffMxCA,1494
openai/types/graders/label_model_grader.py,sha256=UwzUzamECdsrNpyAnKYfFqTmE_R-ORzIKDIvtJmFCSU,2765
@@ -1174,13 +1213,14 @@ openai/types/graders/text_similarity_grader_param.py,sha256=KW-fjo4H3S7mdZbFMXj7
openai/types/image.py,sha256=21v1I9bcjq7p-rw20iSNxW0CQG7OLUB1VP3d3zTjID4,872
openai/types/image_create_variation_params.py,sha256=Xeka4vp5V0o8R_6vnLsqiQhWH5O6tUSCyO3FKGVmAeU,1426
openai/types/image_edit_completed_event.py,sha256=u8Rj9eW4C7htO0jl59OP0u4CKEhO0TcTL1dqLGN2JQU,2024
-openai/types/image_edit_params.py,sha256=jWwiZhG3m1ZzU2PTPfAF_gtaBGduDw3ziuEX5BH5qN8,5500
+openai/types/image_edit_params.py,sha256=oWMCYIIRm3C_5AyeDuCIbvcHOgvVETouEhzsdvvkZoE,5429
openai/types/image_edit_partial_image_event.py,sha256=tJR59-lg3QQfoDNsItBoScAMhS-IdOHDon655vRz0CA,1180
openai/types/image_edit_stream_event.py,sha256=GtHKc8VdumW5RnQtIiyMqhwIIaqYogKXZF1QNuq9Bd4,516
openai/types/image_gen_completed_event.py,sha256=rpjnocJQ5imYRrHHxEz5yDzWppi9W6sxxHYX1dKamQg,2036
openai/types/image_gen_partial_image_event.py,sha256=5VJhxTf6ZgjVFbQn72iOkolIHCMAa4LExMhXhq35NRw,1165
openai/types/image_gen_stream_event.py,sha256=gVzdE6qzBPpK3kEFM7EdoUzBa4DgCaS3AdF9gjd0pUs,508
openai/types/image_generate_params.py,sha256=-G3-zsmpaaKtFnCbD42Tc_g_i45WiDVT72E7pVOYlDo,5469
+openai/types/image_input_reference_param.py,sha256=2RGuIde_YVPaml9QAqXVea7BQ6h5rMCGT2seLyPMOIg,358
openai/types/image_model.py,sha256=LXjOC6iPeoDDrrZcUcKS6R4CpdL16DL3OkwhGTJUNys,271
openai/types/images_response.py,sha256=GyUSpxaUXJfr0qHdabXdeYn15MXv1vD2SziISO01Qdg,2574
openai/types/model.py,sha256=cmrjNhjHXnJUfgp3al0B2s4O-PvFD-nHni7h8h2p6FM,609
@@ -1194,164 +1234,164 @@ openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5
openai/types/moderation_text_input_param.py,sha256=4egKHVxB5niYopwD6j3DRU1fIq3vcG2Q7utGQj24kGM,456
openai/types/other_file_chunking_strategy_object.py,sha256=h4JKlz4p__U1vbZeXKO9v4DrPsDLIE5IilYdNrrnhFs,501
openai/types/realtime/__init__.py,sha256=hnjSirz0039Qais1VXg42nRrZNClsrmLGWYW-6SQP8Y,17085
-openai/types/realtime/__pycache__/__init__.cpython-310.pyc,,
-openai/types/realtime/__pycache__/audio_transcription.cpython-310.pyc,,
-openai/types/realtime/__pycache__/audio_transcription_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/call_accept_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/call_create_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/call_refer_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/call_reject_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/client_secret_create_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/client_secret_create_response.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_created_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_added.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_create_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_create_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_created_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_delete_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_delete_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_deleted_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_done.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_segment.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_retrieve_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_truncate_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_truncate_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/conversation_item_truncated_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_append_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_clear_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_commit_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_committed_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_dtmf_event_received_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/input_audio_buffer_timeout_triggered.cpython-310.pyc,,
-openai/types/realtime/__pycache__/log_prob_properties.cpython-310.pyc,,
-openai/types/realtime/__pycache__/mcp_list_tools_completed.cpython-310.pyc,,
-openai/types/realtime/__pycache__/mcp_list_tools_failed.cpython-310.pyc,,
-openai/types/realtime/__pycache__/mcp_list_tools_in_progress.cpython-310.pyc,,
-openai/types/realtime/__pycache__/noise_reduction_type.cpython-310.pyc,,
-openai/types/realtime/__pycache__/output_audio_buffer_clear_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/output_audio_buffer_clear_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/rate_limits_updated_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config_input.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config_input_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config_output.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config_output_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_config_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_formats.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_formats_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_input_turn_detection.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_audio_input_turn_detection_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_client_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_client_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_connect_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_function_call.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_function_call_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_system_message.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_system_message_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_user_message.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_conversation_item_user_message_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_error.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_error_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_function_tool.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_function_tool_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_approval_request.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_approval_request_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_approval_response.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_approval_response_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_list_tools.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_list_tools_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_protocol_error.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_protocol_error_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_tool_call.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_tool_call_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcphttp_error.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_mcphttp_error_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_audio_output.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_audio_output_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_mcp_tool.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_mcp_tool_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_params.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_create_params_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_status.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_usage.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_usage_input_token_details.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_response_usage_output_token_details.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_server_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_session_client_secret.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_session_create_request.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_session_create_request_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_session_create_response.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tool_choice_config.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tool_choice_config_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tools_config.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tools_config_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tools_config_union.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tools_config_union_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tracing_config.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_tracing_config_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio_input.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_audio_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_create_request.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_create_request_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_create_response.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_transcription_session_turn_detection.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_truncation.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_truncation_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_truncation_retention_ratio.cpython-310.pyc,,
-openai/types/realtime/__pycache__/realtime_truncation_retention_ratio_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_audio_delta_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_audio_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_cancel_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_cancel_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_content_part_added_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_content_part_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_create_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_create_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_created_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_mcp_call_arguments_delta.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_mcp_call_arguments_done.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_mcp_call_completed.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_mcp_call_failed.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_mcp_call_in_progress.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_output_item_added_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_output_item_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_text_delta_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/response_text_done_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/session_created_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/session_update_event.cpython-310.pyc,,
-openai/types/realtime/__pycache__/session_update_event_param.cpython-310.pyc,,
-openai/types/realtime/__pycache__/session_updated_event.cpython-310.pyc,,
+openai/types/realtime/__pycache__/__init__.cpython-314.pyc,,
+openai/types/realtime/__pycache__/audio_transcription.cpython-314.pyc,,
+openai/types/realtime/__pycache__/audio_transcription_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/call_accept_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/call_create_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/call_refer_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/call_reject_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/client_secret_create_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/client_secret_create_response.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_created_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_added.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_create_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_create_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_created_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_delete_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_delete_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_deleted_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_done.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_segment.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_retrieve_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_truncate_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_truncate_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/conversation_item_truncated_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_append_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_clear_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_commit_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_committed_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_dtmf_event_received_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/input_audio_buffer_timeout_triggered.cpython-314.pyc,,
+openai/types/realtime/__pycache__/log_prob_properties.cpython-314.pyc,,
+openai/types/realtime/__pycache__/mcp_list_tools_completed.cpython-314.pyc,,
+openai/types/realtime/__pycache__/mcp_list_tools_failed.cpython-314.pyc,,
+openai/types/realtime/__pycache__/mcp_list_tools_in_progress.cpython-314.pyc,,
+openai/types/realtime/__pycache__/noise_reduction_type.cpython-314.pyc,,
+openai/types/realtime/__pycache__/output_audio_buffer_clear_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/output_audio_buffer_clear_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/rate_limits_updated_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config_input.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config_input_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config_output.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config_output_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_config_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_formats.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_formats_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_input_turn_detection.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_audio_input_turn_detection_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_client_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_client_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_connect_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_function_call.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_function_call_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_system_message.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_system_message_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_user_message.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_conversation_item_user_message_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_error.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_error_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_function_tool.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_function_tool_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_approval_request.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_approval_request_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_approval_response.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_approval_response_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_list_tools.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_list_tools_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_protocol_error.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_protocol_error_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_tool_call.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_tool_call_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcphttp_error.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_mcphttp_error_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_audio_output.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_audio_output_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_mcp_tool.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_mcp_tool_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_params.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_create_params_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_status.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_usage.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_usage_input_token_details.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_response_usage_output_token_details.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_server_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_session_client_secret.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_session_create_request.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_session_create_request_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_session_create_response.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tool_choice_config.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tool_choice_config_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tools_config.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tools_config_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tools_config_union.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tools_config_union_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tracing_config.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_tracing_config_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_input.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_create_request.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_create_request_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_create_response.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_transcription_session_turn_detection.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_truncation.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_truncation_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_truncation_retention_ratio.cpython-314.pyc,,
+openai/types/realtime/__pycache__/realtime_truncation_retention_ratio_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_audio_delta_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_audio_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_audio_transcript_delta_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_audio_transcript_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_cancel_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_cancel_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_content_part_added_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_content_part_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_create_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_create_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_created_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_function_call_arguments_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_mcp_call_arguments_delta.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_mcp_call_arguments_done.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_mcp_call_completed.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_mcp_call_failed.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_mcp_call_in_progress.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_output_item_added_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_output_item_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_text_delta_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/response_text_done_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/session_created_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/session_update_event.cpython-314.pyc,,
+openai/types/realtime/__pycache__/session_update_event_param.cpython-314.pyc,,
+openai/types/realtime/__pycache__/session_updated_event.cpython-314.pyc,,
openai/types/realtime/audio_transcription.py,sha256=E0x4I29kjvVbJOvsjNMSUQP8iIofodsWjwuGdbPynN4,1513
openai/types/realtime/audio_transcription_param.py,sha256=ecAesrI27A1iH01YyPXPx0G0bB7GwYxd8eS7WRouJkE,1484
-openai/types/realtime/call_accept_params.py,sha256=KSB7hkvjWTU-ooaKXqJRa2rpSIuTHU52P97212W_jlQ,5267
+openai/types/realtime/call_accept_params.py,sha256=5olH1jVc9XK0bB13Fz61Y4R7ONPZINq5wgHj-PG9rfo,5328
openai/types/realtime/call_create_params.py,sha256=r0vyhcjvDAKZF8DSbLP7bEQAVh92hgzNBnXBQWJ56no,544
openai/types/realtime/call_refer_params.py,sha256=Zhy_H0Jv0leRL6HS_WH7Oca7HUlZ0feINePxN-hms0s,422
openai/types/realtime/call_reject_params.py,sha256=cyz55zIN5DzSL74uhzeKQOSecl4V0UkpySt7zykoGIA,374
@@ -1400,8 +1440,8 @@ openai/types/realtime/rate_limits_updated_event.py,sha256=rsYK1Yc0WrtMFG8shtaYQv
openai/types/realtime/realtime_audio_config.py,sha256=CFWqdebuucyHyL4gqIZTuh78PME5cQPLmeq1VsD9Z3Y,520
openai/types/realtime/realtime_audio_config_input.py,sha256=eQohrH7bIyaHA7W4qIerP61cUO_FAd9Cb-X36ZmNjd0,3248
openai/types/realtime/realtime_audio_config_input_param.py,sha256=mdQg38uNnx9dOck83iWhsmdafe_OXLTtH9K8oLtq3LY,3300
-openai/types/realtime/realtime_audio_config_output.py,sha256=JgABZNG00eseAgYVZiJF_mf32iK3n7BsxwyI3Y5nAfw,1393
-openai/types/realtime/realtime_audio_config_output_param.py,sha256=m1Z2NNR2jWprIF94sLq5j1uwilleezSm51L1rb6u3As,1375
+openai/types/realtime/realtime_audio_config_output.py,sha256=hi8_fMZigCBSpA4_nc0dwYQI3UlOqgmvPMrjqxFQILw,1680
+openai/types/realtime/realtime_audio_config_output_param.py,sha256=001WNkdy5Olxc0-X6cL_nHpHYVtP0pUAkMSjEF2zAGE,1705
openai/types/realtime/realtime_audio_config_param.py,sha256=d5gwlXPsJIc53Nh1A9QYefyHd5YJ-E1VRRfuK2IOafk,556
openai/types/realtime/realtime_audio_formats.py,sha256=QZTTihJeSWCwEFUK2kjf4nvaZNXWnFfCKSXZee-Xa-Q,1069
openai/types/realtime/realtime_audio_formats_param.py,sha256=N0NFD1pbh6ZlcbENKv10Np-K2QVPWIpFsLzl0koPirs,960
@@ -1439,10 +1479,10 @@ openai/types/realtime/realtime_mcp_tool_execution_error_param.py,sha256=3IuPmvy5
openai/types/realtime/realtime_mcphttp_error.py,sha256=-Zqz0xr2gPs6peG_wC3S8qVgtEUJNrZm4Mm5BIvmZw0,301
openai/types/realtime/realtime_mcphttp_error_param.py,sha256=GcmAMBvZVNrN9p_tneHPu_pyN7D8wCytaAKruFtMfwI,377
openai/types/realtime/realtime_response.py,sha256=zaUF7moDzT5foAYhiqg1zALpA-4pxUCvy1ImIFprD6g,3903
-openai/types/realtime/realtime_response_create_audio_output.py,sha256=HSSmi2e3Xg5Cp5ONpBVtwVvCchSy0xyK6voM4LN-Xc4,1006
-openai/types/realtime/realtime_response_create_audio_output_param.py,sha256=ioIoPVwiTweGns-5sdJLOS-X7sPjhYSY5KsLRVwNSv4,1001
-openai/types/realtime/realtime_response_create_mcp_tool.py,sha256=lUpF8l6G9q2sMcxHY7LyQ9pzoZMmGygFtXhP_NT9vBM,5076
-openai/types/realtime/realtime_response_create_mcp_tool_param.py,sha256=HJ3UYYO-saxPWzfz4v9TvZCFNMu_gCGQ0OxDl6OC-rM,4979
+openai/types/realtime/realtime_response_create_audio_output.py,sha256=C9uxTOMzYseGLPbLcVxn9puvudBtrqSYANGhSLWsOZQ,1384
+openai/types/realtime/realtime_response_create_audio_output_param.py,sha256=g4jfVNQb32DGH_d7QorydEtxk9OnFW_ArIYkERLsPrg,1422
+openai/types/realtime/realtime_response_create_mcp_tool.py,sha256=_KSSVpVYb-j2Gad1Ku2TUeKcAmp47p0uciewINqTB7U,5194
+openai/types/realtime/realtime_response_create_mcp_tool_param.py,sha256=I9kn6YZuVUPhJwyjBfYS6awDVAtsiy-pti62xl1LYJg,5080
openai/types/realtime/realtime_response_create_params.py,sha256=kQ1B1iLiK7IS4ACTMtUvbj-toYGJ0d4yi-EoV6piXNQ,4348
openai/types/realtime/realtime_response_create_params_param.py,sha256=J3ch3svShqtnVCeDcPF0hV7o1WbwjSIZP7WtV7PHEgw,4380
openai/types/realtime/realtime_response_status.py,sha256=QjUdgjUKfX8tHVk-qRofl33vnjdoYzwTFhlk6knUGCo,1500
@@ -1451,15 +1491,15 @@ openai/types/realtime/realtime_response_usage_input_token_details.py,sha256=Pt38
openai/types/realtime/realtime_response_usage_output_token_details.py,sha256=3K2DIIvd92aJe9X38tSALn_t_-OyU4ivG3ckBXspkng,524
openai/types/realtime/realtime_server_event.py,sha256=WkL6yNqJXjKbkhegGZr00_Uw84NVITGhV-zPlQdLGeA,8248
openai/types/realtime/realtime_session_client_secret.py,sha256=oIOEWToHr1J_6e5VsDYA6xl7klWCaVoqWvoWVr4YDRs,629
-openai/types/realtime/realtime_session_create_request.py,sha256=ZrXWm3Vn7huXn52G_ctxSYNX99sSlqA0GejzQ863d7U,5365
-openai/types/realtime/realtime_session_create_request_param.py,sha256=toIWRT0Q2IV2cPYjNzKEeeiSbM49lvJH9qtmYv_Up0A,5351
-openai/types/realtime/realtime_session_create_response.py,sha256=P4MEjrOSicIGv2MmUbK4j8__noih8qxwZyIkT9eW43A,19935
+openai/types/realtime/realtime_session_create_request.py,sha256=PYgkFcOlsTWBqFpVu87HSmOLcmmVMgdFmxxaWYm3ojg,5426
+openai/types/realtime/realtime_session_create_request_param.py,sha256=05uYKF40qjZwiocjkuZuGOm2vG6trw1TB7paprDXFDc,5412
+openai/types/realtime/realtime_session_create_response.py,sha256=KSYtSqXTU2FJEpTTrK3b-2CjMJtA9M2CbMtF3Mv9duo,20114
openai/types/realtime/realtime_tool_choice_config.py,sha256=DV0uuyfK59paj5NC9adQskUF2km5TRSiHAlMDu1Fmdo,472
openai/types/realtime/realtime_tool_choice_config_param.py,sha256=0vqYNM4MkU5d8GXfitT6AoE9AubKeLZOSHGOH8q73QU,550
openai/types/realtime/realtime_tools_config.py,sha256=JSxehiQnA_tJUeXvi2h9H6wlYsnhhtRWB_o5S20V-oQ,318
-openai/types/realtime/realtime_tools_config_param.py,sha256=ux7AlLllQQozenBmkr5dzGOhdUp3b_LD9TN_StFxtfM,5272
-openai/types/realtime/realtime_tools_config_union.py,sha256=u_ss-JAdDLzZ4qsF0BumBXi3s_0LOlTjLCEszdsdpVk,5333
-openai/types/realtime/realtime_tools_config_union_param.py,sha256=B6LRLV6jkDeeQAp5UtAi0QwFDTTKxZHZR2LrwM31Ubc,5159
+openai/types/realtime/realtime_tools_config_param.py,sha256=4mGkT-5n2BaZN6BxvlZaKr5jbo_Yv4SvFxzhU2n2A8o,5373
+openai/types/realtime/realtime_tools_config_union.py,sha256=vT4Q17Y6otLQFRcMsCrT0FKytDSKE9GUmiXR_ViWmlA,5451
+openai/types/realtime/realtime_tools_config_union_param.py,sha256=AueIp62Meupmtyui8_tC6n-rJH2CFOMc3ydUEEVroy0,5260
openai/types/realtime/realtime_tracing_config.py,sha256=gkG557bdUH4-kAoyH9TEFp12VJFwQkGfQkPDc4ESh-E,918
openai/types/realtime/realtime_tracing_config_param.py,sha256=fo2An_bdLxQrCLhZ6d34vcxiDAHTj2BoxsTqoyT6yjw,887
openai/types/realtime/realtime_transcription_session_audio.py,sha256=cUETz-wzN-6qaupdCF06nmLmgH846dp7zu_wGOyl-Ho,467
@@ -1489,7 +1529,7 @@ openai/types/realtime/response_create_event_param.py,sha256=3hLwSZv8ROLu11eU17Yg
openai/types/realtime/response_created_event.py,sha256=02IS6XqoTnQr9gvPhUimQ0bchMW3LmjtYbL7V6H6Xlw,669
openai/types/realtime/response_done_event.py,sha256=Vt88mKQS72w7ysbkcU3yMxkIoYV4AoDyWqxeth1rJj0,1053
openai/types/realtime/response_function_call_arguments_delta_event.py,sha256=hceOwsJTdQ_vW8wMPebwj6tkt0nDPYbwCpWPs76nmrQ,874
-openai/types/realtime/response_function_call_arguments_done_event.py,sha256=orgPc3_ACCQhqXihyZwi04t27D4l8Llz_fuyZABu7TY,966
+openai/types/realtime/response_function_call_arguments_done_event.py,sha256=JENjWrP1ErF3WfegEUp9WpFySA_VvvAcOzqSqxOZXw4,1033
openai/types/realtime/response_mcp_call_arguments_delta.py,sha256=STwpfYmz3P6m-IwlUkHbI9rfauQn2HrmwNprY5NMaP4,920
openai/types/realtime/response_mcp_call_arguments_done.py,sha256=3nZoOA7m2Bj1JyMPTwhiqIvsLwb8UBnFUVGZ5KviNmg,799
openai/types/realtime/response_mcp_call_completed.py,sha256=8SfUc0zoVadfToShvtxw_wQ-wdh1q0aw68TSdS07fAE,633
@@ -1503,204 +1543,273 @@ openai/types/realtime/session_created_event.py,sha256=vcxq7m7QFXsoLHiNRB6_Oh6zzx
openai/types/realtime/session_update_event.py,sha256=R5YvfGKxhk0E4b02kQbYbg9MenVbUf-q-ZEHL8Gf5m4,1735
openai/types/realtime/session_update_event_param.py,sha256=uSxqGmfwGW0raKiTkMFwBuPkTxEsDu-L8I1IR53530M,1807
openai/types/realtime/session_updated_event.py,sha256=Zp9HaAopCZ1mdpt8BGp8t7JA5J4o3Cm_FbIobqvJhpM,887
-openai/types/responses/__init__.py,sha256=DOdOxzHyOleFM6lEY18XMz1hbskIXwEjuQnmApXJJ3M,18173
-openai/types/responses/__pycache__/__init__.cpython-310.pyc,,
-openai/types/responses/__pycache__/apply_patch_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/apply_patch_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/compacted_response.cpython-310.pyc,,
-openai/types/responses/__pycache__/computer_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/computer_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/custom_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/custom_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/easy_input_message.cpython-310.pyc,,
-openai/types/responses/__pycache__/easy_input_message_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/file_search_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/file_search_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/function_shell_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/function_shell_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/function_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/function_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/input_item_list_params.cpython-310.pyc,,
-openai/types/responses/__pycache__/input_token_count_params.cpython-310.pyc,,
-openai/types/responses/__pycache__/input_token_count_response.cpython-310.pyc,,
-openai/types/responses/__pycache__/parsed_response.cpython-310.pyc,,
-openai/types/responses/__pycache__/response.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_apply_patch_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_apply_patch_tool_call_output.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_audio_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_audio_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_call_code_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_call_code_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_call_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_call_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_call_interpreting_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_code_interpreter_tool_call_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_compact_params.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_compaction_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_compaction_item_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_compaction_item_param_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_computer_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_computer_tool_call_output_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_computer_tool_call_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_content_part_added_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_content_part_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_conversation_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_create_params.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_created_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call_input_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call_input_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call_output.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call_output_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_custom_tool_call_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_error.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_error_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_failed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_file_search_call_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_file_search_call_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_file_search_call_searching_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_file_search_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_file_search_tool_call_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_format_text_config.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_format_text_config_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_format_text_json_schema_config.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_format_text_json_schema_config_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_output_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_output_item_list.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_output_item_list_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_call_output_item_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_shell_call_output_content.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_shell_call_output_content_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_shell_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_shell_tool_call_output.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_tool_call.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_tool_call_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_tool_call_output_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_tool_call_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_web_search.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_function_web_search_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_image_gen_call_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_image_gen_call_generating_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_image_gen_call_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_image_gen_call_partial_image_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_includable.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_incomplete_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_audio.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_audio_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_content.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_content_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_file.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_file_content.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_file_content_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_file_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_image.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_image_content.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_image_content_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_image_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_item_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_message_content_list.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_message_content_list_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_message_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_text.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_text_content.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_text_content_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_input_text_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_item_list.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_call_arguments_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_call_arguments_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_call_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_call_failed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_call_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_list_tools_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_list_tools_failed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_mcp_list_tools_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_item_added_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_item_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_message.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_message_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_refusal.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_refusal_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_text.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_text_annotation_added_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_output_text_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_prompt.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_prompt_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_queued_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_item.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_item_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_summary_part_added_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_summary_part_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_summary_text_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_summary_text_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_text_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_reasoning_text_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_refusal_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_refusal_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_retrieve_params.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_status.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_stream_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_text_config.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_text_config_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_text_delta_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_text_done_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_usage.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_web_search_call_completed_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_web_search_call_in_progress_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/response_web_search_call_searching_event.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_allowed.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_allowed_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_apply_patch.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_apply_patch_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_custom.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_custom_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_function.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_function_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_mcp.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_mcp_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_options.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_shell.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_shell_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_types.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_choice_types_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/web_search_preview_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/web_search_preview_tool_param.cpython-310.pyc,,
-openai/types/responses/__pycache__/web_search_tool.cpython-310.pyc,,
-openai/types/responses/__pycache__/web_search_tool_param.cpython-310.pyc,,
+openai/types/responses/__init__.py,sha256=1ExeJOcIHGhwoHUyzJlnVz_JuJkAK9UWKI0NcoTnXvs,22041
+openai/types/responses/__pycache__/__init__.cpython-314.pyc,,
+openai/types/responses/__pycache__/apply_patch_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/apply_patch_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/compacted_response.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_action.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_action_list.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_action_list_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_action_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_use_preview_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/computer_use_preview_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_auto.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_auto_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_allowlist.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_allowlist_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_disabled.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_disabled_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_domain_secret.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_network_policy_domain_secret_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_reference.cpython-314.pyc,,
+openai/types/responses/__pycache__/container_reference_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/custom_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/custom_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/easy_input_message.cpython-314.pyc,,
+openai/types/responses/__pycache__/easy_input_message_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/file_search_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/file_search_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/function_shell_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/function_shell_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/function_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/function_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/inline_skill.cpython-314.pyc,,
+openai/types/responses/__pycache__/inline_skill_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/inline_skill_source.cpython-314.pyc,,
+openai/types/responses/__pycache__/inline_skill_source_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/input_item_list_params.cpython-314.pyc,,
+openai/types/responses/__pycache__/input_token_count_params.cpython-314.pyc,,
+openai/types/responses/__pycache__/input_token_count_response.cpython-314.pyc,,
+openai/types/responses/__pycache__/local_environment.cpython-314.pyc,,
+openai/types/responses/__pycache__/local_environment_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/local_skill.cpython-314.pyc,,
+openai/types/responses/__pycache__/local_skill_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/namespace_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/namespace_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/parsed_response.cpython-314.pyc,,
+openai/types/responses/__pycache__/response.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_apply_patch_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_apply_patch_tool_call_output.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_audio_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_audio_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_audio_transcript_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_audio_transcript_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_call_code_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_call_code_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_call_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_call_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_call_interpreting_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_code_interpreter_tool_call_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_compact_params.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_compaction_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_compaction_item_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_compaction_item_param_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_computer_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_computer_tool_call_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_computer_tool_call_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_container_reference.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_content_part_added_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_content_part_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_conversation_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_conversation_param_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_create_params.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_created_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_input_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_input_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_output.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_output_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_custom_tool_call_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_error.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_error_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_failed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_file_search_call_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_file_search_call_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_file_search_call_searching_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_file_search_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_file_search_tool_call_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_format_text_config.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_format_text_config_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_format_text_json_schema_config.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_format_text_json_schema_config_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_arguments_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_arguments_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_output_item_list.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_output_item_list_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_call_output_item_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_shell_call_output_content.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_shell_call_output_content_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_shell_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_shell_tool_call_output.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_tool_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_tool_call_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_tool_call_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_tool_call_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_web_search.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_function_web_search_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_image_gen_call_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_image_gen_call_generating_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_image_gen_call_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_image_gen_call_partial_image_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_includable.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_incomplete_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_audio.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_audio_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_content.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_content_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_file.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_file_content.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_file_content_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_file_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_image.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_image_content.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_image_content_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_image_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_item_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_message_content_list.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_message_content_list_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_message_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_text.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_text_content.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_text_content_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_input_text_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_item_list.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_local_environment.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_call_arguments_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_call_arguments_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_call_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_call_failed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_call_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_list_tools_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_list_tools_failed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_mcp_list_tools_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_item_added_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_item_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_message.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_message_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_refusal.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_refusal_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_text.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_text_annotation_added_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_output_text_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_prompt.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_prompt_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_queued_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_item_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_summary_part_added_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_summary_part_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_summary_text_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_summary_text_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_text_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_reasoning_text_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_refusal_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_refusal_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_retrieve_params.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_status.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_stream_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_text_config.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_text_config_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_text_delta_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_text_done_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_tool_search_call.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_tool_search_output_item.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_tool_search_output_item_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_tool_search_output_item_param_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_usage.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_web_search_call_completed_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_web_search_call_in_progress_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/response_web_search_call_searching_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/responses_client_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/responses_client_event_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/responses_server_event.cpython-314.pyc,,
+openai/types/responses/__pycache__/skill_reference.cpython-314.pyc,,
+openai/types/responses/__pycache__/skill_reference_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_allowed.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_allowed_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_apply_patch.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_apply_patch_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_custom.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_custom_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_function.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_function_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_mcp.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_mcp_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_options.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_shell.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_shell_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_types.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_choice_types_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_search_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/tool_search_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/web_search_preview_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/web_search_preview_tool_param.cpython-314.pyc,,
+openai/types/responses/__pycache__/web_search_tool.cpython-314.pyc,,
+openai/types/responses/__pycache__/web_search_tool_param.cpython-314.pyc,,
openai/types/responses/apply_patch_tool.py,sha256=gO_CtGczaXdBPe8zSW-aEqV_dVJhn22GAh9AE4-xEq4,399
openai/types/responses/apply_patch_tool_param.py,sha256=k3kqGLeZ_f9vyXh7ZMmYVeQ-xcVUbGmo4ZAqM9HhgzI,455
openai/types/responses/compacted_response.py,sha256=qlkdM6GmpFWCcqU11xODEMqD16DinPF0W-o70Hg5VOk,934
-openai/types/responses/computer_tool.py,sha256=ExJaomK4IFKSprXIaQju1nGVi2CWNXV6QvDyrvNV7Pk,767
-openai/types/responses/computer_tool_param.py,sha256=SvgG7R0e63WUd20qbImia5VnEbJKjYPW7YF4F4G26QM,853
-openai/types/responses/custom_tool.py,sha256=3T6L-3w8A7TyPFcsw5_zYXNraP4nPbShg6lnoVt3K40,923
-openai/types/responses/custom_tool_param.py,sha256=hdnZol_ta8KGMlolyBsTY9RuwYBf4NyxDr1QK5MJwiA,935
-openai/types/responses/easy_input_message.py,sha256=VDVZoLHoRrgfSgBNlQM6rmdqFjzSD8I7n6325sQdmFo,1168
-openai/types/responses/easy_input_message_param.py,sha256=uK5_Oo_8iTJbUG9w8-hujQmfsH5lfepCpTNUgcpcU0c,1224
+openai/types/responses/computer_action.py,sha256=DjDkbkXQ0fcrofKYUFnFCYYvOWmeFlcfWUXZgb-FsW0,4264
+openai/types/responses/computer_action_list.py,sha256=li-EdxfB7-WEEJuv9bKadJDK7DTayj24JHbL0g7OVjo,284
+openai/types/responses/computer_action_list_param.py,sha256=ZEmGBg7rEMfCYWyp6XYQi3ScDmo171K41T7r_J4lzmg,4760
+openai/types/responses/computer_action_param.py,sha256=EFGvt1dYVWbJWbt9vworHOBYRKPJ9_6VknuntwfCBLk,4659
+openai/types/responses/computer_tool.py,sha256=tlBdHzMlmGKcExeoT-bUUOQkXayhzqjSs9RAbQOn93Q,470
+openai/types/responses/computer_tool_param.py,sha256=bIdba_E8QIJWyNu0Dp8Wy1Dm3VaCPclU_zCUGxmPIZc,526
+openai/types/responses/computer_use_preview_tool.py,sha256=zcfYzJ4pXXo9acw3_SjhtsgrtlvePrsLaO-CwJXiNt8,787
+openai/types/responses/computer_use_preview_tool_param.py,sha256=_-XqPZkUSIh5RJxgIXzl1m1EmKXpYvOTl81rcs0bRTY,873
+openai/types/responses/container_auto.py,sha256=ALGiZ7omgS-nhmnvwcB52B54OfmeDFFCKqriztG5Dlo,1387
+openai/types/responses/container_auto_param.py,sha256=IzrcQ54ZTaoHEa4dq8jygBUktBjBhQpGnrnw3V-cbjw,1359
+openai/types/responses/container_network_policy_allowlist.py,sha256=xKQdNX8ddFxLAmEmkJv5qORr807h6gWA2QS3131n_d8,736
+openai/types/responses/container_network_policy_allowlist_param.py,sha256=E8qJGGSTxtmlI7FYAGuRGOFUzAMqc7sMyGGA8JnRXmw,847
+openai/types/responses/container_network_policy_disabled.py,sha256=gmrNAer5PmJhpZQAMDHQuyN8od2SHDIyjAb6KMXg7rI,348
+openai/types/responses/container_network_policy_disabled_param.py,sha256=k2LMUTt7jqPRJgERySqUsXs6UkX5x-a650yHHT8ZAas,404
+openai/types/responses/container_network_policy_domain_secret.py,sha256=It8cbjYtQgm6wiFGx-c4YW2Z-sFEm6H_ClRpJBqourM,433
+openai/types/responses/container_network_policy_domain_secret_param.py,sha256=Se-NZucKb8zChehCbPsXWes5drDMk5gfWEtQetroBF8,539
+openai/types/responses/container_reference.py,sha256=8BLN-nu_nRDb27wriaC7C8BSKdFUo27zYjIaYnxtdOM,416
+openai/types/responses/container_reference_param.py,sha256=R-W99lrUaKLRHuOTKc155AMPD9e47f6VezcoTfn4mt8,482
+openai/types/responses/custom_tool.py,sha256=VLB-bfof6pjbc63QaQWmYUIdr5sPjtiqL8yytn4JFuw,1044
+openai/types/responses/custom_tool_param.py,sha256=4PgHoduxMi9eCPz-PqhS84ngLRdITI0jzKYjxgc8yA8,1039
+openai/types/responses/easy_input_message.py,sha256=b7oxdNML4LOUCkwYjQCF9wzCXzgOgxsphqr-Sb5RrDg,1575
+openai/types/responses/easy_input_message_param.py,sha256=-FNC4c6OU37r1qSZUoAf2x7a7M7GFpxEHxGdyefbw7U,1634
openai/types/responses/file_search_tool.py,sha256=HpPWubu_JTVOxtRefDRDZkhTtWfoekHCqiL6B702j9w,2259
openai/types/responses/file_search_tool_param.py,sha256=CvxYOB2DFpRC18WWa05En3heqTNd9Lv2SCZ7uP-ZOG8,2329
-openai/types/responses/function_shell_tool.py,sha256=nkYhOasTJYmE2CCJ8la94cadqThV4EdgVrB5-Fr3ecs,378
-openai/types/responses/function_shell_tool_param.py,sha256=wBsstq7T75MJRucaLjvHpTBdy7H5Ex-ExmoSyAugKCc,434
-openai/types/responses/function_tool.py,sha256=XatrZMskh15WXVnDmYF8WcxrMnt9Di6u-W5z_tS8Las,978
-openai/types/responses/function_tool_param.py,sha256=YnvnWFZijbVnEeoaruUBGidPEPhLRnNXnpZ8Pp3yrnU,1043
+openai/types/responses/function_shell_tool.py,sha256=JYwVq4qI0-kSFZC8AzUpURpWtmUFWp7vd7NnAA6WCTw,818
+openai/types/responses/function_shell_tool_param.py,sha256=dr84qrYV3cLajnpWoM3yukVGkJ6mjmo2br1fyAnAfyQ,811
+openai/types/responses/function_tool.py,sha256=yc-CSVGOxKtOfbeiroFSlEoU4fJNMWkg6WCD063_2q8,1092
+openai/types/responses/function_tool_param.py,sha256=DX4IeY_F9adyHF0LD-RvRgr6OAMj00_VmiaZOnO3xGQ,1140
+openai/types/responses/inline_skill.py,sha256=4LgbMvau2oySN6IX61oweApFwtgr8h4-6ZyvfslBpAw,521
+openai/types/responses/inline_skill_param.py,sha256=jK6lTQy3e0zRsGPwh6K__N2UBDl558UuGPxOcz_v-CI,624
+openai/types/responses/inline_skill_source.py,sha256=IY_jsg4K_jn5AAq8ak_kwN2NR02OmKOnaa8CYvEzybc,538
+openai/types/responses/inline_skill_source_param.py,sha256=0QPSdkUhMvlzP8jbKVdLrm0fjKxrlNs2UXY86yND9Qc,614
openai/types/responses/input_item_list_params.py,sha256=wazm2tELpbpBWdAQrXGBq88Bm5RsxWXmlVJAV3f_k-I,964
-openai/types/responses/input_token_count_params.py,sha256=s_KKPE-q6j4KU_0DoIi7hVHQNQoykxdP7MaJYW0EB-U,5668
+openai/types/responses/input_token_count_params.py,sha256=xyV3RTi5jJBEYh-dKVl0tZ1yvLgQo22p8HvJZ3qPDKU,5684
openai/types/responses/input_token_count_response.py,sha256=w8LogkiqiItGuty0-bXts8xy1Ug-bbSYKEm7DDKmoP0,310
-openai/types/responses/parsed_response.py,sha256=65aPRFaHpbX81f23JesWawUZcK3uGsY2N0DRbn3_rSk,3846
-openai/types/responses/response.py,sha256=yehCZVZjB_DKbaSdSpChPbNNfOVwYhb-HU4D4ZgCThA,12591
+openai/types/responses/local_environment.py,sha256=HZR6za_LqRHnYOxCD8kIYdzKTUlUrZ3M2bBLssLDg5g,454
+openai/types/responses/local_environment_param.py,sha256=LAiH3gO24X8JmKSXI70bxVX65CQRGIyA3dSidozZMeA,508
+openai/types/responses/local_skill.py,sha256=OElgxKtJ_APlKukl5JFsqsvC8iq9WFPYOYTcw0GYIJs,359
+openai/types/responses/local_skill_param.py,sha256=DLP8b5tptYNCXrIDu2CorO3X38eZ0gm1YX90mdKRPas,465
+openai/types/responses/namespace_tool.py,sha256=_HpZ0gs_09yniaqHozCUyVwgbbQtVZoy8MYzt8Lh10c,1206
+openai/types/responses/namespace_tool_param.py,sha256=M4-_H-hmQh1j_O1F-fgl5ZLVrUsyuuubBXKSxePq-e8,1219
+openai/types/responses/parsed_response.py,sha256=hU-MkhoRK2VKaYVKFt9w3Pvp0GL2IufXCxPuPHSsTU8,4553
+openai/types/responses/response.py,sha256=FPr95a7Nw7NKDxxzRfPMRyPIGIyGbUGbBx8gBKKFbf0,12635
openai/types/responses/response_apply_patch_tool_call.py,sha256=dmqjz9eTdk5WNkxxWhuJayZ-ALygsPXTosRW6B9r4oQ,2217
openai/types/responses/response_apply_patch_tool_call_output.py,sha256=Y3ZxBSHXFAv1yaUfpf_XOFZwvAgSKiBB6Bw8fOYEsvw,1024
openai/types/responses/response_audio_delta_event.py,sha256=JWICz6tC6Zs-8Ax3y3hkUahWE2vxwJLLVguhDQJWzhk,574
@@ -1714,27 +1823,31 @@ openai/types/responses/response_code_interpreter_call_in_progress_event.py,sha25
openai/types/responses/response_code_interpreter_call_interpreting_event.py,sha256=gOWRUvIgcMVrp-KcmtAttp8PbeVknhmCmfgYg04-2K4,862
openai/types/responses/response_code_interpreter_tool_call.py,sha256=r1NjHpBWawfNwy5-2gVoJjc1rjrHk2jJjlGsoAIU55I,1795
openai/types/responses/response_code_interpreter_tool_call_param.py,sha256=uaywHPXN7U5uh9b7bi5xFL6xvXRM7lTJihYdZbi84Zw,1868
-openai/types/responses/response_compact_params.py,sha256=vO8L88q5sVvGe9tfbVxholqvnfflsHV0Im-QUFTYyIY,4783
+openai/types/responses/response_compact_params.py,sha256=zFyH38imAUfJtFxHMgdZNTKiYvHgQZ7_lpIQsWYNf6E,5109
openai/types/responses/response_compaction_item.py,sha256=FevR74gnDtJkGYJKhwaPHUMqDXkHxpPojBBgip93NK4,758
openai/types/responses/response_compaction_item_param.py,sha256=cCRvAVU1LSLWhryYp54lU2j00wwHiaogpDcXBULI3PQ,673
openai/types/responses/response_compaction_item_param_param.py,sha256=UYpgNuwAxAd11LxXJJ-EPHGLWR3TDP00lylHxytE6K4,732
openai/types/responses/response_completed_event.py,sha256=fe5mGXdeFLkozUpasDitkCHe7Qv22xKUesRDNFcXazY,573
-openai/types/responses/response_computer_tool_call.py,sha256=PwgncJ5PbXv8PfNDYT5F1xP7sWDPaPQrEKKjDllE1Ew,5276
-openai/types/responses/response_computer_tool_call_output_item.py,sha256=4ezZAGHMVIe88MQRCVprLbhzOFvVv7mu7eQ2NLukEew,1584
+openai/types/responses/response_computer_tool_call.py,sha256=LzcCIS2apardV9yWVh7s4RWCf4p4IH2E8lbaDrcVm38,5981
+openai/types/responses/response_computer_tool_call_output_item.py,sha256=WdgOAAba3uuG6qnbRjSnhm-QO_B31jfvUvDrIzZTrj4,1676
openai/types/responses/response_computer_tool_call_output_screenshot.py,sha256=rhc-E2ECWZOqpvIFcVo7fi-j9rS94AwXU_1xuVyUYKc,734
openai/types/responses/response_computer_tool_call_output_screenshot_param.py,sha256=iRdRiNE-wpwaNhBfnilJ4GaouMGh-oWn121LkSzDBA4,728
-openai/types/responses/response_computer_tool_call_param.py,sha256=O2IdaoOcDAsETrNu5D6n-ue_WKVLBJnJoCr9i4u6sjs,5687
+openai/types/responses/response_computer_tool_call_param.py,sha256=kYKqyhHvQf_JrykguqPL26BCzLBE3P5RWo4pI9dQqSE,6389
+openai/types/responses/response_container_reference.py,sha256=QSKDvn-y5snt0t0wFD9tAU9hphgxlyrEGW9zQPE87ak,437
openai/types/responses/response_content_part_added_event.py,sha256=lpdjp8MXrniMjY5aEchgOXAYx9nCRzqsGcm10A7k9oA,1432
openai/types/responses/response_content_part_done_event.py,sha256=QHR1gB9E8AeAxL30xp2pTVr8YQxEojwozkZqJ-s_45E,1419
-openai/types/responses/response_conversation_param.py,sha256=diMeoJm5-D3AideV9XtPNfd1gdRTyAEnYz0HeRXvzPE,399
-openai/types/responses/response_create_params.py,sha256=yPNDnw9d4oDPKSMI7RQp6fVO1hCQPm7lQoTmkvzkOlM,14008
+openai/types/responses/response_conversation_param.py,sha256=NjSUgUHujBYPv_r1A3lYCWZrlZ8LOOGeRQX4TGWROss,323
+openai/types/responses/response_conversation_param_param.py,sha256=m-Uv1dNpMKXzHBrTF_lRTL5PzVYrEMykki-oAQnUvVo,409
+openai/types/responses/response_create_params.py,sha256=SXcB_XeFnlTHz-ppovxvPM5ZKvuLyd97G7TR3CTizLE,14499
openai/types/responses/response_created_event.py,sha256=3q0JhFTkp3vgkkJdjFzgLG690GfDFFfnpXrsjl81ES8,566
-openai/types/responses/response_custom_tool_call.py,sha256=DhCtiuQYuyNm1c2DxSxLnqrDqiexmL4hsjCf7zXhRQY,787
+openai/types/responses/response_custom_tool_call.py,sha256=bPBoDszKZZkIUGvN_5FMnrhzmDqpKg-opmFZA6kBWC8,881
openai/types/responses/response_custom_tool_call_input_delta_event.py,sha256=70JVIwG8KfTnW2zzovAdDv-qIyvJU8cY2aQcxHiDPKw,786
openai/types/responses/response_custom_tool_call_input_done_event.py,sha256=QQ8V5TNyMOdLRSQ1hxCCw2_LjxERccGqvXjjnUrvc_8,756
+openai/types/responses/response_custom_tool_call_item.py,sha256=68cRQA21U3HpFZqu5mCxHH8ReGAZCWEfdfIvD5qNhDY,762
openai/types/responses/response_custom_tool_call_output.py,sha256=w2yJcnkB48PfpBvLikpUx_Iz8ryKuS7WtqyqsECr0ps,1307
+openai/types/responses/response_custom_tool_call_output_item.py,sha256=-IgN0G70UnJZPSRf6-25pfiiaH1kyD5D1ekJJs7x6gQ,833
openai/types/responses/response_custom_tool_call_output_param.py,sha256=Nr6hV5PHkOeDRR3INuiyPtpk3ybd4zwLbMKYEfkv1Hw,1314
-openai/types/responses/response_custom_tool_call_param.py,sha256=8baPSm1t7KXm8ABHUgdjw68vDf6yoNLY7scZyR1BX_I,828
+openai/types/responses/response_custom_tool_call_param.py,sha256=BJk84KI0XoOeL_-wtbcqsa-EN6q9NADYJZ35XJiesCc,905
openai/types/responses/response_error.py,sha256=AszYOuu1LOWaglgKy9tX9zO9q7Yid9alJc2cKINk9ro,996
openai/types/responses/response_error_event.py,sha256=fjB964eSWEh0ciSk5s64TQGJyTsYjtNT8duSqztjhW0,617
openai/types/responses/response_failed_event.py,sha256=FG812T2EKLJwrSyO8KPbs-QSpsg1e4n8YHpXiANlb04,551
@@ -1755,14 +1868,14 @@ openai/types/responses/response_function_call_output_item_list_param.py,sha256=y
openai/types/responses/response_function_call_output_item_param.py,sha256=VEe_wQ8z9PN0qJbLuCwfg9n4Lwe5-WNljzmNJ-fqnwM,629
openai/types/responses/response_function_shell_call_output_content.py,sha256=QZlIb_3DHE9BX5W6HmdDEY14Vwy5c295707BzgH1Z_Q,1260
openai/types/responses/response_function_shell_call_output_content_param.py,sha256=e8nFAciWLudLRQxr-CNH8U_9Nge65cZ6M1d-ZqE_h8g,1299
-openai/types/responses/response_function_shell_tool_call.py,sha256=0kGBz7g4SZOnSAoU3-yjMeOXM681abrTQ4EFRs02quY,1395
+openai/types/responses/response_function_shell_tool_call.py,sha256=Zi7ccDiGXEMZGw3BKDDEhpK9m_wR3XWt0Q8cHDw-9nU,1879
openai/types/responses/response_function_shell_tool_call_output.py,sha256=mH94PBtKr9zXzb2RirDijvWXoNidElxPidqdYskMZ2c,2435
-openai/types/responses/response_function_tool_call.py,sha256=Rf1IGPksyVcGFSOMPJuj5h0h7oJs5UQEKiwRYZ-R0Sc,1086
-openai/types/responses/response_function_tool_call_item.py,sha256=aiw34m4uKx8J_Lk33SZ_oqENRKm3m72q-8qiVrgNykk,509
-openai/types/responses/response_function_tool_call_output_item.py,sha256=F8lyiugcU6q6tG9kJUpTnF-g_xGBWgIje6mTYMErXL4,1394
-openai/types/responses/response_function_tool_call_param.py,sha256=L6T3MtKCuaiUDzK8YaJZRysMEADbuWBrqJrUpSgoSAk,1110
-openai/types/responses/response_function_web_search.py,sha256=LtyQ1svmci8bRs36fexPL9aFq008zuH-LBwAap0s0Jc,2307
-openai/types/responses/response_function_web_search_param.py,sha256=QjDva_BhIuapFyCnuOYdefVAGvrGm0g0_78ByQ3NWEs,2481
+openai/types/responses/response_function_tool_call.py,sha256=V4Cf292PPbK_agJEA-IfcQ5xzVowLy3sRO7dNtCXpVM,1171
+openai/types/responses/response_function_tool_call_item.py,sha256=PHesdM51kgNuiScu2b-UoYewAA3Gf93fiwoEuDJwL00,897
+openai/types/responses/response_function_tool_call_output_item.py,sha256=dPLQ5D0dq2YM-gQcOm3Gf37fW1Sg1k0NSoa7rp-k5G4,1476
+openai/types/responses/response_function_tool_call_param.py,sha256=YLIx22-QGyB9NdwZQRWK6Hh2vrNPQTCN9DKa_vOcfYQ,1178
+openai/types/responses/response_function_web_search.py,sha256=-CoTjkdMSJHmmdpDWlj5kcJ9ov6wA66LLFfsAhC1_Js,2375
+openai/types/responses/response_function_web_search_param.py,sha256=HgcLvlhOrRGsV4ylv_OqC6hTiGggQ9Jrqm92dwspuwI,2515
openai/types/responses/response_image_gen_call_completed_event.py,sha256=4EVne_sRTbCxuPTTdt7YMRBblpH8nR3in4PkzzrHxBE,783
openai/types/responses/response_image_gen_call_generating_event.py,sha256=Xu-lJZAHRZTDbiOGRVOBLCmXnAbHFQBYNRmPab2pFug,824
openai/types/responses/response_image_gen_call_in_progress_event.py,sha256=LvCFXfC7VNqmZegn_b4xy021H-qO08SzGOspvK-6Wew,778
@@ -1770,6 +1883,7 @@ openai/types/responses/response_image_gen_call_partial_image_event.py,sha256=Iss
openai/types/responses/response_in_progress_event.py,sha256=oi8YtsItiH0TloP7rbkm0-XOY1-FKTV4o6Ia_y4pS6Q,571
openai/types/responses/response_includable.py,sha256=tkia-hgFh4ttgy53H5lJtoBWsSQh6G2DzCXj-14o8Ko,505
openai/types/responses/response_incomplete_event.py,sha256=WGBjWI-kwdaQpGBqzrIKfTehJ4Phzxsn2fvfDYoTV6w,592
+openai/types/responses/response_input.py,sha256=AMsHCZPd90LJYdaIbsDdKt_PtFHi9JbKUZIl8r07PUg,284
openai/types/responses/response_input_audio.py,sha256=CLnaiuQjjF11emjSxbVBLL0yF_kONznqXgIB6m8lric,614
openai/types/responses/response_input_audio_param.py,sha256=MXXWtLXdToypWHQYLSpfUXtC2U_PllaffzJLMy5LNYY,713
openai/types/responses/response_input_content.py,sha256=MaZ-MNnZvhM2stSUKdhofXrdM9BzFjSJQal7UDVAQaI,542
@@ -1778,22 +1892,23 @@ openai/types/responses/response_input_file.py,sha256=lgoRiDEn_dPiga4dgHZsHcjgbzE
openai/types/responses/response_input_file_content.py,sha256=SdUicUGXlg_OICLMdySuJLTmmU0WDTywjRIWFVEYOwg,781
openai/types/responses/response_input_file_content_param.py,sha256=F2cwpQbEZtgt4p4dxs7zl_2mM7rB9OsNjbbLMRq8WV4,809
openai/types/responses/response_input_file_param.py,sha256=TFYJQXKEOWB3cdiAIVkRrMgeO_-ZjqNUAidCl7KKwK0,753
-openai/types/responses/response_input_image.py,sha256=djDER2-m43vk1YaX3GXF5dNg9W-vYF26Itto5kQtNYs,904
-openai/types/responses/response_input_image_content.py,sha256=0fVlJtV-LnTYP51zwqcBkKyyDdCq7OZQZkFcbeQihjY,934
-openai/types/responses/response_input_image_content_param.py,sha256=Lf0hJTLtG0CRvtKt6Yb_ldtbcUge5IP-wK-_UjoCdn4,969
-openai/types/responses/response_input_image_param.py,sha256=lxWoE5udEUHohLRw9KtfNTcwMuabydqQy1gxZ7oAtqk,956
-openai/types/responses/response_input_item.py,sha256=LmxxV68vNIfspCf0l13pnRnZI8lXMtsfpjddxiT4t1c,15925
-openai/types/responses/response_input_item_param.py,sha256=BaA20QXJmRpcXSxVd5Z9bMVD_GI9R4KZ6pUoodzbPRI,16693
+openai/types/responses/response_input_image.py,sha256=bS-0UYC6BXCDdwQ-b_vNoLT__hbjvdYK36ofwD_BIJw,928
+openai/types/responses/response_input_image_content.py,sha256=zlPxUmcAH3odBUY1XeIgWWstjGgUAP765ahZE8miktY,958
+openai/types/responses/response_input_image_content_param.py,sha256=nsll-Xnjkzv-EdZdLGrtXv7qrqK5ZM5DuYgnuRPs0K0,993
+openai/types/responses/response_input_image_param.py,sha256=jNhUX0syVx65g56bsOMNW4dNASfvrq7WHCw8kg56Qtg,980
+openai/types/responses/response_input_item.py,sha256=8kBwcRi5WLFXn08z390ZJhFLoWic68EKR74r1wUtluE,17265
+openai/types/responses/response_input_item_param.py,sha256=9yQKi1UaGcd0gRiFZjyReSuhDOg-hb8dnWdk6z-a914,17995
openai/types/responses/response_input_message_content_list.py,sha256=LEaQ_x6dRt3w5Sl7R-Ewu89KlLyGFhMf31OHAHPD3U8,329
openai/types/responses/response_input_message_content_list_param.py,sha256=cbbqvs4PcK8CRsNCQqoA4w6stJCRNOQSiJozwC18urs,666
-openai/types/responses/response_input_message_item.py,sha256=_zXthGtO0zstLvIHg9XesNAme6yNa8JOejkBYLwXm70,1029
-openai/types/responses/response_input_param.py,sha256=EgiARo-Co4lsATiwFkntXs1OrGV8IQ_RNhYRbCr2Ndg,16787
+openai/types/responses/response_input_message_item.py,sha256=4nxX-UlZ7CFeso4-NiHaKc1mIGD-akFBGCMjy-TETD0,1012
+openai/types/responses/response_input_param.py,sha256=BKgnfnLJhcMBPF0rClznh2RSuPnqkv4UOr5kZeEKfl8,18089
openai/types/responses/response_input_text.py,sha256=uCT-nKv5EEjOHmTyBlKJ01gLSCGue8lcjrBwzwWzPrE,413
openai/types/responses/response_input_text_content.py,sha256=UPb4d4KHkbgN0rS6wkvoaTPZVGN_2aYo-VbL-zwMkpU,427
openai/types/responses/response_input_text_content_param.py,sha256=nAOZRT6FsZqPr0va99wAFVB1lz5W8cinZ-9iEuAHgN0,493
openai/types/responses/response_input_text_param.py,sha256=9DbrdxWlak_wHmcPhw9BIVlyWkBnuAfmYC4TDtM_Lqo,479
-openai/types/responses/response_item.py,sha256=hGxb4N8Ue0bXbnDQ9Oqf2TStEhboo8_Cbiga2Ve93QE,7357
+openai/types/responses/response_item.py,sha256=RceWRQm4--slsdQRaMjXLtSeXG_u9gpuey7toydYTr8,7980
openai/types/responses/response_item_list.py,sha256=JclJxBBJda7fjXVbeLfYeVUH8A_swQN24LWpUjIsuus,702
+openai/types/responses/response_local_environment.py,sha256=n5-0mDusTkCjp9CFxFHW3lUxM3_RApWz-Qav4dnsNrU,398
openai/types/responses/response_mcp_call_arguments_delta_event.py,sha256=EUCaFYd-EgZS9zikqlXi8xqUzeC94MzVx2qrhUgm4a0,884
openai/types/responses/response_mcp_call_arguments_done_event.py,sha256=LWMaw5HKV8AgiYNJxTB-14kmCcFc_T22tSCdqJjlp3Y,826
openai/types/responses/response_mcp_call_completed_event.py,sha256=MFjkI21vxzlTzvEAvmRjntJ3vGCeDTViMFek6AIyQXM,670
@@ -1802,11 +1917,11 @@ openai/types/responses/response_mcp_call_in_progress_event.py,sha256=oTLlZyhdLsn
openai/types/responses/response_mcp_list_tools_completed_event.py,sha256=3OdrqmMVVUgVTwl2nOf5vQ-4VVcbfS6VoF3G7z5DOjU,726
openai/types/responses/response_mcp_list_tools_failed_event.py,sha256=9nvFQ-02kRXV6nOf_4o5aQhYw7-tSzppLbN-C7a2zHE,680
openai/types/responses/response_mcp_list_tools_in_progress_event.py,sha256=V9IqBnh-oLKqtthuGA3LDNveWzx10kd2N6x3MpzATzM,756
-openai/types/responses/response_output_item.py,sha256=mYMzKnpJLEQqIQCEZp2FU29QhdWecQHVGsOM3h1Gvig,6101
+openai/types/responses/response_output_item.py,sha256=FAp4k-SgthM72nKlxQKvZ3GvzQcEk7yiX9388A86OHc,7866
openai/types/responses/response_output_item_added_event.py,sha256=6rIwK1Dtgu2K0t1R2ForK1C70CUpcnihqMpWqlM0jtg,696
openai/types/responses/response_output_item_done_event.py,sha256=n-T9NinnYm0xhOmPfDhqKso0JQYVhfs0IXa1MhkA2cc,707
-openai/types/responses/response_output_message.py,sha256=OkOnbst0qUMmnFDOwKgPbK4Ii7JLxoKYV-9aLOrYNHI,1149
-openai/types/responses/response_output_message_param.py,sha256=YwLkQ0QRwWyon_RMzA09uQbqLO949cIpQZwlUQUIWpY,1193
+openai/types/responses/response_output_message.py,sha256=AH_2iyIM4mueix_xUv8b6PsEPTNUTX0aLHjYTwH7m3A,1566
+openai/types/responses/response_output_message_param.py,sha256=POWvupoVO0TWALFzZgZ-M5IhP89IrPraVFdokpCsaTs,1603
openai/types/responses/response_output_refusal.py,sha256=VkWPvAhqWz3mZ_7X8CZA4CPjgCfkWpAsrUL_EEAp3mY,425
openai/types/responses/response_output_refusal_param.py,sha256=RO2CI71fbzQPwsr3URyR-EP20oRNMaWAmLj9dMDeu1o,491
openai/types/responses/response_output_text.py,sha256=LPESiPQE6UcQFa1bHRFr-Dxsvbyqc6UoCDsauGDAev0,3157
@@ -1832,11 +1947,20 @@ openai/types/responses/response_text_config.py,sha256=8mk0TYDQB9VNJxKAazvCHy3WIz
openai/types/responses/response_text_config_param.py,sha256=Wq9RvKd2ZlOBRsTaUxWRmSekcQxH8mMoYJZGp2QHY30,1684
openai/types/responses/response_text_delta_event.py,sha256=fdEVcZTuoTzxIhFwcp48YPUTdEHXwI4ocSwDM2kfcEM,1689
openai/types/responses/response_text_done_event.py,sha256=KbaXsGeQvNJTGN8vOpEaA-_mGBSr6Viv-9rEAFrY6zw,1687
+openai/types/responses/response_tool_search_call.py,sha256=eLW0R-W4o2G3uYG3w9aqQSHDutOpjtFbmz69VZj7kew,963
+openai/types/responses/response_tool_search_output_item.py,sha256=92tXUFGFGZv_LDxWjJ7kaPjVR2w0j2HvhS4yiA-sg7k,1024
+openai/types/responses/response_tool_search_output_item_param.py,sha256=Pb4EshS1m1b_cypFI95R4beSiSztaJcfm8xDefs3ZVM,963
+openai/types/responses/response_tool_search_output_item_param_param.py,sha256=c46h1RVfeV_76h6TgadTBMie6WDyaOOid6NVkQHg8ig,1016
openai/types/responses/response_usage.py,sha256=g1YqV35FbWEsFkDyXyQ95gK1CPT2vXaEj7dx35Nyllo,1204
openai/types/responses/response_web_search_call_completed_event.py,sha256=huESBkfYVrHZUHUHeRknBgw7bMrIYEsVryEZrXHMiGc,754
openai/types/responses/response_web_search_call_in_progress_event.py,sha256=MQIZTtd3LqdX4C46cG-vOSeNaLxQFvM2FaCfiDwUits,760
openai/types/responses/response_web_search_call_searching_event.py,sha256=UucDvOvM6VjzBgCkwVO-UvRbttfidcBVCQdvgw-NUJk,754
-openai/types/responses/tool.py,sha256=UtSKlLaCHSaHCEj8KIgqzgnxZZSgaHG55zRmF7hkJqk,9895
+openai/types/responses/responses_client_event.py,sha256=DfV_zohVrzQXZi5F6H23SMBBY2UkELwGuIguyj0LR_s,13817
+openai/types/responses/responses_client_event_param.py,sha256=jDryEfp43LXxLIaCfEzAiGNH0TxaC2G-mFXb0ClLfgs,13832
+openai/types/responses/responses_server_event.py,sha256=LIMNujT4lLQDQ61xPmP62fE_nqhKExpNtYC54xkUXno,6918
+openai/types/responses/skill_reference.py,sha256=Ar8pzoXWFt5fDiHd9qtsrCsWalGHAoj1VjFCdacq1gw,540
+openai/types/responses/skill_reference_param.py,sha256=A8PjP6gdQ9q7V_Qmrcj8qi0lgCP-yNTiB1FlgVTkMIc,561
+openai/types/responses/tool.py,sha256=XJWfIOHTYTVetFnjXAZK6K5k_Cm5kI_HKO7ciYXHOu4,11023
openai/types/responses/tool_choice_allowed.py,sha256=Mq18Us-Yd-59WZppbXaWYjBPtUqSbHHE48IWCHdLFCI,1100
openai/types/responses/tool_choice_allowed_param.py,sha256=6Yk0zOe8QGzTyrReDIEy56o3opFvPbS0FeuBN6DW8CM,1184
openai/types/responses/tool_choice_apply_patch.py,sha256=zUFufN0Okg40AkOM3QmzUxtRX4DQKwR52fYQYdQDECk,404
@@ -1850,35 +1974,37 @@ openai/types/responses/tool_choice_mcp_param.py,sha256=-cnA3zk3wMmx93CkCwwOBiw3C
openai/types/responses/tool_choice_options.py,sha256=gJHrNT72mRECrN7hQKRHAOA-OS0JJo51YnXvUcMfqMQ,237
openai/types/responses/tool_choice_shell.py,sha256=5k4381juLRA_qZc_h4x0DGtnW6wkSKAogWfnYLMEPxk,378
openai/types/responses/tool_choice_shell_param.py,sha256=Qy0ySbnh_0eDYPJwzn5iHoHHuo79HAHwkk8zRhNYtdY,434
-openai/types/responses/tool_choice_types.py,sha256=djkzycVSxYunU9WIGeRbS6nZa-tjEsSEtI2r1MwzMG0,923
-openai/types/responses/tool_choice_types_param.py,sha256=tuaS5Azelo0hDgpjBjZprfwjZKfmAcFT9zLq2QIIBik,1021
-openai/types/responses/tool_param.py,sha256=U6cO_JJY9YxrHdMou1yxDqOWX16zMif8rFUuf_vyW7w,9870
-openai/types/responses/web_search_preview_tool.py,sha256=jJkDvIC9p9aS5TAoLFhP5oW0sVVZ0m2FOqs-Bv_r_zA,1690
-openai/types/responses/web_search_preview_tool_param.py,sha256=Nmk4AtPCKWMXFNwQCfSh6yfT1JT46xTWAsCodri2BVE,1717
+openai/types/responses/tool_choice_types.py,sha256=a4hzq9AUvvXSPrjENn2osPPv984GYPbxn4UtTC9ivCc,1005
+openai/types/responses/tool_choice_types_param.py,sha256=HlrHFQ_RQw8hU2mqJjGKTmCA0wxK3KybQOfJQ-YhqJo,1111
+openai/types/responses/tool_param.py,sha256=wC5JXXf2D3P9-01cVV3grg4IatB9z7zOJI5PspOp-Kw,10968
+openai/types/responses/tool_search_tool.py,sha256=2PQnoOHaX4l2SFbaWmyYkqE6cCJm8gAYZqB-maAwtL4,774
+openai/types/responses/tool_search_tool_param.py,sha256=-GgHVU7HJE1xla2Vq8JqJ2sUgu2PJiXiHKFaSScQgj4,799
+openai/types/responses/web_search_preview_tool.py,sha256=ju0wvshkP_qFTGdXvT04bz06F0vSIsv_pCSNBhRfCn8,1771
+openai/types/responses/web_search_preview_tool_param.py,sha256=et38nUI-q2mElq8-8Xe0vVeUMbzZkOaA2D5rBr6AWvg,1781
openai/types/responses/web_search_tool.py,sha256=rsN2LrO25vAvIVEl3sKCiLS5pAqvwxJyam0Cgys419g,2083
openai/types/responses/web_search_tool_param.py,sha256=pXX6qVRcsVNBDhy91nHM0svtpzvKVoAPIg7ciOXmctg,2124
openai/types/shared/__init__.py,sha256=EVk-X1P3R7YWmlYmrbpMrjAeZEfVfudF-Tw7fbOC90o,1267
-openai/types/shared/__pycache__/__init__.cpython-310.pyc,,
-openai/types/shared/__pycache__/all_models.cpython-310.pyc,,
-openai/types/shared/__pycache__/chat_model.cpython-310.pyc,,
-openai/types/shared/__pycache__/comparison_filter.cpython-310.pyc,,
-openai/types/shared/__pycache__/compound_filter.cpython-310.pyc,,
-openai/types/shared/__pycache__/custom_tool_input_format.cpython-310.pyc,,
-openai/types/shared/__pycache__/error_object.cpython-310.pyc,,
-openai/types/shared/__pycache__/function_definition.cpython-310.pyc,,
-openai/types/shared/__pycache__/function_parameters.cpython-310.pyc,,
-openai/types/shared/__pycache__/metadata.cpython-310.pyc,,
-openai/types/shared/__pycache__/reasoning.cpython-310.pyc,,
-openai/types/shared/__pycache__/reasoning_effort.cpython-310.pyc,,
-openai/types/shared/__pycache__/response_format_json_object.cpython-310.pyc,,
-openai/types/shared/__pycache__/response_format_json_schema.cpython-310.pyc,,
-openai/types/shared/__pycache__/response_format_text.cpython-310.pyc,,
-openai/types/shared/__pycache__/response_format_text_grammar.cpython-310.pyc,,
-openai/types/shared/__pycache__/response_format_text_python.cpython-310.pyc,,
-openai/types/shared/__pycache__/responses_model.cpython-310.pyc,,
+openai/types/shared/__pycache__/__init__.cpython-314.pyc,,
+openai/types/shared/__pycache__/all_models.cpython-314.pyc,,
+openai/types/shared/__pycache__/chat_model.cpython-314.pyc,,
+openai/types/shared/__pycache__/comparison_filter.cpython-314.pyc,,
+openai/types/shared/__pycache__/compound_filter.cpython-314.pyc,,
+openai/types/shared/__pycache__/custom_tool_input_format.cpython-314.pyc,,
+openai/types/shared/__pycache__/error_object.cpython-314.pyc,,
+openai/types/shared/__pycache__/function_definition.cpython-314.pyc,,
+openai/types/shared/__pycache__/function_parameters.cpython-314.pyc,,
+openai/types/shared/__pycache__/metadata.cpython-314.pyc,,
+openai/types/shared/__pycache__/reasoning.cpython-314.pyc,,
+openai/types/shared/__pycache__/reasoning_effort.cpython-314.pyc,,
+openai/types/shared/__pycache__/response_format_json_object.cpython-314.pyc,,
+openai/types/shared/__pycache__/response_format_json_schema.cpython-314.pyc,,
+openai/types/shared/__pycache__/response_format_text.cpython-314.pyc,,
+openai/types/shared/__pycache__/response_format_text_grammar.cpython-314.pyc,,
+openai/types/shared/__pycache__/response_format_text_python.cpython-314.pyc,,
+openai/types/shared/__pycache__/responses_model.cpython-314.pyc,,
openai/types/shared/all_models.py,sha256=OggdrF27d8_oCWAsE-LyQQmtjflAesmOyogIvi-atAs,716
-openai/types/shared/chat_model.py,sha256=sMmuHjOzYGegtDSqONSt2HXKYLuG66ncZRMFmeXuCTI,1957
-openai/types/shared/comparison_filter.py,sha256=9mpikD4dkjYTWni13i7VBKnfGUWJMIcug4PFWGaMGfc,981
+openai/types/shared/chat_model.py,sha256=eanzUPrDvIwQq6oXG7sYa701rHy4uFgwosQltBuXeyw,2101
+openai/types/shared/comparison_filter.py,sha256=bpckFBnrbznCZ1lf7l9Xha7AZ_uama7_qSbAsuJPRr0,994
openai/types/shared/compound_filter.py,sha256=Dk2EVAI9kgojEKyeaXnIsu93rz8kKPERW0y5Y9LpdzY,638
openai/types/shared/custom_tool_input_format.py,sha256=qgYtTA-5KQssG4TCdI2V2s83GcNQHhHczjSQVj4oIhg,856
openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305
@@ -1894,22 +2020,22 @@ openai/types/shared/response_format_text_grammar.py,sha256=zjIlUfFTCfAyLCc4fmiV3
openai/types/shared/response_format_text_python.py,sha256=hWrzEgdP1hUAX80Slc-UddodqFE3HDEWM8w0T0tADr0,525
openai/types/shared/responses_model.py,sha256=Ot5_u8itwSMhVUhZ8rHnt5Bdj2eI_Ux76WHQ8xKHt7E,726
openai/types/shared_params/__init__.py,sha256=Jtx94DUXqIaXTb7Sgsx3MPoB9nViBlYEy0DlQ3VcOJU,976
-openai/types/shared_params/__pycache__/__init__.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/chat_model.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/comparison_filter.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/compound_filter.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/custom_tool_input_format.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/function_definition.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/function_parameters.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/metadata.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/reasoning.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/reasoning_effort.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/response_format_json_object.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/response_format_json_schema.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/response_format_text.cpython-310.pyc,,
-openai/types/shared_params/__pycache__/responses_model.cpython-310.pyc,,
-openai/types/shared_params/chat_model.py,sha256=EcH6f6e8-ylkT206MlIK-nrk1NbfbIkirDYuMKR6gWE,1993
-openai/types/shared_params/comparison_filter.py,sha256=xtHLwK5uBnkRyecsqrbjYXnlHPoB66uf3wJyGUjR3DY,1089
+openai/types/shared_params/__pycache__/__init__.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/chat_model.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/comparison_filter.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/compound_filter.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/custom_tool_input_format.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/function_definition.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/function_parameters.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/metadata.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/reasoning.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/reasoning_effort.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/response_format_json_object.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/response_format_json_schema.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/response_format_text.cpython-314.pyc,,
+openai/types/shared_params/__pycache__/responses_model.cpython-314.pyc,,
+openai/types/shared_params/chat_model.py,sha256=wAR-4oRZzueww1WHi5KvccbeVP5HPxe7aNfDlVl-QXU,2137
+openai/types/shared_params/comparison_filter.py,sha256=OTaWOJD15rkcK2aB1f29lIyd3UHCU3C1fWYhAA8Fx6A,1102
openai/types/shared_params/compound_filter.py,sha256=kpjER_a7NZT4rvAHxEj3hd6CgF_JHgBFl5WI9-HBzkY,703
openai/types/shared_params/custom_tool_input_format.py,sha256=uv6tIPrdbzJ3_erNTbz7bDjVbMzAi8o22QG2wOhpRGQ,852
openai/types/shared_params/function_definition.py,sha256=6JjuRmXIofTv76GCC4XFssqgZw-iKbBazjWqKerfq6Q,1510
@@ -1921,6 +2047,25 @@ openai/types/shared_params/response_format_json_object.py,sha256=eEG54vILrwf5es7
openai/types/shared_params/response_format_json_schema.py,sha256=Rx2m7tbaMVWO0FQABF0B7jc8Cxo8_EmTq_tQwCX9XqU,1804
openai/types/shared_params/response_format_text.py,sha256=zCKpz3Fl_w-EICrTTrarsBhxea_LvzaKG6J864zjF1c,441
openai/types/shared_params/responses_model.py,sha256=r1tGQ9j25cW84o01POd2p74wb18DdSBe2OeBTJhVOc8,770
+openai/types/skill.py,sha256=xCnmIVIrzbmkdQZHuIxmIAS5wa2rzjmYuOQDAah0KHI,657
+openai/types/skill_create_params.py,sha256=tskcK2JrqgOHsOZ0uuNYn8KITbQoBzbq0UZGvgCbYTk,448
+openai/types/skill_list.py,sha256=10R_5DUqfjvhr3Ve_hDWUFyJbx7No4LZ_GR7JFHhur4,640
+openai/types/skill_list_params.py,sha256=JBkg2WnMVoXd86aEwF9YUwAWRGibg0Z1-YiQvd8N_pE,550
+openai/types/skill_update_params.py,sha256=cC1iOF7tBCM9dBm7rXlohfSfXO_8evxma1BY3vC_bsk,346
+openai/types/skills/__init__.py,sha256=9n6vxblrac4DrM-8_fK6R5BkyepfCtZJu1i3orvSWAw,476
+openai/types/skills/__pycache__/__init__.cpython-314.pyc,,
+openai/types/skills/__pycache__/deleted_skill_version.cpython-314.pyc,,
+openai/types/skills/__pycache__/skill_version.cpython-314.pyc,,
+openai/types/skills/__pycache__/skill_version_list.cpython-314.pyc,,
+openai/types/skills/__pycache__/version_create_params.cpython-314.pyc,,
+openai/types/skills/__pycache__/version_list_params.cpython-314.pyc,,
+openai/types/skills/deleted_skill_version.py,sha256=0BYiz1FTElv8DkpSD7H49K0ZWzASOJC4yNTg0P9sDys,366
+openai/types/skills/skill_version.py,sha256=aF549GDKzxlw7TnCUC1z8WbYXdl7Rv0vogAcHNDrCQU,712
+openai/types/skills/skill_version_list.py,sha256=_jevI1qhI6-ZEevLFkKsflI-02kTXJBq9WpeBfB_HeQ,677
+openai/types/skills/version_create_params.py,sha256=dqfTwBQn5ode8JnuwQ3lr7Uv5Ja26aCnoJsFu5-x0F4,526
+openai/types/skills/version_list_params.py,sha256=C_B2XBJuiXiZWCWNcykuTZTlkO4b1ghjjC5664g3qLY,462
+openai/types/skills/versions/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
+openai/types/skills/versions/__pycache__/__init__.cpython-314.pyc,,
openai/types/static_file_chunking_strategy.py,sha256=JmAzT2-9eaG9ZTH8X0jS1IVCOE3Jgi1PzE11oMST3Fc,595
openai/types/static_file_chunking_strategy_object.py,sha256=MTwQ1olGZHoC26xxCKw0U0RvWORIJLgWzNWRQ1V0KmA,424
openai/types/static_file_chunking_strategy_object_param.py,sha256=tUsAYwR07qefkjFgt_qNwdUDbo2Rd-k9Xgu9OvtK9EE,597
@@ -1929,9 +2074,9 @@ openai/types/upload.py,sha256=_ePK_A-Hxr0bctSI3PfiAiJh22YRZwWXsBt0xdEQIk4,1281
openai/types/upload_complete_params.py,sha256=PW5mCxJt7eg7F5sttX5LCE43m9FX8oZs3P5i9HvjRoU,527
openai/types/upload_create_params.py,sha256=uOXPb_sdZhCqoR3gSSvpb4RpZ5K_Ppl1oAmJGbIAT3Y,1689
openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242
-openai/types/uploads/__pycache__/__init__.cpython-310.pyc,,
-openai/types/uploads/__pycache__/part_create_params.cpython-310.pyc,,
-openai/types/uploads/__pycache__/upload_part.cpython-310.pyc,,
+openai/types/uploads/__pycache__/__init__.cpython-314.pyc,,
+openai/types/uploads/__pycache__/part_create_params.cpython-314.pyc,,
+openai/types/uploads/__pycache__/upload_part.cpython-314.pyc,,
openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362
openai/types/uploads/upload_part.py,sha256=A_6PT8ptLJtR-jbU1b11jlpnVNLE10Kwoh1U985j9Y4,677
openai/types/vector_store.py,sha256=rbjldzgYE-1TsFvAeBQYSOJqSiSWISrhYpmnkJTVsL0,2633
@@ -1942,17 +2087,17 @@ openai/types/vector_store_search_params.py,sha256=Uglni3jSE8d8-4c7eKRlxxEsxFNnhz
openai/types/vector_store_search_response.py,sha256=qlhdAjqLPZg_JQmsqQCzAgT2Pxc2C-vGZmh64kR8y-M,1156
openai/types/vector_store_update_params.py,sha256=-RQr2LnJzmpI8iFx-cGSZK8hg-24mYx9c497xtN_36k,1293
openai/types/vector_stores/__init__.py,sha256=F_DyW6EqxOJTBPKE5LUSzgTibcZM6axMo-irysr52ro,818
-openai/types/vector_stores/__pycache__/__init__.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_batch_create_params.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_batch_list_files_params.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_content_response.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_create_params.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_list_params.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/file_update_params.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/vector_store_file.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/vector_store_file_batch.cpython-310.pyc,,
-openai/types/vector_stores/__pycache__/vector_store_file_deleted.cpython-310.pyc,,
-openai/types/vector_stores/file_batch_create_params.py,sha256=rHysxuqX1vfxUqsIfaLYJMi4CkmMSJEmDWBjTb_ntdg,2707
+openai/types/vector_stores/__pycache__/__init__.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_batch_create_params.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_batch_list_files_params.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_content_response.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_create_params.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_list_params.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/file_update_params.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/vector_store_file.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/vector_store_file_batch.cpython-314.pyc,,
+openai/types/vector_stores/__pycache__/vector_store_file_deleted.cpython-314.pyc,,
+openai/types/vector_stores/file_batch_create_params.py,sha256=_ezNbyz_tGXnRFPOV6O0sPcmAWJruFqyza2PUVN1d2Y,2791
openai/types/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451
openai/types/vector_stores/file_content_response.py,sha256=uAFvFDE_NVRzg0xm1fLJ2zEd62qzq8rPYko7xpDjbaU,367
openai/types/vector_stores/file_create_params.py,sha256=nTHWG0OMqqLRjWFH2qbif89fpCJQCzGGdXDjCqPbq1Y,1229
@@ -1961,11 +2106,16 @@ openai/types/vector_stores/file_update_params.py,sha256=NGah01luDW_W3psfsYa3Shls
openai/types/vector_stores/vector_store_file.py,sha256=uHAXG0fdkbeJHS21gWmXourPYlc4GyyXkdam-EENwtU,2431
openai/types/vector_stores/vector_store_file_batch.py,sha256=W1VoZE_PaiiOxRKG3empVJfr22oc7bE14dL9jheMG14,1512
openai/types/vector_stores/vector_store_file_deleted.py,sha256=sOds3FSmDBFhe25zoSAz2vHsmG2bo4s2PASgB_M6UU0,321
-openai/types/video.py,sha256=3jT9tcZYJDAA93XgdKxpMT2WgpzwYcxKmVj9pJweWec,1698
+openai/types/video.py,sha256=Ggn_umAkyqVuwZdN0bWNuj5CfRRUOkt9fpUx_iSzqgI,1780
+openai/types/video_create_character_params.py,sha256=qGoGgMTLc3_PtUrnBz5Ze2VaG-6Gf5JwEowa8-WcGGI,459
+openai/types/video_create_character_response.py,sha256=qbqptjZ3jYkhZhJ4FZUg58P2ZEGenz9ZmE9JxYECAC8,490
openai/types/video_create_error.py,sha256=DZpLbIAIOXOaZDNZk1dyVYMZHuxu81xB34krRLF6ddU,415
-openai/types/video_create_params.py,sha256=xe09Ac0l_M_PsKFIAdw90jJfZIs2QePxu_x5Qw1oUvU,1015
+openai/types/video_create_params.py,sha256=GxMfiZsty_DdNNI8jtO007Ei6rWQWQ9v-paC1dlKjZA,1240
openai/types/video_delete_response.py,sha256=eiD7lHgtxXIl0sY-JzhrKzWfRFdiGne24LfPZ9tQIho,529
openai/types/video_download_content_params.py,sha256=MXcSQOL67hzODH__CRf7g6i74hjXJG9I0zPIqqBjnlU,405
+openai/types/video_edit_params.py,sha256=vdYgJuW1vaVRDeUCzlYf_-1nzRF40R5mME8tiTziAas,784
+openai/types/video_extend_params.py,sha256=7FleFZMG05Yyzh5dWyGbi8ViFfESbfIrIejWGHVFfDs,988
+openai/types/video_get_character_response.py,sha256=SM_DKGQFfAtFm7GhvFZ5O8ymL1sxIrTlV3Zow6olvfU,484
openai/types/video_list_params.py,sha256=pa8Nd6-hrc2fF8ZQRf4udebbMXpMDEKDrAAH9niSlgk,550
openai/types/video_model.py,sha256=fkUBLAJ37g6TOBcXZGUgAMUhNNCDtSYCoEeMWiw0iJc,329
openai/types/video_model_param.py,sha256=TeQQgBVyDxOuVo5qibiJIS9qJObnCtjbHGZEuam-SZc,375
@@ -1973,23 +2123,23 @@ openai/types/video_remix_params.py,sha256=cFh9Tuaa1HH-cWyScfHPlw7N8nU-fg_AW0BL7S
openai/types/video_seconds.py,sha256=HyRb-NR4sVEGe2DoYZIQGig4kOrbbFfRYiqVejAgFbg,215
openai/types/video_size.py,sha256=H1o0EhMbmicXdvaTC3wL-DnghhXzB7EkBChHL-gqdbI,243
openai/types/webhooks/__init__.py,sha256=T8XC8KrJNXiNUPevxpO4PJi__C-HZgd0TMg7D2bRPh4,1828
-openai/types/webhooks/__pycache__/__init__.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/batch_cancelled_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/batch_completed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/batch_expired_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/batch_failed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/eval_run_canceled_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/eval_run_failed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/eval_run_succeeded_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/fine_tuning_job_cancelled_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/fine_tuning_job_failed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/fine_tuning_job_succeeded_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/realtime_call_incoming_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/response_cancelled_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/response_completed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/response_failed_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/response_incomplete_webhook_event.cpython-310.pyc,,
-openai/types/webhooks/__pycache__/unwrap_webhook_event.cpython-310.pyc,,
+openai/types/webhooks/__pycache__/__init__.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/batch_cancelled_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/batch_completed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/batch_expired_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/batch_failed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/eval_run_canceled_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/eval_run_failed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/eval_run_succeeded_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/fine_tuning_job_cancelled_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/fine_tuning_job_failed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/fine_tuning_job_succeeded_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/realtime_call_incoming_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/response_cancelled_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/response_completed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/response_failed_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/response_incomplete_webhook_event.cpython-314.pyc,,
+openai/types/webhooks/__pycache__/unwrap_webhook_event.cpython-314.pyc,,
openai/types/webhooks/batch_cancelled_webhook_event.py,sha256=1iE0xOSTWzU8FJD5ruqgZazkOdCjmBZ_PB9_4Gmif7Y,862
openai/types/webhooks/batch_completed_webhook_event.py,sha256=XCFcMnvn5xosPWdUwp3sO8wi4zYNefHWc_z6btzdGAE,862
openai/types/webhooks/batch_expired_webhook_event.py,sha256=wNL76DW5xg9Jm8hqpPP-X5GHz3_wajmoNwc0jufgXtI,841
@@ -2006,5 +2156,5 @@ openai/types/webhooks/response_completed_webhook_event.py,sha256=mwNAM5x1-uqL39j
openai/types/webhooks/response_failed_webhook_event.py,sha256=ka3VuDmBV7d9hxoFWktSZFXmtQrd4vrL4OinnMriyvQ,843
openai/types/webhooks/response_incomplete_webhook_event.py,sha256=egeszUAnZW5j7-JBYXqNNUPNcDJc_Rl_shR2Lyz3iLM,878
openai/types/webhooks/unwrap_webhook_event.py,sha256=KrfVL0-NsOuWHtRGiJfGMYwI8blUr09vUqUVJdZNpDQ,2039
-openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840
+openai/types/websocket_connection_options.py,sha256=EA3rjUkBMdvNtsF31iW0gki0WDkEIWntW_cSqdHQTfQ,2041
openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/REQUESTED b/portkey_ai/_vendor/openai-2.30.0.dist-info/REQUESTED
similarity index 100%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/REQUESTED
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/REQUESTED
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/WHEEL b/portkey_ai/_vendor/openai-2.30.0.dist-info/WHEEL
similarity index 100%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/WHEEL
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/WHEEL
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/entry_points.txt b/portkey_ai/_vendor/openai-2.30.0.dist-info/entry_points.txt
similarity index 100%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/entry_points.txt
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/entry_points.txt
diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/licenses/LICENSE b/portkey_ai/_vendor/openai-2.30.0.dist-info/licenses/LICENSE
similarity index 100%
rename from portkey_ai/_vendor/openai-2.16.0.dist-info/licenses/LICENSE
rename to portkey_ai/_vendor/openai-2.30.0.dist-info/licenses/LICENSE
diff --git a/portkey_ai/_vendor/openai/__init__.py b/portkey_ai/_vendor/openai/__init__.py
index e7411b38..b2093ada 100644
--- a/portkey_ai/_vendor/openai/__init__.py
+++ b/portkey_ai/_vendor/openai/__init__.py
@@ -379,6 +379,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
files as files,
images as images,
models as models,
+ skills as skills,
videos as videos,
batches as batches,
uploads as uploads,
diff --git a/portkey_ai/_vendor/openai/_base_client.py b/portkey_ai/_vendor/openai/_base_client.py
index 94c2d1b5..e6a57693 100644
--- a/portkey_ai/_vendor/openai/_base_client.py
+++ b/portkey_ai/_vendor/openai/_base_client.py
@@ -86,6 +86,7 @@
APIConnectionError,
APIResponseValidationError,
)
+from ._utils._json import openapi_dumps
from ._legacy_response import LegacyAPIResponse
log: logging.Logger = logging.getLogger(__name__)
@@ -556,8 +557,10 @@ def _build_request(
kwargs["content"] = options.content
elif isinstance(json_data, bytes):
kwargs["content"] = json_data
- else:
- kwargs["json"] = json_data if is_given(json_data) else None
+ elif not files:
+ # Don't set content when JSON is sent as multipart/form-data,
+ # since httpx's content param overrides other body arguments
+ kwargs["content"] = openapi_dumps(json_data) if is_given(json_data) and json_data is not None else None
kwargs["files"] = files
else:
headers.pop("Content-Type", None)
@@ -784,6 +787,9 @@ def _should_retry(self, response: httpx.Response) -> bool:
return True
+ log.debug("Not retrying")
+ return False
+
def _idempotency_key(self) -> str:
return f"stainless-python-retry-{uuid.uuid4()}"
@@ -1963,6 +1969,7 @@ def make_request_options(
idempotency_key: str | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
post_parser: PostParser | NotGiven = not_given,
+ synthesize_event_and_data: bool | None = None,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
options: RequestOptions = {}
@@ -1988,6 +1995,9 @@ def make_request_options(
# internal
options["post_parser"] = post_parser # type: ignore
+ if synthesize_event_and_data is not None:
+ options["synthesize_event_and_data"] = synthesize_event_and_data
+
return options
diff --git a/portkey_ai/_vendor/openai/_client.py b/portkey_ai/_vendor/openai/_client.py
index a3b01b2c..aadf3601 100644
--- a/portkey_ai/_vendor/openai/_client.py
+++ b/portkey_ai/_vendor/openai/_client.py
@@ -44,6 +44,7 @@
files,
images,
models,
+ skills,
videos,
batches,
uploads,
@@ -62,7 +63,6 @@
from .resources.models import Models, AsyncModels
from .resources.videos import Videos, AsyncVideos
from .resources.batches import Batches, AsyncBatches
- from .resources.webhooks import Webhooks, AsyncWebhooks
from .resources.beta.beta import Beta, AsyncBeta
from .resources.chat.chat import Chat, AsyncChat
from .resources.embeddings import Embeddings, AsyncEmbeddings
@@ -70,8 +70,10 @@
from .resources.completions import Completions, AsyncCompletions
from .resources.evals.evals import Evals, AsyncEvals
from .resources.moderations import Moderations, AsyncModerations
+ from .resources.skills.skills import Skills, AsyncSkills
from .resources.uploads.uploads import Uploads, AsyncUploads
from .resources.realtime.realtime import Realtime, AsyncRealtime
+ from .resources.webhooks.webhooks import Webhooks, AsyncWebhooks
from .resources.responses.responses import Responses, AsyncResponses
from .resources.containers.containers import Containers, AsyncContainers
from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning
@@ -178,6 +180,9 @@ def __init__(
@cached_property
def completions(self) -> Completions:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import Completions
return Completions(self)
@@ -190,18 +195,25 @@ def chat(self) -> Chat:
@cached_property
def embeddings(self) -> Embeddings:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import Embeddings
return Embeddings(self)
@cached_property
def files(self) -> Files:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import Files
return Files(self)
@cached_property
def images(self) -> Images:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import Images
return Images(self)
@@ -214,12 +226,16 @@ def audio(self) -> Audio:
@cached_property
def moderations(self) -> Moderations:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import Moderations
return Moderations(self)
@cached_property
def models(self) -> Models:
+ """List and describe the various models available in the API."""
from .resources.models import Models
return Models(self)
@@ -250,12 +266,14 @@ def beta(self) -> Beta:
@cached_property
def batches(self) -> Batches:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import Batches
return Batches(self)
@cached_property
def uploads(self) -> Uploads:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import Uploads
return Uploads(self)
@@ -274,12 +292,14 @@ def realtime(self) -> Realtime:
@cached_property
def conversations(self) -> Conversations:
+ """Manage conversations and conversation items."""
from .resources.conversations import Conversations
return Conversations(self)
@cached_property
def evals(self) -> Evals:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import Evals
return Evals(self)
@@ -290,6 +310,12 @@ def containers(self) -> Containers:
return Containers(self)
+ @cached_property
+ def skills(self) -> Skills:
+ from .resources.skills import Skills
+
+ return Skills(self)
+
@cached_property
def videos(self) -> Videos:
from .resources.videos import Videos
@@ -529,6 +555,9 @@ def __init__(
@cached_property
def completions(self) -> AsyncCompletions:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletions
return AsyncCompletions(self)
@@ -541,18 +570,25 @@ def chat(self) -> AsyncChat:
@cached_property
def embeddings(self) -> AsyncEmbeddings:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddings
return AsyncEmbeddings(self)
@cached_property
def files(self) -> AsyncFiles:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFiles
return AsyncFiles(self)
@cached_property
def images(self) -> AsyncImages:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImages
return AsyncImages(self)
@@ -565,12 +601,16 @@ def audio(self) -> AsyncAudio:
@cached_property
def moderations(self) -> AsyncModerations:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerations
return AsyncModerations(self)
@cached_property
def models(self) -> AsyncModels:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModels
return AsyncModels(self)
@@ -601,12 +641,14 @@ def beta(self) -> AsyncBeta:
@cached_property
def batches(self) -> AsyncBatches:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatches
return AsyncBatches(self)
@cached_property
def uploads(self) -> AsyncUploads:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploads
return AsyncUploads(self)
@@ -625,12 +667,14 @@ def realtime(self) -> AsyncRealtime:
@cached_property
def conversations(self) -> AsyncConversations:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversations
return AsyncConversations(self)
@cached_property
def evals(self) -> AsyncEvals:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvals
return AsyncEvals(self)
@@ -641,6 +685,12 @@ def containers(self) -> AsyncContainers:
return AsyncContainers(self)
+ @cached_property
+ def skills(self) -> AsyncSkills:
+ from .resources.skills import AsyncSkills
+
+ return AsyncSkills(self)
+
@cached_property
def videos(self) -> AsyncVideos:
from .resources.videos import AsyncVideos
@@ -791,6 +841,9 @@ def __init__(self, client: OpenAI) -> None:
@cached_property
def completions(self) -> completions.CompletionsWithRawResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import CompletionsWithRawResponse
return CompletionsWithRawResponse(self._client.completions)
@@ -803,18 +856,25 @@ def chat(self) -> chat.ChatWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsWithRawResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import EmbeddingsWithRawResponse
return EmbeddingsWithRawResponse(self._client.embeddings)
@cached_property
def files(self) -> files.FilesWithRawResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import FilesWithRawResponse
return FilesWithRawResponse(self._client.files)
@cached_property
def images(self) -> images.ImagesWithRawResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import ImagesWithRawResponse
return ImagesWithRawResponse(self._client.images)
@@ -827,12 +887,16 @@ def audio(self) -> audio.AudioWithRawResponse:
@cached_property
def moderations(self) -> moderations.ModerationsWithRawResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import ModerationsWithRawResponse
return ModerationsWithRawResponse(self._client.moderations)
@cached_property
def models(self) -> models.ModelsWithRawResponse:
+ """List and describe the various models available in the API."""
from .resources.models import ModelsWithRawResponse
return ModelsWithRawResponse(self._client.models)
@@ -857,12 +921,14 @@ def beta(self) -> beta.BetaWithRawResponse:
@cached_property
def batches(self) -> batches.BatchesWithRawResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import BatchesWithRawResponse
return BatchesWithRawResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.UploadsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import UploadsWithRawResponse
return UploadsWithRawResponse(self._client.uploads)
@@ -881,12 +947,14 @@ def realtime(self) -> realtime.RealtimeWithRawResponse:
@cached_property
def conversations(self) -> conversations.ConversationsWithRawResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import ConversationsWithRawResponse
return ConversationsWithRawResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.EvalsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import EvalsWithRawResponse
return EvalsWithRawResponse(self._client.evals)
@@ -897,6 +965,12 @@ def containers(self) -> containers.ContainersWithRawResponse:
return ContainersWithRawResponse(self._client.containers)
+ @cached_property
+ def skills(self) -> skills.SkillsWithRawResponse:
+ from .resources.skills import SkillsWithRawResponse
+
+ return SkillsWithRawResponse(self._client.skills)
+
@cached_property
def videos(self) -> videos.VideosWithRawResponse:
from .resources.videos import VideosWithRawResponse
@@ -912,6 +986,9 @@ def __init__(self, client: AsyncOpenAI) -> None:
@cached_property
def completions(self) -> completions.AsyncCompletionsWithRawResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletionsWithRawResponse
return AsyncCompletionsWithRawResponse(self._client.completions)
@@ -924,18 +1001,25 @@ def chat(self) -> chat.AsyncChatWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsWithRawResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddingsWithRawResponse
return AsyncEmbeddingsWithRawResponse(self._client.embeddings)
@cached_property
def files(self) -> files.AsyncFilesWithRawResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFilesWithRawResponse
return AsyncFilesWithRawResponse(self._client.files)
@cached_property
def images(self) -> images.AsyncImagesWithRawResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImagesWithRawResponse
return AsyncImagesWithRawResponse(self._client.images)
@@ -948,12 +1032,16 @@ def audio(self) -> audio.AsyncAudioWithRawResponse:
@cached_property
def moderations(self) -> moderations.AsyncModerationsWithRawResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerationsWithRawResponse
return AsyncModerationsWithRawResponse(self._client.moderations)
@cached_property
def models(self) -> models.AsyncModelsWithRawResponse:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModelsWithRawResponse
return AsyncModelsWithRawResponse(self._client.models)
@@ -978,12 +1066,14 @@ def beta(self) -> beta.AsyncBetaWithRawResponse:
@cached_property
def batches(self) -> batches.AsyncBatchesWithRawResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatchesWithRawResponse
return AsyncBatchesWithRawResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.AsyncUploadsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploadsWithRawResponse
return AsyncUploadsWithRawResponse(self._client.uploads)
@@ -1002,12 +1092,14 @@ def realtime(self) -> realtime.AsyncRealtimeWithRawResponse:
@cached_property
def conversations(self) -> conversations.AsyncConversationsWithRawResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversationsWithRawResponse
return AsyncConversationsWithRawResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.AsyncEvalsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvalsWithRawResponse
return AsyncEvalsWithRawResponse(self._client.evals)
@@ -1018,6 +1110,12 @@ def containers(self) -> containers.AsyncContainersWithRawResponse:
return AsyncContainersWithRawResponse(self._client.containers)
+ @cached_property
+ def skills(self) -> skills.AsyncSkillsWithRawResponse:
+ from .resources.skills import AsyncSkillsWithRawResponse
+
+ return AsyncSkillsWithRawResponse(self._client.skills)
+
@cached_property
def videos(self) -> videos.AsyncVideosWithRawResponse:
from .resources.videos import AsyncVideosWithRawResponse
@@ -1033,6 +1131,9 @@ def __init__(self, client: OpenAI) -> None:
@cached_property
def completions(self) -> completions.CompletionsWithStreamingResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import CompletionsWithStreamingResponse
return CompletionsWithStreamingResponse(self._client.completions)
@@ -1045,18 +1146,25 @@ def chat(self) -> chat.ChatWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsWithStreamingResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import EmbeddingsWithStreamingResponse
return EmbeddingsWithStreamingResponse(self._client.embeddings)
@cached_property
def files(self) -> files.FilesWithStreamingResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import FilesWithStreamingResponse
return FilesWithStreamingResponse(self._client.files)
@cached_property
def images(self) -> images.ImagesWithStreamingResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import ImagesWithStreamingResponse
return ImagesWithStreamingResponse(self._client.images)
@@ -1069,12 +1177,16 @@ def audio(self) -> audio.AudioWithStreamingResponse:
@cached_property
def moderations(self) -> moderations.ModerationsWithStreamingResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import ModerationsWithStreamingResponse
return ModerationsWithStreamingResponse(self._client.moderations)
@cached_property
def models(self) -> models.ModelsWithStreamingResponse:
+ """List and describe the various models available in the API."""
from .resources.models import ModelsWithStreamingResponse
return ModelsWithStreamingResponse(self._client.models)
@@ -1099,12 +1211,14 @@ def beta(self) -> beta.BetaWithStreamingResponse:
@cached_property
def batches(self) -> batches.BatchesWithStreamingResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import BatchesWithStreamingResponse
return BatchesWithStreamingResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.UploadsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import UploadsWithStreamingResponse
return UploadsWithStreamingResponse(self._client.uploads)
@@ -1123,12 +1237,14 @@ def realtime(self) -> realtime.RealtimeWithStreamingResponse:
@cached_property
def conversations(self) -> conversations.ConversationsWithStreamingResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import ConversationsWithStreamingResponse
return ConversationsWithStreamingResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.EvalsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import EvalsWithStreamingResponse
return EvalsWithStreamingResponse(self._client.evals)
@@ -1139,6 +1255,12 @@ def containers(self) -> containers.ContainersWithStreamingResponse:
return ContainersWithStreamingResponse(self._client.containers)
+ @cached_property
+ def skills(self) -> skills.SkillsWithStreamingResponse:
+ from .resources.skills import SkillsWithStreamingResponse
+
+ return SkillsWithStreamingResponse(self._client.skills)
+
@cached_property
def videos(self) -> videos.VideosWithStreamingResponse:
from .resources.videos import VideosWithStreamingResponse
@@ -1154,6 +1276,9 @@ def __init__(self, client: AsyncOpenAI) -> None:
@cached_property
def completions(self) -> completions.AsyncCompletionsWithStreamingResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletionsWithStreamingResponse
return AsyncCompletionsWithStreamingResponse(self._client.completions)
@@ -1166,18 +1291,25 @@ def chat(self) -> chat.AsyncChatWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsWithStreamingResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddingsWithStreamingResponse
return AsyncEmbeddingsWithStreamingResponse(self._client.embeddings)
@cached_property
def files(self) -> files.AsyncFilesWithStreamingResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFilesWithStreamingResponse
return AsyncFilesWithStreamingResponse(self._client.files)
@cached_property
def images(self) -> images.AsyncImagesWithStreamingResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImagesWithStreamingResponse
return AsyncImagesWithStreamingResponse(self._client.images)
@@ -1190,12 +1322,16 @@ def audio(self) -> audio.AsyncAudioWithStreamingResponse:
@cached_property
def moderations(self) -> moderations.AsyncModerationsWithStreamingResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerationsWithStreamingResponse
return AsyncModerationsWithStreamingResponse(self._client.moderations)
@cached_property
def models(self) -> models.AsyncModelsWithStreamingResponse:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModelsWithStreamingResponse
return AsyncModelsWithStreamingResponse(self._client.models)
@@ -1220,12 +1356,14 @@ def beta(self) -> beta.AsyncBetaWithStreamingResponse:
@cached_property
def batches(self) -> batches.AsyncBatchesWithStreamingResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatchesWithStreamingResponse
return AsyncBatchesWithStreamingResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.AsyncUploadsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploadsWithStreamingResponse
return AsyncUploadsWithStreamingResponse(self._client.uploads)
@@ -1244,12 +1382,14 @@ def realtime(self) -> realtime.AsyncRealtimeWithStreamingResponse:
@cached_property
def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversationsWithStreamingResponse
return AsyncConversationsWithStreamingResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.AsyncEvalsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvalsWithStreamingResponse
return AsyncEvalsWithStreamingResponse(self._client.evals)
@@ -1260,6 +1400,12 @@ def containers(self) -> containers.AsyncContainersWithStreamingResponse:
return AsyncContainersWithStreamingResponse(self._client.containers)
+ @cached_property
+ def skills(self) -> skills.AsyncSkillsWithStreamingResponse:
+ from .resources.skills import AsyncSkillsWithStreamingResponse
+
+ return AsyncSkillsWithStreamingResponse(self._client.skills)
+
@cached_property
def videos(self) -> videos.AsyncVideosWithStreamingResponse:
from .resources.videos import AsyncVideosWithStreamingResponse
diff --git a/portkey_ai/_vendor/openai/_compat.py b/portkey_ai/_vendor/openai/_compat.py
index 73a1f3ea..340c91a6 100644
--- a/portkey_ai/_vendor/openai/_compat.py
+++ b/portkey_ai/_vendor/openai/_compat.py
@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
-from typing_extensions import Self, Literal
+from typing_extensions import Self, Literal, TypedDict
import pydantic
from pydantic.fields import FieldInfo
@@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)
+class _ModelDumpKwargs(TypedDict, total=False):
+ by_alias: bool
+
+
def model_dump(
model: pydantic.BaseModel,
*,
@@ -139,8 +143,12 @@ def model_dump(
exclude_defaults: bool = False,
warnings: bool = True,
mode: Literal["json", "python"] = "python",
+ by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ kwargs: _ModelDumpKwargs = {}
+ if by_alias is not None:
+ kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
@@ -148,13 +156,12 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
+ **kwargs,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
+ exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, by_alias=bool(by_alias)
),
)
diff --git a/portkey_ai/_vendor/openai/_legacy_response.py b/portkey_ai/_vendor/openai/_legacy_response.py
index cfabaa2f..1a58c2df 100644
--- a/portkey_ai/_vendor/openai/_legacy_response.py
+++ b/portkey_ai/_vendor/openai/_legacy_response.py
@@ -221,6 +221,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -231,6 +232,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -244,6 +246,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
diff --git a/portkey_ai/_vendor/openai/_models.py b/portkey_ai/_vendor/openai/_models.py
index 5cca20c6..810e49df 100644
--- a/portkey_ai/_vendor/openai/_models.py
+++ b/portkey_ai/_vendor/openai/_models.py
@@ -845,6 +845,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
json_data: Body
extra_json: AnyMapping
follow_redirects: bool
+ synthesize_event_and_data: bool
@final
@@ -859,6 +860,7 @@ class FinalRequestOptions(pydantic.BaseModel):
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
follow_redirects: Union[bool, None] = None
+ synthesize_event_and_data: Optional[bool] = None
content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None
# It should be noted that we cannot use `json` here as that would override
diff --git a/portkey_ai/_vendor/openai/_module_client.py b/portkey_ai/_vendor/openai/_module_client.py
index d0d72188..98901c04 100644
--- a/portkey_ai/_vendor/openai/_module_client.py
+++ b/portkey_ai/_vendor/openai/_module_client.py
@@ -11,7 +11,6 @@
from .resources.models import Models
from .resources.videos import Videos
from .resources.batches import Batches
- from .resources.webhooks import Webhooks
from .resources.beta.beta import Beta
from .resources.chat.chat import Chat
from .resources.embeddings import Embeddings
@@ -19,8 +18,10 @@
from .resources.completions import Completions
from .resources.evals.evals import Evals
from .resources.moderations import Moderations
+ from .resources.skills.skills import Skills
from .resources.uploads.uploads import Uploads
from .resources.realtime.realtime import Realtime
+ from .resources.webhooks.webhooks import Webhooks
from .resources.responses.responses import Responses
from .resources.containers.containers import Containers
from .resources.fine_tuning.fine_tuning import FineTuning
@@ -73,6 +74,12 @@ def __load__(self) -> Models:
return _load_client().models
+class SkillsProxy(LazyProxy["Skills"]):
+ @override
+ def __load__(self) -> Skills:
+ return _load_client().skills
+
+
class VideosProxy(LazyProxy["Videos"]):
@override
def __load__(self) -> Videos:
@@ -158,6 +165,7 @@ def __load__(self) -> Conversations:
evals: Evals = EvalsProxy().__as_proxied__()
images: Images = ImagesProxy().__as_proxied__()
models: Models = ModelsProxy().__as_proxied__()
+skills: Skills = SkillsProxy().__as_proxied__()
videos: Videos = VideosProxy().__as_proxied__()
batches: Batches = BatchesProxy().__as_proxied__()
uploads: Uploads = UploadsProxy().__as_proxied__()
diff --git a/portkey_ai/_vendor/openai/_response.py b/portkey_ai/_vendor/openai/_response.py
index 350da38d..f286d38e 100644
--- a/portkey_ai/_vendor/openai/_response.py
+++ b/portkey_ai/_vendor/openai/_response.py
@@ -152,6 +152,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -162,6 +163,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -175,6 +177,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
diff --git a/portkey_ai/_vendor/openai/_streaming.py b/portkey_ai/_vendor/openai/_streaming.py
index 61a74266..45c13cc1 100644
--- a/portkey_ai/_vendor/openai/_streaming.py
+++ b/portkey_ai/_vendor/openai/_streaming.py
@@ -4,7 +4,7 @@
import json
import inspect
from types import TracebackType
-from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, Optional, AsyncIterator, cast
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
import httpx
@@ -14,6 +14,7 @@
if TYPE_CHECKING:
from ._client import OpenAI, AsyncOpenAI
+ from ._models import FinalRequestOptions
_T = TypeVar("_T")
@@ -23,7 +24,7 @@ class Stream(Generic[_T]):
"""Provides the core interface to iterate over a synchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEBytesDecoder
def __init__(
@@ -32,10 +33,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: OpenAI,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -95,8 +98,13 @@ def __stream__(self) -> Iterator[_T]:
body=data["error"],
)
- yield process_data(data=data, cast_to=cast_to, response=response)
-
+ yield process_data(
+ data={"data": data, "event": sse.event}
+ if self._options is not None and self._options.synthesize_event_and_data
+ else data,
+ cast_to=cast_to,
+ response=response,
+ )
finally:
# Ensure the response is closed even if the consumer doesn't read all data
response.close()
@@ -125,7 +133,7 @@ class AsyncStream(Generic[_T]):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
@@ -134,10 +142,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: AsyncOpenAI,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -198,8 +208,13 @@ async def __stream__(self) -> AsyncIterator[_T]:
body=data["error"],
)
- yield process_data(data=data, cast_to=cast_to, response=response)
-
+ yield process_data(
+ data={"data": data, "event": sse.event}
+ if self._options is not None and self._options.synthesize_event_and_data
+ else data,
+ cast_to=cast_to,
+ response=response,
+ )
finally:
# Ensure the response is closed even if the consumer doesn't read all data
await response.aclose()
diff --git a/portkey_ai/_vendor/openai/_types.py b/portkey_ai/_vendor/openai/_types.py
index 42f9df23..c55c6f80 100644
--- a/portkey_ai/_vendor/openai/_types.py
+++ b/portkey_ai/_vendor/openai/_types.py
@@ -122,6 +122,7 @@ class RequestOptions(TypedDict, total=False):
extra_json: AnyMapping
idempotency_key: str
follow_redirects: bool
+ synthesize_event_and_data: bool
# Sentinel class used until PEP 0661 is accepted
diff --git a/portkey_ai/_vendor/openai/_utils/__init__.py b/portkey_ai/_vendor/openai/_utils/__init__.py
index 963c83b6..52853aaf 100644
--- a/portkey_ai/_vendor/openai/_utils/__init__.py
+++ b/portkey_ai/_vendor/openai/_utils/__init__.py
@@ -1,4 +1,5 @@
from ._logs import SensitiveHeadersFilter as SensitiveHeadersFilter
+from ._path import path_template as path_template
from ._sync import asyncify as asyncify
from ._proxy import LazyProxy as LazyProxy
from ._utils import (
diff --git a/portkey_ai/_vendor/openai/_utils/_compat.py b/portkey_ai/_vendor/openai/_utils/_compat.py
index dd703233..2c70b299 100644
--- a/portkey_ai/_vendor/openai/_utils/_compat.py
+++ b/portkey_ai/_vendor/openai/_utils/_compat.py
@@ -26,7 +26,7 @@ def is_union(tp: Optional[Type[Any]]) -> bool:
else:
import types
- return tp is Union or tp is types.UnionType
+ return tp is Union or tp is types.UnionType # type: ignore[comparison-overlap]
def is_typeddict(tp: Type[Any]) -> bool:
diff --git a/portkey_ai/_vendor/openai/_utils/_json.py b/portkey_ai/_vendor/openai/_utils/_json.py
new file mode 100644
index 00000000..60584214
--- /dev/null
+++ b/portkey_ai/_vendor/openai/_utils/_json.py
@@ -0,0 +1,35 @@
+import json
+from typing import Any
+from datetime import datetime
+from typing_extensions import override
+
+import pydantic
+
+from .._compat import model_dump
+
+
+def openapi_dumps(obj: Any) -> bytes:
+ """
+ Serialize an object to UTF-8 encoded JSON bytes.
+
+ Extends the standard json.dumps with support for additional types
+ commonly used in the SDK, such as `datetime`, `pydantic.BaseModel`, etc.
+ """
+ return json.dumps(
+ obj,
+ cls=_CustomEncoder,
+ # Uses the same defaults as httpx's JSON serialization
+ ensure_ascii=False,
+ separators=(",", ":"),
+ allow_nan=False,
+ ).encode()
+
+
+class _CustomEncoder(json.JSONEncoder):
+ @override
+ def default(self, o: Any) -> Any:
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, pydantic.BaseModel):
+ return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
+ return super().default(o)
diff --git a/portkey_ai/_vendor/openai/_utils/_path.py b/portkey_ai/_vendor/openai/_utils/_path.py
new file mode 100644
index 00000000..4d6e1e4c
--- /dev/null
+++ b/portkey_ai/_vendor/openai/_utils/_path.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+    RFC 3986 §3.4 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/portkey_ai/_vendor/openai/_version.py b/portkey_ai/_vendor/openai/_version.py
index eb61bdd2..788e82e0 100644
--- a/portkey_ai/_vendor/openai/_version.py
+++ b/portkey_ai/_vendor/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "2.16.0" # x-release-please-version
+__version__ = "2.30.0" # x-release-please-version
diff --git a/portkey_ai/_vendor/openai/lib/_parsing/__init__.py b/portkey_ai/_vendor/openai/lib/_parsing/__init__.py
index 4d454c3a..08591f43 100644
--- a/portkey_ai/_vendor/openai/lib/_parsing/__init__.py
+++ b/portkey_ai/_vendor/openai/lib/_parsing/__init__.py
@@ -6,7 +6,6 @@
validate_input_tools as validate_input_tools,
parse_chat_completion as parse_chat_completion,
get_input_tool_by_name as get_input_tool_by_name,
- solve_response_format_t as solve_response_format_t,
parse_function_tool_arguments as parse_function_tool_arguments,
type_to_response_format_param as type_to_response_format_param,
)
diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py
index 7903732a..7a1bded1 100644
--- a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py
+++ b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py
@@ -138,7 +138,7 @@ def parse_chat_completion(
choices.append(
construct_type_unchecked(
- type_=cast(Any, ParsedChoice)[solve_response_format_t(response_format)],
+ type_=ParsedChoice[ResponseFormatT],
value={
**choice.to_dict(),
"message": {
@@ -153,15 +153,12 @@ def parse_chat_completion(
)
)
- return cast(
- ParsedChatCompletion[ResponseFormatT],
- construct_type_unchecked(
- type_=cast(Any, ParsedChatCompletion)[solve_response_format_t(response_format)],
- value={
- **chat_completion.to_dict(),
- "choices": choices,
- },
- ),
+ return construct_type_unchecked(
+ type_=ParsedChatCompletion[ResponseFormatT],
+ value={
+ **chat_completion.to_dict(),
+ "choices": choices,
+ },
)
@@ -201,20 +198,6 @@ def maybe_parse_content(
return None
-def solve_response_format_t(
- response_format: type[ResponseFormatT] | ResponseFormatParam | Omit,
-) -> type[ResponseFormatT]:
- """Return the runtime type for the given response format.
-
- If no response format is given, or if we won't auto-parse the response format
- then we default to `None`.
- """
- if has_rich_response_format(response_format):
- return response_format
-
- return cast("type[ResponseFormatT]", _default_response_format)
-
-
def has_parseable_input(
*,
response_format: type | ResponseFormatParam | Omit,
diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py
index 4bed171d..8853a074 100644
--- a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py
+++ b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import json
-from typing import TYPE_CHECKING, Any, List, Iterable, cast
+from typing import TYPE_CHECKING, List, Iterable, cast
from typing_extensions import TypeVar, assert_never
import pydantic
@@ -12,7 +12,7 @@
from ..._compat import PYDANTIC_V1, model_parse_json
from ..._models import construct_type_unchecked
from .._pydantic import is_basemodel_type, is_dataclass_like_type
-from ._completions import solve_response_format_t, type_to_response_format_param
+from ._completions import type_to_response_format_param
from ...types.responses import (
Response,
ToolParam,
@@ -56,7 +56,6 @@ def parse_response(
input_tools: Iterable[ToolParam] | Omit | None,
response: Response | ParsedResponse[object],
) -> ParsedResponse[TextFormatT]:
- solved_t = solve_response_format_t(text_format)
output_list: List[ParsedResponseOutputItem[TextFormatT]] = []
for output in response.output:
@@ -69,7 +68,7 @@ def parse_response(
content_list.append(
construct_type_unchecked(
- type_=cast(Any, ParsedResponseOutputText)[solved_t],
+ type_=ParsedResponseOutputText[TextFormatT],
value={
**item.to_dict(),
"parsed": parse_text(item.text, text_format=text_format),
@@ -79,7 +78,7 @@ def parse_response(
output_list.append(
construct_type_unchecked(
- type_=cast(Any, ParsedResponseOutputMessage)[solved_t],
+ type_=ParsedResponseOutputMessage[TextFormatT],
value={
**output.to_dict(),
"content": content_list,
@@ -102,13 +101,17 @@ def parse_response(
output.type == "computer_call"
or output.type == "file_search_call"
or output.type == "web_search_call"
+ or output.type == "tool_search_call"
+ or output.type == "tool_search_output"
or output.type == "reasoning"
or output.type == "compaction"
or output.type == "mcp_call"
or output.type == "mcp_approval_request"
+ or output.type == "mcp_approval_response"
or output.type == "image_generation_call"
or output.type == "code_interpreter_call"
or output.type == "local_shell_call"
+ or output.type == "local_shell_call_output"
or output.type == "shell_call"
or output.type == "shell_call_output"
or output.type == "apply_patch_call"
@@ -116,6 +119,9 @@ def parse_response(
or output.type == "mcp_list_tools"
or output.type == "exec"
or output.type == "custom_tool_call"
+ or output.type == "function_call_output"
+ or output.type == "computer_call_output"
+ or output.type == "custom_tool_call_output"
):
output_list.append(output)
elif TYPE_CHECKING: # type: ignore
@@ -123,15 +129,12 @@ def parse_response(
else:
output_list.append(output)
- return cast(
- ParsedResponse[TextFormatT],
- construct_type_unchecked(
- type_=cast(Any, ParsedResponse)[solved_t],
- value={
- **response.to_dict(),
- "output": output_list,
- },
- ),
+ return construct_type_unchecked(
+ type_=ParsedResponse[TextFormatT],
+ value={
+ **response.to_dict(),
+ "output": output_list,
+ },
)
diff --git a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py
index c4610e21..5f072caf 100644
--- a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py
+++ b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py
@@ -33,7 +33,6 @@
maybe_parse_content,
parse_chat_completion,
get_input_tool_by_name,
- solve_response_format_t,
parse_function_tool_arguments,
)
from ...._streaming import Stream, AsyncStream
@@ -663,7 +662,7 @@ def _content_done_events(
# type variable, e.g. `ContentDoneEvent[MyModelType]`
cast( # pyright: ignore[reportUnnecessaryCast]
"type[ContentDoneEvent[ResponseFormatT]]",
- cast(Any, ContentDoneEvent)[solve_response_format_t(response_format)],
+ cast(Any, ContentDoneEvent),
),
type="content.done",
content=choice_snapshot.message.content,
diff --git a/portkey_ai/_vendor/openai/resources/__init__.py b/portkey_ai/_vendor/openai/resources/__init__.py
index b793fbc7..ed030f71 100644
--- a/portkey_ai/_vendor/openai/resources/__init__.py
+++ b/portkey_ai/_vendor/openai/resources/__init__.py
@@ -56,6 +56,14 @@
ModelsWithStreamingResponse,
AsyncModelsWithStreamingResponse,
)
+from .skills import (
+ Skills,
+ AsyncSkills,
+ SkillsWithRawResponse,
+ AsyncSkillsWithRawResponse,
+ SkillsWithStreamingResponse,
+ AsyncSkillsWithStreamingResponse,
+)
from .videos import (
Videos,
AsyncVideos,
@@ -220,6 +228,12 @@
"AsyncContainersWithRawResponse",
"ContainersWithStreamingResponse",
"AsyncContainersWithStreamingResponse",
+ "Skills",
+ "AsyncSkills",
+ "SkillsWithRawResponse",
+ "AsyncSkillsWithRawResponse",
+ "SkillsWithStreamingResponse",
+ "AsyncSkillsWithStreamingResponse",
"Videos",
"AsyncVideos",
"VideosWithRawResponse",
diff --git a/portkey_ai/_vendor/openai/resources/audio/audio.py b/portkey_ai/_vendor/openai/resources/audio/audio.py
index 383b7073..040a058d 100644
--- a/portkey_ai/_vendor/openai/resources/audio/audio.py
+++ b/portkey_ai/_vendor/openai/resources/audio/audio.py
@@ -35,14 +35,17 @@
class Audio(SyncAPIResource):
@cached_property
def transcriptions(self) -> Transcriptions:
+ """Turn audio into text or text into audio."""
return Transcriptions(self._client)
@cached_property
def translations(self) -> Translations:
+ """Turn audio into text or text into audio."""
return Translations(self._client)
@cached_property
def speech(self) -> Speech:
+ """Turn audio into text or text into audio."""
return Speech(self._client)
@cached_property
@@ -68,14 +71,17 @@ def with_streaming_response(self) -> AudioWithStreamingResponse:
class AsyncAudio(AsyncAPIResource):
@cached_property
def transcriptions(self) -> AsyncTranscriptions:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptions(self._client)
@cached_property
def translations(self) -> AsyncTranslations:
+ """Turn audio into text or text into audio."""
return AsyncTranslations(self._client)
@cached_property
def speech(self) -> AsyncSpeech:
+ """Turn audio into text or text into audio."""
return AsyncSpeech(self._client)
@cached_property
@@ -104,14 +110,17 @@ def __init__(self, audio: Audio) -> None:
@cached_property
def transcriptions(self) -> TranscriptionsWithRawResponse:
+ """Turn audio into text or text into audio."""
return TranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithRawResponse:
+ """Turn audio into text or text into audio."""
return TranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithRawResponse:
+ """Turn audio into text or text into audio."""
return SpeechWithRawResponse(self._audio.speech)
@@ -121,14 +130,17 @@ def __init__(self, audio: AsyncAudio) -> None:
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncSpeechWithRawResponse(self._audio.speech)
@@ -138,14 +150,17 @@ def __init__(self, audio: Audio) -> None:
@cached_property
def transcriptions(self) -> TranscriptionsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return TranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return TranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return SpeechWithStreamingResponse(self._audio.speech)
@@ -155,12 +170,15 @@ def __init__(self, audio: AsyncAudio) -> None:
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncSpeechWithStreamingResponse(self._audio.speech)
diff --git a/portkey_ai/_vendor/openai/resources/audio/speech.py b/portkey_ai/_vendor/openai/resources/audio/speech.py
index f2c8d635..80dbb440 100644
--- a/portkey_ai/_vendor/openai/resources/audio/speech.py
+++ b/portkey_ai/_vendor/openai/resources/audio/speech.py
@@ -26,6 +26,8 @@
class Speech(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> SpeechWithRawResponse:
"""
@@ -50,9 +52,7 @@ def create(
*,
input: str,
model: Union[str, SpeechModel],
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
- ],
+ voice: speech_create_params.Voice,
instructions: str | Omit = omit,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
speed: float | Omit = omit,
@@ -67,6 +67,8 @@ def create(
"""
Generates audio from the input text.
+ Returns the audio file content, or a stream of audio events.
+
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
@@ -76,8 +78,9 @@ def create(
voice: The voice to use when generating the audio. Supported built-in voices are
`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
- in the
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Previews of the
+ voices are available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
@@ -123,6 +126,8 @@ def create(
class AsyncSpeech(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncSpeechWithRawResponse:
"""
@@ -147,9 +152,7 @@ async def create(
*,
input: str,
model: Union[str, SpeechModel],
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
- ],
+ voice: speech_create_params.Voice,
instructions: str | Omit = omit,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
speed: float | Omit = omit,
@@ -164,6 +167,8 @@ async def create(
"""
Generates audio from the input text.
+ Returns the audio file content, or a stream of audio events.
+
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
@@ -173,8 +178,9 @@ async def create(
voice: The voice to use when generating the audio. Supported built-in voices are
`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
- in the
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Previews of the
+ voices are available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
diff --git a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py
index 59953485..25e6e0cb 100644
--- a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py
+++ b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py
@@ -42,6 +42,8 @@
class Transcriptions(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> TranscriptionsWithRawResponse:
"""
@@ -85,6 +87,9 @@ def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
@@ -235,6 +240,9 @@ def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
@@ -343,6 +351,9 @@ def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
@@ -488,6 +499,8 @@ def create(
class AsyncTranscriptions(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
"""
@@ -533,6 +546,9 @@ async def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
@@ -678,6 +694,9 @@ async def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
@@ -786,6 +805,9 @@ async def create(
"""
Transcribes audio into the input language.
+ Returns a transcription object in `json`, `diarized_json`, or `verbose_json`
+ format, or a stream of transcript events.
+
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
diff --git a/portkey_ai/_vendor/openai/resources/audio/translations.py b/portkey_ai/_vendor/openai/resources/audio/translations.py
index 310f901f..0751a655 100644
--- a/portkey_ai/_vendor/openai/resources/audio/translations.py
+++ b/portkey_ai/_vendor/openai/resources/audio/translations.py
@@ -27,6 +27,8 @@
class Translations(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> TranslationsWithRawResponse:
"""
@@ -170,6 +172,8 @@ def create(
class AsyncTranslations(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
"""
diff --git a/portkey_ai/_vendor/openai/resources/batches.py b/portkey_ai/_vendor/openai/resources/batches.py
index 80400839..6cdb50c2 100644
--- a/portkey_ai/_vendor/openai/resources/batches.py
+++ b/portkey_ai/_vendor/openai/resources/batches.py
@@ -10,7 +10,7 @@
from .. import _legacy_response
from ..types import batch_list_params, batch_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -23,6 +23,8 @@
class Batches(SyncAPIResource):
+ """Create large batches of API requests to run asynchronously."""
+
@cached_property
def with_raw_response(self) -> BatchesWithRawResponse:
"""
@@ -47,7 +49,14 @@ def create(
*,
completion_window: Literal["24h"],
endpoint: Literal[
- "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
+ "/v1/responses",
+ "/v1/chat/completions",
+ "/v1/embeddings",
+ "/v1/completions",
+ "/v1/moderations",
+ "/v1/images/generations",
+ "/v1/images/edits",
+ "/v1/videos",
],
input_file_id: str,
metadata: Optional[Metadata] | Omit = omit,
@@ -68,7 +77,8 @@ def create(
endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
- and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ `/v1/moderations`, `/v1/images/generations`, `/v1/images/edits`, and
+ `/v1/videos` are supported. Note that `/v1/embeddings` batches are also
restricted to a maximum of 50,000 embedding inputs across all requests in the
batch.
@@ -144,7 +154,7 @@ def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get(
- f"/batches/{batch_id}",
+ path_template("/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -232,7 +242,7 @@ def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._post(
- f"/batches/{batch_id}/cancel",
+ path_template("/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -241,6 +251,8 @@ def cancel(
class AsyncBatches(AsyncAPIResource):
+ """Create large batches of API requests to run asynchronously."""
+
@cached_property
def with_raw_response(self) -> AsyncBatchesWithRawResponse:
"""
@@ -265,7 +277,14 @@ async def create(
*,
completion_window: Literal["24h"],
endpoint: Literal[
- "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
+ "/v1/responses",
+ "/v1/chat/completions",
+ "/v1/embeddings",
+ "/v1/completions",
+ "/v1/moderations",
+ "/v1/images/generations",
+ "/v1/images/edits",
+ "/v1/videos",
],
input_file_id: str,
metadata: Optional[Metadata] | Omit = omit,
@@ -286,7 +305,8 @@ async def create(
endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
- and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ `/v1/moderations`, `/v1/images/generations`, `/v1/images/edits`, and
+ `/v1/videos` are supported. Note that `/v1/embeddings` batches are also
restricted to a maximum of 50,000 embedding inputs across all requests in the
batch.
@@ -362,7 +382,7 @@ async def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._get(
- f"/batches/{batch_id}",
+ path_template("/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -450,7 +470,7 @@ async def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._post(
- f"/batches/{batch_id}/cancel",
+ path_template("/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/beta/assistants.py b/portkey_ai/_vendor/openai/resources/beta/assistants.py
index 8c697000..7ea8a918 100644
--- a/portkey_ai/_vendor/openai/resources/beta/assistants.py
+++ b/portkey_ai/_vendor/openai/resources/beta/assistants.py
@@ -10,7 +10,7 @@
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -33,6 +33,8 @@
class Assistants(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AssistantsWithRawResponse:
"""
@@ -213,7 +215,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -381,7 +383,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
body=maybe_transform(
{
"description": description,
@@ -498,7 +500,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -507,6 +509,8 @@ def delete(
class AsyncAssistants(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
"""
@@ -687,7 +691,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -855,7 +859,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
body=await async_maybe_transform(
{
"description": description,
@@ -972,7 +976,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/assistants/{assistant_id}",
+ path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/beta/beta.py b/portkey_ai/_vendor/openai/resources/beta/beta.py
index 5ee3639d..388a1c5d 100644
--- a/portkey_ai/_vendor/openai/resources/beta/beta.py
+++ b/portkey_ai/_vendor/openai/resources/beta/beta.py
@@ -52,10 +52,12 @@ def chatkit(self) -> ChatKit:
@cached_property
def assistants(self) -> Assistants:
+ """Build Assistants that can call models and use tools."""
return Assistants(self._client)
@cached_property
def threads(self) -> Threads:
+ """Build Assistants that can call models and use tools."""
return Threads(self._client)
@cached_property
@@ -93,10 +95,12 @@ def chatkit(self) -> AsyncChatKit:
@cached_property
def assistants(self) -> AsyncAssistants:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistants(self._client)
@cached_property
def threads(self) -> AsyncThreads:
+ """Build Assistants that can call models and use tools."""
return AsyncThreads(self._client)
@cached_property
@@ -129,10 +133,12 @@ def chatkit(self) -> ChatKitWithRawResponse:
@cached_property
def assistants(self) -> AssistantsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return ThreadsWithRawResponse(self._beta.threads)
@@ -146,10 +152,12 @@ def chatkit(self) -> AsyncChatKitWithRawResponse:
@cached_property
def assistants(self) -> AsyncAssistantsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncThreadsWithRawResponse(self._beta.threads)
@@ -163,10 +171,12 @@ def chatkit(self) -> ChatKitWithStreamingResponse:
@cached_property
def assistants(self) -> AssistantsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return ThreadsWithStreamingResponse(self._beta.threads)
@@ -180,8 +190,10 @@ def chatkit(self) -> AsyncChatKitWithStreamingResponse:
@cached_property
def assistants(self) -> AsyncAssistantsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncThreadsWithStreamingResponse(self._beta.threads)
diff --git a/portkey_ai/_vendor/openai/resources/beta/chatkit/sessions.py b/portkey_ai/_vendor/openai/resources/beta/chatkit/sessions.py
index a814f105..6e95fd65 100644
--- a/portkey_ai/_vendor/openai/resources/beta/chatkit/sessions.py
+++ b/portkey_ai/_vendor/openai/resources/beta/chatkit/sessions.py
@@ -6,7 +6,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -63,7 +63,7 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
- Create a ChatKit session
+ Create a ChatKit session.
Args:
user: A free-form string that identifies your end user; ensures this Session can
@@ -117,7 +117,9 @@ def cancel(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
- Cancel a ChatKit session
+ Cancel an active ChatKit session and return its most recent metadata.
+
+ Cancelling prevents new requests from using the issued client secret.
Args:
extra_headers: Send extra headers
@@ -132,7 +134,7 @@ def cancel(
raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._post(
- f"/chatkit/sessions/{session_id}/cancel",
+ path_template("/chatkit/sessions/{session_id}/cancel", session_id=session_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -176,7 +178,7 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
- Create a ChatKit session
+ Create a ChatKit session.
Args:
user: A free-form string that identifies your end user; ensures this Session can
@@ -230,7 +232,9 @@ async def cancel(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
- Cancel a ChatKit session
+ Cancel an active ChatKit session and return its most recent metadata.
+
+ Cancelling prevents new requests from using the issued client secret.
Args:
extra_headers: Send extra headers
@@ -245,7 +249,7 @@ async def cancel(
raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return await self._post(
- f"/chatkit/sessions/{session_id}/cancel",
+ path_template("/chatkit/sessions/{session_id}/cancel", session_id=session_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/beta/chatkit/threads.py b/portkey_ai/_vendor/openai/resources/beta/chatkit/threads.py
index 37cd5729..16e0e11a 100644
--- a/portkey_ai/_vendor/openai/resources/beta/chatkit/threads.py
+++ b/portkey_ai/_vendor/openai/resources/beta/chatkit/threads.py
@@ -9,7 +9,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform
+from ...._utils import path_template, maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -55,7 +55,7 @@ def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatKitThread:
"""
- Retrieve a ChatKit thread
+ Retrieve a ChatKit thread by its identifier.
Args:
extra_headers: Send extra headers
@@ -70,7 +70,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._get(
- f"/chatkit/threads/{thread_id}",
+ path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -93,7 +93,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncConversationCursorPage[ChatKitThread]:
"""
- List ChatKit threads
+ List ChatKit threads with optional pagination and user filters.
Args:
after: List items created after this thread item ID. Defaults to null for the first
@@ -152,7 +152,7 @@ def delete(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ThreadDeleteResponse:
"""
- Delete a ChatKit thread
+ Delete a ChatKit thread along with its items and stored attachments.
Args:
extra_headers: Send extra headers
@@ -167,7 +167,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._delete(
- f"/chatkit/threads/{thread_id}",
+ path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -190,7 +190,7 @@ def list_items(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncConversationCursorPage[Data]:
"""
- List ChatKit thread items
+ List items that belong to a ChatKit thread.
Args:
after: List items created after this thread item ID. Defaults to null for the first
@@ -215,7 +215,7 @@ def list_items(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._get_api_list(
- f"/chatkit/threads/{thread_id}/items",
+ path_template("/chatkit/threads/{thread_id}/items", thread_id=thread_id),
page=SyncConversationCursorPage[Data],
options=make_request_options(
extra_headers=extra_headers,
@@ -268,7 +268,7 @@ async def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatKitThread:
"""
- Retrieve a ChatKit thread
+ Retrieve a ChatKit thread by its identifier.
Args:
extra_headers: Send extra headers
@@ -283,7 +283,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return await self._get(
- f"/chatkit/threads/{thread_id}",
+ path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -306,7 +306,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]:
"""
- List ChatKit threads
+ List ChatKit threads with optional pagination and user filters.
Args:
after: List items created after this thread item ID. Defaults to null for the first
@@ -365,7 +365,7 @@ async def delete(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ThreadDeleteResponse:
"""
- Delete a ChatKit thread
+ Delete a ChatKit thread along with its items and stored attachments.
Args:
extra_headers: Send extra headers
@@ -380,7 +380,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return await self._delete(
- f"/chatkit/threads/{thread_id}",
+ path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -403,7 +403,7 @@ def list_items(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]:
"""
- List ChatKit thread items
+ List items that belong to a ChatKit thread.
Args:
after: List items created after this thread item ID. Defaults to null for the first
@@ -428,7 +428,7 @@ def list_items(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._get_api_list(
- f"/chatkit/threads/{thread_id}/items",
+ path_template("/chatkit/threads/{thread_id}/items", thread_id=thread_id),
page=AsyncConversationCursorPage[Data],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py
index d94ecca9..95b750d4 100644
--- a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py
+++ b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py
@@ -10,7 +10,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -29,6 +29,8 @@
class Messages(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
@@ -99,7 +101,7 @@ def create(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/messages",
+ path_template("/threads/{thread_id}/messages", thread_id=thread_id),
body=maybe_transform(
{
"content": content,
@@ -146,7 +148,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -192,7 +194,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -251,7 +253,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/messages",
+ path_template("/threads/{thread_id}/messages", thread_id=thread_id),
page=SyncCursorPage[Message],
options=make_request_options(
extra_headers=extra_headers,
@@ -303,7 +305,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -312,6 +314,8 @@ def delete(
class AsyncMessages(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
@@ -382,7 +386,7 @@ async def create(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/messages",
+ path_template("/threads/{thread_id}/messages", thread_id=thread_id),
body=await async_maybe_transform(
{
"content": content,
@@ -429,7 +433,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -475,7 +479,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -534,7 +538,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/messages",
+ path_template("/threads/{thread_id}/messages", thread_id=thread_id),
page=AsyncCursorPage[Message],
options=make_request_options(
extra_headers=extra_headers,
@@ -586,7 +590,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
+ path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py
index 8a58e91f..882e88df 100644
--- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py
+++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py
@@ -21,6 +21,7 @@
from ....._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, omit, not_given
from ....._utils import (
is_given,
+ path_template,
required_args,
maybe_transform,
async_maybe_transform,
@@ -59,8 +60,11 @@
class Runs(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def steps(self) -> Steps:
+ """Build Assistants that can call models and use tools."""
return Steps(self._client)
@cached_property
@@ -591,7 +595,7 @@ def create(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=maybe_transform(
{
"assistant_id": assistant_id,
@@ -620,6 +624,7 @@ def create(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -657,7 +662,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/threads/{thread_id}/runs/{run_id}",
+ path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -703,7 +708,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/runs/{run_id}",
+ path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -759,7 +764,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
page=SyncCursorPage[Run],
options=make_request_options(
extra_headers=extra_headers,
@@ -810,7 +815,7 @@ def cancel(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
+ path_template("/threads/{thread_id}/runs/{run_id}/cancel", thread_id=thread_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -994,7 +999,7 @@ def create_and_stream(
}
make_request = partial(
self._post,
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=maybe_transform(
{
"assistant_id": assistant_id,
@@ -1181,7 +1186,7 @@ def stream(
}
make_request = partial(
self._post,
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=maybe_transform(
{
"assistant_id": assistant_id,
@@ -1357,7 +1362,7 @@ def submit_tool_outputs(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+ path_template("/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", thread_id=thread_id, run_id=run_id),
body=maybe_transform(
{
"tool_outputs": tool_outputs,
@@ -1368,7 +1373,11 @@ def submit_tool_outputs(
else run_submit_tool_outputs_params.RunSubmitToolOutputsParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -1494,7 +1503,7 @@ def submit_tool_outputs_stream(
}
request = partial(
self._post,
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+ path_template("/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", thread_id=thread_id, run_id=run_id),
body=maybe_transform(
{
"tool_outputs": tool_outputs,
@@ -1513,8 +1522,11 @@ def submit_tool_outputs_stream(
class AsyncRuns(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def steps(self) -> AsyncSteps:
+ """Build Assistants that can call models and use tools."""
return AsyncSteps(self._client)
@cached_property
@@ -2046,7 +2058,7 @@ async def create(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=await async_maybe_transform(
{
"assistant_id": assistant_id,
@@ -2075,6 +2087,7 @@ async def create(
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -2112,7 +2125,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/threads/{thread_id}/runs/{run_id}",
+ path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -2158,7 +2171,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/runs/{run_id}",
+ path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -2214,7 +2227,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
page=AsyncCursorPage[Run],
options=make_request_options(
extra_headers=extra_headers,
@@ -2265,7 +2278,7 @@ async def cancel(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
+ path_template("/threads/{thread_id}/runs/{run_id}/cancel", thread_id=thread_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -2448,7 +2461,7 @@ def create_and_stream(
**(extra_headers or {}),
}
request = self._post(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=maybe_transform(
{
"assistant_id": assistant_id,
@@ -2635,7 +2648,7 @@ def stream(
**(extra_headers or {}),
}
request = self._post(
- f"/threads/{thread_id}/runs",
+ path_template("/threads/{thread_id}/runs", thread_id=thread_id),
body=maybe_transform(
{
"assistant_id": assistant_id,
@@ -2811,7 +2824,7 @@ async def submit_tool_outputs(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+ path_template("/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", thread_id=thread_id, run_id=run_id),
body=await async_maybe_transform(
{
"tool_outputs": tool_outputs,
@@ -2822,7 +2835,11 @@ async def submit_tool_outputs(
else run_submit_tool_outputs_params.RunSubmitToolOutputsParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -2950,7 +2967,7 @@ def submit_tool_outputs_stream(
**(extra_headers or {}),
}
request = self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
+ path_template("/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", thread_id=thread_id, run_id=run_id),
body=maybe_transform(
{
"tool_outputs": tool_outputs,
@@ -3005,6 +3022,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def steps(self) -> StepsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return StepsWithRawResponse(self._runs.steps)
@@ -3045,6 +3063,7 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def steps(self) -> AsyncStepsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncStepsWithRawResponse(self._runs.steps)
@@ -3085,6 +3104,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def steps(self) -> StepsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return StepsWithStreamingResponse(self._runs.steps)
@@ -3125,4 +3145,5 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def steps(self) -> AsyncStepsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncStepsWithStreamingResponse(self._runs.steps)
diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py
index 254a9443..9a6402b2 100644
--- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py
+++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py
@@ -10,7 +10,7 @@
from ..... import _legacy_response
from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ....._utils import maybe_transform, async_maybe_transform
+from ....._utils import path_template, maybe_transform, async_maybe_transform
from ....._compat import cached_property
from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -24,6 +24,8 @@
class Steps(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> StepsWithRawResponse:
"""
@@ -86,7 +88,12 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+ path_template(
+ "/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+ thread_id=thread_id,
+ run_id=run_id,
+ step_id=step_id,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -157,7 +164,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/runs/{run_id}/steps",
+ path_template("/threads/{thread_id}/runs/{run_id}/steps", thread_id=thread_id, run_id=run_id),
page=SyncCursorPage[RunStep],
options=make_request_options(
extra_headers=extra_headers,
@@ -180,6 +187,8 @@ def list(
class AsyncSteps(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncStepsWithRawResponse:
"""
@@ -242,7 +251,12 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+ path_template(
+ "/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
+ thread_id=thread_id,
+ run_id=run_id,
+ step_id=step_id,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -313,7 +327,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/threads/{thread_id}/runs/{run_id}/steps",
+ path_template("/threads/{thread_id}/runs/{run_id}/steps", thread_id=thread_id, run_id=run_id),
page=AsyncCursorPage[RunStep],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py
index 681d3c29..4b0f18fe 100644
--- a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py
+++ b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py
@@ -19,7 +19,7 @@
AsyncMessagesWithStreamingResponse,
)
from ...._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._utils import path_template, required_args, maybe_transform, async_maybe_transform
from .runs.runs import (
Runs,
AsyncRuns,
@@ -60,12 +60,16 @@
class Threads(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def runs(self) -> Runs:
+ """Build Assistants that can call models and use tools."""
return Runs(self._client)
@cached_property
def messages(self) -> Messages:
+ """Build Assistants that can call models and use tools."""
return Messages(self._client)
@cached_property
@@ -173,7 +177,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -222,7 +226,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
body=maybe_transform(
{
"metadata": metadata,
@@ -264,7 +268,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -729,7 +733,11 @@ def create_and_run(
else thread_create_and_run_params.ThreadCreateAndRunParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -918,12 +926,16 @@ def create_and_run_stream(
class AsyncThreads(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def runs(self) -> AsyncRuns:
+ """Build Assistants that can call models and use tools."""
return AsyncRuns(self._client)
@cached_property
def messages(self) -> AsyncMessages:
+ """Build Assistants that can call models and use tools."""
return AsyncMessages(self._client)
@cached_property
@@ -1031,7 +1043,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1080,7 +1092,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
body=await async_maybe_transform(
{
"metadata": metadata,
@@ -1122,7 +1134,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/threads/{thread_id}",
+ path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1587,7 +1599,11 @@ async def create_and_run(
else thread_create_and_run_params.ThreadCreateAndRunParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ synthesize_event_and_data=True,
),
cast_to=Run,
stream=stream or False,
@@ -1811,10 +1827,12 @@ def __init__(self, threads: Threads) -> None:
@cached_property
def runs(self) -> RunsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return RunsWithRawResponse(self._threads.runs)
@cached_property
def messages(self) -> MessagesWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return MessagesWithRawResponse(self._threads.messages)
@@ -1850,10 +1868,12 @@ def __init__(self, threads: AsyncThreads) -> None:
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncRunsWithRawResponse(self._threads.runs)
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncMessagesWithRawResponse(self._threads.messages)
@@ -1889,10 +1909,12 @@ def __init__(self, threads: Threads) -> None:
@cached_property
def runs(self) -> RunsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return RunsWithStreamingResponse(self._threads.runs)
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return MessagesWithStreamingResponse(self._threads.messages)
@@ -1928,8 +1950,10 @@ def __init__(self, threads: AsyncThreads) -> None:
@cached_property
def runs(self) -> AsyncRunsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncRunsWithStreamingResponse(self._threads.runs)
@cached_property
def messages(self) -> AsyncMessagesWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncMessagesWithStreamingResponse(self._threads.messages)
diff --git a/portkey_ai/_vendor/openai/resources/chat/chat.py b/portkey_ai/_vendor/openai/resources/chat/chat.py
index 14f9224b..2c921e74 100644
--- a/portkey_ai/_vendor/openai/resources/chat/chat.py
+++ b/portkey_ai/_vendor/openai/resources/chat/chat.py
@@ -19,6 +19,9 @@
class Chat(SyncAPIResource):
@cached_property
def completions(self) -> Completions:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return Completions(self._client)
@cached_property
@@ -44,6 +47,9 @@ def with_streaming_response(self) -> ChatWithStreamingResponse:
class AsyncChat(AsyncAPIResource):
@cached_property
def completions(self) -> AsyncCompletions:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletions(self._client)
@cached_property
@@ -72,6 +78,9 @@ def __init__(self, chat: Chat) -> None:
@cached_property
def completions(self) -> CompletionsWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return CompletionsWithRawResponse(self._chat.completions)
@@ -81,6 +90,9 @@ def __init__(self, chat: AsyncChat) -> None:
@cached_property
def completions(self) -> AsyncCompletionsWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletionsWithRawResponse(self._chat.completions)
@@ -90,6 +102,9 @@ def __init__(self, chat: Chat) -> None:
@cached_property
def completions(self) -> CompletionsWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return CompletionsWithStreamingResponse(self._chat.completions)
@@ -99,4 +114,7 @@ def __init__(self, chat: AsyncChat) -> None:
@cached_property
def completions(self) -> AsyncCompletionsWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletionsWithStreamingResponse(self._chat.completions)
diff --git a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py
index 9c0b74b8..845bd1a1 100644
--- a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py
+++ b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py
@@ -20,7 +20,7 @@
AsyncMessagesWithStreamingResponse,
)
from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._utils import path_template, required_args, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -58,8 +58,15 @@
class Completions(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def messages(self) -> Messages:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return Messages(self._client)
@cached_property
@@ -301,6 +308,9 @@ def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -436,8 +446,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -603,6 +614,9 @@ def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -747,8 +761,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -905,6 +920,9 @@ def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -1049,8 +1067,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -1269,7 +1288,7 @@ def retrieve(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1313,7 +1332,7 @@ def update(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._post(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -1346,12 +1365,10 @@ def list(
limit: Number of Chat Completions to retrieve.
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
+ metadata:
+ A list of metadata keys to filter the Chat Completions by. Example:
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
+ `metadata[key1]=value1&metadata[key2]=value2`
model: The model used to generate the Chat Completions.
@@ -1416,7 +1433,7 @@ def delete(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._delete(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1544,8 +1561,15 @@ def stream(
class AsyncCompletions(AsyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def messages(self) -> AsyncMessages:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessages(self._client)
@cached_property
@@ -1787,6 +1811,9 @@ async def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -1922,8 +1949,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -2089,6 +2117,9 @@ async def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -2233,8 +2264,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -2391,6 +2423,9 @@ async def create(
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+ Returns a chat completion object, or a streamed sequence of chat completion
+ chunk objects if the request is streamed.
+
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
@@ -2535,8 +2570,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
@@ -2755,7 +2791,7 @@ async def retrieve(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._get(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -2799,7 +2835,7 @@ async def update(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._post(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -2832,12 +2868,10 @@ def list(
limit: Number of Chat Completions to retrieve.
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
+ metadata:
+ A list of metadata keys to filter the Chat Completions by. Example:
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
+ `metadata[key1]=value1&metadata[key2]=value2`
model: The model used to generate the Chat Completions.
@@ -2902,7 +2936,7 @@ async def delete(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._delete(
- f"/chat/completions/{completion_id}",
+ path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -3055,6 +3089,9 @@ def __init__(self, completions: Completions) -> None:
@cached_property
def messages(self) -> MessagesWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return MessagesWithRawResponse(self._completions.messages)
@@ -3083,6 +3120,9 @@ def __init__(self, completions: AsyncCompletions) -> None:
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessagesWithRawResponse(self._completions.messages)
@@ -3111,6 +3151,9 @@ def __init__(self, completions: Completions) -> None:
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return MessagesWithStreamingResponse(self._completions.messages)
@@ -3139,6 +3182,9 @@ def __init__(self, completions: AsyncCompletions) -> None:
@cached_property
def messages(self) -> AsyncMessagesWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessagesWithStreamingResponse(self._completions.messages)
diff --git a/portkey_ai/_vendor/openai/resources/chat/completions/messages.py b/portkey_ai/_vendor/openai/resources/chat/completions/messages.py
index 3d6dc79c..ffbff566 100644
--- a/portkey_ai/_vendor/openai/resources/chat/completions/messages.py
+++ b/portkey_ai/_vendor/openai/resources/chat/completions/messages.py
@@ -8,7 +8,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform
+from ...._utils import path_template, maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -21,6 +21,10 @@
class Messages(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
@@ -78,7 +82,7 @@ def list(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get_api_list(
- f"/chat/completions/{completion_id}/messages",
+ path_template("/chat/completions/{completion_id}/messages", completion_id=completion_id),
page=SyncCursorPage[ChatCompletionStoreMessage],
options=make_request_options(
extra_headers=extra_headers,
@@ -99,6 +103,10 @@ def list(
class AsyncMessages(AsyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
@@ -156,7 +164,7 @@ def list(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get_api_list(
- f"/chat/completions/{completion_id}/messages",
+ path_template("/chat/completions/{completion_id}/messages", completion_id=completion_id),
page=AsyncCursorPage[ChatCompletionStoreMessage],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/completions.py b/portkey_ai/_vendor/openai/resources/completions.py
index 2f2284a6..4c9e2667 100644
--- a/portkey_ai/_vendor/openai/resources/completions.py
+++ b/portkey_ai/_vendor/openai/resources/completions.py
@@ -25,6 +25,10 @@
class Completions(SyncAPIResource):
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
+
@cached_property
def with_raw_response(self) -> CompletionsWithRawResponse:
"""
@@ -76,6 +80,9 @@ def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -231,6 +238,9 @@ def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -386,6 +396,9 @@ def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -575,6 +588,10 @@ def create(
class AsyncCompletions(AsyncAPIResource):
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
"""
@@ -626,6 +643,9 @@ async def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -781,6 +801,9 @@ async def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -936,6 +959,9 @@ async def create(
"""
Creates a completion for the provided prompt and parameters.
+ Returns a completion object, or a sequence of completion objects if the request
+ is streamed.
+
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
diff --git a/portkey_ai/_vendor/openai/resources/containers/containers.py b/portkey_ai/_vendor/openai/resources/containers/containers.py
index 0cbb400d..f6b8c33c 100644
--- a/portkey_ai/_vendor/openai/resources/containers/containers.py
+++ b/portkey_ai/_vendor/openai/resources/containers/containers.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+from typing import Iterable
from typing_extensions import Literal
import httpx
@@ -9,7 +10,7 @@
from ... import _legacy_response
from ...types import container_list_params, container_create_params
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -61,6 +62,8 @@ def create(
expires_after: container_create_params.ExpiresAfter | Omit = omit,
file_ids: SequenceNotStr[str] | Omit = omit,
memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
+ network_policy: container_create_params.NetworkPolicy | Omit = omit,
+ skills: Iterable[container_create_params.Skill] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -80,6 +83,10 @@ def create(
memory_limit: Optional memory limit for the container. Defaults to "1g".
+ network_policy: Network access policy for the container.
+
+ skills: An optional list of skills referenced by id or inline data.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -96,6 +103,8 @@ def create(
"expires_after": expires_after,
"file_ids": file_ids,
"memory_limit": memory_limit,
+ "network_policy": network_policy,
+ "skills": skills,
},
container_create_params.ContainerCreateParams,
),
@@ -131,7 +140,7 @@ def retrieve(
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get(
- f"/containers/{container_id}",
+ path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -143,6 +152,7 @@ def list(
*,
after: str | Omit = omit,
limit: int | Omit = omit,
+ name: str | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -164,6 +174,8 @@ def list(
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
+ name: Filter results by container name.
+
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
@@ -187,6 +199,7 @@ def list(
{
"after": after,
"limit": limit,
+ "name": name,
"order": order,
},
container_list_params.ContainerListParams,
@@ -222,7 +235,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/containers/{container_id}",
+ path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -261,6 +274,8 @@ async def create(
expires_after: container_create_params.ExpiresAfter | Omit = omit,
file_ids: SequenceNotStr[str] | Omit = omit,
memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
+ network_policy: container_create_params.NetworkPolicy | Omit = omit,
+ skills: Iterable[container_create_params.Skill] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -280,6 +295,10 @@ async def create(
memory_limit: Optional memory limit for the container. Defaults to "1g".
+ network_policy: Network access policy for the container.
+
+ skills: An optional list of skills referenced by id or inline data.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -296,6 +315,8 @@ async def create(
"expires_after": expires_after,
"file_ids": file_ids,
"memory_limit": memory_limit,
+ "network_policy": network_policy,
+ "skills": skills,
},
container_create_params.ContainerCreateParams,
),
@@ -331,7 +352,7 @@ async def retrieve(
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return await self._get(
- f"/containers/{container_id}",
+ path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -343,6 +364,7 @@ def list(
*,
after: str | Omit = omit,
limit: int | Omit = omit,
+ name: str | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -364,6 +386,8 @@ def list(
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
+ name: Filter results by container name.
+
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
@@ -387,6 +411,7 @@ def list(
{
"after": after,
"limit": limit,
+ "name": name,
"order": order,
},
container_list_params.ContainerListParams,
@@ -422,7 +447,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/containers/{container_id}",
+ path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/containers/files/content.py b/portkey_ai/_vendor/openai/resources/containers/files/content.py
index a3dbd0e8..eb915b9c 100644
--- a/portkey_ai/_vendor/openai/resources/containers/files/content.py
+++ b/portkey_ai/_vendor/openai/resources/containers/files/content.py
@@ -6,6 +6,7 @@
from .... import _legacy_response
from ...._types import Body, Query, Headers, NotGiven, not_given
+from ...._utils import path_template
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -69,7 +70,9 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
- f"/containers/{container_id}/files/{file_id}/content",
+ path_template(
+ "/containers/{container_id}/files/{file_id}/content", container_id=container_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -127,7 +130,9 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
- f"/containers/{container_id}/files/{file_id}/content",
+ path_template(
+ "/containers/{container_id}/files/{file_id}/content", container_id=container_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/containers/files/files.py b/portkey_ai/_vendor/openai/resources/containers/files/files.py
index a472cfc9..f48adf3a 100644
--- a/portkey_ai/_vendor/openai/resources/containers/files/files.py
+++ b/portkey_ai/_vendor/openai/resources/containers/files/files.py
@@ -17,7 +17,7 @@
AsyncContentWithStreamingResponse,
)
from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, FileTypes, omit, not_given
-from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -96,12 +96,13 @@ def create(
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ if files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
- f"/containers/{container_id}/files",
+ path_template("/containers/{container_id}/files", container_id=container_id),
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
@@ -139,7 +140,7 @@ def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/containers/{container_id}/files/{file_id}",
+ path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -187,7 +188,7 @@ def list(
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get_api_list(
- f"/containers/{container_id}/files",
+ path_template("/containers/{container_id}/files", container_id=container_id),
page=SyncCursorPage[FileListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -236,7 +237,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/containers/{container_id}/files/{file_id}",
+ path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -309,12 +310,13 @@ async def create(
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ if files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
- f"/containers/{container_id}/files",
+ path_template("/containers/{container_id}/files", container_id=container_id),
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
@@ -352,7 +354,7 @@ async def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/containers/{container_id}/files/{file_id}",
+ path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -400,7 +402,7 @@ def list(
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get_api_list(
- f"/containers/{container_id}/files",
+ path_template("/containers/{container_id}/files", container_id=container_id),
page=AsyncCursorPage[FileListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -449,7 +451,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/containers/{container_id}/files/{file_id}",
+ path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/conversations/api.md b/portkey_ai/_vendor/openai/resources/conversations/api.md
new file mode 100644
index 00000000..9e9181a3
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/conversations/api.md
@@ -0,0 +1,42 @@
+# Conversations
+
+Types:
+
+```python
+from openai.types.conversations import (
+ ComputerScreenshotContent,
+ Conversation,
+ ConversationDeleted,
+ ConversationDeletedResource,
+ Message,
+ SummaryTextContent,
+ TextContent,
+ InputTextContent,
+ OutputTextContent,
+ RefusalContent,
+ InputImageContent,
+ InputFileContent,
+)
+```
+
+Methods:
+
+- client.conversations.create(\*\*params) -> Conversation
+- client.conversations.retrieve(conversation_id) -> Conversation
+- client.conversations.update(conversation_id, \*\*params) -> Conversation
+- client.conversations.delete(conversation_id) -> ConversationDeletedResource
+
+## Items
+
+Types:
+
+```python
+from openai.types.conversations import ConversationItem, ConversationItemList
+```
+
+Methods:
+
+- client.conversations.items.create(conversation_id, \*\*params) -> ConversationItemList
+- client.conversations.items.retrieve(item_id, \*, conversation_id, \*\*params) -> ConversationItem
+- client.conversations.items.list(conversation_id, \*\*params) -> SyncConversationCursorPage[ConversationItem]
+- client.conversations.items.delete(item_id, \*, conversation_id) -> Conversation
diff --git a/portkey_ai/_vendor/openai/resources/conversations/conversations.py b/portkey_ai/_vendor/openai/resources/conversations/conversations.py
index da037a4e..d349f385 100644
--- a/portkey_ai/_vendor/openai/resources/conversations/conversations.py
+++ b/portkey_ai/_vendor/openai/resources/conversations/conversations.py
@@ -16,7 +16,7 @@
AsyncItemsWithStreamingResponse,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -31,8 +31,11 @@
class Conversations(SyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def items(self) -> Items:
+ """Manage conversations and conversation items."""
return Items(self._client)
@cached_property
@@ -129,7 +132,7 @@ def retrieve(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -170,7 +173,7 @@ def update(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -205,7 +208,7 @@ def delete(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._delete(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -214,8 +217,11 @@ def delete(
class AsyncConversations(AsyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def items(self) -> AsyncItems:
+ """Manage conversations and conversation items."""
return AsyncItems(self._client)
@cached_property
@@ -312,7 +318,7 @@ async def retrieve(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._get(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -353,7 +359,7 @@ async def update(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._post(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
body=await async_maybe_transform(
{"metadata": metadata}, conversation_update_params.ConversationUpdateParams
),
@@ -390,7 +396,7 @@ async def delete(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._delete(
- f"/conversations/{conversation_id}",
+ path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -417,6 +423,7 @@ def __init__(self, conversations: Conversations) -> None:
@cached_property
def items(self) -> ItemsWithRawResponse:
+ """Manage conversations and conversation items."""
return ItemsWithRawResponse(self._conversations.items)
@@ -439,6 +446,7 @@ def __init__(self, conversations: AsyncConversations) -> None:
@cached_property
def items(self) -> AsyncItemsWithRawResponse:
+ """Manage conversations and conversation items."""
return AsyncItemsWithRawResponse(self._conversations.items)
@@ -461,6 +469,7 @@ def __init__(self, conversations: Conversations) -> None:
@cached_property
def items(self) -> ItemsWithStreamingResponse:
+ """Manage conversations and conversation items."""
return ItemsWithStreamingResponse(self._conversations.items)
@@ -483,4 +492,5 @@ def __init__(self, conversations: AsyncConversations) -> None:
@cached_property
def items(self) -> AsyncItemsWithStreamingResponse:
+ """Manage conversations and conversation items."""
return AsyncItemsWithStreamingResponse(self._conversations.items)
diff --git a/portkey_ai/_vendor/openai/resources/conversations/items.py b/portkey_ai/_vendor/openai/resources/conversations/items.py
index 3dba1448..7d7c9a4a 100644
--- a/portkey_ai/_vendor/openai/resources/conversations/items.py
+++ b/portkey_ai/_vendor/openai/resources/conversations/items.py
@@ -9,7 +9,7 @@
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -26,6 +26,8 @@
class Items(SyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def with_raw_response(self) -> ItemsWithRawResponse:
"""
@@ -79,7 +81,7 @@ def create(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
- f"/conversations/{conversation_id}/items",
+ path_template("/conversations/{conversation_id}/items", conversation_id=conversation_id),
body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
options=make_request_options(
extra_headers=extra_headers,
@@ -127,7 +129,9 @@ def retrieve(
return cast(
ConversationItem,
self._get(
- f"/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -198,7 +202,7 @@ def list(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get_api_list(
- f"/conversations/{conversation_id}/items",
+ path_template("/conversations/{conversation_id}/items", conversation_id=conversation_id),
page=SyncConversationCursorPage[ConversationItem],
options=make_request_options(
extra_headers=extra_headers,
@@ -247,7 +251,9 @@ def delete(
if not item_id:
raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
return self._delete(
- f"/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -256,6 +262,8 @@ def delete(
class AsyncItems(AsyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def with_raw_response(self) -> AsyncItemsWithRawResponse:
"""
@@ -309,7 +317,7 @@ async def create(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._post(
- f"/conversations/{conversation_id}/items",
+ path_template("/conversations/{conversation_id}/items", conversation_id=conversation_id),
body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
options=make_request_options(
extra_headers=extra_headers,
@@ -357,7 +365,9 @@ async def retrieve(
return cast(
ConversationItem,
await self._get(
- f"/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -428,7 +438,7 @@ def list(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get_api_list(
- f"/conversations/{conversation_id}/items",
+ path_template("/conversations/{conversation_id}/items", conversation_id=conversation_id),
page=AsyncConversationCursorPage[ConversationItem],
options=make_request_options(
extra_headers=extra_headers,
@@ -477,7 +487,9 @@ async def delete(
if not item_id:
raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
return await self._delete(
- f"/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/embeddings.py b/portkey_ai/_vendor/openai/resources/embeddings.py
index 5dc3dfa9..86eb949a 100644
--- a/portkey_ai/_vendor/openai/resources/embeddings.py
+++ b/portkey_ai/_vendor/openai/resources/embeddings.py
@@ -25,6 +25,10 @@
class Embeddings(SyncAPIResource):
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
+
@cached_property
def with_raw_response(self) -> EmbeddingsWithRawResponse:
"""
@@ -144,6 +148,10 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
class AsyncEmbeddings(AsyncAPIResource):
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
"""
diff --git a/portkey_ai/_vendor/openai/resources/evals/evals.py b/portkey_ai/_vendor/openai/resources/evals/evals.py
index 40c4a3e9..6acd669a 100644
--- a/portkey_ai/_vendor/openai/resources/evals/evals.py
+++ b/portkey_ai/_vendor/openai/resources/evals/evals.py
@@ -10,7 +10,7 @@
from ... import _legacy_response
from ...types import eval_list_params, eval_create_params, eval_update_params
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from .runs.runs import (
Runs,
@@ -35,8 +35,11 @@
class Evals(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def runs(self) -> Runs:
+ """Manage and run evals in the OpenAI platform."""
return Runs(self._client)
@cached_property
@@ -149,7 +152,7 @@ def retrieve(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -193,7 +196,7 @@ def update(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._post(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
body=maybe_transform(
{
"metadata": metadata,
@@ -290,7 +293,7 @@ def delete(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._delete(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -299,8 +302,11 @@ def delete(
class AsyncEvals(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def runs(self) -> AsyncRuns:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRuns(self._client)
@cached_property
@@ -413,7 +419,7 @@ async def retrieve(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._get(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -457,7 +463,7 @@ async def update(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
body=await async_maybe_transform(
{
"metadata": metadata,
@@ -554,7 +560,7 @@ async def delete(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._delete(
- f"/evals/{eval_id}",
+ path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -584,6 +590,7 @@ def __init__(self, evals: Evals) -> None:
@cached_property
def runs(self) -> RunsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return RunsWithRawResponse(self._evals.runs)
@@ -609,6 +616,7 @@ def __init__(self, evals: AsyncEvals) -> None:
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRunsWithRawResponse(self._evals.runs)
@@ -634,6 +642,7 @@ def __init__(self, evals: Evals) -> None:
@cached_property
def runs(self) -> RunsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return RunsWithStreamingResponse(self._evals.runs)
@@ -659,4 +668,5 @@ def __init__(self, evals: AsyncEvals) -> None:
@cached_property
def runs(self) -> AsyncRunsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRunsWithStreamingResponse(self._evals.runs)
diff --git a/portkey_ai/_vendor/openai/resources/evals/runs/output_items.py b/portkey_ai/_vendor/openai/resources/evals/runs/output_items.py
index c2dee721..7a498a7e 100644
--- a/portkey_ai/_vendor/openai/resources/evals/runs/output_items.py
+++ b/portkey_ai/_vendor/openai/resources/evals/runs/output_items.py
@@ -8,7 +8,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform
+from ...._utils import path_template, maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -22,6 +22,8 @@
class OutputItems(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def with_raw_response(self) -> OutputItemsWithRawResponse:
"""
@@ -73,7 +75,12 @@ def retrieve(
if not output_item_id:
raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
return self._get(
- f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
+ path_template(
+ "/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
+ eval_id=eval_id,
+ run_id=run_id,
+ output_item_id=output_item_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -123,7 +130,7 @@ def list(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get_api_list(
- f"/evals/{eval_id}/runs/{run_id}/output_items",
+ path_template("/evals/{eval_id}/runs/{run_id}/output_items", eval_id=eval_id, run_id=run_id),
page=SyncCursorPage[OutputItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -145,6 +152,8 @@ def list(
class AsyncOutputItems(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def with_raw_response(self) -> AsyncOutputItemsWithRawResponse:
"""
@@ -196,7 +205,12 @@ async def retrieve(
if not output_item_id:
raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
return await self._get(
- f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
+ path_template(
+ "/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
+ eval_id=eval_id,
+ run_id=run_id,
+ output_item_id=output_item_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -246,7 +260,7 @@ def list(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get_api_list(
- f"/evals/{eval_id}/runs/{run_id}/output_items",
+ path_template("/evals/{eval_id}/runs/{run_id}/output_items", eval_id=eval_id, run_id=run_id),
page=AsyncCursorPage[OutputItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/evals/runs/runs.py b/portkey_ai/_vendor/openai/resources/evals/runs/runs.py
index b747b198..152ce9cb 100644
--- a/portkey_ai/_vendor/openai/resources/evals/runs/runs.py
+++ b/portkey_ai/_vendor/openai/resources/evals/runs/runs.py
@@ -9,7 +9,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -35,8 +35,11 @@
class Runs(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def output_items(self) -> OutputItems:
+ """Manage and run evals in the OpenAI platform."""
return OutputItems(self._client)
@cached_property
@@ -100,7 +103,7 @@ def create(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._post(
- f"/evals/{eval_id}/runs",
+ path_template("/evals/{eval_id}/runs", eval_id=eval_id),
body=maybe_transform(
{
"data_source": data_source,
@@ -144,7 +147,7 @@ def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -191,7 +194,7 @@ def list(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get_api_list(
- f"/evals/{eval_id}/runs",
+ path_template("/evals/{eval_id}/runs", eval_id=eval_id),
page=SyncCursorPage[RunListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -240,7 +243,7 @@ def delete(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._delete(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -276,7 +279,7 @@ def cancel(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._post(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -285,8 +288,11 @@ def cancel(
class AsyncRuns(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def output_items(self) -> AsyncOutputItems:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItems(self._client)
@cached_property
@@ -350,7 +356,7 @@ async def create(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
- f"/evals/{eval_id}/runs",
+ path_template("/evals/{eval_id}/runs", eval_id=eval_id),
body=await async_maybe_transform(
{
"data_source": data_source,
@@ -394,7 +400,7 @@ async def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -441,7 +447,7 @@ def list(
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get_api_list(
- f"/evals/{eval_id}/runs",
+ path_template("/evals/{eval_id}/runs", eval_id=eval_id),
page=AsyncCursorPage[RunListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -490,7 +496,7 @@ async def delete(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._delete(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -526,7 +532,7 @@ async def cancel(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._post(
- f"/evals/{eval_id}/runs/{run_id}",
+ path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -556,6 +562,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def output_items(self) -> OutputItemsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return OutputItemsWithRawResponse(self._runs.output_items)
@@ -581,6 +588,7 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def output_items(self) -> AsyncOutputItemsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItemsWithRawResponse(self._runs.output_items)
@@ -606,6 +614,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def output_items(self) -> OutputItemsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return OutputItemsWithStreamingResponse(self._runs.output_items)
@@ -631,4 +640,5 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def output_items(self) -> AsyncOutputItemsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItemsWithStreamingResponse(self._runs.output_items)
diff --git a/portkey_ai/_vendor/openai/resources/files.py b/portkey_ai/_vendor/openai/resources/files.py
index 964d6505..b03f11b0 100644
--- a/portkey_ai/_vendor/openai/resources/files.py
+++ b/portkey_ai/_vendor/openai/resources/files.py
@@ -12,7 +12,7 @@
from .. import _legacy_response
from ..types import FilePurpose, file_list_params, file_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -33,6 +33,10 @@
class Files(SyncAPIResource):
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
+
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
"""
@@ -160,7 +164,7 @@ def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/files/{file_id}",
+ path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -254,7 +258,7 @@ def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
- f"/files/{file_id}",
+ path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -288,7 +292,7 @@ def content(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
- f"/files/{file_id}/content",
+ path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -322,7 +326,7 @@ def retrieve_content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/files/{file_id}/content",
+ path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -354,6 +358,10 @@ def wait_for_processing(
class AsyncFiles(AsyncAPIResource):
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
@@ -481,7 +489,7 @@ async def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/files/{file_id}",
+ path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -575,7 +583,7 @@ async def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
- f"/files/{file_id}",
+ path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -609,7 +617,7 @@ async def content(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
- f"/files/{file_id}/content",
+ path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -643,7 +651,7 @@ async def retrieve_content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/files/{file_id}/content",
+ path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/alpha.py b/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/alpha.py
index 54c05fab..183208d0 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/alpha.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/alpha.py
@@ -19,6 +19,7 @@
class Alpha(SyncAPIResource):
@cached_property
def graders(self) -> Graders:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Graders(self._client)
@cached_property
@@ -44,6 +45,7 @@ def with_streaming_response(self) -> AlphaWithStreamingResponse:
class AsyncAlpha(AsyncAPIResource):
@cached_property
def graders(self) -> AsyncGraders:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGraders(self._client)
@cached_property
@@ -72,6 +74,7 @@ def __init__(self, alpha: Alpha) -> None:
@cached_property
def graders(self) -> GradersWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return GradersWithRawResponse(self._alpha.graders)
@@ -81,6 +84,7 @@ def __init__(self, alpha: AsyncAlpha) -> None:
@cached_property
def graders(self) -> AsyncGradersWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGradersWithRawResponse(self._alpha.graders)
@@ -90,6 +94,7 @@ def __init__(self, alpha: Alpha) -> None:
@cached_property
def graders(self) -> GradersWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return GradersWithStreamingResponse(self._alpha.graders)
@@ -99,4 +104,5 @@ def __init__(self, alpha: AsyncAlpha) -> None:
@cached_property
def graders(self) -> AsyncGradersWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGradersWithStreamingResponse(self._alpha.graders)
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/graders.py b/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/graders.py
index e7a9b925..e5d5dea5 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/graders.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/alpha/graders.py
@@ -19,6 +19,8 @@
class Graders(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> GradersWithRawResponse:
"""
@@ -127,6 +129,8 @@ def validate(
class AsyncGraders(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncGradersWithRawResponse:
"""
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/checkpoints.py b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/checkpoints.py
index f59976a2..9c2ed6f5 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/checkpoints.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/checkpoints.py
@@ -19,6 +19,7 @@
class Checkpoints(SyncAPIResource):
@cached_property
def permissions(self) -> Permissions:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Permissions(self._client)
@cached_property
@@ -44,6 +45,7 @@ def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
class AsyncCheckpoints(AsyncAPIResource):
@cached_property
def permissions(self) -> AsyncPermissions:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissions(self._client)
@cached_property
@@ -72,6 +74,7 @@ def __init__(self, checkpoints: Checkpoints) -> None:
@cached_property
def permissions(self) -> PermissionsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return PermissionsWithRawResponse(self._checkpoints.permissions)
@@ -81,6 +84,7 @@ def __init__(self, checkpoints: AsyncCheckpoints) -> None:
@cached_property
def permissions(self) -> AsyncPermissionsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissionsWithRawResponse(self._checkpoints.permissions)
@@ -90,6 +94,7 @@ def __init__(self, checkpoints: Checkpoints) -> None:
@cached_property
def permissions(self) -> PermissionsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return PermissionsWithStreamingResponse(self._checkpoints.permissions)
@@ -99,4 +104,5 @@ def __init__(self, checkpoints: AsyncCheckpoints) -> None:
@cached_property
def permissions(self) -> AsyncPermissionsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissionsWithStreamingResponse(self._checkpoints.permissions)
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py
index e7f55b82..15184e13 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -2,19 +2,25 @@
from __future__ import annotations
+import typing_extensions
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncPage, AsyncPage
+from ....pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
from ...._base_client import AsyncPaginator, make_request_options
-from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
+from ....types.fine_tuning.checkpoints import (
+ permission_list_params,
+ permission_create_params,
+ permission_retrieve_params,
+)
+from ....types.fine_tuning.checkpoints.permission_list_response import PermissionListResponse
from ....types.fine_tuning.checkpoints.permission_create_response import PermissionCreateResponse
from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
from ....types.fine_tuning.checkpoints.permission_retrieve_response import PermissionRetrieveResponse
@@ -23,6 +29,8 @@
class Permissions(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> PermissionsWithRawResponse:
"""
@@ -76,7 +84,10 @@ def create(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get_api_list(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
page=SyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
@@ -86,6 +97,7 @@ def create(
method="post",
)
+ @typing_extensions.deprecated("Retrieve is deprecated. Please swap to the paginated list method instead.")
def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -129,7 +141,10 @@ def retrieve(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -148,6 +163,72 @@ def retrieve(
cast_to=PermissionRetrieveResponse,
)
+ def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["ascending", "descending"] | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[PermissionListResponse]:
+ """
+ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
+
+ Organization owners can use this endpoint to view all permissions for a
+ fine-tuned model checkpoint.
+
+ Args:
+ after: Identifier for the last permission ID from the previous pagination request.
+
+ limit: Number of permissions to retrieve.
+
+ order: The order in which to retrieve permissions.
+
+ project_id: The ID of the project to get permissions for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuned_model_checkpoint:
+ raise ValueError(
+ f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
+ )
+ return self._get_api_list(
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
+ page=SyncConversationCursorPage[PermissionListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "project_id": project_id,
+ },
+ permission_list_params.PermissionListParams,
+ ),
+ ),
+ model=PermissionListResponse,
+ )
+
def delete(
self,
permission_id: str,
@@ -182,7 +263,11 @@ def delete(
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return self._delete(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ permission_id=permission_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -191,6 +276,8 @@ def delete(
class AsyncPermissions(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncPermissionsWithRawResponse:
"""
@@ -244,7 +331,10 @@ def create(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get_api_list(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
page=AsyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
@@ -254,6 +344,7 @@ def create(
method="post",
)
+ @typing_extensions.deprecated("Retrieve is deprecated. Please swap to the paginated list method instead.")
async def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -297,7 +388,10 @@ async def retrieve(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return await self._get(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -316,6 +410,72 @@ async def retrieve(
cast_to=PermissionRetrieveResponse,
)
+ def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["ascending", "descending"] | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[PermissionListResponse, AsyncConversationCursorPage[PermissionListResponse]]:
+ """
+ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
+
+ Organization owners can use this endpoint to view all permissions for a
+ fine-tuned model checkpoint.
+
+ Args:
+ after: Identifier for the last permission ID from the previous pagination request.
+
+ limit: Number of permissions to retrieve.
+
+ order: The order in which to retrieve permissions.
+
+ project_id: The ID of the project to get permissions for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuned_model_checkpoint:
+ raise ValueError(
+ f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
+ )
+ return self._get_api_list(
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ ),
+ page=AsyncConversationCursorPage[PermissionListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "project_id": project_id,
+ },
+ permission_list_params.PermissionListParams,
+ ),
+ ),
+ model=PermissionListResponse,
+ )
+
async def delete(
self,
permission_id: str,
@@ -350,7 +510,11 @@ async def delete(
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return await self._delete(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
+ path_template(
+ "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ permission_id=permission_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -365,8 +529,13 @@ def __init__(self, permissions: Permissions) -> None:
self.create = _legacy_response.to_raw_response_wrapper(
permissions.create,
)
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated],
+ )
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ permissions.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
permissions.delete,
@@ -380,8 +549,13 @@ def __init__(self, permissions: AsyncPermissions) -> None:
self.create = _legacy_response.async_to_raw_response_wrapper(
permissions.create,
)
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated],
+ )
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ permissions.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
permissions.delete,
@@ -395,8 +569,13 @@ def __init__(self, permissions: Permissions) -> None:
self.create = to_streamed_response_wrapper(
permissions.create,
)
- self.retrieve = to_streamed_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated],
+ )
+ )
+ self.list = to_streamed_response_wrapper(
+ permissions.list,
)
self.delete = to_streamed_response_wrapper(
permissions.delete,
@@ -410,8 +589,13 @@ def __init__(self, permissions: AsyncPermissions) -> None:
self.create = async_to_streamed_response_wrapper(
permissions.create,
)
- self.retrieve = async_to_streamed_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated],
+ )
+ )
+ self.list = async_to_streamed_response_wrapper(
+ permissions.list,
)
self.delete = async_to_streamed_response_wrapper(
permissions.delete,
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py b/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py
index 25ae3e8c..60f1f44b 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py
@@ -35,6 +35,7 @@
class FineTuning(SyncAPIResource):
@cached_property
def jobs(self) -> Jobs:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Jobs(self._client)
@cached_property
@@ -68,6 +69,7 @@ def with_streaming_response(self) -> FineTuningWithStreamingResponse:
class AsyncFineTuning(AsyncAPIResource):
@cached_property
def jobs(self) -> AsyncJobs:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobs(self._client)
@cached_property
@@ -104,6 +106,7 @@ def __init__(self, fine_tuning: FineTuning) -> None:
@cached_property
def jobs(self) -> JobsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return JobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
@@ -121,6 +124,7 @@ def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@cached_property
def jobs(self) -> AsyncJobsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
@@ -138,6 +142,7 @@ def __init__(self, fine_tuning: FineTuning) -> None:
@cached_property
def jobs(self) -> JobsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return JobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
@@ -155,6 +160,7 @@ def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@cached_property
def jobs(self) -> AsyncJobsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py
index f65856f0..0f91a621 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -6,7 +6,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform
+from ...._utils import path_template, maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -22,6 +22,8 @@
class Checkpoints(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> CheckpointsWithRawResponse:
"""
@@ -73,7 +75,7 @@ def list(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", fine_tuning_job_id=fine_tuning_job_id),
page=SyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
@@ -93,6 +95,8 @@ def list(
class AsyncCheckpoints(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
"""
@@ -144,7 +148,7 @@ def list(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", fine_tuning_job_id=fine_tuning_job_id),
page=AsyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py
index b292e057..a948b103 100644
--- a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py
+++ b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py
@@ -9,7 +9,7 @@
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from .checkpoints import (
Checkpoints,
@@ -35,8 +35,11 @@
class Jobs(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def checkpoints(self) -> Checkpoints:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Checkpoints(self._client)
@cached_property
@@ -205,7 +208,7 @@ def retrieve(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -290,7 +293,7 @@ def cancel(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/cancel", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -329,7 +332,7 @@ def list_events(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/events", fine_tuning_job_id=fine_tuning_job_id),
page=SyncCursorPage[FineTuningJobEvent],
options=make_request_options(
extra_headers=extra_headers,
@@ -373,7 +376,7 @@ def pause(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/pause", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -406,7 +409,7 @@ def resume(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/resume", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -415,8 +418,11 @@ def resume(
class AsyncJobs(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def checkpoints(self) -> AsyncCheckpoints:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpoints(self._client)
@cached_property
@@ -585,7 +591,7 @@ async def retrieve(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -670,7 +676,7 @@ async def cancel(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/cancel", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -709,7 +715,7 @@ def list_events(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/events", fine_tuning_job_id=fine_tuning_job_id),
page=AsyncCursorPage[FineTuningJobEvent],
options=make_request_options(
extra_headers=extra_headers,
@@ -753,7 +759,7 @@ async def pause(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/pause", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -786,7 +792,7 @@ async def resume(
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
+ path_template("/fine_tuning/jobs/{fine_tuning_job_id}/resume", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -822,6 +828,7 @@ def __init__(self, jobs: Jobs) -> None:
@cached_property
def checkpoints(self) -> CheckpointsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return CheckpointsWithRawResponse(self._jobs.checkpoints)
@@ -853,6 +860,7 @@ def __init__(self, jobs: AsyncJobs) -> None:
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)
@@ -884,6 +892,7 @@ def __init__(self, jobs: Jobs) -> None:
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return CheckpointsWithStreamingResponse(self._jobs.checkpoints)
@@ -915,4 +924,5 @@ def __init__(self, jobs: AsyncJobs) -> None:
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
diff --git a/portkey_ai/_vendor/openai/resources/images.py b/portkey_ai/_vendor/openai/resources/images.py
index 80582848..6959c2ae 100644
--- a/portkey_ai/_vendor/openai/resources/images.py
+++ b/portkey_ai/_vendor/openai/resources/images.py
@@ -25,6 +25,8 @@
class Images(SyncAPIResource):
+ """Given a prompt and/or an input image, the model will generate a new image."""
+
@cached_property
def with_raw_response(self) -> ImagesWithRawResponse:
"""
@@ -147,14 +149,15 @@ def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -172,17 +175,15 @@ def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -201,14 +202,13 @@ def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
@@ -262,14 +262,15 @@ def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -291,17 +292,15 @@ def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -320,14 +319,13 @@ def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
@@ -377,14 +375,15 @@ def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -406,17 +405,15 @@ def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -435,14 +432,13 @@ def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
@@ -921,6 +917,8 @@ def generate(
class AsyncImages(AsyncAPIResource):
+ """Given a prompt and/or an input image, the model will generate a new image."""
+
@cached_property
def with_raw_response(self) -> AsyncImagesWithRawResponse:
"""
@@ -1043,14 +1041,15 @@ async def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -1068,17 +1067,15 @@ async def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -1097,14 +1094,13 @@ async def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
@@ -1158,14 +1154,15 @@ async def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -1187,17 +1184,15 @@ async def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -1216,14 +1211,13 @@ async def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
@@ -1273,14 +1267,15 @@ async def edit(
prompt.
This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
- and `gpt-image-1-mini`) and `dall-e-2`.
+ `gpt-image-1-mini`, and `chatgpt-image-latest`) and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -1302,17 +1297,15 @@ async def edit(
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and the GPT image models
- are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
- image models is used.
+ model: The model to use for image generation. Defaults to `gpt-image-1.5`.
n: The number of images to generate. Must be between 1 and 10.
@@ -1331,14 +1324,13 @@ async def edit(
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for the GPT image models. `dall-e-2` only supports `standard`
- quality. Defaults to `auto`.
+ quality: The quality of the image that will be generated for GPT image models. Defaults
+ to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as the GPT image
- models always return base64-encoded images.
+ generated. This parameter is only supported for `dall-e-2` (default is `url` for
+ `dall-e-2`), as GPT image models always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
diff --git a/portkey_ai/_vendor/openai/resources/models.py b/portkey_ai/_vendor/openai/resources/models.py
index a8f76910..a1fe0d39 100644
--- a/portkey_ai/_vendor/openai/resources/models.py
+++ b/portkey_ai/_vendor/openai/resources/models.py
@@ -6,6 +6,7 @@
from .. import _legacy_response
from .._types import Body, Query, Headers, NotGiven, not_given
+from .._utils import path_template
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -21,6 +22,8 @@
class Models(SyncAPIResource):
+ """List and describe the various models available in the API."""
+
@cached_property
def with_raw_response(self) -> ModelsWithRawResponse:
"""
@@ -67,7 +70,7 @@ def retrieve(
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return self._get(
- f"/models/{model}",
+ path_template("/models/{model}", model=model),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -125,7 +128,7 @@ def delete(
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return self._delete(
- f"/models/{model}",
+ path_template("/models/{model}", model=model),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -134,6 +137,8 @@ def delete(
class AsyncModels(AsyncAPIResource):
+ """List and describe the various models available in the API."""
+
@cached_property
def with_raw_response(self) -> AsyncModelsWithRawResponse:
"""
@@ -180,7 +185,7 @@ async def retrieve(
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return await self._get(
- f"/models/{model}",
+ path_template("/models/{model}", model=model),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -238,7 +243,7 @@ async def delete(
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return await self._delete(
- f"/models/{model}",
+ path_template("/models/{model}", model=model),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/portkey_ai/_vendor/openai/resources/moderations.py b/portkey_ai/_vendor/openai/resources/moderations.py
index 5f378f71..0b9a2d23 100644
--- a/portkey_ai/_vendor/openai/resources/moderations.py
+++ b/portkey_ai/_vendor/openai/resources/moderations.py
@@ -22,6 +22,10 @@
class Moderations(SyncAPIResource):
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
+
@cached_property
def with_raw_response(self) -> ModerationsWithRawResponse:
"""
@@ -92,6 +96,10 @@ def create(
class AsyncModerations(AsyncAPIResource):
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncModerationsWithRawResponse:
"""
diff --git a/portkey_ai/_vendor/openai/resources/realtime/api.md b/portkey_ai/_vendor/openai/resources/realtime/api.md
new file mode 100644
index 00000000..1a178384
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/realtime/api.md
@@ -0,0 +1,137 @@
+# Realtime
+
+Types:
+
+```python
+from openai.types.realtime import (
+ AudioTranscription,
+ ConversationCreatedEvent,
+ ConversationItem,
+ ConversationItemAdded,
+ ConversationItemCreateEvent,
+ ConversationItemCreatedEvent,
+ ConversationItemDeleteEvent,
+ ConversationItemDeletedEvent,
+ ConversationItemDone,
+ ConversationItemInputAudioTranscriptionCompletedEvent,
+ ConversationItemInputAudioTranscriptionDeltaEvent,
+ ConversationItemInputAudioTranscriptionFailedEvent,
+ ConversationItemInputAudioTranscriptionSegment,
+ ConversationItemRetrieveEvent,
+ ConversationItemTruncateEvent,
+ ConversationItemTruncatedEvent,
+ ConversationItemWithReference,
+ InputAudioBufferAppendEvent,
+ InputAudioBufferClearEvent,
+ InputAudioBufferClearedEvent,
+ InputAudioBufferCommitEvent,
+ InputAudioBufferCommittedEvent,
+ InputAudioBufferDtmfEventReceivedEvent,
+ InputAudioBufferSpeechStartedEvent,
+ InputAudioBufferSpeechStoppedEvent,
+ InputAudioBufferTimeoutTriggered,
+ LogProbProperties,
+ McpListToolsCompleted,
+ McpListToolsFailed,
+ McpListToolsInProgress,
+ NoiseReductionType,
+ OutputAudioBufferClearEvent,
+ RateLimitsUpdatedEvent,
+ RealtimeAudioConfig,
+ RealtimeAudioConfigInput,
+ RealtimeAudioConfigOutput,
+ RealtimeAudioFormats,
+ RealtimeAudioInputTurnDetection,
+ RealtimeClientEvent,
+ RealtimeConversationItemAssistantMessage,
+ RealtimeConversationItemFunctionCall,
+ RealtimeConversationItemFunctionCallOutput,
+ RealtimeConversationItemSystemMessage,
+ RealtimeConversationItemUserMessage,
+ RealtimeError,
+ RealtimeErrorEvent,
+ RealtimeFunctionTool,
+ RealtimeMcpApprovalRequest,
+ RealtimeMcpApprovalResponse,
+ RealtimeMcpListTools,
+ RealtimeMcpProtocolError,
+ RealtimeMcpToolCall,
+ RealtimeMcpToolExecutionError,
+ RealtimeMcphttpError,
+ RealtimeResponse,
+ RealtimeResponseCreateAudioOutput,
+ RealtimeResponseCreateMcpTool,
+ RealtimeResponseCreateParams,
+ RealtimeResponseStatus,
+ RealtimeResponseUsage,
+ RealtimeResponseUsageInputTokenDetails,
+ RealtimeResponseUsageOutputTokenDetails,
+ RealtimeServerEvent,
+ RealtimeSession,
+ RealtimeSessionCreateRequest,
+ RealtimeToolChoiceConfig,
+ RealtimeToolsConfig,
+ RealtimeToolsConfigUnion,
+ RealtimeTracingConfig,
+ RealtimeTranscriptionSessionAudio,
+ RealtimeTranscriptionSessionAudioInput,
+ RealtimeTranscriptionSessionAudioInputTurnDetection,
+ RealtimeTranscriptionSessionCreateRequest,
+ RealtimeTruncation,
+ RealtimeTruncationRetentionRatio,
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseCancelEvent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseCreateEvent,
+ ResponseCreatedEvent,
+ ResponseDoneEvent,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseMcpCallArgumentsDelta,
+ ResponseMcpCallArgumentsDone,
+ ResponseMcpCallCompleted,
+ ResponseMcpCallFailed,
+ ResponseMcpCallInProgress,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ SessionCreatedEvent,
+ SessionUpdateEvent,
+ SessionUpdatedEvent,
+ TranscriptionSessionUpdate,
+ TranscriptionSessionUpdatedEvent,
+)
+```
+
+## ClientSecrets
+
+Types:
+
+```python
+from openai.types.realtime import (
+ RealtimeSessionClientSecret,
+ RealtimeSessionCreateResponse,
+ RealtimeTranscriptionSessionCreateResponse,
+ RealtimeTranscriptionSessionTurnDetection,
+ ClientSecretCreateResponse,
+)
+```
+
+Methods:
+
+- client.realtime.client_secrets.create(\*\*params) -> ClientSecretCreateResponse
+
+## Calls
+
+Methods:
+
+- client.realtime.calls.create(\*\*params) -> HttpxBinaryResponseContent
+- client.realtime.calls.accept(call_id, \*\*params) -> None
+- client.realtime.calls.hangup(call_id) -> None
+- client.realtime.calls.refer(call_id, \*\*params) -> None
+- client.realtime.calls.reject(call_id, \*\*params) -> None
diff --git a/portkey_ai/_vendor/openai/resources/realtime/calls.py b/portkey_ai/_vendor/openai/resources/realtime/calls.py
index 20a22fc3..f34748d2 100644
--- a/portkey_ai/_vendor/openai/resources/realtime/calls.py
+++ b/portkey_ai/_vendor/openai/resources/realtime/calls.py
@@ -9,7 +9,7 @@
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -116,6 +116,7 @@ def accept(
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -126,6 +127,7 @@ def accept(
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
@@ -228,7 +230,7 @@ def accept(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/realtime/calls/{call_id}/accept",
+ path_template("/realtime/calls/{call_id}/accept", call_id=call_id),
body=maybe_transform(
{
"type": type,
@@ -279,7 +281,7 @@ def hangup(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/realtime/calls/{call_id}/hangup",
+ path_template("/realtime/calls/{call_id}/hangup", call_id=call_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -317,7 +319,7 @@ def refer(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/realtime/calls/{call_id}/refer",
+ path_template("/realtime/calls/{call_id}/refer", call_id=call_id),
body=maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -356,7 +358,7 @@ def reject(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/realtime/calls/{call_id}/reject",
+ path_template("/realtime/calls/{call_id}/reject", call_id=call_id),
body=maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -443,6 +445,7 @@ async def accept(
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -453,6 +456,7 @@ async def accept(
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
@@ -555,7 +559,7 @@ async def accept(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/realtime/calls/{call_id}/accept",
+ path_template("/realtime/calls/{call_id}/accept", call_id=call_id),
body=await async_maybe_transform(
{
"type": type,
@@ -606,7 +610,7 @@ async def hangup(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/realtime/calls/{call_id}/hangup",
+ path_template("/realtime/calls/{call_id}/hangup", call_id=call_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -644,7 +648,7 @@ async def refer(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/realtime/calls/{call_id}/refer",
+ path_template("/realtime/calls/{call_id}/refer", call_id=call_id),
body=await async_maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -683,7 +687,7 @@ async def reject(
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/realtime/calls/{call_id}/reject",
+ path_template("/realtime/calls/{call_id}/reject", call_id=call_id),
body=await async_maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
diff --git a/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py b/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py
index 5ceba7be..d9947dd7 100644
--- a/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py
+++ b/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py
@@ -52,6 +52,20 @@ def create(
"""
Create a Realtime client secret with an associated session configuration.
+ Client secrets are short-lived tokens that can be passed to a client app, such
+ as a web frontend or mobile client, which grants access to the Realtime API
+ without leaking your main API key. You can configure a custom TTL for each
+ client secret.
+
+ You can also attach session configuration options to the client secret, which
+ will be applied to any sessions created using that client secret, but these can
+ also be overridden by the client connection.
+
+ [Learn more about authentication with client secrets over WebRTC](https://platform.openai.com/docs/guides/realtime-webrtc).
+
+ Returns the created client secret and the effective session object. The client
+ secret is a string that looks like `ek_1234`.
+
Args:
expires_after: Configuration for the client secret expiration. Expiration refers to the time
after which a client secret will no longer be valid for creating sessions. The
@@ -120,6 +134,20 @@ async def create(
"""
Create a Realtime client secret with an associated session configuration.
+ Client secrets are short-lived tokens that can be passed to a client app, such
+ as a web frontend or mobile client, which grants access to the Realtime API
+ without leaking your main API key. You can configure a custom TTL for each
+ client secret.
+
+ You can also attach session configuration options to the client secret, which
+ will be applied to any sessions created using that client secret, but these can
+ also be overridden by the client connection.
+
+ [Learn more about authentication with client secrets over WebRTC](https://platform.openai.com/docs/guides/realtime-webrtc).
+
+ Returns the created client secret and the effective session object. The client
+ secret is a string that looks like `ek_1234`.
+
Args:
expires_after: Configuration for the client secret expiration. Expiration refers to the time
after which a client secret will no longer be valid for creating sessions. The
diff --git a/portkey_ai/_vendor/openai/resources/realtime/realtime.py b/portkey_ai/_vendor/openai/resources/realtime/realtime.py
index 44f14cd3..73a87fc2 100644
--- a/portkey_ai/_vendor/openai/resources/realtime/realtime.py
+++ b/portkey_ai/_vendor/openai/resources/realtime/realtime.py
@@ -41,7 +41,7 @@
AsyncClientSecretsWithStreamingResponse,
)
from ...types.realtime import session_update_event_param
-from ...types.websocket_connection_options import WebsocketConnectionOptions
+from ...types.websocket_connection_options import WebSocketConnectionOptions
from ...types.realtime.realtime_client_event import RealtimeClientEvent
from ...types.realtime.realtime_server_event import RealtimeServerEvent
from ...types.realtime.conversation_item_param import ConversationItemParam
@@ -49,8 +49,8 @@
from ...types.realtime.realtime_response_create_params_param import RealtimeResponseCreateParamsParam
if TYPE_CHECKING:
- from websockets.sync.client import ClientConnection as WebsocketConnection
- from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection
+ from websockets.sync.client import ClientConnection as WebSocketConnection
+ from websockets.asyncio.client import ClientConnection as AsyncWebSocketConnection
from ..._client import OpenAI, AsyncOpenAI
@@ -96,7 +96,7 @@ def connect(
model: str | Omit = omit,
extra_query: Query = {},
extra_headers: Headers = {},
- websocket_connection_options: WebsocketConnectionOptions = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
) -> RealtimeConnectionManager:
"""
The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
@@ -156,7 +156,7 @@ def connect(
model: str | Omit = omit,
extra_query: Query = {},
extra_headers: Headers = {},
- websocket_connection_options: WebsocketConnectionOptions = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
) -> AsyncRealtimeConnectionManager:
"""
The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
@@ -240,9 +240,9 @@ class AsyncRealtimeConnection:
conversation: AsyncRealtimeConversationResource
output_audio_buffer: AsyncRealtimeOutputAudioBufferResource
- _connection: AsyncWebsocketConnection
+ _connection: AsyncWebSocketConnection
- def __init__(self, connection: AsyncWebsocketConnection) -> None:
+ def __init__(self, connection: AsyncWebSocketConnection) -> None:
self._connection = connection
self.session = AsyncRealtimeSessionResource(self)
@@ -281,7 +281,7 @@ async def recv_bytes(self) -> bytes:
then you can call `.parse_event(data)`.
"""
message = await self._connection.recv(decode=False)
- log.debug(f"Received websocket message: %s", message)
+ log.debug(f"Received WebSocket message: %s", message)
return message
async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
@@ -334,7 +334,7 @@ def __init__(
model: str | Omit = omit,
extra_query: Query,
extra_headers: Headers,
- websocket_connection_options: WebsocketConnectionOptions,
+ websocket_connection_options: WebSocketConnectionOptions,
) -> None:
self.__client = client
self.__call_id = call_id
@@ -408,7 +408,9 @@ def _prepare_url(self) -> httpx.URL:
if self.__client.websocket_base_url is not None:
base_url = httpx.URL(self.__client.websocket_base_url)
else:
- base_url = self.__client._base_url.copy_with(scheme="wss")
+ scheme = self.__client._base_url.scheme
+ ws_scheme = "ws" if scheme == "http" else "wss"
+ base_url = self.__client._base_url.copy_with(scheme=ws_scheme)
merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
return base_url.copy_with(raw_path=merge_raw_path)
@@ -429,9 +431,9 @@ class RealtimeConnection:
conversation: RealtimeConversationResource
output_audio_buffer: RealtimeOutputAudioBufferResource
- _connection: WebsocketConnection
+ _connection: WebSocketConnection
- def __init__(self, connection: WebsocketConnection) -> None:
+ def __init__(self, connection: WebSocketConnection) -> None:
self._connection = connection
self.session = RealtimeSessionResource(self)
@@ -470,7 +472,7 @@ def recv_bytes(self) -> bytes:
then you can call `.parse_event(data)`.
"""
message = self._connection.recv(decode=False)
- log.debug(f"Received websocket message: %s", message)
+ log.debug("Received WebSocket message: %s", message)
return message
def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
@@ -523,7 +525,7 @@ def __init__(
model: str | Omit = omit,
extra_query: Query,
extra_headers: Headers,
- websocket_connection_options: WebsocketConnectionOptions,
+ websocket_connection_options: WebSocketConnectionOptions,
) -> None:
self.__client = client
self.__call_id = call_id
@@ -597,7 +599,9 @@ def _prepare_url(self) -> httpx.URL:
if self.__client.websocket_base_url is not None:
base_url = httpx.URL(self.__client.websocket_base_url)
else:
- base_url = self.__client._base_url.copy_with(scheme="wss")
+ scheme = self.__client._base_url.scheme
+ ws_scheme = "ws" if scheme == "http" else "wss"
+ base_url = self.__client._base_url.copy_with(scheme=ws_scheme)
merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
return base_url.copy_with(raw_path=merge_raw_path)
diff --git a/portkey_ai/_vendor/openai/resources/responses/api.md b/portkey_ai/_vendor/openai/resources/responses/api.md
new file mode 100644
index 00000000..891e0f97
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/responses/api.md
@@ -0,0 +1,187 @@
+# Responses
+
+Types:
+
+```python
+from openai.types.responses import (
+ ApplyPatchTool,
+ CompactedResponse,
+ ComputerAction,
+ ComputerActionList,
+ ComputerTool,
+ ComputerUsePreviewTool,
+ ContainerAuto,
+ ContainerNetworkPolicyAllowlist,
+ ContainerNetworkPolicyDisabled,
+ ContainerNetworkPolicyDomainSecret,
+ ContainerReference,
+ CustomTool,
+ EasyInputMessage,
+ FileSearchTool,
+ FunctionShellTool,
+ FunctionTool,
+ InlineSkill,
+ InlineSkillSource,
+ LocalEnvironment,
+ LocalSkill,
+ NamespaceTool,
+ Response,
+ ResponseApplyPatchToolCall,
+ ResponseApplyPatchToolCallOutput,
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseCodeInterpreterCallCodeDeltaEvent,
+ ResponseCodeInterpreterCallCodeDoneEvent,
+ ResponseCodeInterpreterCallCompletedEvent,
+ ResponseCodeInterpreterCallInProgressEvent,
+ ResponseCodeInterpreterCallInterpretingEvent,
+ ResponseCodeInterpreterToolCall,
+ ResponseCompactionItem,
+ ResponseCompactionItemParam,
+ ResponseCompletedEvent,
+ ResponseComputerToolCall,
+ ResponseComputerToolCallOutputItem,
+ ResponseComputerToolCallOutputScreenshot,
+ ResponseContainerReference,
+ ResponseContent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseConversationParam,
+ ResponseCreatedEvent,
+ ResponseCustomToolCall,
+ ResponseCustomToolCallInputDeltaEvent,
+ ResponseCustomToolCallInputDoneEvent,
+ ResponseCustomToolCallItem,
+ ResponseCustomToolCallOutput,
+ ResponseCustomToolCallOutputItem,
+ ResponseError,
+ ResponseErrorEvent,
+ ResponseFailedEvent,
+ ResponseFileSearchCallCompletedEvent,
+ ResponseFileSearchCallInProgressEvent,
+ ResponseFileSearchCallSearchingEvent,
+ ResponseFileSearchToolCall,
+ ResponseFormatTextConfig,
+ ResponseFormatTextJSONSchemaConfig,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseFunctionCallOutputItem,
+ ResponseFunctionCallOutputItemList,
+ ResponseFunctionShellCallOutputContent,
+ ResponseFunctionShellToolCall,
+ ResponseFunctionShellToolCallOutput,
+ ResponseFunctionToolCall,
+ ResponseFunctionToolCallItem,
+ ResponseFunctionToolCallOutputItem,
+ ResponseFunctionWebSearch,
+ ResponseImageGenCallCompletedEvent,
+ ResponseImageGenCallGeneratingEvent,
+ ResponseImageGenCallInProgressEvent,
+ ResponseImageGenCallPartialImageEvent,
+ ResponseInProgressEvent,
+ ResponseIncludable,
+ ResponseIncompleteEvent,
+ ResponseInput,
+ ResponseInputAudio,
+ ResponseInputContent,
+ ResponseInputFile,
+ ResponseInputFileContent,
+ ResponseInputImage,
+ ResponseInputImageContent,
+ ResponseInputItem,
+ ResponseInputMessageContentList,
+ ResponseInputMessageItem,
+ ResponseInputText,
+ ResponseInputTextContent,
+ ResponseItem,
+ ResponseLocalEnvironment,
+ ResponseMcpCallArgumentsDeltaEvent,
+ ResponseMcpCallArgumentsDoneEvent,
+ ResponseMcpCallCompletedEvent,
+ ResponseMcpCallFailedEvent,
+ ResponseMcpCallInProgressEvent,
+ ResponseMcpListToolsCompletedEvent,
+ ResponseMcpListToolsFailedEvent,
+ ResponseMcpListToolsInProgressEvent,
+ ResponseOutputAudio,
+ ResponseOutputItem,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseOutputMessage,
+ ResponseOutputRefusal,
+ ResponseOutputText,
+ ResponseOutputTextAnnotationAddedEvent,
+ ResponsePrompt,
+ ResponseQueuedEvent,
+ ResponseReasoningItem,
+ ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartDoneEvent,
+ ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent,
+ ResponseReasoningTextDeltaEvent,
+ ResponseReasoningTextDoneEvent,
+ ResponseRefusalDeltaEvent,
+ ResponseRefusalDoneEvent,
+ ResponseStatus,
+ ResponseStreamEvent,
+ ResponseTextConfig,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ ResponseToolSearchCall,
+ ResponseToolSearchOutputItem,
+ ResponseToolSearchOutputItemParam,
+ ResponseUsage,
+ ResponseWebSearchCallCompletedEvent,
+ ResponseWebSearchCallInProgressEvent,
+ ResponseWebSearchCallSearchingEvent,
+ ResponsesClientEvent,
+ ResponsesServerEvent,
+ SkillReference,
+ Tool,
+ ToolChoiceAllowed,
+ ToolChoiceApplyPatch,
+ ToolChoiceCustom,
+ ToolChoiceFunction,
+ ToolChoiceMcp,
+ ToolChoiceOptions,
+ ToolChoiceShell,
+ ToolChoiceTypes,
+ ToolSearchTool,
+ WebSearchPreviewTool,
+ WebSearchTool,
+)
+```
+
+Methods:
+
+- client.responses.create(\*\*params) -> Response
+- client.responses.retrieve(response_id, \*\*params) -> Response
+- client.responses.delete(response_id) -> None
+- client.responses.cancel(response_id) -> Response
+- client.responses.compact(\*\*params) -> CompactedResponse
+
+## InputItems
+
+Types:
+
+```python
+from openai.types.responses import ResponseItemList
+```
+
+Methods:
+
+- client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[ResponseItem]
+
+## InputTokens
+
+Types:
+
+```python
+from openai.types.responses import InputTokenCountResponse
+```
+
+Methods:
+
+- client.responses.input_tokens.count(\*\*params) -> InputTokenCountResponse
diff --git a/portkey_ai/_vendor/openai/resources/responses/input_items.py b/portkey_ai/_vendor/openai/resources/responses/input_items.py
index 3311bfe1..b9ae5eee 100644
--- a/portkey_ai/_vendor/openai/resources/responses/input_items.py
+++ b/portkey_ai/_vendor/openai/resources/responses/input_items.py
@@ -9,7 +9,7 @@
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform
+from ..._utils import path_template, maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -85,7 +85,7 @@ def list(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get_api_list(
- f"/responses/{response_id}/input_items",
+ path_template("/responses/{response_id}/input_items", response_id=response_id),
page=SyncCursorPage[ResponseItem],
options=make_request_options(
extra_headers=extra_headers,
@@ -169,7 +169,7 @@ def list(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get_api_list(
- f"/responses/{response_id}/input_items",
+ path_template("/responses/{response_id}/input_items", response_id=response_id),
page=AsyncCursorPage[ResponseItem],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/responses/input_tokens.py b/portkey_ai/_vendor/openai/resources/responses/input_tokens.py
index 86641646..0056727f 100644
--- a/portkey_ai/_vendor/openai/resources/responses/input_tokens.py
+++ b/portkey_ai/_vendor/openai/resources/responses/input_tokens.py
@@ -65,7 +65,10 @@ def count(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> InputTokenCountResponse:
"""
- Get input token counts
+ Returns input token counts of the request.
+
+ Returns an object with `object` set to `response.input_tokens` and an
+ `input_tokens` count.
Args:
conversation: The conversation that this response belongs to. Items from this conversation are
@@ -188,7 +191,10 @@ async def count(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> InputTokenCountResponse:
"""
- Get input token counts
+ Returns input token counts of the request.
+
+ Returns an object with `object` set to `response.input_tokens` and an
+ `input_tokens` count.
Args:
conversation: The conversation that this response belongs to. Items from this conversation are
diff --git a/portkey_ai/_vendor/openai/resources/responses/responses.py b/portkey_ai/_vendor/openai/resources/responses/responses.py
index 8e80f679..63795f95 100644
--- a/portkey_ai/_vendor/openai/resources/responses/responses.py
+++ b/portkey_ai/_vendor/openai/resources/responses/responses.py
@@ -2,17 +2,22 @@
from __future__ import annotations
+import json
+import logging
from copy import copy
-from typing import Any, List, Type, Union, Iterable, Optional, cast
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, List, Type, Union, Iterable, Iterator, Optional, AsyncIterator, cast
from functools import partial
from typing_extensions import Literal, overload
import httpx
+from pydantic import BaseModel
from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
+from ..._models import construct_type_unchecked
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .input_items import (
@@ -33,11 +38,13 @@
InputTokensWithStreamingResponse,
AsyncInputTokensWithStreamingResponse,
)
-from ..._base_client import make_request_options
+from ..._exceptions import OpenAIError
+from ..._base_client import _merge_mappings, make_request_options
from ...types.responses import (
response_create_params,
response_compact_params,
response_retrieve_params,
+ responses_client_event_param,
)
from ...lib._parsing._responses import (
TextFormatT,
@@ -51,16 +58,28 @@
from ...types.responses.parsed_response import ParsedResponse
from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
from ...types.responses.compacted_response import CompactedResponse
+from ...types.websocket_connection_options import WebSocketConnectionOptions
from ...types.responses.response_includable import ResponseIncludable
from ...types.shared_params.responses_model import ResponsesModel
from ...types.responses.response_input_param import ResponseInputParam
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.responses.response_stream_event import ResponseStreamEvent
+from ...types.responses.responses_client_event import ResponsesClientEvent
+from ...types.responses.responses_server_event import ResponsesServerEvent
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.responses.response_text_config_param import ResponseTextConfigParam
+from ...types.responses.responses_client_event_param import ResponsesClientEventParam
+
+if TYPE_CHECKING:
+ from websockets.sync.client import ClientConnection as WebSocketConnection
+ from websockets.asyncio.client import ClientConnection as AsyncWebSocketConnection
+
+ from ..._client import OpenAI, AsyncOpenAI
__all__ = ["Responses", "AsyncResponses"]
+log: logging.Logger = logging.getLogger(__name__)
+
class Responses(SyncAPIResource):
@cached_property
@@ -95,6 +114,7 @@ def create(
self,
*,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -147,6 +167,8 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -235,8 +257,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -341,6 +364,7 @@ def create(
*,
stream: Literal[True],
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -399,6 +423,8 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -487,8 +513,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -586,6 +613,7 @@ def create(
*,
stream: bool,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -644,6 +672,8 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -732,8 +762,9 @@ def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -829,6 +860,7 @@ def create(
self,
*,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -868,6 +900,7 @@ def create(
body=maybe_transform(
{
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"input": input,
@@ -930,6 +963,7 @@ def stream(
input: Union[str, ResponseInputParam],
model: ResponsesModel,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -970,6 +1004,7 @@ def stream(
input: Union[str, ResponseInputParam] | Omit = omit,
model: ResponsesModel | Omit = omit,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -1006,6 +1041,7 @@ def stream(
new_response_args = {
"input": input,
"model": model,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"instructions": instructions,
@@ -1061,6 +1097,7 @@ def stream(
input=input,
model=model,
tools=tools,
+ context_management=context_management,
conversation=conversation,
include=include,
instructions=instructions,
@@ -1118,6 +1155,7 @@ def parse(
*,
text_format: type[TextFormatT] | Omit = omit,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -1176,6 +1214,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
body=maybe_transform(
{
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"input": input,
@@ -1432,7 +1471,7 @@ def retrieve(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get(
- f"/responses/{response_id}",
+ path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -1480,7 +1519,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/responses/{response_id}",
+ path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1516,7 +1555,7 @@ def cancel(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._post(
- f"/responses/{response_id}/cancel",
+ path_template("/responses/{response_id}/cancel", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1528,6 +1567,12 @@ def compact(
*,
model: Union[
Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
@@ -1621,6 +1666,7 @@ def compact(
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1628,8 +1674,14 @@ def compact(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> CompactedResponse:
- """
- Compact conversation
+ """Compact a conversation.
+
+ Returns a compacted response object.
+
+ Learn when and how to compact long-running conversations in the
+ [conversation state guide](https://platform.openai.com/docs/guides/conversation-state#managing-the-context-window).
+ For ZDR-compatible compaction details, see
+ [Compaction (advanced)](https://platform.openai.com/docs/guides/conversation-state#compaction-advanced).
Args:
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
@@ -1650,6 +1702,8 @@ def compact(
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
+ prompt_cache_key: A key to use when reading from or writing to the prompt cache.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1666,6 +1720,7 @@ def compact(
"input": input,
"instructions": instructions,
"previous_response_id": previous_response_id,
+ "prompt_cache_key": prompt_cache_key,
},
response_compact_params.ResponseCompactParams,
),
@@ -1675,6 +1730,23 @@ def compact(
cast_to=CompactedResponse,
)
+ def connect(
+ self,
+ extra_query: Query = {},
+ extra_headers: Headers = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
+ ) -> ResponsesConnectionManager:
+ """Connect to a persistent Responses API WebSocket.
+
+ Send `response.create` events and receive response stream events over the socket.
+ """
+ return ResponsesConnectionManager(
+ client=self._client,
+ extra_query=extra_query,
+ extra_headers=extra_headers,
+ websocket_connection_options=websocket_connection_options,
+ )
+
class AsyncResponses(AsyncAPIResource):
@cached_property
@@ -1709,6 +1781,7 @@ async def create(
self,
*,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -1761,6 +1834,8 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -1849,8 +1924,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -1955,6 +2031,7 @@ async def create(
*,
stream: Literal[True],
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2013,6 +2090,8 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -2101,8 +2180,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -2200,6 +2280,7 @@ async def create(
*,
stream: bool,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2258,6 +2339,8 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ context_management: Context management configuration for this request.
+
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
@@ -2346,8 +2429,9 @@ async def create(
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
service_tier: Specifies the processing type used for serving the request.
@@ -2443,6 +2527,7 @@ async def create(
self,
*,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2482,6 +2567,7 @@ async def create(
body=await async_maybe_transform(
{
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"input": input,
@@ -2544,6 +2630,7 @@ def stream(
input: Union[str, ResponseInputParam],
model: ResponsesModel,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -2584,6 +2671,7 @@ def stream(
input: Union[str, ResponseInputParam] | Omit = omit,
model: ResponsesModel | Omit = omit,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -2620,6 +2708,7 @@ def stream(
new_response_args = {
"input": input,
"model": model,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"instructions": instructions,
@@ -2675,6 +2764,7 @@ def stream(
model=model,
stream=True,
tools=tools,
+ context_management=context_management,
conversation=conversation,
include=include,
instructions=instructions,
@@ -2736,6 +2826,7 @@ async def parse(
*,
text_format: type[TextFormatT] | Omit = omit,
background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2794,6 +2885,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
body=maybe_transform(
{
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"include": include,
"input": input,
@@ -3050,7 +3142,7 @@ async def retrieve(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._get(
- f"/responses/{response_id}",
+ path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -3098,7 +3190,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/responses/{response_id}",
+ path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -3134,7 +3226,7 @@ async def cancel(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._post(
- f"/responses/{response_id}/cancel",
+ path_template("/responses/{response_id}/cancel", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -3146,6 +3238,12 @@ async def compact(
*,
model: Union[
Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
@@ -3239,6 +3337,7 @@ async def compact(
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -3246,8 +3345,14 @@ async def compact(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> CompactedResponse:
- """
- Compact conversation
+ """Compact a conversation.
+
+ Returns a compacted response object.
+
+ Learn when and how to compact long-running conversations in the
+ [conversation state guide](https://platform.openai.com/docs/guides/conversation-state#managing-the-context-window).
+ For ZDR-compatible compaction details, see
+ [Compaction (advanced)](https://platform.openai.com/docs/guides/conversation-state#compaction-advanced).
Args:
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
@@ -3268,6 +3373,8 @@ async def compact(
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
+ prompt_cache_key: A key to use when reading from or writing to the prompt cache.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -3284,6 +3391,7 @@ async def compact(
"input": input,
"instructions": instructions,
"previous_response_id": previous_response_id,
+ "prompt_cache_key": prompt_cache_key,
},
response_compact_params.ResponseCompactParams,
),
@@ -3293,6 +3401,23 @@ async def compact(
cast_to=CompactedResponse,
)
+ def connect(
+ self,
+ extra_query: Query = {},
+ extra_headers: Headers = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
+ ) -> AsyncResponsesConnectionManager:
+ """Connect to a persistent Responses API WebSocket.
+
+ Send `response.create` events and receive response stream events over the socket.
+ """
+ return AsyncResponsesConnectionManager(
+ client=self._client,
+ extra_query=extra_query,
+ extra_headers=extra_headers,
+ websocket_connection_options=websocket_connection_options,
+ )
+
class ResponsesWithRawResponse:
def __init__(self, responses: Responses) -> None:
@@ -3452,3 +3577,495 @@ def _make_tools(tools: Iterable[ParseableToolParam] | Omit) -> List[ToolParam] |
converted_tools.append(new_tool.cast())
return converted_tools
+
+
+class AsyncResponsesConnection:
+ """Represents a live WebSocket connection to the Responses API"""
+
+ response: AsyncResponsesResponseResource
+
+ _connection: AsyncWebSocketConnection
+
+ def __init__(self, connection: AsyncWebSocketConnection) -> None:
+ self._connection = connection
+
+ self.response = AsyncResponsesResponseResource(self)
+
+ async def __aiter__(self) -> AsyncIterator[ResponsesServerEvent]:
+ """
+ An infinite-iterator that will continue to yield events until
+ the connection is closed.
+ """
+ from websockets.exceptions import ConnectionClosedOK
+
+ try:
+ while True:
+ yield await self.recv()
+ except ConnectionClosedOK:
+ return
+
+ async def recv(self) -> ResponsesServerEvent:
+ """
+ Receive the next message from the connection and parses it into a `ResponsesServerEvent` object.
+
+ Canceling this method is safe. There's no risk of losing data.
+ """
+ return self.parse_event(await self.recv_bytes())
+
+ async def recv_bytes(self) -> bytes:
+ """Receive the next message from the connection as raw bytes.
+
+ Canceling this method is safe. There's no risk of losing data.
+
+ If you want to parse the message into a `ResponsesServerEvent` object like `.recv()` does,
+ then you can call `.parse_event(data)`.
+ """
+ message = await self._connection.recv(decode=False)
+ log.debug(f"Received WebSocket message: %s", message)
+ return message
+
+ async def send(self, event: ResponsesClientEvent | ResponsesClientEventParam) -> None:
+ data = (
+ event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+ if isinstance(event, BaseModel)
+ else json.dumps(await async_maybe_transform(event, ResponsesClientEventParam))
+ )
+ await self._connection.send(data)
+
+ async def close(self, *, code: int = 1000, reason: str = "") -> None:
+ await self._connection.close(code=code, reason=reason)
+
+ def parse_event(self, data: str | bytes) -> ResponsesServerEvent:
+ """
+ Converts a raw `str` or `bytes` message into a `ResponsesServerEvent` object.
+
+ This is helpful if you're using `.recv_bytes()`.
+ """
+ return cast(
+ ResponsesServerEvent,
+ construct_type_unchecked(value=json.loads(data), type_=cast(Any, ResponsesServerEvent)),
+ )
+
+
+class AsyncResponsesConnectionManager:
+ """
+ Context manager over a `AsyncResponsesConnection` that is returned by `responses.connect()`
+
+ This context manager ensures that the connection will be closed when it exits.
+
+ ---
+
+ Note that if your application doesn't work well with the context manager approach then you
+ can call the `.enter()` method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = await client.responses.connect(...).enter()
+ # ...
+ await connection.close()
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ client: AsyncOpenAI,
+ extra_query: Query,
+ extra_headers: Headers,
+ websocket_connection_options: WebSocketConnectionOptions,
+ ) -> None:
+ self.__client = client
+ self.__connection: AsyncResponsesConnection | None = None
+ self.__extra_query = extra_query
+ self.__extra_headers = extra_headers
+ self.__websocket_connection_options = websocket_connection_options
+
+ async def __aenter__(self) -> AsyncResponsesConnection:
+ """
+ 👋 If your application doesn't work well with the context manager approach then you
+ can call this method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = await client.responses.connect(...).enter()
+ # ...
+ await connection.close()
+ ```
+ """
+ try:
+ from websockets.asyncio.client import connect
+ except ImportError as exc:
+ raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc
+
+ url = self._prepare_url().copy_with(
+ params={
+ **self.__client.base_url.params,
+ **self.__extra_query,
+ },
+ )
+ log.debug("Connecting to %s", url)
+ if self.__websocket_connection_options:
+ log.debug("Connection options: %s", self.__websocket_connection_options)
+
+ self.__connection = AsyncResponsesConnection(
+ await connect(
+ str(url),
+ user_agent_header=self.__client.user_agent,
+ additional_headers=_merge_mappings(
+ {
+ **self.__client.auth_headers,
+ },
+ self.__extra_headers,
+ ),
+ **self.__websocket_connection_options,
+ )
+ )
+
+ return self.__connection
+
+ enter = __aenter__
+
+ def _prepare_url(self) -> httpx.URL:
+ if self.__client.websocket_base_url is not None:
+ base_url = httpx.URL(self.__client.websocket_base_url)
+ else:
+ scheme = self.__client._base_url.scheme
+ ws_scheme = "ws" if scheme == "http" else "wss"
+ base_url = self.__client._base_url.copy_with(scheme=ws_scheme)
+
+ merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/responses"
+ return base_url.copy_with(raw_path=merge_raw_path)
+
+ async def __aexit__(
+ self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
+ ) -> None:
+ if self.__connection is not None:
+ await self.__connection.close()
+
+
+class ResponsesConnection:
+ """Represents a live WebSocket connection to the Responses API"""
+
+ response: ResponsesResponseResource
+
+ _connection: WebSocketConnection
+
+ def __init__(self, connection: WebSocketConnection) -> None:
+ self._connection = connection
+
+ self.response = ResponsesResponseResource(self)
+
+ def __iter__(self) -> Iterator[ResponsesServerEvent]:
+ """
+ An infinite-iterator that will continue to yield events until
+ the connection is closed.
+ """
+ from websockets.exceptions import ConnectionClosedOK
+
+ try:
+ while True:
+ yield self.recv()
+ except ConnectionClosedOK:
+ return
+
+ def recv(self) -> ResponsesServerEvent:
+ """
+ Receive the next message from the connection and parses it into a `ResponsesServerEvent` object.
+
+ Canceling this method is safe. There's no risk of losing data.
+ """
+ return self.parse_event(self.recv_bytes())
+
+ def recv_bytes(self) -> bytes:
+ """Receive the next message from the connection as raw bytes.
+
+ Canceling this method is safe. There's no risk of losing data.
+
+ If you want to parse the message into a `ResponsesServerEvent` object like `.recv()` does,
+ then you can call `.parse_event(data)`.
+ """
+ message = self._connection.recv(decode=False)
+ log.debug(f"Received WebSocket message: %s", message)
+ return message
+
+ def send(self, event: ResponsesClientEvent | ResponsesClientEventParam) -> None:
+ data = (
+ event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+ if isinstance(event, BaseModel)
+ else json.dumps(maybe_transform(event, ResponsesClientEventParam))
+ )
+ self._connection.send(data)
+
+ def close(self, *, code: int = 1000, reason: str = "") -> None:
+ self._connection.close(code=code, reason=reason)
+
+ def parse_event(self, data: str | bytes) -> ResponsesServerEvent:
+ """
+ Converts a raw `str` or `bytes` message into a `ResponsesServerEvent` object.
+
+ This is helpful if you're using `.recv_bytes()`.
+ """
+ return cast(
+ ResponsesServerEvent,
+ construct_type_unchecked(value=json.loads(data), type_=cast(Any, ResponsesServerEvent)),
+ )
+
+
+class ResponsesConnectionManager:
+ """
+ Context manager over a `ResponsesConnection` that is returned by `responses.connect()`
+
+ This context manager ensures that the connection will be closed when it exits.
+
+ ---
+
+ Note that if your application doesn't work well with the context manager approach then you
+ can call the `.enter()` method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = client.responses.connect(...).enter()
+ # ...
+ connection.close()
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ client: OpenAI,
+ extra_query: Query,
+ extra_headers: Headers,
+ websocket_connection_options: WebSocketConnectionOptions,
+ ) -> None:
+ self.__client = client
+ self.__connection: ResponsesConnection | None = None
+ self.__extra_query = extra_query
+ self.__extra_headers = extra_headers
+ self.__websocket_connection_options = websocket_connection_options
+
+ def __enter__(self) -> ResponsesConnection:
+ """
+ 👋 If your application doesn't work well with the context manager approach then you
+ can call this method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = client.responses.connect(...).enter()
+ # ...
+ connection.close()
+ ```
+ """
+ try:
+ from websockets.sync.client import connect
+ except ImportError as exc:
+ raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc
+
+ url = self._prepare_url().copy_with(
+ params={
+ **self.__client.base_url.params,
+ **self.__extra_query,
+ },
+ )
+ log.debug("Connecting to %s", url)
+ if self.__websocket_connection_options:
+ log.debug("Connection options: %s", self.__websocket_connection_options)
+
+ self.__connection = ResponsesConnection(
+ connect(
+ str(url),
+ user_agent_header=self.__client.user_agent,
+ additional_headers=_merge_mappings(
+ {
+ **self.__client.auth_headers,
+ },
+ self.__extra_headers,
+ ),
+ **self.__websocket_connection_options,
+ )
+ )
+
+ return self.__connection
+
+ enter = __enter__
+
+ def _prepare_url(self) -> httpx.URL:
+ if self.__client.websocket_base_url is not None:
+ base_url = httpx.URL(self.__client.websocket_base_url)
+ else:
+ scheme = self.__client._base_url.scheme
+ ws_scheme = "ws" if scheme == "http" else "wss"
+ base_url = self.__client._base_url.copy_with(scheme=ws_scheme)
+
+ merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/responses"
+ return base_url.copy_with(raw_path=merge_raw_path)
+
+ def __exit__(
+ self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
+ ) -> None:
+ if self.__connection is not None:
+ self.__connection.close()
+
+
+class BaseResponsesConnectionResource:
+ def __init__(self, connection: ResponsesConnection) -> None:
+ self._connection = connection
+
+
+class ResponsesResponseResource(BaseResponsesConnectionResource):
+ def create(
+ self,
+ *,
+ background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[responses_client_event_param.ContextManagement]] | Omit = omit,
+ conversation: Optional[responses_client_event_param.Conversation] | Omit = omit,
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
+ input: Union[str, ResponseInputParam] | Omit = omit,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tool_calls: Optional[int] | Omit = omit,
+ metadata: Optional[Metadata] | Omit = omit,
+ model: ResponsesModel | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
+ prompt_cache_key: str | Omit = omit,
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+ reasoning: Optional[Reasoning] | Omit = omit,
+ safety_identifier: str | Omit = omit,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
+ store: Optional[bool] | Omit = omit,
+ stream: Optional[bool] | Omit = omit,
+ stream_options: Optional[responses_client_event_param.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ text: ResponseTextConfigParam | Omit = omit,
+ tool_choice: responses_client_event_param.ToolChoice | Omit = omit,
+ tools: Iterable[ToolParam] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
+ user: str | Omit = omit,
+ ) -> None:
+ self._connection.send(
+ cast(
+ ResponsesClientEventParam,
+ strip_not_given(
+ {
+ "type": "response.create",
+ "background": background,
+ "context_management": context_management,
+ "conversation": conversation,
+ "include": include,
+ "input": input,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
+ "prompt_cache_retention": prompt_cache_retention,
+ "reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
+ "store": store,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "text": text,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "truncation": truncation,
+ "user": user,
+ }
+ ),
+ )
+ )
+
+
+class BaseAsyncResponsesConnectionResource:
+ def __init__(self, connection: AsyncResponsesConnection) -> None:
+ self._connection = connection
+
+
+class AsyncResponsesResponseResource(BaseAsyncResponsesConnectionResource):
+ async def create(
+ self,
+ *,
+ background: Optional[bool] | Omit = omit,
+ context_management: Optional[Iterable[responses_client_event_param.ContextManagement]] | Omit = omit,
+ conversation: Optional[responses_client_event_param.Conversation] | Omit = omit,
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
+ input: Union[str, ResponseInputParam] | Omit = omit,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tool_calls: Optional[int] | Omit = omit,
+ metadata: Optional[Metadata] | Omit = omit,
+ model: ResponsesModel | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
+ prompt_cache_key: str | Omit = omit,
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+ reasoning: Optional[Reasoning] | Omit = omit,
+ safety_identifier: str | Omit = omit,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
+ store: Optional[bool] | Omit = omit,
+ stream: Optional[bool] | Omit = omit,
+ stream_options: Optional[responses_client_event_param.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ text: ResponseTextConfigParam | Omit = omit,
+ tool_choice: responses_client_event_param.ToolChoice | Omit = omit,
+ tools: Iterable[ToolParam] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
+ user: str | Omit = omit,
+ ) -> None:
+ await self._connection.send(
+ cast(
+ ResponsesClientEventParam,
+ strip_not_given(
+ {
+ "type": "response.create",
+ "background": background,
+ "context_management": context_management,
+ "conversation": conversation,
+ "include": include,
+ "input": input,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
+ "prompt_cache_retention": prompt_cache_retention,
+ "reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
+ "store": store,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "text": text,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "truncation": truncation,
+ "user": user,
+ }
+ ),
+ )
+ )
diff --git a/portkey_ai/_vendor/openai/resources/skills/__init__.py b/portkey_ai/_vendor/openai/resources/skills/__init__.py
new file mode 100644
index 00000000..07f4d672
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .skills import (
+ Skills,
+ AsyncSkills,
+ SkillsWithRawResponse,
+ AsyncSkillsWithRawResponse,
+ SkillsWithStreamingResponse,
+ AsyncSkillsWithStreamingResponse,
+)
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from .versions import (
+ Versions,
+ AsyncVersions,
+ VersionsWithRawResponse,
+ AsyncVersionsWithRawResponse,
+ VersionsWithStreamingResponse,
+ AsyncVersionsWithStreamingResponse,
+)
+
+__all__ = [
+ "Content",
+ "AsyncContent",
+ "ContentWithRawResponse",
+ "AsyncContentWithRawResponse",
+ "ContentWithStreamingResponse",
+ "AsyncContentWithStreamingResponse",
+ "Versions",
+ "AsyncVersions",
+ "VersionsWithRawResponse",
+ "AsyncVersionsWithRawResponse",
+ "VersionsWithStreamingResponse",
+ "AsyncVersionsWithStreamingResponse",
+ "Skills",
+ "AsyncSkills",
+ "SkillsWithRawResponse",
+ "AsyncSkillsWithRawResponse",
+ "SkillsWithStreamingResponse",
+ "AsyncSkillsWithStreamingResponse",
+]
diff --git a/portkey_ai/_vendor/openai/resources/skills/content.py b/portkey_ai/_vendor/openai/resources/skills/content.py
new file mode 100644
index 00000000..96b23717
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/content.py
@@ -0,0 +1,168 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._utils import path_template
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+ to_custom_streamed_response_wrapper,
+ async_to_custom_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+
+__all__ = ["Content", "AsyncContent"]
+
+
+class Content(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContentWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> _legacy_response.HttpxBinaryResponseContent:
+ """
+ Download a skill zip bundle by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+ return self._get(
+ path_template("/skills/{skill_id}/content", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
+ )
+
+
+class AsyncContent(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContentWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> _legacy_response.HttpxBinaryResponseContent:
+ """
+ Download a skill zip bundle by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+ return await self._get(
+ path_template("/skills/{skill_id}/content", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
+ )
+
+
+class ContentWithRawResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithRawResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class ContentWithStreamingResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = to_custom_streamed_response_wrapper(
+ content.retrieve,
+ StreamedBinaryAPIResponse,
+ )
+
+
+class AsyncContentWithStreamingResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = async_to_custom_streamed_response_wrapper(
+ content.retrieve,
+ AsyncStreamedBinaryAPIResponse,
+ )
diff --git a/portkey_ai/_vendor/openai/resources/skills/skills.py b/portkey_ai/_vendor/openai/resources/skills/skills.py
new file mode 100644
index 00000000..f44fb246
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/skills.py
@@ -0,0 +1,618 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ...types import skill_list_params, skill_create_params, skill_update_params
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from ..._types import (
+ Body,
+ Omit,
+ Query,
+ Headers,
+ NotGiven,
+ FileTypes,
+ SequenceNotStr,
+ omit,
+ not_given,
+)
+from ..._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ...types.skill import Skill
+from ..._base_client import AsyncPaginator, make_request_options
+from .versions.versions import (
+ Versions,
+ AsyncVersions,
+ VersionsWithRawResponse,
+ AsyncVersionsWithRawResponse,
+ VersionsWithStreamingResponse,
+ AsyncVersionsWithStreamingResponse,
+)
+from ...types.deleted_skill import DeletedSkill
+
+__all__ = ["Skills", "AsyncSkills"]
+
+
+class Skills(SyncAPIResource):
+ @cached_property
+ def content(self) -> Content:
+ return Content(self._client)
+
+ @cached_property
+ def versions(self) -> Versions:
+ return Versions(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> SkillsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return SkillsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> SkillsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return SkillsWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Create a new skill.
+
+ Args:
+ files: Skill files to upload (directory upload) or a single zip file.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal({"files": files})
+ extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
+ if extracted_files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/skills",
+ body=maybe_transform(body, skill_create_params.SkillCreateParams),
+ files=extracted_files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ def retrieve(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Get a skill by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return self._get(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ def update(
+ self,
+ skill_id: str,
+ *,
+ default_version: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Update the default version pointer for a skill.
+
+ Args:
+ default_version: The skill version number to set as default.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return self._post(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ body=maybe_transform({"default_version": default_version}, skill_update_params.SkillUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncCursorPage[Skill]:
+ """
+ List all skills for the current project.
+
+ Args:
+ after: Identifier for the last item from the previous pagination request
+
+ limit: Number of items to retrieve
+
+ order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+ descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/skills",
+ page=SyncCursorPage[Skill],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ skill_list_params.SkillListParams,
+ ),
+ ),
+ model=Skill,
+ )
+
+ def delete(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DeletedSkill:
+ """
+ Delete a skill by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return self._delete(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DeletedSkill,
+ )
+
+
+class AsyncSkills(AsyncAPIResource):
+ @cached_property
+ def content(self) -> AsyncContent:
+ return AsyncContent(self._client)
+
+ @cached_property
+ def versions(self) -> AsyncVersions:
+ return AsyncVersions(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncSkillsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncSkillsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncSkillsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncSkillsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Create a new skill.
+
+ Args:
+ files: Skill files to upload (directory upload) or a single zip file.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal({"files": files})
+ extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
+ if extracted_files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/skills",
+ body=await async_maybe_transform(body, skill_create_params.SkillCreateParams),
+ files=extracted_files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ async def retrieve(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Get a skill by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return await self._get(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ async def update(
+ self,
+ skill_id: str,
+ *,
+ default_version: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Skill:
+ """
+ Update the default version pointer for a skill.
+
+ Args:
+ default_version: The skill version number to set as default.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return await self._post(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ body=await async_maybe_transform(
+ {"default_version": default_version}, skill_update_params.SkillUpdateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Skill,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[Skill, AsyncCursorPage[Skill]]:
+ """
+ List all skills for the current project.
+
+ Args:
+ after: Identifier for the last item from the previous pagination request
+
+ limit: Number of items to retrieve
+
+ order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+ descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/skills",
+ page=AsyncCursorPage[Skill],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ skill_list_params.SkillListParams,
+ ),
+ ),
+ model=Skill,
+ )
+
+ async def delete(
+ self,
+ skill_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DeletedSkill:
+ """
+ Delete a skill by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return await self._delete(
+ path_template("/skills/{skill_id}", skill_id=skill_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DeletedSkill,
+ )
+
+
+class SkillsWithRawResponse:
+ def __init__(self, skills: Skills) -> None:
+ self._skills = skills
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ skills.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ skills.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ skills.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ skills.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ skills.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithRawResponse:
+ return ContentWithRawResponse(self._skills.content)
+
+ @cached_property
+ def versions(self) -> VersionsWithRawResponse:
+ return VersionsWithRawResponse(self._skills.versions)
+
+
+class AsyncSkillsWithRawResponse:
+ def __init__(self, skills: AsyncSkills) -> None:
+ self._skills = skills
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ skills.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ skills.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ skills.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ skills.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ skills.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithRawResponse:
+ return AsyncContentWithRawResponse(self._skills.content)
+
+ @cached_property
+ def versions(self) -> AsyncVersionsWithRawResponse:
+ return AsyncVersionsWithRawResponse(self._skills.versions)
+
+
+class SkillsWithStreamingResponse:
+ def __init__(self, skills: Skills) -> None:
+ self._skills = skills
+
+ self.create = to_streamed_response_wrapper(
+ skills.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ skills.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ skills.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ skills.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ skills.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithStreamingResponse:
+ return ContentWithStreamingResponse(self._skills.content)
+
+ @cached_property
+ def versions(self) -> VersionsWithStreamingResponse:
+ return VersionsWithStreamingResponse(self._skills.versions)
+
+
+class AsyncSkillsWithStreamingResponse:
+ def __init__(self, skills: AsyncSkills) -> None:
+ self._skills = skills
+
+ self.create = async_to_streamed_response_wrapper(
+ skills.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ skills.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ skills.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ skills.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ skills.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithStreamingResponse:
+ return AsyncContentWithStreamingResponse(self._skills.content)
+
+ @cached_property
+ def versions(self) -> AsyncVersionsWithStreamingResponse:
+ return AsyncVersionsWithStreamingResponse(self._skills.versions)
diff --git a/portkey_ai/_vendor/openai/resources/skills/versions/__init__.py b/portkey_ai/_vendor/openai/resources/skills/versions/__init__.py
new file mode 100644
index 00000000..c9ad6fbb
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/versions/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from .versions import (
+ Versions,
+ AsyncVersions,
+ VersionsWithRawResponse,
+ AsyncVersionsWithRawResponse,
+ VersionsWithStreamingResponse,
+ AsyncVersionsWithStreamingResponse,
+)
+
+__all__ = [
+ "Content",
+ "AsyncContent",
+ "ContentWithRawResponse",
+ "AsyncContentWithRawResponse",
+ "ContentWithStreamingResponse",
+ "AsyncContentWithStreamingResponse",
+ "Versions",
+ "AsyncVersions",
+ "VersionsWithRawResponse",
+ "AsyncVersionsWithRawResponse",
+ "VersionsWithStreamingResponse",
+ "AsyncVersionsWithStreamingResponse",
+]
diff --git a/portkey_ai/_vendor/openai/resources/skills/versions/content.py b/portkey_ai/_vendor/openai/resources/skills/versions/content.py
new file mode 100644
index 00000000..2f545867
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/versions/content.py
@@ -0,0 +1,178 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Query, Headers, NotGiven, not_given
+from ...._utils import path_template
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+ to_custom_streamed_response_wrapper,
+ async_to_custom_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+
+__all__ = ["Content", "AsyncContent"]
+
+
+class Content(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContentWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> _legacy_response.HttpxBinaryResponseContent:
+ """
+ Download a skill version zip bundle.
+
+ Args:
+ version: The skill version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+ return self._get(
+ path_template("/skills/{skill_id}/versions/{version}/content", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
+ )
+
+
+class AsyncContent(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContentWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> _legacy_response.HttpxBinaryResponseContent:
+ """
+ Download a skill version zip bundle.
+
+ Args:
+ version: The skill version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+ return await self._get(
+ path_template("/skills/{skill_id}/versions/{version}/content", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=_legacy_response.HttpxBinaryResponseContent,
+ )
+
+
+class ContentWithRawResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithRawResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class ContentWithStreamingResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = to_custom_streamed_response_wrapper(
+ content.retrieve,
+ StreamedBinaryAPIResponse,
+ )
+
+
+class AsyncContentWithStreamingResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = async_to_custom_streamed_response_wrapper(
+ content.retrieve,
+ AsyncStreamedBinaryAPIResponse,
+ )
diff --git a/portkey_ai/_vendor/openai/resources/skills/versions/versions.py b/portkey_ai/_vendor/openai/resources/skills/versions/versions.py
new file mode 100644
index 00000000..8b48075c
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/skills/versions/versions.py
@@ -0,0 +1,544 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from ...._types import (
+ Body,
+ Omit,
+ Query,
+ Headers,
+ NotGiven,
+ FileTypes,
+ SequenceNotStr,
+ omit,
+ not_given,
+)
+from ...._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.skills import version_list_params, version_create_params
+from ....types.skills.skill_version import SkillVersion
+from ....types.skills.deleted_skill_version import DeletedSkillVersion
+
+__all__ = ["Versions", "AsyncVersions"]
+
+
+class Versions(SyncAPIResource):
+ @cached_property
+ def content(self) -> Content:
+ return Content(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> VersionsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return VersionsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> VersionsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return VersionsWithStreamingResponse(self)
+
+ def create(
+ self,
+ skill_id: str,
+ *,
+ default: bool | Omit = omit,
+ files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SkillVersion:
+ """
+ Create a new immutable skill version.
+
+ Args:
+ default: Whether to set this version as the default.
+
+ files: Skill files to upload (directory upload) or a single zip file.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ body = deepcopy_minimal(
+ {
+ "default": default,
+ "files": files,
+ }
+ )
+ extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
+ if extracted_files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ path_template("/skills/{skill_id}/versions", skill_id=skill_id),
+ body=maybe_transform(body, version_create_params.VersionCreateParams),
+ files=extracted_files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SkillVersion,
+ )
+
+ def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SkillVersion:
+ """
+ Get a specific skill version.
+
+ Args:
+ version: The version number to retrieve.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ return self._get(
+ path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SkillVersion,
+ )
+
+ def list(
+ self,
+ skill_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncCursorPage[SkillVersion]:
+ """
+ List skill versions for a skill.
+
+ Args:
+ after: The skill version ID to start after.
+
+ limit: Number of versions to retrieve.
+
+ order: Sort order of results by version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return self._get_api_list(
+ path_template("/skills/{skill_id}/versions", skill_id=skill_id),
+ page=SyncCursorPage[SkillVersion],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ version_list_params.VersionListParams,
+ ),
+ ),
+ model=SkillVersion,
+ )
+
+ def delete(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DeletedSkillVersion:
+ """
+ Delete a skill version.
+
+ Args:
+ version: The skill version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ return self._delete(
+ path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DeletedSkillVersion,
+ )
+
+
+class AsyncVersions(AsyncAPIResource):
+ @cached_property
+ def content(self) -> AsyncContent:
+ return AsyncContent(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncVersionsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncVersionsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncVersionsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncVersionsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ skill_id: str,
+ *,
+ default: bool | Omit = omit,
+ files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SkillVersion:
+ """
+ Create a new immutable skill version.
+
+ Args:
+ default: Whether to set this version as the default.
+
+ files: Skill files to upload (directory upload) or a single zip file.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ body = deepcopy_minimal(
+ {
+ "default": default,
+ "files": files,
+ }
+ )
+ extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
+ if extracted_files:
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ path_template("/skills/{skill_id}/versions", skill_id=skill_id),
+ body=await async_maybe_transform(body, version_create_params.VersionCreateParams),
+ files=extracted_files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SkillVersion,
+ )
+
+ async def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SkillVersion:
+ """
+ Get a specific skill version.
+
+ Args:
+ version: The version number to retrieve.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ return await self._get(
+ path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SkillVersion,
+ )
+
+ def list(
+ self,
+ skill_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[SkillVersion, AsyncCursorPage[SkillVersion]]:
+ """
+ List skill versions for a skill.
+
+ Args:
+ after: The skill version ID to start after.
+
+ limit: Number of versions to retrieve.
+
+ order: Sort order of results by version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ return self._get_api_list(
+ path_template("/skills/{skill_id}/versions", skill_id=skill_id),
+ page=AsyncCursorPage[SkillVersion],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ version_list_params.VersionListParams,
+ ),
+ ),
+ model=SkillVersion,
+ )
+
+ async def delete(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DeletedSkillVersion:
+ """
+ Delete a skill version.
+
+ Args:
+ version: The skill version number.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not skill_id:
+ raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
+ if not version:
+ raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
+ return await self._delete(
+ path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DeletedSkillVersion,
+ )
+
+
+class VersionsWithRawResponse:
+ def __init__(self, versions: Versions) -> None:
+ self._versions = versions
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ versions.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ versions.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ versions.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ versions.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithRawResponse:
+ return ContentWithRawResponse(self._versions.content)
+
+
+class AsyncVersionsWithRawResponse:
+ def __init__(self, versions: AsyncVersions) -> None:
+ self._versions = versions
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ versions.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ versions.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ versions.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ versions.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithRawResponse:
+ return AsyncContentWithRawResponse(self._versions.content)
+
+
+class VersionsWithStreamingResponse:
+ def __init__(self, versions: Versions) -> None:
+ self._versions = versions
+
+ self.create = to_streamed_response_wrapper(
+ versions.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ versions.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ versions.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ versions.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithStreamingResponse:
+ return ContentWithStreamingResponse(self._versions.content)
+
+
+class AsyncVersionsWithStreamingResponse:
+ def __init__(self, versions: AsyncVersions) -> None:
+ self._versions = versions
+
+ self.create = async_to_streamed_response_wrapper(
+ versions.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ versions.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ versions.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ versions.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithStreamingResponse:
+ return AsyncContentWithStreamingResponse(self._versions.content)
diff --git a/portkey_ai/_vendor/openai/resources/uploads/parts.py b/portkey_ai/_vendor/openai/resources/uploads/parts.py
index 73eabd40..cf09eea7 100644
--- a/portkey_ai/_vendor/openai/resources/uploads/parts.py
+++ b/portkey_ai/_vendor/openai/resources/uploads/parts.py
@@ -8,7 +8,7 @@
from ... import _legacy_response
from ..._types import Body, Query, Headers, NotGiven, FileTypes, not_given
-from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -20,6 +20,8 @@
class Parts(SyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def with_raw_response(self) -> PartsWithRawResponse:
"""
@@ -84,7 +86,7 @@ def create(
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
- f"/uploads/{upload_id}/parts",
+ path_template("/uploads/{upload_id}/parts", upload_id=upload_id),
body=maybe_transform(body, part_create_params.PartCreateParams),
files=files,
options=make_request_options(
@@ -95,6 +97,8 @@ def create(
class AsyncParts(AsyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def with_raw_response(self) -> AsyncPartsWithRawResponse:
"""
@@ -159,7 +163,7 @@ async def create(
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
- f"/uploads/{upload_id}/parts",
+ path_template("/uploads/{upload_id}/parts", upload_id=upload_id),
body=await async_maybe_transform(body, part_create_params.PartCreateParams),
files=files,
options=make_request_options(
diff --git a/portkey_ai/_vendor/openai/resources/uploads/uploads.py b/portkey_ai/_vendor/openai/resources/uploads/uploads.py
index e8c047bd..7778e515 100644
--- a/portkey_ai/_vendor/openai/resources/uploads/uploads.py
+++ b/portkey_ai/_vendor/openai/resources/uploads/uploads.py
@@ -23,7 +23,7 @@
)
from ...types import FilePurpose, upload_create_params, upload_complete_params
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -41,8 +41,11 @@
class Uploads(SyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def parts(self) -> Parts:
+ """Use Uploads to upload large files in multiple parts."""
return Parts(self._client)
@cached_property
@@ -198,6 +201,8 @@ def create(
the documentation on
[creating a File](https://platform.openai.com/docs/api-reference/files/create).
+ Returns the Upload object with status `pending`.
+
Args:
bytes: The number of bytes in the file you are uploading.
@@ -257,6 +262,8 @@ def cancel(
No Parts may be added after an Upload is cancelled.
+ Returns the Upload object with status `cancelled`.
+
Args:
extra_headers: Send extra headers
@@ -269,7 +276,7 @@ def cancel(
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
return self._post(
- f"/uploads/{upload_id}/cancel",
+ path_template("/uploads/{upload_id}/cancel", upload_id=upload_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -302,7 +309,9 @@ def complete(
The number of bytes uploaded upon completion must match the number of bytes
initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
+ an Upload is completed. Returns the Upload object with status `completed`,
+ including an additional `file` property containing the created usable File
+ object.
Args:
part_ids: The ordered list of Part IDs.
@@ -321,7 +330,7 @@ def complete(
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
return self._post(
- f"/uploads/{upload_id}/complete",
+ path_template("/uploads/{upload_id}/complete", upload_id=upload_id),
body=maybe_transform(
{
"part_ids": part_ids,
@@ -337,8 +346,11 @@ def complete(
class AsyncUploads(AsyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def parts(self) -> AsyncParts:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncParts(self._client)
@cached_property
@@ -505,6 +517,8 @@ async def create(
the documentation on
[creating a File](https://platform.openai.com/docs/api-reference/files/create).
+ Returns the Upload object with status `pending`.
+
Args:
bytes: The number of bytes in the file you are uploading.
@@ -564,6 +578,8 @@ async def cancel(
No Parts may be added after an Upload is cancelled.
+ Returns the Upload object with status `cancelled`.
+
Args:
extra_headers: Send extra headers
@@ -576,7 +592,7 @@ async def cancel(
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
return await self._post(
- f"/uploads/{upload_id}/cancel",
+ path_template("/uploads/{upload_id}/cancel", upload_id=upload_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -609,7 +625,9 @@ async def complete(
The number of bytes uploaded upon completion must match the number of bytes
initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
+ an Upload is completed. Returns the Upload object with status `completed`,
+ including an additional `file` property containing the created usable File
+ object.
Args:
part_ids: The ordered list of Part IDs.
@@ -628,7 +646,7 @@ async def complete(
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
return await self._post(
- f"/uploads/{upload_id}/complete",
+ path_template("/uploads/{upload_id}/complete", upload_id=upload_id),
body=await async_maybe_transform(
{
"part_ids": part_ids,
@@ -659,6 +677,7 @@ def __init__(self, uploads: Uploads) -> None:
@cached_property
def parts(self) -> PartsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
return PartsWithRawResponse(self._uploads.parts)
@@ -678,6 +697,7 @@ def __init__(self, uploads: AsyncUploads) -> None:
@cached_property
def parts(self) -> AsyncPartsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncPartsWithRawResponse(self._uploads.parts)
@@ -697,6 +717,7 @@ def __init__(self, uploads: Uploads) -> None:
@cached_property
def parts(self) -> PartsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
return PartsWithStreamingResponse(self._uploads.parts)
@@ -716,4 +737,5 @@ def __init__(self, uploads: AsyncUploads) -> None:
@cached_property
def parts(self) -> AsyncPartsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncPartsWithStreamingResponse(self._uploads.parts)
diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py b/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py
index d31fb59b..f097cf8a 100644
--- a/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py
+++ b/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py
@@ -13,7 +13,7 @@
from ... import _legacy_response
from ...types import FileChunkingStrategyParam
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
-from ..._utils import is_given, maybe_transform, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -79,12 +79,14 @@ def create(
file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
the vector store should use. Useful for tools like `file_search` that can access
files. If `attributes` or `chunking_strategy` are provided, they will be applied
- to all files in the batch. Mutually exclusive with `files`.
+ to all files in the batch. The maximum batch size is 2000 files. Mutually
+ exclusive with `files`.
files: A list of objects that each include a `file_id` plus optional `attributes` or
`chunking_strategy`. Use this when you need to override metadata for specific
files. The global `attributes` or `chunking_strategy` will be ignored and must
- be specified for each file. Mutually exclusive with `file_ids`.
+ be specified for each file. The maximum batch size is 2000 files. Mutually
+ exclusive with `file_ids`.
extra_headers: Send extra headers
@@ -98,7 +100,7 @@ def create(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
+ path_template("/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=maybe_transform(
{
"attributes": attributes,
@@ -144,7 +146,11 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -183,7 +189,11 @@ def cancel(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -194,15 +204,29 @@ def create_and_poll(
self,
vector_store_id: str,
*,
- file_ids: SequenceNotStr[str],
- poll_interval_ms: int | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
+ file_ids: SequenceNotStr[str] | Omit = omit,
+ files: Iterable[file_batch_create_params.File] | Omit = omit,
+ poll_interval_ms: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFileBatch:
"""Create a vector store batch and poll until all files have been processed."""
batch = self.create(
vector_store_id=vector_store_id,
- file_ids=file_ids,
+ attributes=attributes,
chunking_strategy=chunking_strategy,
+ file_ids=file_ids,
+ files=files,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
)
# TODO: don't poll unless necessary??
return self.poll(
@@ -264,7 +288,11 @@ def list_files(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
page=SyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -424,12 +452,14 @@ async def create(
file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
the vector store should use. Useful for tools like `file_search` that can access
files. If `attributes` or `chunking_strategy` are provided, they will be applied
- to all files in the batch. Mutually exclusive with `files`.
+ to all files in the batch. The maximum batch size is 2000 files. Mutually
+ exclusive with `files`.
files: A list of objects that each include a `file_id` plus optional `attributes` or
`chunking_strategy`. Use this when you need to override metadata for specific
files. The global `attributes` or `chunking_strategy` will be ignored and must
- be specified for each file. Mutually exclusive with `file_ids`.
+ be specified for each file. The maximum batch size is 2000 files. Mutually
+ exclusive with `file_ids`.
extra_headers: Send extra headers
@@ -443,7 +473,7 @@ async def create(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
+ path_template("/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"attributes": attributes,
@@ -489,7 +519,11 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -528,7 +562,11 @@ async def cancel(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -539,15 +577,29 @@ async def create_and_poll(
self,
vector_store_id: str,
*,
- file_ids: SequenceNotStr[str],
- poll_interval_ms: int | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
+ file_ids: SequenceNotStr[str] | Omit = omit,
+ files: Iterable[file_batch_create_params.File] | Omit = omit,
+ poll_interval_ms: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFileBatch:
"""Create a vector store batch and poll until all files have been processed."""
batch = await self.create(
vector_store_id=vector_store_id,
- file_ids=file_ids,
+ attributes=attributes,
chunking_strategy=chunking_strategy,
+ file_ids=file_ids,
+ files=files,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
)
# TODO: don't poll unless necessary??
return await self.poll(
@@ -609,7 +661,11 @@ def list_files(
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ path_template(
+ "/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
page=AsyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/files.py b/portkey_ai/_vendor/openai/resources/vector_stores/files.py
index d2eb4e16..86664345 100644
--- a/portkey_ai/_vendor/openai/resources/vector_stores/files.py
+++ b/portkey_ai/_vendor/openai/resources/vector_stores/files.py
@@ -10,7 +10,7 @@
from ... import _legacy_response
from ...types import FileChunkingStrategyParam
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from ..._utils import is_given, maybe_transform, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -90,7 +90,7 @@ def create(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/vector_stores/{vector_store_id}/files",
+ path_template("/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
body=maybe_transform(
{
"file_id": file_id,
@@ -135,7 +135,9 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -179,7 +181,9 @@ def update(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -237,7 +241,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/files",
+ path_template("/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
page=SyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -292,7 +296,9 @@ def delete(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -307,10 +313,23 @@ def create_and_poll(
attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
poll_interval_ms: int | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFile:
"""Attach a file to the given vector store and wait for it to be processed."""
self.create(
- vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ chunking_strategy=chunking_strategy,
+ attributes=attributes,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
)
return self.poll(
@@ -424,7 +443,11 @@ def content(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}/content",
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ ),
page=SyncPage[FileContentResponse],
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -498,7 +521,7 @@ async def create(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/vector_stores/{vector_store_id}/files",
+ path_template("/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"file_id": file_id,
@@ -543,7 +566,9 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -587,7 +612,9 @@ async def update(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -645,7 +672,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/files",
+ path_template("/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
page=AsyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -700,7 +727,9 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -715,10 +744,23 @@ async def create_and_poll(
attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
poll_interval_ms: int | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFile:
"""Attach a file to the given vector store and wait for it to be processed."""
await self.create(
- vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ chunking_strategy=chunking_strategy,
+ attributes=attributes,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
)
return await self.poll(
@@ -834,7 +876,11 @@ def content(
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
+ path_template(
+ "/vector_stores/{vector_store_id}/files/{file_id}/content",
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ ),
page=AsyncPage[FileContentResponse],
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py b/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py
index 490e3e7f..7fa2ad52 100644
--- a/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py
+++ b/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py
@@ -24,7 +24,7 @@
vector_store_update_params,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -171,7 +171,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -219,7 +219,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
body=maybe_transform(
{
"expires_after": expires_after,
@@ -326,7 +326,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -377,7 +377,7 @@ def search(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/search",
+ path_template("/vector_stores/{vector_store_id}/search", vector_store_id=vector_store_id),
page=SyncPage[VectorStoreSearchResponse],
body=maybe_transform(
{
@@ -521,7 +521,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -569,7 +569,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"expires_after": expires_after,
@@ -676,7 +676,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/vector_stores/{vector_store_id}",
+ path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -727,7 +727,7 @@ def search(
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/vector_stores/{vector_store_id}/search",
+ path_template("/vector_stores/{vector_store_id}/search", vector_store_id=vector_store_id),
page=AsyncPage[VectorStoreSearchResponse],
body=maybe_transform(
{
diff --git a/portkey_ai/_vendor/openai/resources/videos.py b/portkey_ai/_vendor/openai/resources/videos.py
index 9f74c942..a006e647 100644
--- a/portkey_ai/_vendor/openai/resources/videos.py
+++ b/portkey_ai/_vendor/openai/resources/videos.py
@@ -11,13 +11,16 @@
from ..types import (
VideoSize,
VideoSeconds,
+ video_edit_params,
video_list_params,
video_remix_params,
video_create_params,
+ video_extend_params,
+ video_create_character_params,
video_download_content_params,
)
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -36,6 +39,8 @@
from ..types.video_seconds import VideoSeconds
from ..types.video_model_param import VideoModelParam
from ..types.video_delete_response import VideoDeleteResponse
+from ..types.video_get_character_response import VideoGetCharacterResponse
+from ..types.video_create_character_response import VideoCreateCharacterResponse
__all__ = ["Videos", "AsyncVideos"]
@@ -64,7 +69,7 @@ def create(
self,
*,
prompt: str,
- input_reference: FileTypes | Omit = omit,
+ input_reference: video_create_params.InputReference | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
@@ -76,12 +81,12 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Create a video
+ Create a new video generation job from a prompt and optional reference assets.
Args:
prompt: Text prompt that describes the video to generate.
- input_reference: Optional image reference that guides generation.
+ input_reference: Optional reference asset upload or reference object that guides generation.
model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
to `sora-2`.
@@ -109,11 +114,10 @@ def create(
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
- if files:
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/videos",
body=maybe_transform(body, video_create_params.VideoCreateParams),
@@ -128,7 +132,7 @@ def create_and_poll(
self,
*,
prompt: str,
- input_reference: FileTypes | Omit = omit,
+ input_reference: video_create_params.InputReference | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
@@ -209,7 +213,7 @@ def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Retrieve a video
+ Fetch the latest metadata for a generated video.
Args:
extra_headers: Send extra headers
@@ -223,7 +227,7 @@ def retrieve(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._get(
- f"/videos/{video_id}",
+ path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -244,7 +248,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncConversationCursorPage[Video]:
"""
- List videos
+ List recently generated videos for the current project.
Args:
after: Identifier for the last item from the previous pagination request
@@ -294,7 +298,7 @@ def delete(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VideoDeleteResponse:
"""
- Delete a video
+ Permanently delete a completed or failed video and its stored assets.
Args:
extra_headers: Send extra headers
@@ -308,13 +312,62 @@ def delete(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._delete(
- f"/videos/{video_id}",
+ path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VideoDeleteResponse,
)
+ def create_character(
+ self,
+ *,
+ name: str,
+ video: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VideoCreateCharacterResponse:
+ """
+ Create a character from an uploaded video.
+
+ Args:
+ name: Display name for this API character.
+
+ video: Video file used to create a character.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "name": name,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/videos/characters",
+ body=maybe_transform(body, video_create_character_params.VideoCreateCharacterParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VideoCreateCharacterResponse,
+ )
+
def download_content(
self,
video_id: str,
@@ -327,12 +380,13 @@ def download_content(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
- """Download video content
+ """
+ Download the generated video bytes or a derived preview asset.
- Args:
- variant: Which downloadable asset to return.
+ Streams the rendered video content for the specified video job.
- Defaults to the MP4 video.
+ Args:
+ variant: Which downloadable asset to return. Defaults to the MP4 video.
extra_headers: Send extra headers
@@ -346,7 +400,7 @@ def download_content(
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
- f"/videos/{video_id}/content",
+ path_template("/videos/{video_id}/content", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -357,6 +411,143 @@ def download_content(
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
+ def edit(
+ self,
+ *,
+ prompt: str,
+ video: video_edit_params.Video,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Video:
+ """
+ Create a new video generation job by editing a source video or existing
+ generated video.
+
+ Args:
+ prompt: Text prompt that describes how to edit the source video.
+
+ video: Reference to the completed video to edit.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "prompt": prompt,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/videos/edits",
+ body=maybe_transform(body, video_edit_params.VideoEditParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Video,
+ )
+
+ def extend(
+ self,
+ *,
+ prompt: str,
+ seconds: VideoSeconds,
+ video: video_extend_params.Video,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Video:
+ """
+ Create an extension of a completed video.
+
+ Args:
+ prompt: Updated text prompt that directs the extension generation.
+
+ seconds: Length of the newly generated extension segment in seconds (allowed values: 4,
+ 8, 12, 16, 20).
+
+ video: Reference to the completed video to extend.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "prompt": prompt,
+ "seconds": seconds,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/videos/extensions",
+ body=maybe_transform(body, video_extend_params.VideoExtendParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Video,
+ )
+
+ def get_character(
+ self,
+ character_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VideoGetCharacterResponse:
+ """
+ Fetch a character.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not character_id:
+ raise ValueError(f"Expected a non-empty value for `character_id` but received {character_id!r}")
+ return self._get(
+ path_template("/videos/characters/{character_id}", character_id=character_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VideoGetCharacterResponse,
+ )
+
def remix(
self,
video_id: str,
@@ -370,7 +561,7 @@ def remix(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Create a video remix
+ Create a remix of a completed video using a refreshed prompt.
Args:
prompt: Updated text prompt that directs the remix generation.
@@ -386,7 +577,7 @@ def remix(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return self._post(
- f"/videos/{video_id}/remix",
+ path_template("/videos/{video_id}/remix", video_id=video_id),
body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -419,7 +610,7 @@ async def create(
self,
*,
prompt: str,
- input_reference: FileTypes | Omit = omit,
+ input_reference: video_create_params.InputReference | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
@@ -431,12 +622,12 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Create a video
+ Create a new video generation job from a prompt and optional reference assets.
Args:
prompt: Text prompt that describes the video to generate.
- input_reference: Optional image reference that guides generation.
+ input_reference: Optional reference asset upload or reference object that guides generation.
model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
to `sora-2`.
@@ -464,11 +655,10 @@ async def create(
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
- if files:
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/videos",
body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
@@ -483,7 +673,7 @@ async def create_and_poll(
self,
*,
prompt: str,
- input_reference: FileTypes | Omit = omit,
+ input_reference: video_create_params.InputReference | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
@@ -564,7 +754,7 @@ async def retrieve(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Retrieve a video
+ Fetch the latest metadata for a generated video.
Args:
extra_headers: Send extra headers
@@ -578,7 +768,7 @@ async def retrieve(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._get(
- f"/videos/{video_id}",
+ path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -599,7 +789,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]:
"""
- List videos
+ List recently generated videos for the current project.
Args:
after: Identifier for the last item from the previous pagination request
@@ -649,7 +839,7 @@ async def delete(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VideoDeleteResponse:
"""
- Delete a video
+ Permanently delete a completed or failed video and its stored assets.
Args:
extra_headers: Send extra headers
@@ -663,13 +853,62 @@ async def delete(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._delete(
- f"/videos/{video_id}",
+ path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VideoDeleteResponse,
)
+ async def create_character(
+ self,
+ *,
+ name: str,
+ video: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VideoCreateCharacterResponse:
+ """
+ Create a character from an uploaded video.
+
+ Args:
+ name: Display name for this API character.
+
+ video: Video file used to create a character.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "name": name,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/videos/characters",
+ body=await async_maybe_transform(body, video_create_character_params.VideoCreateCharacterParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VideoCreateCharacterResponse,
+ )
+
async def download_content(
self,
video_id: str,
@@ -682,12 +921,13 @@ async def download_content(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
- """Download video content
+ """
+ Download the generated video bytes or a derived preview asset.
- Args:
- variant: Which downloadable asset to return.
+ Streams the rendered video content for the specified video job.
- Defaults to the MP4 video.
+ Args:
+ variant: Which downloadable asset to return. Defaults to the MP4 video.
extra_headers: Send extra headers
@@ -701,7 +941,7 @@ async def download_content(
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
- f"/videos/{video_id}/content",
+ path_template("/videos/{video_id}/content", video_id=video_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -714,6 +954,143 @@ async def download_content(
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
+ async def edit(
+ self,
+ *,
+ prompt: str,
+ video: video_edit_params.Video,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Video:
+ """
+ Create a new video generation job by editing a source video or existing
+ generated video.
+
+ Args:
+ prompt: Text prompt that describes how to edit the source video.
+
+ video: Reference to the completed video to edit.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "prompt": prompt,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/videos/edits",
+ body=await async_maybe_transform(body, video_edit_params.VideoEditParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Video,
+ )
+
+ async def extend(
+ self,
+ *,
+ prompt: str,
+ seconds: VideoSeconds,
+ video: video_extend_params.Video,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Video:
+ """
+ Create an extension of a completed video.
+
+ Args:
+ prompt: Updated text prompt that directs the extension generation.
+
+ seconds: Length of the newly generated extension segment in seconds (allowed values: 4,
+ 8, 12, 16, 20).
+
+ video: Reference to the completed video to extend.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "prompt": prompt,
+ "seconds": seconds,
+ "video": video,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/videos/extensions",
+ body=await async_maybe_transform(body, video_extend_params.VideoExtendParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Video,
+ )
+
+ async def get_character(
+ self,
+ character_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VideoGetCharacterResponse:
+ """
+ Fetch a character.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not character_id:
+ raise ValueError(f"Expected a non-empty value for `character_id` but received {character_id!r}")
+ return await self._get(
+ path_template("/videos/characters/{character_id}", character_id=character_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VideoGetCharacterResponse,
+ )
+
async def remix(
self,
video_id: str,
@@ -727,7 +1104,7 @@ async def remix(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
- Create a video remix
+ Create a remix of a completed video using a refreshed prompt.
Args:
prompt: Updated text prompt that directs the remix generation.
@@ -743,7 +1120,7 @@ async def remix(
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._post(
- f"/videos/{video_id}/remix",
+ path_template("/videos/{video_id}/remix", video_id=video_id),
body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -768,9 +1145,21 @@ def __init__(self, videos: Videos) -> None:
self.delete = _legacy_response.to_raw_response_wrapper(
videos.delete,
)
+ self.create_character = _legacy_response.to_raw_response_wrapper(
+ videos.create_character,
+ )
self.download_content = _legacy_response.to_raw_response_wrapper(
videos.download_content,
)
+ self.edit = _legacy_response.to_raw_response_wrapper(
+ videos.edit,
+ )
+ self.extend = _legacy_response.to_raw_response_wrapper(
+ videos.extend,
+ )
+ self.get_character = _legacy_response.to_raw_response_wrapper(
+ videos.get_character,
+ )
self.remix = _legacy_response.to_raw_response_wrapper(
videos.remix,
)
@@ -792,9 +1181,21 @@ def __init__(self, videos: AsyncVideos) -> None:
self.delete = _legacy_response.async_to_raw_response_wrapper(
videos.delete,
)
+ self.create_character = _legacy_response.async_to_raw_response_wrapper(
+ videos.create_character,
+ )
self.download_content = _legacy_response.async_to_raw_response_wrapper(
videos.download_content,
)
+ self.edit = _legacy_response.async_to_raw_response_wrapper(
+ videos.edit,
+ )
+ self.extend = _legacy_response.async_to_raw_response_wrapper(
+ videos.extend,
+ )
+ self.get_character = _legacy_response.async_to_raw_response_wrapper(
+ videos.get_character,
+ )
self.remix = _legacy_response.async_to_raw_response_wrapper(
videos.remix,
)
@@ -816,10 +1217,22 @@ def __init__(self, videos: Videos) -> None:
self.delete = to_streamed_response_wrapper(
videos.delete,
)
+ self.create_character = to_streamed_response_wrapper(
+ videos.create_character,
+ )
self.download_content = to_custom_streamed_response_wrapper(
videos.download_content,
StreamedBinaryAPIResponse,
)
+ self.edit = to_streamed_response_wrapper(
+ videos.edit,
+ )
+ self.extend = to_streamed_response_wrapper(
+ videos.extend,
+ )
+ self.get_character = to_streamed_response_wrapper(
+ videos.get_character,
+ )
self.remix = to_streamed_response_wrapper(
videos.remix,
)
@@ -841,10 +1254,22 @@ def __init__(self, videos: AsyncVideos) -> None:
self.delete = async_to_streamed_response_wrapper(
videos.delete,
)
+ self.create_character = async_to_streamed_response_wrapper(
+ videos.create_character,
+ )
self.download_content = async_to_custom_streamed_response_wrapper(
videos.download_content,
AsyncStreamedBinaryAPIResponse,
)
+ self.edit = async_to_streamed_response_wrapper(
+ videos.edit,
+ )
+ self.extend = async_to_streamed_response_wrapper(
+ videos.extend,
+ )
+ self.get_character = async_to_streamed_response_wrapper(
+ videos.get_character,
+ )
self.remix = async_to_streamed_response_wrapper(
videos.remix,
)
diff --git a/portkey_ai/_vendor/openai/resources/webhooks/__init__.py b/portkey_ai/_vendor/openai/resources/webhooks/__init__.py
new file mode 100644
index 00000000..66449ee7
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/webhooks/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .webhooks import Webhooks as _Webhooks, AsyncWebhooks as _AsyncWebhooks
+
+
+class Webhooks(_Webhooks):
+ pass
+
+
+class AsyncWebhooks(_AsyncWebhooks):
+ pass
+
+
+__all__ = ["Webhooks", "AsyncWebhooks"]
diff --git a/portkey_ai/_vendor/openai/resources/webhooks/api.md b/portkey_ai/_vendor/openai/resources/webhooks/api.md
new file mode 100644
index 00000000..8e3c312e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/webhooks/api.md
@@ -0,0 +1,24 @@
+# Webhooks
+
+Types:
+
+```python
+from openai.types.webhooks import (
+ BatchCancelledWebhookEvent,
+ BatchCompletedWebhookEvent,
+ BatchExpiredWebhookEvent,
+ BatchFailedWebhookEvent,
+ EvalRunCanceledWebhookEvent,
+ EvalRunFailedWebhookEvent,
+ EvalRunSucceededWebhookEvent,
+ FineTuningJobCancelledWebhookEvent,
+ FineTuningJobFailedWebhookEvent,
+ FineTuningJobSucceededWebhookEvent,
+ RealtimeCallIncomingWebhookEvent,
+ ResponseCancelledWebhookEvent,
+ ResponseCompletedWebhookEvent,
+ ResponseFailedWebhookEvent,
+ ResponseIncompleteWebhookEvent,
+ UnwrapWebhookEvent,
+)
+```
diff --git a/portkey_ai/_vendor/openai/resources/webhooks.py b/portkey_ai/_vendor/openai/resources/webhooks/webhooks.py
similarity index 96%
rename from portkey_ai/_vendor/openai/resources/webhooks.py
rename to portkey_ai/_vendor/openai/resources/webhooks/webhooks.py
index 3e13d3fa..8d99568a 100644
--- a/portkey_ai/_vendor/openai/resources/webhooks.py
+++ b/portkey_ai/_vendor/openai/resources/webhooks/webhooks.py
@@ -9,12 +9,12 @@
import hashlib
from typing import cast
-from .._types import HeadersLike
-from .._utils import get_required_header
-from .._models import construct_type
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._exceptions import InvalidWebhookSignatureError
-from ..types.webhooks.unwrap_webhook_event import UnwrapWebhookEvent
+from ..._types import HeadersLike
+from ..._utils import get_required_header
+from ..._models import construct_type
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._exceptions import InvalidWebhookSignatureError
+from ...types.webhooks.unwrap_webhook_event import UnwrapWebhookEvent
__all__ = ["Webhooks", "AsyncWebhooks"]
diff --git a/portkey_ai/_vendor/openai/types/__init__.py b/portkey_ai/_vendor/openai/types/__init__.py
index 5eb267e8..d8dbea71 100644
--- a/portkey_ai/_vendor/openai/types/__init__.py
+++ b/portkey_ai/_vendor/openai/types/__init__.py
@@ -5,6 +5,7 @@
from .batch import Batch as Batch
from .image import Image as Image
from .model import Model as Model
+from .skill import Skill as Skill
from .video import Video as Video
from .shared import (
Metadata as Metadata,
@@ -30,6 +31,7 @@
from .chat_model import ChatModel as ChatModel
from .completion import Completion as Completion
from .moderation import Moderation as Moderation
+from .skill_list import SkillList as SkillList
from .video_size import VideoSize as VideoSize
from .audio_model import AudioModel as AudioModel
from .batch_error import BatchError as BatchError
@@ -41,6 +43,7 @@
from .file_deleted import FileDeleted as FileDeleted
from .file_purpose import FilePurpose as FilePurpose
from .vector_store import VectorStore as VectorStore
+from .deleted_skill import DeletedSkill as DeletedSkill
from .model_deleted import ModelDeleted as ModelDeleted
from .video_seconds import VideoSeconds as VideoSeconds
from .embedding_model import EmbeddingModel as EmbeddingModel
@@ -52,6 +55,8 @@
from .batch_list_params import BatchListParams as BatchListParams
from .completion_choice import CompletionChoice as CompletionChoice
from .image_edit_params import ImageEditParams as ImageEditParams
+from .skill_list_params import SkillListParams as SkillListParams
+from .video_edit_params import VideoEditParams as VideoEditParams
from .video_list_params import VideoListParams as VideoListParams
from .video_model_param import VideoModelParam as VideoModelParam
from .eval_create_params import EvalCreateParams as EvalCreateParams
@@ -61,7 +66,10 @@
from .video_create_error import VideoCreateError as VideoCreateError
from .video_remix_params import VideoRemixParams as VideoRemixParams
from .batch_create_params import BatchCreateParams as BatchCreateParams
+from .skill_create_params import SkillCreateParams as SkillCreateParams
+from .skill_update_params import SkillUpdateParams as SkillUpdateParams
from .video_create_params import VideoCreateParams as VideoCreateParams
+from .video_extend_params import VideoExtendParams as VideoExtendParams
from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
from .eval_create_response import EvalCreateResponse as EvalCreateResponse
from .eval_delete_response import EvalDeleteResponse as EvalDeleteResponse
@@ -92,16 +100,23 @@
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .container_retrieve_response import ContainerRetrieveResponse as ContainerRetrieveResponse
+from .image_input_reference_param import ImageInputReferenceParam as ImageInputReferenceParam
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
-from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
+from .video_get_character_response import VideoGetCharacterResponse as VideoGetCharacterResponse
+from .websocket_connection_options import (
+ WebSocketConnectionOptions as WebSocketConnectionOptions,
+ WebsocketConnectionOptions as WebsocketConnectionOptions,
+)
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
+from .video_create_character_params import VideoCreateCharacterParams as VideoCreateCharacterParams
from .video_download_content_params import VideoDownloadContentParams as VideoDownloadContentParams
from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent
+from .video_create_character_response import VideoCreateCharacterResponse as VideoCreateCharacterResponse
from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
diff --git a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py
index 417df5b2..1c0472ea 100644
--- a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py
+++ b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py
@@ -3,11 +3,11 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .speech_model import SpeechModel
-__all__ = ["SpeechCreateParams"]
+__all__ = ["SpeechCreateParams", "Voice", "VoiceID"]
class SpeechCreateParams(TypedDict, total=False):
@@ -20,14 +20,13 @@ class SpeechCreateParams(TypedDict, total=False):
`tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
"""
- voice: Required[
- Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
- ]
+ voice: Required[Voice]
"""The voice to use when generating the audio.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`,
- `fable`, `onyx`, `nova`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`.
- Previews of the voices are available in the
+ `fable`, `onyx`, `nova`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. You
+ may also provide a custom voice object with an `id`, for example
+ `{ "id": "voice_1234" }`. Previews of the voices are available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
"""
@@ -55,3 +54,15 @@ class SpeechCreateParams(TypedDict, total=False):
Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or
`tts-1-hd`.
"""
+
+
+class VoiceID(TypedDict, total=False):
+ """Custom voice reference."""
+
+ id: Required[str]
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+Voice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], VoiceID
+]
diff --git a/portkey_ai/_vendor/openai/types/batch_create_params.py b/portkey_ai/_vendor/openai/types/batch_create_params.py
index 1088aab3..97bd2c67 100644
--- a/portkey_ai/_vendor/openai/types/batch_create_params.py
+++ b/portkey_ai/_vendor/openai/types/batch_create_params.py
@@ -18,14 +18,24 @@ class BatchCreateParams(TypedDict, total=False):
"""
endpoint: Required[
- Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"]
+ Literal[
+ "/v1/responses",
+ "/v1/chat/completions",
+ "/v1/embeddings",
+ "/v1/completions",
+ "/v1/moderations",
+ "/v1/images/generations",
+ "/v1/images/edits",
+ "/v1/videos",
+ ]
]
"""The endpoint to be used for all requests in the batch.
Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
- `/v1/completions`, and `/v1/moderations` are supported. Note that
- `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding
- inputs across all requests in the batch.
+ `/v1/completions`, `/v1/moderations`, `/v1/images/generations`,
+ `/v1/images/edits`, and `/v1/videos` are supported. Note that `/v1/embeddings`
+ batches are also restricted to a maximum of 50,000 embedding inputs across all
+ requests in the batch.
"""
input_file_id: Required[str]
diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py
index 461d871a..5a468a35 100644
--- a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py
+++ b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py
@@ -187,8 +187,9 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
file_ids: SequenceNotStr[str]
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
- add to the vector store. There can be a maximum of 10000 files in a vector
- store.
+ add to the vector store. For vector stores created before Nov 2025, there can be
+ a maximum of 10,000 files in a vector store. For vector stores created starting
+ in Nov 2025, the limit is 100,000,000 files.
"""
metadata: Optional[Metadata]
diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py
index c0aee3e9..3d3d5d4f 100644
--- a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py
+++ b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py
@@ -274,8 +274,9 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
file_ids: SequenceNotStr[str]
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
- add to the vector store. There can be a maximum of 10000 files in a vector
- store.
+ add to the vector store. For vector stores created before Nov 2025, there can be
+ a maximum of 10,000 files in a vector store. For vector stores created starting
+ in Nov 2025, the limit is 100,000,000 files.
"""
metadata: Optional[Metadata]
diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py
index ef83e3d4..b823d1c2 100644
--- a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py
+++ b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py
@@ -152,8 +152,9 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
file_ids: SequenceNotStr[str]
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
- add to the vector store. There can be a maximum of 10000 files in a vector
- store.
+ add to the vector store. For vector stores created before Nov 2025, there can be
+ a maximum of 10,000 files in a vector store. For vector stores created starting
+ in Nov 2025, the limit is 100,000,000 files.
"""
metadata: Optional[Metadata]
diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py
index 1a73bb0c..fe64ba49 100644
--- a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py
+++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py
@@ -3,9 +3,21 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["ChatCompletionAudioParam"]
+__all__ = ["ChatCompletionAudioParam", "Voice", "VoiceID"]
+
+
+class VoiceID(TypedDict, total=False):
+ """Custom voice reference."""
+
+ id: Required[str]
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+Voice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], VoiceID
+]
class ChatCompletionAudioParam(TypedDict, total=False):
@@ -21,11 +33,11 @@ class ChatCompletionAudioParam(TypedDict, total=False):
Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
"""
- voice: Required[
- Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
- ]
+ voice: Required[Voice]
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`,
- `fable`, `nova`, `onyx`, `sage`, `shimmer`, `marin`, and `cedar`.
+ `fable`, `nova`, `onyx`, `sage`, `shimmer`, `marin`, and `cedar`. You may also
+ provide a custom voice object with an `id`, for example
+ `{ "id": "voice_1234" }`.
"""
diff --git a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py
index 49cefb95..8e71ccbe 100644
--- a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py
+++ b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py
@@ -227,8 +227,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
diff --git a/portkey_ai/_vendor/openai/types/chat/completion_list_params.py b/portkey_ai/_vendor/openai/types/chat/completion_list_params.py
index 32bd3f5c..d93da834 100644
--- a/portkey_ai/_vendor/openai/types/chat/completion_list_params.py
+++ b/portkey_ai/_vendor/openai/types/chat/completion_list_params.py
@@ -18,13 +18,9 @@ class CompletionListParams(TypedDict, total=False):
"""Number of Chat Completions to retrieve."""
metadata: Optional[Metadata]
- """Set of 16 key-value pairs that can be attached to an object.
+ """A list of metadata keys to filter the Chat Completions by. Example:
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
+ `metadata[key1]=value1&metadata[key2]=value2`
"""
model: str
diff --git a/portkey_ai/_vendor/openai/types/container_create_params.py b/portkey_ai/_vendor/openai/types/container_create_params.py
index 47101ecd..63d28f39 100644
--- a/portkey_ai/_vendor/openai/types/container_create_params.py
+++ b/portkey_ai/_vendor/openai/types/container_create_params.py
@@ -2,11 +2,16 @@
from __future__ import annotations
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .._types import SequenceNotStr
+from .responses.inline_skill_param import InlineSkillParam
+from .responses.skill_reference_param import SkillReferenceParam
+from .responses.container_network_policy_disabled_param import ContainerNetworkPolicyDisabledParam
+from .responses.container_network_policy_allowlist_param import ContainerNetworkPolicyAllowlistParam
-__all__ = ["ContainerCreateParams", "ExpiresAfter"]
+__all__ = ["ContainerCreateParams", "ExpiresAfter", "NetworkPolicy", "Skill"]
class ContainerCreateParams(TypedDict, total=False):
@@ -22,6 +27,12 @@ class ContainerCreateParams(TypedDict, total=False):
memory_limit: Literal["1g", "4g", "16g", "64g"]
"""Optional memory limit for the container. Defaults to "1g"."""
+ network_policy: NetworkPolicy
+ """Network access policy for the container."""
+
+ skills: Iterable[Skill]
+ """An optional list of skills referenced by id or inline data."""
+
class ExpiresAfter(TypedDict, total=False):
"""Container expiration time in seconds relative to the 'anchor' time."""
@@ -33,3 +44,8 @@ class ExpiresAfter(TypedDict, total=False):
"""
minutes: Required[int]
+
+
+NetworkPolicy: TypeAlias = Union[ContainerNetworkPolicyDisabledParam, ContainerNetworkPolicyAllowlistParam]
+
+Skill: TypeAlias = Union[SkillReferenceParam, InlineSkillParam]
diff --git a/portkey_ai/_vendor/openai/types/container_create_response.py b/portkey_ai/_vendor/openai/types/container_create_response.py
index 0ebcc040..34bc56ad 100644
--- a/portkey_ai/_vendor/openai/types/container_create_response.py
+++ b/portkey_ai/_vendor/openai/types/container_create_response.py
@@ -1,11 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["ContainerCreateResponse", "ExpiresAfter"]
+__all__ = ["ContainerCreateResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
@@ -22,6 +22,16 @@ class ExpiresAfter(BaseModel):
"""The number of minutes after the anchor before the container expires."""
+class NetworkPolicy(BaseModel):
+ """Network access policy for the container."""
+
+ type: Literal["allowlist", "disabled"]
+ """The network policy mode."""
+
+ allowed_domains: Optional[List[str]] = None
+ """Allowed outbound domains when `type` is `allowlist`."""
+
+
class ContainerCreateResponse(BaseModel):
id: str
"""Unique identifier for the container."""
@@ -50,3 +60,6 @@ class ContainerCreateResponse(BaseModel):
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
"""The memory limit configured for the container."""
+
+ network_policy: Optional[NetworkPolicy] = None
+ """Network access policy for the container."""
diff --git a/portkey_ai/_vendor/openai/types/container_list_params.py b/portkey_ai/_vendor/openai/types/container_list_params.py
index 4821a87d..01ec43af 100644
--- a/portkey_ai/_vendor/openai/types/container_list_params.py
+++ b/portkey_ai/_vendor/openai/types/container_list_params.py
@@ -23,6 +23,9 @@ class ContainerListParams(TypedDict, total=False):
Limit can range between 1 and 100, and the default is 20.
"""
+ name: str
+ """Filter results by container name."""
+
order: Literal["asc", "desc"]
"""Sort order by the `created_at` timestamp of the objects.
diff --git a/portkey_ai/_vendor/openai/types/container_list_response.py b/portkey_ai/_vendor/openai/types/container_list_response.py
index 8f395482..bf572acd 100644
--- a/portkey_ai/_vendor/openai/types/container_list_response.py
+++ b/portkey_ai/_vendor/openai/types/container_list_response.py
@@ -1,11 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["ContainerListResponse", "ExpiresAfter"]
+__all__ = ["ContainerListResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
@@ -22,6 +22,16 @@ class ExpiresAfter(BaseModel):
"""The number of minutes after the anchor before the container expires."""
+class NetworkPolicy(BaseModel):
+ """Network access policy for the container."""
+
+ type: Literal["allowlist", "disabled"]
+ """The network policy mode."""
+
+ allowed_domains: Optional[List[str]] = None
+ """Allowed outbound domains when `type` is `allowlist`."""
+
+
class ContainerListResponse(BaseModel):
id: str
"""Unique identifier for the container."""
@@ -50,3 +60,6 @@ class ContainerListResponse(BaseModel):
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
"""The memory limit configured for the container."""
+
+ network_policy: Optional[NetworkPolicy] = None
+ """Network access policy for the container."""
diff --git a/portkey_ai/_vendor/openai/types/container_retrieve_response.py b/portkey_ai/_vendor/openai/types/container_retrieve_response.py
index 9ba3e18c..b5a6d350 100644
--- a/portkey_ai/_vendor/openai/types/container_retrieve_response.py
+++ b/portkey_ai/_vendor/openai/types/container_retrieve_response.py
@@ -1,11 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["ContainerRetrieveResponse", "ExpiresAfter"]
+__all__ = ["ContainerRetrieveResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
@@ -22,6 +22,16 @@ class ExpiresAfter(BaseModel):
"""The number of minutes after the anchor before the container expires."""
+class NetworkPolicy(BaseModel):
+ """Network access policy for the container."""
+
+ type: Literal["allowlist", "disabled"]
+ """The network policy mode."""
+
+ allowed_domains: Optional[List[str]] = None
+ """Allowed outbound domains when `type` is `allowlist`."""
+
+
class ContainerRetrieveResponse(BaseModel):
id: str
"""Unique identifier for the container."""
@@ -50,3 +60,6 @@ class ContainerRetrieveResponse(BaseModel):
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
"""The memory limit configured for the container."""
+
+ network_policy: Optional[NetworkPolicy] = None
+ """Network access policy for the container."""
diff --git a/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py
index e42096eb..ff43a7e5 100644
--- a/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py
+++ b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py
@@ -11,6 +11,12 @@
class ComputerScreenshotContent(BaseModel):
"""A screenshot of a computer."""
+ detail: Literal["low", "high", "auto", "original"]
+ """The detail level of the screenshot image to be sent to the model.
+
+ One of `high`, `low`, `auto`, or `original`. Defaults to `auto`.
+ """
+
file_id: Optional[str] = None
"""The identifier of an uploaded file that contains the screenshot."""
diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_item.py b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py
index 46268d38..52e87ccb 100644
--- a/portkey_ai/_vendor/openai/types/conversations/conversation_item.py
+++ b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py
@@ -7,13 +7,16 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..responses.response_reasoning_item import ResponseReasoningItem
+from ..responses.response_compaction_item import ResponseCompactionItem
from ..responses.response_custom_tool_call import ResponseCustomToolCall
+from ..responses.response_tool_search_call import ResponseToolSearchCall
from ..responses.response_computer_tool_call import ResponseComputerToolCall
from ..responses.response_function_web_search import ResponseFunctionWebSearch
from ..responses.response_apply_patch_tool_call import ResponseApplyPatchToolCall
from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall
from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput
from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem
+from ..responses.response_tool_search_output_item import ResponseToolSearchOutputItem
from ..responses.response_function_shell_tool_call import ResponseFunctionShellToolCall
from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from ..responses.response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput
@@ -229,7 +232,10 @@ class McpCall(BaseModel):
ImageGenerationCall,
ResponseComputerToolCall,
ResponseComputerToolCallOutputItem,
+ ResponseToolSearchCall,
+ ResponseToolSearchOutputItem,
ResponseReasoningItem,
+ ResponseCompactionItem,
ResponseCodeInterpreterToolCall,
LocalShellCall,
LocalShellCallOutput,
diff --git a/portkey_ai/_vendor/openai/types/deleted_skill.py b/portkey_ai/_vendor/openai/types/deleted_skill.py
new file mode 100644
index 00000000..6f02f58c
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/deleted_skill.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["DeletedSkill"]
+
+
+class DeletedSkill(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["skill.deleted"]
diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/__init__.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/__init__.py
index 2947b331..5447b4d8 100644
--- a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/__init__.py
+++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/__init__.py
@@ -2,7 +2,9 @@
from __future__ import annotations
+from .permission_list_params import PermissionListParams as PermissionListParams
from .permission_create_params import PermissionCreateParams as PermissionCreateParams
+from .permission_list_response import PermissionListResponse as PermissionListResponse
from .permission_create_response import PermissionCreateResponse as PermissionCreateResponse
from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse
from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams
diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_params.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_params.py
new file mode 100644
index 00000000..1f389920
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["PermissionListParams"]
+
+
+class PermissionListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last permission ID from the previous pagination request."""
+
+ limit: int
+ """Number of permissions to retrieve."""
+
+ order: Literal["ascending", "descending"]
+ """The order in which to retrieve permissions."""
+
+ project_id: str
+ """The ID of the project to get permissions for."""
diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_response.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_response.py
new file mode 100644
index 00000000..26e913e0
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["PermissionListResponse"]
+
+
+class PermissionListResponse(BaseModel):
+ """
+ The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint.
+ """
+
+ id: str
+ """The permission identifier, which can be referenced in the API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the permission was created."""
+
+ object: Literal["checkpoint.permission"]
+ """The object type, which is always "checkpoint.permission"."""
+
+ project_id: str
+ """The project identifier that the permission is for."""
diff --git a/portkey_ai/_vendor/openai/types/image_edit_params.py b/portkey_ai/_vendor/openai/types/image_edit_params.py
index 0bd5f39f..05f3401d 100644
--- a/portkey_ai/_vendor/openai/types/image_edit_params.py
+++ b/portkey_ai/_vendor/openai/types/image_edit_params.py
@@ -17,7 +17,8 @@ class ImageEditParamsBase(TypedDict, total=False):
For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
`gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
+ 50MB. You can provide up to 16 images. `chatgpt-image-latest` follows the same
+ input constraints as GPT image models.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
@@ -45,8 +46,8 @@ class ImageEditParamsBase(TypedDict, total=False):
"""
Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
"""
mask: FileTypes
@@ -58,11 +59,7 @@ class ImageEditParamsBase(TypedDict, total=False):
"""
model: Union[str, ImageModel, None]
- """The model to use for image generation.
-
- Only `dall-e-2` and the GPT image models are supported. Defaults to `dall-e-2`
- unless a parameter specific to the GPT image models is used.
- """
+ """The model to use for image generation. Defaults to `gpt-image-1.5`."""
n: Optional[int]
"""The number of images to generate. Must be between 1 and 10."""
@@ -93,18 +90,18 @@ class ImageEditParamsBase(TypedDict, total=False):
"""
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
- """The quality of the image that will be generated.
+ """The quality of the image that will be generated for GPT image models.
- `high`, `medium` and `low` are only supported for the GPT image models.
- `dall-e-2` only supports `standard` quality. Defaults to `auto`.
+ Defaults to `auto`.
"""
response_format: Optional[Literal["url", "b64_json"]]
"""The format in which the generated images are returned.
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated. This parameter is only supported for `dall-e-2`, as
- the GPT image models always return base64-encoded images.
+ image has been generated. This parameter is only supported for `dall-e-2`
+ (default is `url` for `dall-e-2`), as GPT image models always return
+ base64-encoded images.
"""
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
diff --git a/portkey_ai/_vendor/openai/types/image_input_reference_param.py b/portkey_ai/_vendor/openai/types/image_input_reference_param.py
new file mode 100644
index 00000000..10656329
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/image_input_reference_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ImageInputReferenceParam"]
+
+
+class ImageInputReferenceParam(TypedDict, total=False):
+ file_id: str
+
+ image_url: str
+ """A fully qualified URL or base64-encoded data URL."""
diff --git a/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py b/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py
index d950f59f..6d8caf93 100644
--- a/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py
+++ b/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py
@@ -56,6 +56,7 @@ class CallAcceptParams(TypedDict, total=False):
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -66,6 +67,7 @@ class CallAcceptParams(TypedDict, total=False):
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py
index 2922405f..143cef67 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py
@@ -1,12 +1,24 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .realtime_audio_formats import RealtimeAudioFormats
-__all__ = ["RealtimeAudioConfigOutput"]
+__all__ = ["RealtimeAudioConfigOutput", "Voice", "VoiceID"]
+
+
+class VoiceID(BaseModel):
+ """Custom voice reference."""
+
+ id: str
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+Voice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], VoiceID
+]
class RealtimeAudioConfigOutput(BaseModel):
@@ -24,13 +36,12 @@ class RealtimeAudioConfigOutput(BaseModel):
generated, it's also possible to prompt the model to speak faster or slower.
"""
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
- ] = None
+ voice: Optional[Voice] = None
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
- session once the model has responded with audio at least once. We recommend
- `marin` and `cedar` for best quality.
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Voice cannot be
+ changed during the session once the model has responded with audio at least
+ once. We recommend `marin` and `cedar` for best quality.
"""
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py
index d04fd3a3..5d920f69 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py
@@ -3,11 +3,23 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
-__all__ = ["RealtimeAudioConfigOutputParam"]
+__all__ = ["RealtimeAudioConfigOutputParam", "Voice", "VoiceID"]
+
+
+class VoiceID(TypedDict, total=False):
+ """Custom voice reference."""
+
+ id: Required[str]
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+Voice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], VoiceID
+]
class RealtimeAudioConfigOutputParam(TypedDict, total=False):
@@ -25,11 +37,12 @@ class RealtimeAudioConfigOutputParam(TypedDict, total=False):
generated, it's also possible to prompt the model to speak faster or slower.
"""
- voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
+ voice: Voice
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
- session once the model has responded with audio at least once. We recommend
- `marin` and `cedar` for best quality.
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Voice cannot be
+ changed during the session once the model has responded with audio at least
+ once. We recommend `marin` and `cedar` for best quality.
"""
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py
index db02511a..78483572 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py
@@ -1,26 +1,38 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .realtime_audio_formats import RealtimeAudioFormats
-__all__ = ["RealtimeResponseCreateAudioOutput", "Output"]
+__all__ = ["RealtimeResponseCreateAudioOutput", "Output", "OutputVoice", "OutputVoiceID"]
+
+
+class OutputVoiceID(BaseModel):
+ """Custom voice reference."""
+
+ id: str
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+OutputVoice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], OutputVoiceID
+]
class Output(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the output audio."""
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
- ] = None
+ voice: Optional[OutputVoice] = None
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
- session once the model has responded with audio at least once.
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Voice cannot be
+ changed during the session once the model has responded with audio at least
+ once. We recommend `marin` and `cedar` for best quality.
"""
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py
index 22787ad1..bb930f54 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py
@@ -3,23 +3,37 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
-__all__ = ["RealtimeResponseCreateAudioOutputParam", "Output"]
+__all__ = ["RealtimeResponseCreateAudioOutputParam", "Output", "OutputVoice", "OutputVoiceID"]
+
+
+class OutputVoiceID(TypedDict, total=False):
+ """Custom voice reference."""
+
+ id: Required[str]
+ """The custom voice ID, e.g. `voice_1234`."""
+
+
+OutputVoice: TypeAlias = Union[
+ str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], OutputVoiceID
+]
class Output(TypedDict, total=False):
format: RealtimeAudioFormatsParam
"""The format of the output audio."""
- voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
+ voice: OutputVoice
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
- `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
- session once the model has responded with audio at least once.
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Voice cannot be
+ changed during the session once the model has responded with audio at least
+ once. We recommend `marin` and `cedar` for best quality.
"""
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py
index 72189e10..cb5eae42 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py
@@ -134,6 +134,9 @@ class RealtimeResponseCreateMcpTool(BaseModel):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: Optional[bool] = None
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py
index 68dd6bdb..dd8c2e01 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py
@@ -134,6 +134,9 @@ class RealtimeResponseCreateMcpToolParam(TypedDict, total=False):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: bool
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py
index 4a93c91c..e34136a1 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py
@@ -57,6 +57,7 @@ class RealtimeSessionCreateRequest(BaseModel):
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -67,6 +68,7 @@ class RealtimeSessionCreateRequest(BaseModel):
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py
index dee63d09..f3180c9e 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py
@@ -58,6 +58,7 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False):
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -68,6 +69,7 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False):
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py
index 15a200ca..3c3bef93 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py
@@ -363,6 +363,9 @@ class ToolMcpTool(BaseModel):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: Optional[bool] = None
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
@@ -460,6 +463,7 @@ class RealtimeSessionCreateResponse(BaseModel):
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -470,6 +474,7 @@ class RealtimeSessionCreateResponse(BaseModel):
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py
index 3cc404fe..21713092 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py
@@ -137,6 +137,9 @@ class Mcp(TypedDict, total=False):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: bool
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py
index 92aaee7f..55da5826 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py
@@ -137,6 +137,9 @@ class Mcp(BaseModel):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: Optional[bool] = None
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py
index 6889b4c3..15118f33 100644
--- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py
@@ -136,6 +136,9 @@ class Mcp(TypedDict, total=False):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: bool
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
diff --git a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py
index 504f91d5..01bae805 100644
--- a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py
+++ b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py
@@ -25,6 +25,9 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
item_id: str
"""The ID of the function call item."""
+ name: str
+ """The name of the function that was called."""
+
output_index: int
"""The index of the output item in the response."""
diff --git a/portkey_ai/_vendor/openai/types/responses/__init__.py b/portkey_ai/_vendor/openai/types/responses/__init__.py
index a4d939d9..5e6f45e9 100644
--- a/portkey_ai/_vendor/openai/types/responses/__init__.py
+++ b/portkey_ai/_vendor/openai/types/responses/__init__.py
@@ -6,11 +6,17 @@
from .response import Response as Response
from .tool_param import ToolParam as ToolParam
from .custom_tool import CustomTool as CustomTool
+from .local_skill import LocalSkill as LocalSkill
+from .inline_skill import InlineSkill as InlineSkill
from .computer_tool import ComputerTool as ComputerTool
from .function_tool import FunctionTool as FunctionTool
from .response_item import ResponseItem as ResponseItem
+from .container_auto import ContainerAuto as ContainerAuto
+from .namespace_tool import NamespaceTool as NamespaceTool
from .response_error import ResponseError as ResponseError
+from .response_input import ResponseInput as ResponseInput
from .response_usage import ResponseUsage as ResponseUsage
+from .computer_action import ComputerAction as ComputerAction
from .parsed_response import (
ParsedContent as ParsedContent,
ParsedResponse as ParsedResponse,
@@ -21,26 +27,36 @@
)
from .response_prompt import ResponsePrompt as ResponsePrompt
from .response_status import ResponseStatus as ResponseStatus
+from .skill_reference import SkillReference as SkillReference
from .tool_choice_mcp import ToolChoiceMcp as ToolChoiceMcp
from .web_search_tool import WebSearchTool as WebSearchTool
from .apply_patch_tool import ApplyPatchTool as ApplyPatchTool
from .file_search_tool import FileSearchTool as FileSearchTool
+from .tool_search_tool import ToolSearchTool as ToolSearchTool
from .custom_tool_param import CustomToolParam as CustomToolParam
+from .local_environment import LocalEnvironment as LocalEnvironment
+from .local_skill_param import LocalSkillParam as LocalSkillParam
from .tool_choice_shell import ToolChoiceShell as ToolChoiceShell
from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
from .compacted_response import CompactedResponse as CompactedResponse
from .easy_input_message import EasyInputMessage as EasyInputMessage
+from .inline_skill_param import InlineSkillParam as InlineSkillParam
from .response_item_list import ResponseItemList as ResponseItemList
from .tool_choice_custom import ToolChoiceCustom as ToolChoiceCustom
from .computer_tool_param import ComputerToolParam as ComputerToolParam
+from .container_reference import ContainerReference as ContainerReference
from .function_shell_tool import FunctionShellTool as FunctionShellTool
from .function_tool_param import FunctionToolParam as FunctionToolParam
+from .inline_skill_source import InlineSkillSource as InlineSkillSource
from .response_includable import ResponseIncludable as ResponseIncludable
from .response_input_file import ResponseInputFile as ResponseInputFile
from .response_input_item import ResponseInputItem as ResponseInputItem
from .response_input_text import ResponseInputText as ResponseInputText
from .tool_choice_allowed import ToolChoiceAllowed as ToolChoiceAllowed
from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
+from .computer_action_list import ComputerActionList as ComputerActionList
+from .container_auto_param import ContainerAutoParam as ContainerAutoParam
+from .namespace_tool_param import NamespaceToolParam as NamespaceToolParam
from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
from .response_input_audio import ResponseInputAudio as ResponseInputAudio
from .response_input_image import ResponseInputImage as ResponseInputImage
@@ -49,10 +65,12 @@
from .response_output_text import ResponseOutputText as ResponseOutputText
from .response_text_config import ResponseTextConfig as ResponseTextConfig
from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
+from .computer_action_param import ComputerActionParam as ComputerActionParam
from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
from .response_prompt_param import ResponsePromptParam as ResponsePromptParam
from .response_queued_event import ResponseQueuedEvent as ResponseQueuedEvent
from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent
+from .skill_reference_param import SkillReferenceParam as SkillReferenceParam
from .tool_choice_mcp_param import ToolChoiceMcpParam as ToolChoiceMcpParam
from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam
from .apply_patch_tool_param import ApplyPatchToolParam as ApplyPatchToolParam
@@ -61,6 +79,10 @@
from .response_create_params import ResponseCreateParams as ResponseCreateParams
from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent
from .response_input_content import ResponseInputContent as ResponseInputContent
+from .responses_client_event import ResponsesClientEvent as ResponsesClientEvent
+from .responses_server_event import ResponsesServerEvent as ResponsesServerEvent
+from .tool_search_tool_param import ToolSearchToolParam as ToolSearchToolParam
+from .local_environment_param import LocalEnvironmentParam as LocalEnvironmentParam
from .response_compact_params import ResponseCompactParams as ResponseCompactParams
from .response_output_message import ResponseOutputMessage as ResponseOutputMessage
from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal
@@ -76,7 +98,10 @@
from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
from .tool_choice_custom_param import ToolChoiceCustomParam as ToolChoiceCustomParam
+from .computer_use_preview_tool import ComputerUsePreviewTool as ComputerUsePreviewTool
+from .container_reference_param import ContainerReferenceParam as ContainerReferenceParam
from .function_shell_tool_param import FunctionShellToolParam as FunctionShellToolParam
+from .inline_skill_source_param import InlineSkillSourceParam as InlineSkillSourceParam
from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
from .response_custom_tool_call import ResponseCustomToolCall as ResponseCustomToolCall
from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent
@@ -84,12 +109,15 @@
from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam
from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam
from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .response_tool_search_call import ResponseToolSearchCall as ResponseToolSearchCall
from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam
+from .computer_action_list_param import ComputerActionListParam as ComputerActionListParam
from .input_token_count_response import InputTokenCountResponse as InputTokenCountResponse
from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
from .response_input_audio_param import ResponseInputAudioParam as ResponseInputAudioParam
from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
+from .response_local_environment import ResponseLocalEnvironment as ResponseLocalEnvironment
from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
@@ -101,10 +129,12 @@
from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
from .response_input_text_content import ResponseInputTextContent as ResponseInputTextContent
from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent
+from .response_container_reference import ResponseContainerReference as ResponseContainerReference
from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch
from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam
from .response_input_image_content import ResponseInputImageContent as ResponseInputImageContent
from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
+from .responses_client_event_param import ResponsesClientEventParam as ResponsesClientEventParam
from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
@@ -112,22 +142,28 @@
from .web_search_preview_tool_param import WebSearchPreviewToolParam as WebSearchPreviewToolParam
from .response_apply_patch_tool_call import ResponseApplyPatchToolCall as ResponseApplyPatchToolCall
from .response_compaction_item_param import ResponseCompactionItemParam as ResponseCompactionItemParam
+from .response_custom_tool_call_item import ResponseCustomToolCallItem as ResponseCustomToolCallItem
from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent
+from .computer_use_preview_tool_param import ComputerUsePreviewToolParam as ComputerUsePreviewToolParam
from .response_custom_tool_call_param import ResponseCustomToolCallParam as ResponseCustomToolCallParam
from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
from .response_custom_tool_call_output import ResponseCustomToolCallOutput as ResponseCustomToolCallOutput
from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
+from .response_tool_search_output_item import ResponseToolSearchOutputItem as ResponseToolSearchOutputItem
+from .container_network_policy_disabled import ContainerNetworkPolicyDisabled as ContainerNetworkPolicyDisabled
from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
+from .response_conversation_param_param import ResponseConversationParamParam as ResponseConversationParamParam
from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam
from .response_function_shell_tool_call import ResponseFunctionShellToolCall as ResponseFunctionShellToolCall
from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam
from .response_input_file_content_param import ResponseInputFileContentParam as ResponseInputFileContentParam
from .response_input_text_content_param import ResponseInputTextContentParam as ResponseInputTextContentParam
from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent
+from .container_network_policy_allowlist import ContainerNetworkPolicyAllowlist as ContainerNetworkPolicyAllowlist
from .response_function_call_output_item import ResponseFunctionCallOutputItem as ResponseFunctionCallOutputItem
from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam
from .response_input_image_content_param import ResponseInputImageContentParam as ResponseInputImageContentParam
@@ -144,12 +180,22 @@
from .response_audio_transcript_delta_event import (
ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
)
+from .response_custom_tool_call_output_item import ResponseCustomToolCallOutputItem as ResponseCustomToolCallOutputItem
+from .container_network_policy_domain_secret import (
+ ContainerNetworkPolicyDomainSecret as ContainerNetworkPolicyDomainSecret,
+)
from .response_custom_tool_call_output_param import (
ResponseCustomToolCallOutputParam as ResponseCustomToolCallOutputParam,
)
from .response_mcp_call_arguments_done_event import (
ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent,
)
+from .response_tool_search_output_item_param import (
+ ResponseToolSearchOutputItemParam as ResponseToolSearchOutputItemParam,
+)
+from .container_network_policy_disabled_param import (
+ ContainerNetworkPolicyDisabledParam as ContainerNetworkPolicyDisabledParam,
+)
from .response_computer_tool_call_output_item import (
ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,
)
@@ -171,6 +217,9 @@
from .response_mcp_list_tools_completed_event import (
ResponseMcpListToolsCompletedEvent as ResponseMcpListToolsCompletedEvent,
)
+from .container_network_policy_allowlist_param import (
+ ContainerNetworkPolicyAllowlistParam as ContainerNetworkPolicyAllowlistParam,
+)
from .response_function_call_output_item_param import (
ResponseFunctionCallOutputItemParam as ResponseFunctionCallOutputItemParam,
)
@@ -240,9 +289,15 @@
from .response_reasoning_summary_text_delta_event import (
ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent,
)
+from .container_network_policy_domain_secret_param import (
+ ContainerNetworkPolicyDomainSecretParam as ContainerNetworkPolicyDomainSecretParam,
+)
from .response_function_call_arguments_delta_event import (
ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
)
+from .response_tool_search_output_item_param_param import (
+ ResponseToolSearchOutputItemParamParam as ResponseToolSearchOutputItemParamParam,
+)
from .response_computer_tool_call_output_screenshot import (
ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot,
)
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_action.py b/portkey_ai/_vendor/openai/types/responses/computer_action.py
new file mode 100644
index 00000000..f7a21d2a
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_action.py
@@ -0,0 +1,196 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+ "ComputerAction",
+ "Click",
+ "DoubleClick",
+ "Drag",
+ "DragPath",
+ "Keypress",
+ "Move",
+ "Screenshot",
+ "Scroll",
+ "Type",
+ "Wait",
+]
+
+
+class Click(BaseModel):
+ """A click action."""
+
+ button: Literal["left", "right", "wheel", "back", "forward"]
+ """Indicates which mouse button was pressed during the click.
+
+ One of `left`, `right`, `wheel`, `back`, or `forward`.
+ """
+
+ type: Literal["click"]
+ """Specifies the event type. For a click action, this property is always `click`."""
+
+ x: int
+ """The x-coordinate where the click occurred."""
+
+ y: int
+ """The y-coordinate where the click occurred."""
+
+ keys: Optional[List[str]] = None
+ """The keys being held while clicking."""
+
+
+class DoubleClick(BaseModel):
+ """A double click action."""
+
+ keys: Optional[List[str]] = None
+ """The keys being held while double-clicking."""
+
+ type: Literal["double_click"]
+ """Specifies the event type.
+
+ For a double click action, this property is always set to `double_click`.
+ """
+
+ x: int
+ """The x-coordinate where the double click occurred."""
+
+ y: int
+ """The y-coordinate where the double click occurred."""
+
+
+class DragPath(BaseModel):
+ """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`."""
+
+ x: int
+ """The x-coordinate."""
+
+ y: int
+ """The y-coordinate."""
+
+
+class Drag(BaseModel):
+ """A drag action."""
+
+ path: List[DragPath]
+ """An array of coordinates representing the path of the drag action.
+
+ Coordinates will appear as an array of objects, eg
+
+ ```
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]
+ ```
+ """
+
+ type: Literal["drag"]
+ """Specifies the event type.
+
+ For a drag action, this property is always set to `drag`.
+ """
+
+ keys: Optional[List[str]] = None
+ """The keys being held while dragging the mouse."""
+
+
+class Keypress(BaseModel):
+ """A collection of keypresses the model would like to perform."""
+
+ keys: List[str]
+ """The combination of keys the model is requesting to be pressed.
+
+ This is an array of strings, each representing a key.
+ """
+
+ type: Literal["keypress"]
+ """Specifies the event type.
+
+ For a keypress action, this property is always set to `keypress`.
+ """
+
+
+class Move(BaseModel):
+ """A mouse move action."""
+
+ type: Literal["move"]
+ """Specifies the event type.
+
+ For a move action, this property is always set to `move`.
+ """
+
+ x: int
+ """The x-coordinate to move to."""
+
+ y: int
+ """The y-coordinate to move to."""
+
+ keys: Optional[List[str]] = None
+ """The keys being held while moving the mouse."""
+
+
+class Screenshot(BaseModel):
+ """A screenshot action."""
+
+ type: Literal["screenshot"]
+ """Specifies the event type.
+
+ For a screenshot action, this property is always set to `screenshot`.
+ """
+
+
+class Scroll(BaseModel):
+ """A scroll action."""
+
+ scroll_x: int
+ """The horizontal scroll distance."""
+
+ scroll_y: int
+ """The vertical scroll distance."""
+
+ type: Literal["scroll"]
+ """Specifies the event type.
+
+ For a scroll action, this property is always set to `scroll`.
+ """
+
+ x: int
+ """The x-coordinate where the scroll occurred."""
+
+ y: int
+ """The y-coordinate where the scroll occurred."""
+
+ keys: Optional[List[str]] = None
+ """The keys being held while scrolling."""
+
+
+class Type(BaseModel):
+ """An action to type in text."""
+
+ text: str
+ """The text to type."""
+
+ type: Literal["type"]
+ """Specifies the event type.
+
+ For a type action, this property is always set to `type`.
+ """
+
+
+class Wait(BaseModel):
+ """A wait action."""
+
+ type: Literal["wait"]
+ """Specifies the event type.
+
+ For a wait action, this property is always set to `wait`.
+ """
+
+
+ComputerAction: TypeAlias = Annotated[
+ Union[Click, DoubleClick, Drag, Keypress, Move, Screenshot, Scroll, Type, Wait], PropertyInfo(discriminator="type")
+]
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_action_list.py b/portkey_ai/_vendor/openai/types/responses/computer_action_list.py
new file mode 100644
index 00000000..0198c6e8
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_action_list.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .computer_action import ComputerAction
+
+__all__ = ["ComputerActionList"]
+
+ComputerActionList: TypeAlias = List[ComputerAction]
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_action_list_param.py b/portkey_ai/_vendor/openai/types/responses/computer_action_list_param.py
new file mode 100644
index 00000000..66a03520
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_action_list_param.py
@@ -0,0 +1,198 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = [
+ "ComputerActionListParam",
+ "ComputerActionParam",
+ "Click",
+ "DoubleClick",
+ "Drag",
+ "DragPath",
+ "Keypress",
+ "Move",
+ "Screenshot",
+ "Scroll",
+ "Type",
+ "Wait",
+]
+
+
+class Click(TypedDict, total=False):
+ """A click action."""
+
+ button: Required[Literal["left", "right", "wheel", "back", "forward"]]
+ """Indicates which mouse button was pressed during the click.
+
+ One of `left`, `right`, `wheel`, `back`, or `forward`.
+ """
+
+ type: Required[Literal["click"]]
+ """Specifies the event type. For a click action, this property is always `click`."""
+
+ x: Required[int]
+ """The x-coordinate where the click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the click occurred."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while clicking."""
+
+
+class DoubleClick(TypedDict, total=False):
+ """A double click action."""
+
+ keys: Required[Optional[SequenceNotStr[str]]]
+ """The keys being held while double-clicking."""
+
+ type: Required[Literal["double_click"]]
+ """Specifies the event type.
+
+ For a double click action, this property is always set to `double_click`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the double click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the double click occurred."""
+
+
+class DragPath(TypedDict, total=False):
+ """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`."""
+
+ x: Required[int]
+ """The x-coordinate."""
+
+ y: Required[int]
+ """The y-coordinate."""
+
+
+class Drag(TypedDict, total=False):
+ """A drag action."""
+
+ path: Required[Iterable[DragPath]]
+ """An array of coordinates representing the path of the drag action.
+
+ Coordinates will appear as an array of objects, eg
+
+ ```
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]
+ ```
+ """
+
+ type: Required[Literal["drag"]]
+ """Specifies the event type.
+
+ For a drag action, this property is always set to `drag`.
+ """
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while dragging the mouse."""
+
+
+class Keypress(TypedDict, total=False):
+ """A collection of keypresses the model would like to perform."""
+
+ keys: Required[SequenceNotStr[str]]
+ """The combination of keys the model is requesting to be pressed.
+
+ This is an array of strings, each representing a key.
+ """
+
+ type: Required[Literal["keypress"]]
+ """Specifies the event type.
+
+ For a keypress action, this property is always set to `keypress`.
+ """
+
+
+class Move(TypedDict, total=False):
+ """A mouse move action."""
+
+ type: Required[Literal["move"]]
+ """Specifies the event type.
+
+ For a move action, this property is always set to `move`.
+ """
+
+ x: Required[int]
+ """The x-coordinate to move to."""
+
+ y: Required[int]
+ """The y-coordinate to move to."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while moving the mouse."""
+
+
+class Screenshot(TypedDict, total=False):
+ """A screenshot action."""
+
+ type: Required[Literal["screenshot"]]
+ """Specifies the event type.
+
+ For a screenshot action, this property is always set to `screenshot`.
+ """
+
+
+class Scroll(TypedDict, total=False):
+ """A scroll action."""
+
+ scroll_x: Required[int]
+ """The horizontal scroll distance."""
+
+ scroll_y: Required[int]
+ """The vertical scroll distance."""
+
+ type: Required[Literal["scroll"]]
+ """Specifies the event type.
+
+ For a scroll action, this property is always set to `scroll`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the scroll occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the scroll occurred."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while scrolling."""
+
+
+class Type(TypedDict, total=False):
+ """An action to type in text."""
+
+ text: Required[str]
+ """The text to type."""
+
+ type: Required[Literal["type"]]
+ """Specifies the event type.
+
+ For a type action, this property is always set to `type`.
+ """
+
+
+class Wait(TypedDict, total=False):
+ """A wait action."""
+
+ type: Required[Literal["wait"]]
+ """Specifies the event type.
+
+ For a wait action, this property is always set to `wait`.
+ """
+
+
+ComputerActionParam: TypeAlias = Union[Click, DoubleClick, Drag, Keypress, Move, Screenshot, Scroll, Type, Wait]
+
+ComputerActionListParam: TypeAlias = List[ComputerActionParam]
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_action_param.py b/portkey_ai/_vendor/openai/types/responses/computer_action_param.py
new file mode 100644
index 00000000..f60c72b1
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_action_param.py
@@ -0,0 +1,195 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = [
+ "ComputerActionParam",
+ "Click",
+ "DoubleClick",
+ "Drag",
+ "DragPath",
+ "Keypress",
+ "Move",
+ "Screenshot",
+ "Scroll",
+ "Type",
+ "Wait",
+]
+
+
+class Click(TypedDict, total=False):
+ """A click action."""
+
+ button: Required[Literal["left", "right", "wheel", "back", "forward"]]
+ """Indicates which mouse button was pressed during the click.
+
+ One of `left`, `right`, `wheel`, `back`, or `forward`.
+ """
+
+ type: Required[Literal["click"]]
+ """Specifies the event type. For a click action, this property is always `click`."""
+
+ x: Required[int]
+ """The x-coordinate where the click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the click occurred."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while clicking."""
+
+
+class DoubleClick(TypedDict, total=False):
+ """A double click action."""
+
+ keys: Required[Optional[SequenceNotStr[str]]]
+ """The keys being held while double-clicking."""
+
+ type: Required[Literal["double_click"]]
+ """Specifies the event type.
+
+ For a double click action, this property is always set to `double_click`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the double click occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the double click occurred."""
+
+
+class DragPath(TypedDict, total=False):
+ """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`."""
+
+ x: Required[int]
+ """The x-coordinate."""
+
+ y: Required[int]
+ """The y-coordinate."""
+
+
+class Drag(TypedDict, total=False):
+ """A drag action."""
+
+ path: Required[Iterable[DragPath]]
+ """An array of coordinates representing the path of the drag action.
+
+ Coordinates will appear as an array of objects, eg
+
+ ```
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]
+ ```
+ """
+
+ type: Required[Literal["drag"]]
+ """Specifies the event type.
+
+ For a drag action, this property is always set to `drag`.
+ """
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while dragging the mouse."""
+
+
+class Keypress(TypedDict, total=False):
+ """A collection of keypresses the model would like to perform."""
+
+ keys: Required[SequenceNotStr[str]]
+ """The combination of keys the model is requesting to be pressed.
+
+ This is an array of strings, each representing a key.
+ """
+
+ type: Required[Literal["keypress"]]
+ """Specifies the event type.
+
+ For a keypress action, this property is always set to `keypress`.
+ """
+
+
+class Move(TypedDict, total=False):
+ """A mouse move action."""
+
+ type: Required[Literal["move"]]
+ """Specifies the event type.
+
+ For a move action, this property is always set to `move`.
+ """
+
+ x: Required[int]
+ """The x-coordinate to move to."""
+
+ y: Required[int]
+ """The y-coordinate to move to."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while moving the mouse."""
+
+
+class Screenshot(TypedDict, total=False):
+ """A screenshot action."""
+
+ type: Required[Literal["screenshot"]]
+ """Specifies the event type.
+
+ For a screenshot action, this property is always set to `screenshot`.
+ """
+
+
+class Scroll(TypedDict, total=False):
+ """A scroll action."""
+
+ scroll_x: Required[int]
+ """The horizontal scroll distance."""
+
+ scroll_y: Required[int]
+ """The vertical scroll distance."""
+
+ type: Required[Literal["scroll"]]
+ """Specifies the event type.
+
+ For a scroll action, this property is always set to `scroll`.
+ """
+
+ x: Required[int]
+ """The x-coordinate where the scroll occurred."""
+
+ y: Required[int]
+ """The y-coordinate where the scroll occurred."""
+
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while scrolling."""
+
+
+class Type(TypedDict, total=False):
+ """An action to type in text."""
+
+ text: Required[str]
+ """The text to type."""
+
+ type: Required[Literal["type"]]
+ """Specifies the event type.
+
+ For a type action, this property is always set to `type`.
+ """
+
+
+class Wait(TypedDict, total=False):
+ """A wait action."""
+
+ type: Required[Literal["wait"]]
+ """Specifies the event type.
+
+ For a wait action, this property is always set to `wait`.
+ """
+
+
+ComputerActionParam: TypeAlias = Union[Click, DoubleClick, Drag, Keypress, Move, Screenshot, Scroll, Type, Wait]
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_tool.py b/portkey_ai/_vendor/openai/types/responses/computer_tool.py
index 22871c84..392faa9e 100644
--- a/portkey_ai/_vendor/openai/types/responses/computer_tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/computer_tool.py
@@ -13,14 +13,5 @@ class ComputerTool(BaseModel):
Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
"""
- display_height: int
- """The height of the computer display."""
-
- display_width: int
- """The width of the computer display."""
-
- environment: Literal["windows", "mac", "linux", "ubuntu", "browser"]
- """The type of computer environment to control."""
-
- type: Literal["computer_use_preview"]
- """The type of the computer use tool. Always `computer_use_preview`."""
+ type: Literal["computer"]
+ """The type of the computer tool. Always `computer`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py b/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py
index cdf75a43..b5931dea 100644
--- a/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py
@@ -13,14 +13,5 @@ class ComputerToolParam(TypedDict, total=False):
Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
"""
- display_height: Required[int]
- """The height of the computer display."""
-
- display_width: Required[int]
- """The width of the computer display."""
-
- environment: Required[Literal["windows", "mac", "linux", "ubuntu", "browser"]]
- """The type of computer environment to control."""
-
- type: Required[Literal["computer_use_preview"]]
- """The type of the computer use tool. Always `computer_use_preview`."""
+ type: Required[Literal["computer"]]
+ """The type of the computer tool. Always `computer`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool.py b/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool.py
new file mode 100644
index 00000000..686860e2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerUsePreviewTool"]
+
+
+class ComputerUsePreviewTool(BaseModel):
+ """A tool that controls a virtual computer.
+
+ Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
+ """
+
+ display_height: int
+ """The height of the computer display."""
+
+ display_width: int
+ """The width of the computer display."""
+
+ environment: Literal["windows", "mac", "linux", "ubuntu", "browser"]
+ """The type of computer environment to control."""
+
+ type: Literal["computer_use_preview"]
+ """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool_param.py b/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool_param.py
new file mode 100644
index 00000000..2611c729
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/computer_use_preview_tool_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ComputerUsePreviewToolParam"]
+
+
+class ComputerUsePreviewToolParam(TypedDict, total=False):
+ """A tool that controls a virtual computer.
+
+ Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
+ """
+
+ display_height: Required[int]
+ """The height of the computer display."""
+
+ display_width: Required[int]
+ """The width of the computer display."""
+
+ environment: Required[Literal["windows", "mac", "linux", "ubuntu", "browser"]]
+ """The type of computer environment to control."""
+
+ type: Required[Literal["computer_use_preview"]]
+ """The type of the computer use tool. Always `computer_use_preview`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_auto.py b/portkey_ai/_vendor/openai/types/responses/container_auto.py
new file mode 100644
index 00000000..06b8a285
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_auto.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .inline_skill import InlineSkill
+from .skill_reference import SkillReference
+from .container_network_policy_disabled import ContainerNetworkPolicyDisabled
+from .container_network_policy_allowlist import ContainerNetworkPolicyAllowlist
+
+__all__ = ["ContainerAuto", "NetworkPolicy", "Skill"]
+
+NetworkPolicy: TypeAlias = Annotated[
+ Union[ContainerNetworkPolicyDisabled, ContainerNetworkPolicyAllowlist], PropertyInfo(discriminator="type")
+]
+
+Skill: TypeAlias = Annotated[Union[SkillReference, InlineSkill], PropertyInfo(discriminator="type")]
+
+
+class ContainerAuto(BaseModel):
+ type: Literal["container_auto"]
+ """Automatically creates a container for this request"""
+
+ file_ids: Optional[List[str]] = None
+ """An optional list of uploaded files to make available to your code."""
+
+ memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
+ """The memory limit for the container."""
+
+ network_policy: Optional[NetworkPolicy] = None
+ """Network access policy for the container."""
+
+ skills: Optional[List[Skill]] = None
+ """An optional list of skills referenced by id or inline data."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_auto_param.py b/portkey_ai/_vendor/openai/types/responses/container_auto_param.py
new file mode 100644
index 00000000..b9e8bb52
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_auto_param.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+from .inline_skill_param import InlineSkillParam
+from .skill_reference_param import SkillReferenceParam
+from .container_network_policy_disabled_param import ContainerNetworkPolicyDisabledParam
+from .container_network_policy_allowlist_param import ContainerNetworkPolicyAllowlistParam
+
+__all__ = ["ContainerAutoParam", "NetworkPolicy", "Skill"]
+
+NetworkPolicy: TypeAlias = Union[ContainerNetworkPolicyDisabledParam, ContainerNetworkPolicyAllowlistParam]
+
+Skill: TypeAlias = Union[SkillReferenceParam, InlineSkillParam]
+
+
+class ContainerAutoParam(TypedDict, total=False):
+ type: Required[Literal["container_auto"]]
+ """Automatically creates a container for this request"""
+
+ file_ids: SequenceNotStr[str]
+ """An optional list of uploaded files to make available to your code."""
+
+ memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]]
+ """The memory limit for the container."""
+
+ network_policy: NetworkPolicy
+ """Network access policy for the container."""
+
+ skills: Iterable[Skill]
+ """An optional list of skills referenced by id or inline data."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist.py
new file mode 100644
index 00000000..caa2565e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .container_network_policy_domain_secret import ContainerNetworkPolicyDomainSecret
+
+__all__ = ["ContainerNetworkPolicyAllowlist"]
+
+
+class ContainerNetworkPolicyAllowlist(BaseModel):
+ allowed_domains: List[str]
+ """A list of allowed domains when type is `allowlist`."""
+
+ type: Literal["allowlist"]
+ """Allow outbound network access only to specified domains. Always `allowlist`."""
+
+ domain_secrets: Optional[List[ContainerNetworkPolicyDomainSecret]] = None
+ """Optional domain-scoped secrets for allowlisted domains."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist_param.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist_param.py
new file mode 100644
index 00000000..583b761e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_allowlist_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import SequenceNotStr
+from .container_network_policy_domain_secret_param import ContainerNetworkPolicyDomainSecretParam
+
+__all__ = ["ContainerNetworkPolicyAllowlistParam"]
+
+
+class ContainerNetworkPolicyAllowlistParam(TypedDict, total=False):
+ allowed_domains: Required[SequenceNotStr[str]]
+ """A list of allowed domains when type is `allowlist`."""
+
+ type: Required[Literal["allowlist"]]
+ """Allow outbound network access only to specified domains. Always `allowlist`."""
+
+ domain_secrets: Iterable[ContainerNetworkPolicyDomainSecretParam]
+ """Optional domain-scoped secrets for allowlisted domains."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled.py
new file mode 100644
index 00000000..47891aa3
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ContainerNetworkPolicyDisabled"]
+
+
+class ContainerNetworkPolicyDisabled(BaseModel):
+ type: Literal["disabled"]
+ """Disable outbound network access. Always `disabled`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled_param.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled_param.py
new file mode 100644
index 00000000..0db7d8ff
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_disabled_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ContainerNetworkPolicyDisabledParam"]
+
+
+class ContainerNetworkPolicyDisabledParam(TypedDict, total=False):
+ type: Required[Literal["disabled"]]
+ """Disable outbound network access. Always `disabled`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret.py
new file mode 100644
index 00000000..d0e18ba9
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["ContainerNetworkPolicyDomainSecret"]
+
+
+class ContainerNetworkPolicyDomainSecret(BaseModel):
+ domain: str
+ """The domain associated with the secret."""
+
+ name: str
+ """The name of the secret to inject for the domain."""
+
+ value: str
+ """The secret value to inject for the domain."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret_param.py b/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret_param.py
new file mode 100644
index 00000000..619fbde7
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_network_policy_domain_secret_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ContainerNetworkPolicyDomainSecretParam"]
+
+
+class ContainerNetworkPolicyDomainSecretParam(TypedDict, total=False):
+ domain: Required[str]
+ """The domain associated with the secret."""
+
+ name: Required[str]
+ """The name of the secret to inject for the domain."""
+
+ value: Required[str]
+ """The secret value to inject for the domain."""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_reference.py b/portkey_ai/_vendor/openai/types/responses/container_reference.py
new file mode 100644
index 00000000..17065bf3
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_reference.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ContainerReference"]
+
+
+class ContainerReference(BaseModel):
+ container_id: str
+ """The ID of the referenced container."""
+
+ type: Literal["container_reference"]
+ """References a container created with the /v1/containers endpoint"""
diff --git a/portkey_ai/_vendor/openai/types/responses/container_reference_param.py b/portkey_ai/_vendor/openai/types/responses/container_reference_param.py
new file mode 100644
index 00000000..1da2fb67
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/container_reference_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ContainerReferenceParam"]
+
+
+class ContainerReferenceParam(TypedDict, total=False):
+ container_id: Required[str]
+ """The ID of the referenced container."""
+
+ type: Required[Literal["container_reference"]]
+ """References a container created with the /v1/containers endpoint"""
diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool.py b/portkey_ai/_vendor/openai/types/responses/custom_tool.py
index 1ca401a4..017c6d69 100644
--- a/portkey_ai/_vendor/openai/types/responses/custom_tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/custom_tool.py
@@ -21,6 +21,9 @@ class CustomTool(BaseModel):
type: Literal["custom"]
"""The type of the custom tool. Always `custom`."""
+ defer_loading: Optional[bool] = None
+ """Whether this tool should be deferred and discovered via tool search."""
+
description: Optional[str] = None
"""Optional description of the custom tool, used to provide more context."""
diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py
index 4ce43cdf..e6400136 100644
--- a/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py
@@ -21,6 +21,9 @@ class CustomToolParam(TypedDict, total=False):
type: Required[Literal["custom"]]
"""The type of the custom tool. Always `custom`."""
+ defer_loading: bool
+ """Whether this tool should be deferred and discovered via tool search."""
+
description: str
"""Optional description of the custom tool, used to provide more context."""
diff --git a/portkey_ai/_vendor/openai/types/responses/easy_input_message.py b/portkey_ai/_vendor/openai/types/responses/easy_input_message.py
index 9a36a6b0..aa97827b 100644
--- a/portkey_ai/_vendor/openai/types/responses/easy_input_message.py
+++ b/portkey_ai/_vendor/openai/types/responses/easy_input_message.py
@@ -30,5 +30,13 @@ class EasyInputMessage(BaseModel):
One of `user`, `assistant`, `system`, or `developer`.
"""
+ phase: Optional[Literal["commentary", "final_answer"]] = None
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
+ """
+
type: Optional[Literal["message"]] = None
"""The type of the message input. Always `message`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py b/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py
index 0a382bdd..bfc8d577 100644
--- a/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union
+from typing import Union, Optional
from typing_extensions import Literal, Required, TypedDict
from .response_input_message_content_list_param import ResponseInputMessageContentListParam
@@ -31,5 +31,13 @@ class EasyInputMessageParam(TypedDict, total=False):
One of `user`, `assistant`, `system`, or `developer`.
"""
+ phase: Optional[Literal["commentary", "final_answer"]]
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
+ """
+
type: Literal["message"]
"""The type of the message input. Always `message`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py b/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py
index 5b237aa7..17d6bb36 100644
--- a/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py
@@ -1,10 +1,19 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+from ..._utils import PropertyInfo
from ..._models import BaseModel
+from .container_auto import ContainerAuto
+from .local_environment import LocalEnvironment
+from .container_reference import ContainerReference
-__all__ = ["FunctionShellTool"]
+__all__ = ["FunctionShellTool", "Environment"]
+
+Environment: TypeAlias = Annotated[
+ Union[ContainerAuto, LocalEnvironment, ContainerReference, None], PropertyInfo(discriminator="type")
+]
class FunctionShellTool(BaseModel):
@@ -12,3 +21,5 @@ class FunctionShellTool(BaseModel):
type: Literal["shell"]
"""The type of the shell tool. Always `shell`."""
+
+ environment: Optional[Environment] = None
diff --git a/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py b/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py
index c640ddab..b8464ed3 100644
--- a/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py
@@ -2,9 +2,16 @@
from __future__ import annotations
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["FunctionShellToolParam"]
+from .container_auto_param import ContainerAutoParam
+from .local_environment_param import LocalEnvironmentParam
+from .container_reference_param import ContainerReferenceParam
+
+__all__ = ["FunctionShellToolParam", "Environment"]
+
+Environment: TypeAlias = Union[ContainerAutoParam, LocalEnvironmentParam, ContainerReferenceParam]
class FunctionShellToolParam(TypedDict, total=False):
@@ -12,3 +19,5 @@ class FunctionShellToolParam(TypedDict, total=False):
type: Required[Literal["shell"]]
"""The type of the shell tool. Always `shell`."""
+
+ environment: Optional[Environment]
diff --git a/portkey_ai/_vendor/openai/types/responses/function_tool.py b/portkey_ai/_vendor/openai/types/responses/function_tool.py
index b0827a9f..6e9751ad 100644
--- a/portkey_ai/_vendor/openai/types/responses/function_tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/function_tool.py
@@ -26,6 +26,9 @@ class FunctionTool(BaseModel):
type: Literal["function"]
"""The type of the function tool. Always `function`."""
+ defer_loading: Optional[bool] = None
+ """Whether this function is deferred and loaded via tool search."""
+
description: Optional[str] = None
"""A description of the function.
diff --git a/portkey_ai/_vendor/openai/types/responses/function_tool_param.py b/portkey_ai/_vendor/openai/types/responses/function_tool_param.py
index ba0a3168..e7978b44 100644
--- a/portkey_ai/_vendor/openai/types/responses/function_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/function_tool_param.py
@@ -26,6 +26,9 @@ class FunctionToolParam(TypedDict, total=False):
type: Required[Literal["function"]]
"""The type of the function tool. Always `function`."""
+ defer_loading: bool
+ """Whether this function is deferred and loaded via tool search."""
+
description: Optional[str]
"""A description of the function.
diff --git a/portkey_ai/_vendor/openai/types/responses/inline_skill.py b/portkey_ai/_vendor/openai/types/responses/inline_skill.py
new file mode 100644
index 00000000..87c7ca5f
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/inline_skill.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .inline_skill_source import InlineSkillSource
+
+__all__ = ["InlineSkill"]
+
+
+class InlineSkill(BaseModel):
+ description: str
+ """The description of the skill."""
+
+ name: str
+ """The name of the skill."""
+
+ source: InlineSkillSource
+ """Inline skill payload"""
+
+ type: Literal["inline"]
+ """Defines an inline skill for this request."""
diff --git a/portkey_ai/_vendor/openai/types/responses/inline_skill_param.py b/portkey_ai/_vendor/openai/types/responses/inline_skill_param.py
new file mode 100644
index 00000000..f9181693
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/inline_skill_param.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .inline_skill_source_param import InlineSkillSourceParam
+
+__all__ = ["InlineSkillParam"]
+
+
+class InlineSkillParam(TypedDict, total=False):
+ description: Required[str]
+ """The description of the skill."""
+
+ name: Required[str]
+ """The name of the skill."""
+
+ source: Required[InlineSkillSourceParam]
+ """Inline skill payload"""
+
+ type: Required[Literal["inline"]]
+ """Defines an inline skill for this request."""
diff --git a/portkey_ai/_vendor/openai/types/responses/inline_skill_source.py b/portkey_ai/_vendor/openai/types/responses/inline_skill_source.py
new file mode 100644
index 00000000..2586cdb0
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/inline_skill_source.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InlineSkillSource"]
+
+
+class InlineSkillSource(BaseModel):
+ """Inline skill payload"""
+
+ data: str
+ """Base64-encoded skill zip bundle."""
+
+ media_type: Literal["application/zip"]
+ """The media type of the inline skill payload. Must be `application/zip`."""
+
+ type: Literal["base64"]
+ """The type of the inline skill source. Must be `base64`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/inline_skill_source_param.py b/portkey_ai/_vendor/openai/types/responses/inline_skill_source_param.py
new file mode 100644
index 00000000..b14e63e2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/inline_skill_source_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InlineSkillSourceParam"]
+
+
+class InlineSkillSourceParam(TypedDict, total=False):
+ """Inline skill payload"""
+
+ data: Required[str]
+ """Base64-encoded skill zip bundle."""
+
+ media_type: Required[Literal["application/zip"]]
+ """The media type of the inline skill payload. Must be `application/zip`."""
+
+ type: Required[Literal["base64"]]
+ """The type of the inline skill source. Must be `base64`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py b/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py
index 97ee4bf6..f8a20265 100644
--- a/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py
+++ b/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py
@@ -15,8 +15,8 @@
from .response_input_item_param import ResponseInputItemParam
from .tool_choice_allowed_param import ToolChoiceAllowedParam
from .tool_choice_function_param import ToolChoiceFunctionParam
-from .response_conversation_param import ResponseConversationParam
from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam
+from .response_conversation_param_param import ResponseConversationParamParam
from .response_format_text_config_param import ResponseFormatTextConfigParam
__all__ = ["InputTokenCountParams", "Conversation", "Text", "ToolChoice"]
@@ -97,7 +97,7 @@ class InputTokenCountParams(TypedDict, total=False):
"""
-Conversation: TypeAlias = Union[str, ResponseConversationParam]
+Conversation: TypeAlias = Union[str, ResponseConversationParamParam]
class Text(TypedDict, total=False):
diff --git a/portkey_ai/_vendor/openai/types/responses/local_environment.py b/portkey_ai/_vendor/openai/types/responses/local_environment.py
new file mode 100644
index 00000000..3ebdaa7e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/local_environment.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .local_skill import LocalSkill
+
+__all__ = ["LocalEnvironment"]
+
+
+class LocalEnvironment(BaseModel):
+ type: Literal["local"]
+ """Use a local computer environment."""
+
+ skills: Optional[List[LocalSkill]] = None
+ """An optional list of skills."""
diff --git a/portkey_ai/_vendor/openai/types/responses/local_environment_param.py b/portkey_ai/_vendor/openai/types/responses/local_environment_param.py
new file mode 100644
index 00000000..0ed1e81f
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/local_environment_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .local_skill_param import LocalSkillParam
+
+__all__ = ["LocalEnvironmentParam"]
+
+
+class LocalEnvironmentParam(TypedDict, total=False):
+ type: Required[Literal["local"]]
+ """Use a local computer environment."""
+
+ skills: Iterable[LocalSkillParam]
+ """An optional list of skills."""
diff --git a/portkey_ai/_vendor/openai/types/responses/local_skill.py b/portkey_ai/_vendor/openai/types/responses/local_skill.py
new file mode 100644
index 00000000..1033c319
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/local_skill.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["LocalSkill"]
+
+
+class LocalSkill(BaseModel):
+ description: str
+ """The description of the skill."""
+
+ name: str
+ """The name of the skill."""
+
+ path: str
+ """The path to the directory containing the skill."""
diff --git a/portkey_ai/_vendor/openai/types/responses/local_skill_param.py b/portkey_ai/_vendor/openai/types/responses/local_skill_param.py
new file mode 100644
index 00000000..63011a49
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/local_skill_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["LocalSkillParam"]
+
+
+class LocalSkillParam(TypedDict, total=False):
+ description: Required[str]
+ """The description of the skill."""
+
+ name: Required[str]
+ """The name of the skill."""
+
+ path: Required[str]
+ """The path to the directory containing the skill."""
diff --git a/portkey_ai/_vendor/openai/types/responses/namespace_tool.py b/portkey_ai/_vendor/openai/types/responses/namespace_tool.py
new file mode 100644
index 00000000..88f76a97
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/namespace_tool.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .custom_tool import CustomTool
+
+__all__ = ["NamespaceTool", "Tool", "ToolFunction"]
+
+
+class ToolFunction(BaseModel):
+ name: str
+
+ type: Literal["function"]
+
+ defer_loading: Optional[bool] = None
+ """Whether this function should be deferred and discovered via tool search."""
+
+ description: Optional[str] = None
+
+ parameters: Optional[object] = None
+
+ strict: Optional[bool] = None
+
+
+Tool: TypeAlias = Annotated[Union[ToolFunction, CustomTool], PropertyInfo(discriminator="type")]
+
+
+class NamespaceTool(BaseModel):
+ """Groups function/custom tools under a shared namespace."""
+
+ description: str
+ """A description of the namespace shown to the model."""
+
+ name: str
+ """The namespace name used in tool calls (for example, `crm`)."""
+
+ tools: List[Tool]
+ """The function/custom tools available inside this namespace."""
+
+ type: Literal["namespace"]
+ """The type of the tool. Always `namespace`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/namespace_tool_param.py b/portkey_ai/_vendor/openai/types/responses/namespace_tool_param.py
new file mode 100644
index 00000000..cb1e5e17
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/namespace_tool_param.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .custom_tool_param import CustomToolParam
+
+__all__ = ["NamespaceToolParam", "Tool", "ToolFunction"]
+
+
+class ToolFunction(TypedDict, total=False):
+ name: Required[str]
+
+ type: Required[Literal["function"]]
+
+ defer_loading: bool
+ """Whether this function should be deferred and discovered via tool search."""
+
+ description: Optional[str]
+
+ parameters: Optional[object]
+
+ strict: Optional[bool]
+
+
+Tool: TypeAlias = Union[ToolFunction, CustomToolParam]
+
+
+class NamespaceToolParam(TypedDict, total=False):
+ """Groups function/custom tools under a shared namespace."""
+
+ description: Required[str]
+ """A description of the namespace shown to the model."""
+
+ name: Required[str]
+ """The namespace name used in tool calls (for example, `crm`)."""
+
+ tools: Required[Iterable[Tool]]
+ """The function/custom tools available inside this namespace."""
+
+ type: Required[Literal["namespace"]]
+ """The type of the tool. Always `namespace`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/parsed_response.py b/portkey_ai/_vendor/openai/types/responses/parsed_response.py
index a8597105..4100a8d9 100644
--- a/portkey_ai/_vendor/openai/types/responses/parsed_response.py
+++ b/portkey_ai/_vendor/openai/types/responses/parsed_response.py
@@ -12,7 +12,9 @@
LocalShellCall,
McpApprovalRequest,
ImageGenerationCall,
+ McpApprovalResponse,
LocalShellCallAction,
+ LocalShellCallOutput,
)
from .response_output_text import ResponseOutputText
from .response_output_message import ResponseOutputMessage
@@ -20,14 +22,19 @@
from .response_reasoning_item import ResponseReasoningItem
from .response_compaction_item import ResponseCompactionItem
from .response_custom_tool_call import ResponseCustomToolCall
+from .response_tool_search_call import ResponseToolSearchCall
from .response_computer_tool_call import ResponseComputerToolCall
from .response_function_tool_call import ResponseFunctionToolCall
from .response_function_web_search import ResponseFunctionWebSearch
from .response_apply_patch_tool_call import ResponseApplyPatchToolCall
from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_tool_search_output_item import ResponseToolSearchOutputItem
from .response_function_shell_tool_call import ResponseFunctionShellToolCall
from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput
+from .response_custom_tool_call_output_item import ResponseCustomToolCallOutputItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput
__all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"]
@@ -70,20 +77,27 @@ class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
ResponseFileSearchToolCall,
ResponseFunctionWebSearch,
ResponseComputerToolCall,
+ ResponseComputerToolCallOutputItem,
+ ResponseToolSearchCall,
+ ResponseToolSearchOutputItem,
ResponseReasoningItem,
McpCall,
McpApprovalRequest,
+ McpApprovalResponse,
ImageGenerationCall,
LocalShellCall,
+ LocalShellCallOutput,
LocalShellCallAction,
McpListTools,
ResponseCodeInterpreterToolCall,
- ResponseCustomToolCall,
ResponseCompactionItem,
ResponseFunctionShellToolCall,
ResponseFunctionShellToolCallOutput,
ResponseApplyPatchToolCall,
ResponseApplyPatchToolCallOutput,
+ ResponseFunctionToolCallOutputItem,
+ ResponseCustomToolCall,
+ ResponseCustomToolCallOutputItem,
],
PropertyInfo(discriminator="type"),
]
diff --git a/portkey_ai/_vendor/openai/types/responses/response.py b/portkey_ai/_vendor/openai/types/responses/response.py
index 6bac7d65..ada0783b 100644
--- a/portkey_ai/_vendor/openai/types/responses/response.py
+++ b/portkey_ai/_vendor/openai/types/responses/response.py
@@ -233,8 +233,9 @@ class Response(BaseModel):
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_compact_params.py b/portkey_ai/_vendor/openai/types/responses/response_compact_params.py
index 657c6a07..0b163a9e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_compact_params.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_compact_params.py
@@ -14,6 +14,12 @@ class ResponseCompactParams(TypedDict, total=False):
model: Required[
Union[
Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
@@ -131,3 +137,6 @@ class ResponseCompactParams(TypedDict, total=False):
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
"""
+
+ prompt_cache_key: Optional[str]
+ """A key to use when reading from or writing to the prompt cache."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py
index 4e1b3cf7..e0339e4a 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py
@@ -5,9 +5,11 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
+from .computer_action_list import ComputerActionList
__all__ = [
"ResponseComputerToolCall",
+ "PendingSafetyCheck",
"Action",
"ActionClick",
"ActionDoubleClick",
@@ -19,10 +21,22 @@
"ActionScroll",
"ActionType",
"ActionWait",
- "PendingSafetyCheck",
]
+class PendingSafetyCheck(BaseModel):
+ """A pending safety check for the computer call."""
+
+ id: str
+ """The ID of the pending safety check."""
+
+ code: Optional[str] = None
+ """The type of the pending safety check."""
+
+ message: Optional[str] = None
+ """Details about the pending safety check."""
+
+
class ActionClick(BaseModel):
"""A click action."""
@@ -41,10 +55,16 @@ class ActionClick(BaseModel):
y: int
"""The y-coordinate where the click occurred."""
+ keys: Optional[List[str]] = None
+ """The keys being held while clicking."""
+
class ActionDoubleClick(BaseModel):
"""A double click action."""
+ keys: Optional[List[str]] = None
+ """The keys being held while double-clicking."""
+
type: Literal["double_click"]
"""Specifies the event type.
@@ -90,6 +110,9 @@ class ActionDrag(BaseModel):
For a drag action, this property is always set to `drag`.
"""
+ keys: Optional[List[str]] = None
+ """The keys being held while dragging the mouse."""
+
class ActionKeypress(BaseModel):
"""A collection of keypresses the model would like to perform."""
@@ -122,6 +145,9 @@ class ActionMove(BaseModel):
y: int
"""The y-coordinate to move to."""
+ keys: Optional[List[str]] = None
+ """The keys being held while moving the mouse."""
+
class ActionScreenshot(BaseModel):
"""A screenshot action."""
@@ -154,6 +180,9 @@ class ActionScroll(BaseModel):
y: int
"""The y-coordinate where the scroll occurred."""
+ keys: Optional[List[str]] = None
+ """The keys being held while scrolling."""
+
class ActionType(BaseModel):
"""An action to type in text."""
@@ -194,19 +223,6 @@ class ActionWait(BaseModel):
]
-class PendingSafetyCheck(BaseModel):
- """A pending safety check for the computer call."""
-
- id: str
- """The ID of the pending safety check."""
-
- code: Optional[str] = None
- """The type of the pending safety check."""
-
- message: Optional[str] = None
- """Details about the pending safety check."""
-
-
class ResponseComputerToolCall(BaseModel):
"""A tool call to a computer use tool.
@@ -217,9 +233,6 @@ class ResponseComputerToolCall(BaseModel):
id: str
"""The unique ID of the computer call."""
- action: Action
- """A click action."""
-
call_id: str
"""An identifier used when responding to the tool call with output."""
@@ -235,3 +248,12 @@ class ResponseComputerToolCall(BaseModel):
type: Literal["computer_call"]
"""The type of the computer call. Always `computer_call`."""
+
+ action: Optional[Action] = None
+ """A click action."""
+
+ actions: Optional[ComputerActionList] = None
+ """Flattened batched actions for `computer_use`.
+
+    Each action includes a `type` discriminator and action-specific fields.
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py
index 90e935c3..bf5555d0 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py
@@ -32,6 +32,13 @@ class ResponseComputerToolCallOutputItem(BaseModel):
output: ResponseComputerToolCallOutputScreenshot
"""A computer screenshot image used with the computer use tool."""
+ status: Literal["completed", "incomplete", "failed", "in_progress"]
+ """The status of the message input.
+
+    One of `in_progress`, `completed`, `incomplete`, or `failed`. Populated when input items
+ are returned via API.
+ """
+
type: Literal["computer_call_output"]
"""The type of the computer tool call output. Always `computer_call_output`."""
@@ -41,9 +48,5 @@ class ResponseComputerToolCallOutputItem(BaseModel):
developer.
"""
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
index 550ba599..3c121097 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
@@ -6,9 +6,11 @@
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
+from .computer_action_list_param import ComputerActionListParam
__all__ = [
"ResponseComputerToolCallParam",
+ "PendingSafetyCheck",
"Action",
"ActionClick",
"ActionDoubleClick",
@@ -20,10 +22,22 @@
"ActionScroll",
"ActionType",
"ActionWait",
- "PendingSafetyCheck",
]
+class PendingSafetyCheck(TypedDict, total=False):
+ """A pending safety check for the computer call."""
+
+ id: Required[str]
+ """The ID of the pending safety check."""
+
+ code: Optional[str]
+ """The type of the pending safety check."""
+
+ message: Optional[str]
+ """Details about the pending safety check."""
+
+
class ActionClick(TypedDict, total=False):
"""A click action."""
@@ -42,10 +56,16 @@ class ActionClick(TypedDict, total=False):
y: Required[int]
"""The y-coordinate where the click occurred."""
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while clicking."""
+
class ActionDoubleClick(TypedDict, total=False):
"""A double click action."""
+ keys: Required[Optional[SequenceNotStr[str]]]
+ """The keys being held while double-clicking."""
+
type: Required[Literal["double_click"]]
"""Specifies the event type.
@@ -91,6 +111,9 @@ class ActionDrag(TypedDict, total=False):
For a drag action, this property is always set to `drag`.
"""
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while dragging the mouse."""
+
class ActionKeypress(TypedDict, total=False):
"""A collection of keypresses the model would like to perform."""
@@ -123,6 +146,9 @@ class ActionMove(TypedDict, total=False):
y: Required[int]
"""The y-coordinate to move to."""
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while moving the mouse."""
+
class ActionScreenshot(TypedDict, total=False):
"""A screenshot action."""
@@ -155,6 +181,9 @@ class ActionScroll(TypedDict, total=False):
y: Required[int]
"""The y-coordinate where the scroll occurred."""
+ keys: Optional[SequenceNotStr[str]]
+ """The keys being held while scrolling."""
+
class ActionType(TypedDict, total=False):
"""An action to type in text."""
@@ -192,19 +221,6 @@ class ActionWait(TypedDict, total=False):
]
-class PendingSafetyCheck(TypedDict, total=False):
- """A pending safety check for the computer call."""
-
- id: Required[str]
- """The ID of the pending safety check."""
-
- code: Optional[str]
- """The type of the pending safety check."""
-
- message: Optional[str]
- """Details about the pending safety check."""
-
-
class ResponseComputerToolCallParam(TypedDict, total=False):
"""A tool call to a computer use tool.
@@ -215,9 +231,6 @@ class ResponseComputerToolCallParam(TypedDict, total=False):
id: Required[str]
"""The unique ID of the computer call."""
- action: Required[Action]
- """A click action."""
-
call_id: Required[str]
"""An identifier used when responding to the tool call with output."""
@@ -233,3 +246,12 @@ class ResponseComputerToolCallParam(TypedDict, total=False):
type: Required[Literal["computer_call"]]
"""The type of the computer call. Always `computer_call`."""
+
+ action: Action
+ """A click action."""
+
+ actions: ComputerActionListParam
+ """Flattened batched actions for `computer_use`.
+
+    Each action includes a `type` discriminator and action-specific fields.
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/response_container_reference.py b/portkey_ai/_vendor/openai/types/responses/response_container_reference.py
new file mode 100644
index 00000000..81f5bd8d
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_container_reference.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseContainerReference"]
+
+
+class ResponseContainerReference(BaseModel):
+ """Represents a container created with /v1/containers."""
+
+ container_id: str
+
+ type: Literal["container_reference"]
+ """The environment type. Always `container_reference`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py
index d1587fe6..7db4129b 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py
@@ -1,14 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
+from ..._models import BaseModel
__all__ = ["ResponseConversationParam"]
-class ResponseConversationParam(TypedDict, total=False):
+class ResponseConversationParam(BaseModel):
"""The conversation that this response belongs to."""
- id: Required[str]
+ id: str
"""The unique ID of the conversation."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_conversation_param_param.py b/portkey_ai/_vendor/openai/types/responses/response_conversation_param_param.py
new file mode 100644
index 00000000..dba3628d
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_conversation_param_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ResponseConversationParamParam"]
+
+
+class ResponseConversationParamParam(TypedDict, total=False):
+ """The conversation that this response belongs to."""
+
+ id: Required[str]
+ """The unique ID of the conversation."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_create_params.py b/portkey_ai/_vendor/openai/types/responses/response_create_params.py
index 15844c65..bf7170da 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_create_params.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_create_params.py
@@ -19,12 +19,13 @@
from .tool_choice_allowed_param import ToolChoiceAllowedParam
from .response_text_config_param import ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam
-from .response_conversation_param import ResponseConversationParam
from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam
from ..shared_params.responses_model import ResponsesModel
+from .response_conversation_param_param import ResponseConversationParamParam
__all__ = [
"ResponseCreateParamsBase",
+ "ContextManagement",
"Conversation",
"StreamOptions",
"ToolChoice",
@@ -40,6 +41,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
[Learn more](https://platform.openai.com/docs/guides/background).
"""
+ context_management: Optional[Iterable[ContextManagement]]
+ """Context management configuration for this request."""
+
conversation: Optional[Conversation]
"""The conversation that this response belongs to.
@@ -167,8 +171,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
- identifies each user. We recommend hashing their username or email address, in
- order to avoid sending us any identifying information.
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
@@ -279,7 +284,15 @@ class ResponseCreateParamsBase(TypedDict, total=False):
"""
-Conversation: TypeAlias = Union[str, ResponseConversationParam]
+class ContextManagement(TypedDict, total=False):
+ type: Required[str]
+ """The context management entry type. Currently only 'compaction' is supported."""
+
+ compact_threshold: Optional[int]
+ """Token threshold at which compaction should be triggered for this entry."""
+
+
+Conversation: TypeAlias = Union[str, ResponseConversationParamParam]
class StreamOptions(TypedDict, total=False):
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py
index f0574396..965ed88f 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py
@@ -25,3 +25,6 @@ class ResponseCustomToolCall(BaseModel):
id: Optional[str] = None
"""The unique ID of the custom tool call in the OpenAI platform."""
+
+ namespace: Optional[str] = None
+ """The namespace of the custom tool being called."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_item.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_item.py
new file mode 100644
index 00000000..4f0f9306
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_item.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .response_custom_tool_call import ResponseCustomToolCall
+
+__all__ = ["ResponseCustomToolCallItem"]
+
+
+class ResponseCustomToolCallItem(ResponseCustomToolCall):
+ """A call to a custom tool created by the model."""
+
+ id: str # type: ignore
+ """The unique ID of the custom tool call item."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_item.py
new file mode 100644
index 00000000..5e5a469e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_item.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .response_custom_tool_call_output import ResponseCustomToolCallOutput
+
+__all__ = ["ResponseCustomToolCallOutputItem"]
+
+
+class ResponseCustomToolCallOutputItem(ResponseCustomToolCallOutput):
+ """The output of a custom tool call from your code, being sent back to the model."""
+
+ id: str # type: ignore
+ """The unique ID of the custom tool call output item."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py
index 5d4ce337..9f82546e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py
@@ -24,3 +24,6 @@ class ResponseCustomToolCallParam(TypedDict, total=False):
id: str
"""The unique ID of the custom tool call in the OpenAI platform."""
+
+ namespace: str
+ """The namespace of the custom tool being called."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py
index 7c6a184e..22c75cf0 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py
@@ -1,11 +1,14 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
-from typing_extensions import Literal
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+from ..._utils import PropertyInfo
from ..._models import BaseModel
+from .response_local_environment import ResponseLocalEnvironment
+from .response_container_reference import ResponseContainerReference
-__all__ = ["ResponseFunctionShellToolCall", "Action"]
+__all__ = ["ResponseFunctionShellToolCall", "Action", "Environment"]
class Action(BaseModel):
@@ -20,6 +23,11 @@ class Action(BaseModel):
"""Optional timeout in milliseconds for the commands."""
+Environment: TypeAlias = Annotated[
+ Union[ResponseLocalEnvironment, ResponseContainerReference, None], PropertyInfo(discriminator="type")
+]
+
+
class ResponseFunctionShellToolCall(BaseModel):
"""A tool call that executes one or more shell commands in a managed environment."""
@@ -35,6 +43,9 @@ class ResponseFunctionShellToolCall(BaseModel):
call_id: str
"""The unique ID of the shell tool call generated by the model."""
+ environment: Optional[Environment] = None
+ """Represents the use of a local environment to perform shell actions."""
+
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the shell call.
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py
index 194e3f7d..3ff4c67a 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py
@@ -30,6 +30,9 @@ class ResponseFunctionToolCall(BaseModel):
id: Optional[str] = None
"""The unique ID of the function tool call."""
+ namespace: Optional[str] = None
+ """The namespace of the function to run."""
+
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the item.
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py
index 3df299e5..c8a44889 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py
@@ -1,5 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import Optional
+from typing_extensions import Literal
+
from .response_function_tool_call import ResponseFunctionToolCall
__all__ = ["ResponseFunctionToolCallItem"]
@@ -14,3 +17,13 @@ class ResponseFunctionToolCallItem(ResponseFunctionToolCall):
id: str # type: ignore
"""The unique ID of the function tool call."""
+
+ status: Literal["in_progress", "completed", "incomplete"] # type: ignore
+ """The status of the item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_output_item.py
index 1a2c848c..e40feeb3 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_output_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_output_item.py
@@ -29,12 +29,15 @@ class ResponseFunctionToolCallOutputItem(BaseModel):
list of output content.
"""
- type: Literal["function_call_output"]
- """The type of the function tool call output. Always `function_call_output`."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ status: Literal["in_progress", "completed", "incomplete"]
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
+
+ type: Literal["function_call_output"]
+ """The type of the function tool call output. Always `function_call_output`."""
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py
index 4e8dd3d6..5183e9e2 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py
@@ -29,6 +29,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False):
id: str
"""The unique ID of the function tool call."""
+ namespace: str
+ """The namespace of the function to run."""
+
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the item.
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
index 0cb7e0b0..de6001e1 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
@@ -6,7 +6,14 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-__all__ = ["ResponseFunctionWebSearch", "Action", "ActionSearch", "ActionSearchSource", "ActionOpenPage", "ActionFind"]
+__all__ = [
+ "ResponseFunctionWebSearch",
+ "Action",
+ "ActionSearch",
+ "ActionSearchSource",
+ "ActionOpenPage",
+ "ActionFind",
+]
class ActionSearchSource(BaseModel):
@@ -41,17 +48,17 @@ class ActionOpenPage(BaseModel):
type: Literal["open_page"]
"""The action type."""
- url: str
+ url: Optional[str] = None
"""The URL opened by the model."""
class ActionFind(BaseModel):
- """Action type "find": Searches for a pattern within a loaded page."""
+ """Action type "find_in_page": Searches for a pattern within a loaded page."""
pattern: str
"""The pattern or text to search for within the page."""
- type: Literal["find"]
+ type: Literal["find_in_page"]
"""The action type."""
url: str
@@ -74,7 +81,7 @@ class ResponseFunctionWebSearch(BaseModel):
action: Action
"""
An object describing the specific action taken in this web search call. Includes
- details on how the model used the web (search, open_page, find).
+ details on how the model used the web (search, open_page, find_in_page).
"""
status: Literal["in_progress", "searching", "completed", "failed"]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
index 7db3e3c8..15e313b0 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
@@ -49,17 +49,17 @@ class ActionOpenPage(TypedDict, total=False):
type: Required[Literal["open_page"]]
"""The action type."""
- url: Required[str]
+ url: Optional[str]
"""The URL opened by the model."""
class ActionFind(TypedDict, total=False):
- """Action type "find": Searches for a pattern within a loaded page."""
+ """Action type "find_in_page": Searches for a pattern within a loaded page."""
pattern: Required[str]
"""The pattern or text to search for within the page."""
- type: Required[Literal["find"]]
+ type: Required[Literal["find_in_page"]]
"""The action type."""
url: Required[str]
@@ -82,7 +82,7 @@ class ResponseFunctionWebSearchParam(TypedDict, total=False):
action: Required[Action]
"""
An object describing the specific action taken in this web search call. Includes
- details on how the model used the web (search, open_page, find).
+ details on how the model used the web (search, open_page, find_in_page).
"""
status: Required[Literal["in_progress", "searching", "completed", "failed"]]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input.py b/portkey_ai/_vendor/openai/types/responses/response_input.py
new file mode 100644
index 00000000..e2180dec
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_input.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .response_input_item import ResponseInputItem
+
+__all__ = ["ResponseInput"]
+
+ResponseInput: TypeAlias = List[ResponseInputItem]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image.py b/portkey_ai/_vendor/openai/types/responses/response_input_image.py
index 500bc4b3..63c4b42c 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_image.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_image.py
@@ -14,10 +14,10 @@ class ResponseInputImage(BaseModel):
Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
"""
- detail: Literal["low", "high", "auto"]
+ detail: Literal["low", "high", "auto", "original"]
"""The detail level of the image to be sent to the model.
- One of `high`, `low`, or `auto`. Defaults to `auto`.
+ One of `high`, `low`, `auto`, or `original`. Defaults to `auto`.
"""
type: Literal["input_image"]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py
index e38bc28d..d9619f65 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py
@@ -17,10 +17,10 @@ class ResponseInputImageContent(BaseModel):
type: Literal["input_image"]
"""The type of the input item. Always `input_image`."""
- detail: Optional[Literal["low", "high", "auto"]] = None
+ detail: Optional[Literal["low", "high", "auto", "original"]] = None
"""The detail level of the image to be sent to the model.
- One of `high`, `low`, or `auto`. Defaults to `auto`.
+ One of `high`, `low`, `auto`, or `original`. Defaults to `auto`.
"""
file_id: Optional[str] = None
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py
index c21f46d7..743642d4 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py
@@ -17,10 +17,10 @@ class ResponseInputImageContentParam(TypedDict, total=False):
type: Required[Literal["input_image"]]
"""The type of the input item. Always `input_image`."""
- detail: Optional[Literal["low", "high", "auto"]]
+ detail: Optional[Literal["low", "high", "auto", "original"]]
"""The detail level of the image to be sent to the model.
- One of `high`, `low`, or `auto`. Defaults to `auto`.
+ One of `high`, `low`, `auto`, or `original`. Defaults to `auto`.
"""
file_id: Optional[str]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py
index fd8c1bd0..118f0b5f 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py
@@ -14,10 +14,10 @@ class ResponseInputImageParam(TypedDict, total=False):
Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
"""
- detail: Required[Literal["low", "high", "auto"]]
+ detail: Required[Literal["low", "high", "auto", "original"]]
"""The detail level of the image to be sent to the model.
- One of `high`, `low`, or `auto`. Defaults to `auto`.
+ One of `high`, `low`, `auto`, or `original`. Defaults to `auto`.
"""
type: Required[Literal["input_image"]]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item.py b/portkey_ai/_vendor/openai/types/responses/response_input_item.py
index 23eb2c89..af3ce5bd 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_item.py
@@ -5,7 +5,9 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
+from .local_environment import LocalEnvironment
from .easy_input_message import EasyInputMessage
+from .container_reference import ContainerReference
from .response_output_message import ResponseOutputMessage
from .response_reasoning_item import ResponseReasoningItem
from .response_custom_tool_call import ResponseCustomToolCall
@@ -17,6 +19,7 @@
from .response_custom_tool_call_output import ResponseCustomToolCallOutput
from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from .response_input_message_content_list import ResponseInputMessageContentList
+from .response_tool_search_output_item_param import ResponseToolSearchOutputItemParam
from .response_function_call_output_item_list import ResponseFunctionCallOutputItemList
from .response_function_shell_call_output_content import ResponseFunctionShellCallOutputContent
from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
@@ -27,12 +30,14 @@
"ComputerCallOutput",
"ComputerCallOutputAcknowledgedSafetyCheck",
"FunctionCallOutput",
+ "ToolSearchCall",
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
"LocalShellCallOutput",
"ShellCall",
"ShellCallAction",
+ "ShellCallEnvironment",
"ShellCallOutput",
"ApplyPatchCall",
"ApplyPatchCallOperation",
@@ -144,6 +149,26 @@ class FunctionCallOutput(BaseModel):
"""
+class ToolSearchCall(BaseModel):
+ arguments: object
+ """The arguments supplied to the tool search call."""
+
+ type: Literal["tool_search_call"]
+ """The item type. Always `tool_search_call`."""
+
+ id: Optional[str] = None
+ """The unique ID of this tool search call."""
+
+ call_id: Optional[str] = None
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Optional[Literal["server", "client"]] = None
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the tool search call."""
+
+
class ImageGenerationCall(BaseModel):
"""An image generation request made by the model."""
@@ -233,6 +258,11 @@ class ShellCallAction(BaseModel):
"""Maximum wall-clock time in milliseconds to allow the shell commands to run."""
+ShellCallEnvironment: TypeAlias = Annotated[
+ Union[LocalEnvironment, ContainerReference, None], PropertyInfo(discriminator="type")
+]
+
+
class ShellCall(BaseModel):
"""A tool representing a request to execute one or more shell commands."""
@@ -251,6 +281,9 @@ class ShellCall(BaseModel):
Populated when this item is returned via API.
"""
+ environment: Optional[ShellCallEnvironment] = None
+ """The environment to execute the shell commands in."""
+
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the shell call.
@@ -285,6 +318,9 @@ class ShellCallOutput(BaseModel):
output.
"""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the shell call output."""
+
class ApplyPatchCallOperationCreateFile(BaseModel):
"""Instruction for creating a new file via the apply_patch tool."""
@@ -512,6 +548,8 @@ class ItemReference(BaseModel):
ResponseFunctionWebSearch,
ResponseFunctionToolCall,
FunctionCallOutput,
+ ToolSearchCall,
+ ResponseToolSearchOutputItemParam,
ResponseReasoningItem,
ResponseCompactionItemParam,
ImageGenerationCall,
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
index 2c42b930..87ea1bc5 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
@@ -6,7 +6,9 @@
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
+from .local_environment_param import LocalEnvironmentParam
from .easy_input_message_param import EasyInputMessageParam
+from .container_reference_param import ContainerReferenceParam
from .response_output_message_param import ResponseOutputMessageParam
from .response_reasoning_item_param import ResponseReasoningItemParam
from .response_custom_tool_call_param import ResponseCustomToolCallParam
@@ -18,6 +20,7 @@
from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_tool_search_output_item_param_param import ResponseToolSearchOutputItemParamParam
from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam
from .response_function_shell_call_output_content_param import ResponseFunctionShellCallOutputContentParam
from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -28,12 +31,14 @@
"ComputerCallOutput",
"ComputerCallOutputAcknowledgedSafetyCheck",
"FunctionCallOutput",
+ "ToolSearchCall",
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
"LocalShellCallOutput",
"ShellCall",
"ShellCallAction",
+ "ShellCallEnvironment",
"ShellCallOutput",
"ApplyPatchCall",
"ApplyPatchCallOperation",
@@ -145,6 +150,26 @@ class FunctionCallOutput(TypedDict, total=False):
"""
+class ToolSearchCall(TypedDict, total=False):
+ arguments: Required[object]
+ """The arguments supplied to the tool search call."""
+
+ type: Required[Literal["tool_search_call"]]
+ """The item type. Always `tool_search_call`."""
+
+ id: Optional[str]
+ """The unique ID of this tool search call."""
+
+ call_id: Optional[str]
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the tool search call."""
+
+
class ImageGenerationCall(TypedDict, total=False):
"""An image generation request made by the model."""
@@ -234,6 +259,9 @@ class ShellCallAction(TypedDict, total=False):
"""Maximum wall-clock time in milliseconds to allow the shell commands to run."""
+ShellCallEnvironment: TypeAlias = Union[LocalEnvironmentParam, ContainerReferenceParam]
+
+
class ShellCall(TypedDict, total=False):
"""A tool representing a request to execute one or more shell commands."""
@@ -252,6 +280,9 @@ class ShellCall(TypedDict, total=False):
Populated when this item is returned via API.
"""
+ environment: Optional[ShellCallEnvironment]
+ """The environment to execute the shell commands in."""
+
status: Optional[Literal["in_progress", "completed", "incomplete"]]
"""The status of the shell call.
@@ -286,6 +317,9 @@ class ShellCallOutput(TypedDict, total=False):
output.
"""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the shell call output."""
+
class ApplyPatchCallOperationCreateFile(TypedDict, total=False):
"""Instruction for creating a new file via the apply_patch tool."""
@@ -511,6 +545,8 @@ class ItemReference(TypedDict, total=False):
ResponseFunctionWebSearchParam,
ResponseFunctionToolCallParam,
FunctionCallOutput,
+ ToolSearchCall,
+ ResponseToolSearchOutputItemParamParam,
ResponseReasoningItemParam,
ResponseCompactionItemParamParam,
ImageGenerationCall,
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_message_item.py b/portkey_ai/_vendor/openai/types/responses/response_input_message_item.py
index 6a788e7f..788c92c9 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_message_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_message_item.py
@@ -22,12 +22,12 @@ class ResponseInputMessageItem(BaseModel):
role: Literal["user", "system", "developer"]
"""The role of the message input. One of `user`, `system`, or `developer`."""
+ type: Literal["message"]
+ """The type of the message input. Always set to `message`."""
+
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always set to `message`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_param.py
index c2d12c0a..cf4d5295 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_param.py
@@ -6,7 +6,9 @@
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
+from .local_environment_param import LocalEnvironmentParam
from .easy_input_message_param import EasyInputMessageParam
+from .container_reference_param import ContainerReferenceParam
from .response_output_message_param import ResponseOutputMessageParam
from .response_reasoning_item_param import ResponseReasoningItemParam
from .response_custom_tool_call_param import ResponseCustomToolCallParam
@@ -18,6 +20,7 @@
from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_tool_search_output_item_param_param import ResponseToolSearchOutputItemParamParam
from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam
from .response_function_shell_call_output_content_param import ResponseFunctionShellCallOutputContentParam
from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -29,12 +32,14 @@
"ComputerCallOutput",
"ComputerCallOutputAcknowledgedSafetyCheck",
"FunctionCallOutput",
+ "ToolSearchCall",
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
"LocalShellCallOutput",
"ShellCall",
"ShellCallAction",
+ "ShellCallEnvironment",
"ShellCallOutput",
"ApplyPatchCall",
"ApplyPatchCallOperation",
@@ -146,6 +151,26 @@ class FunctionCallOutput(TypedDict, total=False):
"""
+class ToolSearchCall(TypedDict, total=False):
+ arguments: Required[object]
+ """The arguments supplied to the tool search call."""
+
+ type: Required[Literal["tool_search_call"]]
+ """The item type. Always `tool_search_call`."""
+
+ id: Optional[str]
+ """The unique ID of this tool search call."""
+
+ call_id: Optional[str]
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the tool search call."""
+
+
class ImageGenerationCall(TypedDict, total=False):
"""An image generation request made by the model."""
@@ -235,6 +260,9 @@ class ShellCallAction(TypedDict, total=False):
"""Maximum wall-clock time in milliseconds to allow the shell commands to run."""
+ShellCallEnvironment: TypeAlias = Union[LocalEnvironmentParam, ContainerReferenceParam]
+
+
class ShellCall(TypedDict, total=False):
"""A tool representing a request to execute one or more shell commands."""
@@ -253,6 +281,9 @@ class ShellCall(TypedDict, total=False):
Populated when this item is returned via API.
"""
+ environment: Optional[ShellCallEnvironment]
+ """The environment to execute the shell commands in."""
+
status: Optional[Literal["in_progress", "completed", "incomplete"]]
"""The status of the shell call.
@@ -287,6 +318,9 @@ class ShellCallOutput(TypedDict, total=False):
output.
"""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the shell call output."""
+
class ApplyPatchCallOperationCreateFile(TypedDict, total=False):
"""Instruction for creating a new file via the apply_patch tool."""
@@ -512,6 +546,8 @@ class ItemReference(TypedDict, total=False):
ResponseFunctionWebSearchParam,
ResponseFunctionToolCallParam,
FunctionCallOutput,
+ ToolSearchCall,
+ ResponseToolSearchOutputItemParamParam,
ResponseReasoningItemParam,
ResponseCompactionItemParamParam,
ImageGenerationCall,
diff --git a/portkey_ai/_vendor/openai/types/responses/response_item.py b/portkey_ai/_vendor/openai/types/responses/response_item.py
index 3dba681d..721bf02e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_item.py
@@ -6,15 +6,21 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .response_output_message import ResponseOutputMessage
+from .response_reasoning_item import ResponseReasoningItem
+from .response_compaction_item import ResponseCompactionItem
+from .response_tool_search_call import ResponseToolSearchCall
from .response_computer_tool_call import ResponseComputerToolCall
from .response_input_message_item import ResponseInputMessageItem
from .response_function_web_search import ResponseFunctionWebSearch
from .response_apply_patch_tool_call import ResponseApplyPatchToolCall
+from .response_custom_tool_call_item import ResponseCustomToolCallItem
from .response_file_search_tool_call import ResponseFileSearchToolCall
from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_tool_search_output_item import ResponseToolSearchOutputItem
from .response_function_shell_tool_call import ResponseFunctionShellToolCall
from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput
+from .response_custom_tool_call_output_item import ResponseCustomToolCallOutputItem
from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput
@@ -227,6 +233,10 @@ class McpCall(BaseModel):
ResponseFunctionWebSearch,
ResponseFunctionToolCallItem,
ResponseFunctionToolCallOutputItem,
+ ResponseToolSearchCall,
+ ResponseToolSearchOutputItem,
+ ResponseReasoningItem,
+ ResponseCompactionItem,
ImageGenerationCall,
ResponseCodeInterpreterToolCall,
LocalShellCall,
@@ -239,6 +249,8 @@ class McpCall(BaseModel):
McpApprovalRequest,
McpApprovalResponse,
McpCall,
+ ResponseCustomToolCallItem,
+ ResponseCustomToolCallOutputItem,
],
PropertyInfo(discriminator="type"),
]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_local_environment.py b/portkey_ai/_vendor/openai/types/responses/response_local_environment.py
new file mode 100644
index 00000000..6467fcb4
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_local_environment.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseLocalEnvironment"]
+
+
+class ResponseLocalEnvironment(BaseModel):
+ """Represents the use of a local environment to perform shell actions."""
+
+ type: Literal["local"]
+ """The environment type. Always `local`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_output_item.py
index 990f947b..a4b23f26 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_item.py
@@ -9,14 +9,19 @@
from .response_reasoning_item import ResponseReasoningItem
from .response_compaction_item import ResponseCompactionItem
from .response_custom_tool_call import ResponseCustomToolCall
+from .response_tool_search_call import ResponseToolSearchCall
from .response_computer_tool_call import ResponseComputerToolCall
from .response_function_tool_call import ResponseFunctionToolCall
from .response_function_web_search import ResponseFunctionWebSearch
from .response_apply_patch_tool_call import ResponseApplyPatchToolCall
from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_tool_search_output_item import ResponseToolSearchOutputItem
from .response_function_shell_tool_call import ResponseFunctionShellToolCall
from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput
+from .response_custom_tool_call_output_item import ResponseCustomToolCallOutputItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput
__all__ = [
@@ -24,10 +29,12 @@
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
+ "LocalShellCallOutput",
"McpCall",
"McpListTools",
"McpListToolsTool",
"McpApprovalRequest",
+ "McpApprovalResponse",
]
@@ -88,6 +95,22 @@ class LocalShellCall(BaseModel):
"""The type of the local shell call. Always `local_shell_call`."""
+class LocalShellCallOutput(BaseModel):
+ """The output of a local shell tool call."""
+
+ id: str
+ """The unique ID of the local shell tool call generated by the model."""
+
+ output: str
+ """A JSON string of the output of the local shell tool call."""
+
+ type: Literal["local_shell_call_output"]
+ """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
class McpCall(BaseModel):
"""An invocation of a tool on an MCP server."""
@@ -180,18 +203,42 @@ class McpApprovalRequest(BaseModel):
"""The type of the item. Always `mcp_approval_request`."""
+class McpApprovalResponse(BaseModel):
+ """A response to an MCP approval request."""
+
+ id: str
+ """The unique ID of the approval response"""
+
+ approval_request_id: str
+ """The ID of the approval request being answered."""
+
+ approve: bool
+ """Whether the request was approved."""
+
+ type: Literal["mcp_approval_response"]
+ """The type of the item. Always `mcp_approval_response`."""
+
+ reason: Optional[str] = None
+ """Optional reason for the decision."""
+
+
ResponseOutputItem: TypeAlias = Annotated[
Union[
ResponseOutputMessage,
ResponseFileSearchToolCall,
ResponseFunctionToolCall,
+ ResponseFunctionToolCallOutputItem,
ResponseFunctionWebSearch,
ResponseComputerToolCall,
+ ResponseComputerToolCallOutputItem,
ResponseReasoningItem,
+ ResponseToolSearchCall,
+ ResponseToolSearchOutputItem,
ResponseCompactionItem,
ImageGenerationCall,
ResponseCodeInterpreterToolCall,
LocalShellCall,
+ LocalShellCallOutput,
ResponseFunctionShellToolCall,
ResponseFunctionShellToolCallOutput,
ResponseApplyPatchToolCall,
@@ -199,7 +246,9 @@ class McpApprovalRequest(BaseModel):
McpCall,
McpListTools,
McpApprovalRequest,
+ McpApprovalResponse,
ResponseCustomToolCall,
+ ResponseCustomToolCallOutputItem,
],
PropertyInfo(discriminator="type"),
]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_message.py b/portkey_ai/_vendor/openai/types/responses/response_output_message.py
index 9c1d1f97..760d72d5 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_message.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_message.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union
+from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
@@ -34,3 +34,11 @@ class ResponseOutputMessage(BaseModel):
type: Literal["message"]
"""The type of the output message. Always `message`."""
+
+ phase: Optional[Literal["commentary", "final_answer"]] = None
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py
index 9c2f5246..09fec5bd 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .response_output_text_param import ResponseOutputTextParam
@@ -34,3 +34,11 @@ class ResponseOutputMessageParam(TypedDict, total=False):
type: Required[Literal["message"]]
"""The type of the output message. Always `message`."""
+
+ phase: Optional[Literal["commentary", "final_answer"]]
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/response_tool_search_call.py b/portkey_ai/_vendor/openai/types/responses/response_tool_search_call.py
new file mode 100644
index 00000000..495bf171
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_tool_search_call.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseToolSearchCall"]
+
+
+class ResponseToolSearchCall(BaseModel):
+ id: str
+ """The unique ID of the tool search call item."""
+
+ arguments: object
+ """Arguments used for the tool search call."""
+
+ call_id: Optional[str] = None
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the tool search call item that was recorded."""
+
+ type: Literal["tool_search_call"]
+ """The type of the item. Always `tool_search_call`."""
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item.py
new file mode 100644
index 00000000..f8911dd4
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .tool import Tool
+from ..._models import BaseModel
+
+__all__ = ["ResponseToolSearchOutputItem"]
+
+
+class ResponseToolSearchOutputItem(BaseModel):
+ id: str
+ """The unique ID of the tool search output item."""
+
+ call_id: Optional[str] = None
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the tool search output item that was recorded."""
+
+ tools: List[Tool]
+ """The loaded tool definitions returned by tool search."""
+
+ type: Literal["tool_search_output"]
+ """The type of the item. Always `tool_search_output`."""
+
+ created_by: Optional[str] = None
+ """The identifier of the actor that created the item."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param.py
new file mode 100644
index 00000000..f4d4ef34
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .tool import Tool
+from ..._models import BaseModel
+
+__all__ = ["ResponseToolSearchOutputItemParam"]
+
+
+class ResponseToolSearchOutputItemParam(BaseModel):
+ tools: List[Tool]
+ """The loaded tool definitions returned by the tool search output."""
+
+ type: Literal["tool_search_output"]
+ """The item type. Always `tool_search_output`."""
+
+ id: Optional[str] = None
+ """The unique ID of this tool search output."""
+
+ call_id: Optional[str] = None
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Optional[Literal["server", "client"]] = None
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the tool search output."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param_param.py b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param_param.py
new file mode 100644
index 00000000..28e9b1e1
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_tool_search_output_item_param_param.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .tool_param import ToolParam
+
+__all__ = ["ResponseToolSearchOutputItemParamParam"]
+
+
+class ResponseToolSearchOutputItemParamParam(TypedDict, total=False):
+ tools: Required[Iterable[ToolParam]]
+ """The loaded tool definitions returned by the tool search output."""
+
+ type: Required[Literal["tool_search_output"]]
+ """The item type. Always `tool_search_output`."""
+
+ id: Optional[str]
+ """The unique ID of this tool search output."""
+
+ call_id: Optional[str]
+ """The unique ID of the tool search call generated by the model."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search was executed by the server or by the client."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the tool search output."""
diff --git a/portkey_ai/_vendor/openai/types/responses/responses_client_event.py b/portkey_ai/_vendor/openai/types/responses/responses_client_event.py
new file mode 100644
index 00000000..2bc6f899
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/responses_client_event.py
@@ -0,0 +1,326 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .tool import Tool
+from ..._models import BaseModel
+from .response_input import ResponseInput
+from .response_prompt import ResponsePrompt
+from .tool_choice_mcp import ToolChoiceMcp
+from ..shared.metadata import Metadata
+from ..shared.reasoning import Reasoning
+from .tool_choice_shell import ToolChoiceShell
+from .tool_choice_types import ToolChoiceTypes
+from .tool_choice_custom import ToolChoiceCustom
+from .response_includable import ResponseIncludable
+from .tool_choice_allowed import ToolChoiceAllowed
+from .tool_choice_options import ToolChoiceOptions
+from .response_text_config import ResponseTextConfig
+from .tool_choice_function import ToolChoiceFunction
+from ..shared.responses_model import ResponsesModel
+from .tool_choice_apply_patch import ToolChoiceApplyPatch
+from .response_conversation_param import ResponseConversationParam
+
+__all__ = ["ResponsesClientEvent", "ContextManagement", "Conversation", "StreamOptions", "ToolChoice"]
+
+
+class ContextManagement(BaseModel):
+ type: str
+ """The context management entry type. Currently only 'compaction' is supported."""
+
+ compact_threshold: Optional[int] = None
+ """Token threshold at which compaction should be triggered for this entry."""
+
+
+Conversation: TypeAlias = Union[str, ResponseConversationParam, None]
+
+
+class StreamOptions(BaseModel):
+ """Options for streaming responses. Only set this when you set `stream: true`."""
+
+ include_obfuscation: Optional[bool] = None
+ """When true, stream obfuscation will be enabled.
+
+ Stream obfuscation adds random characters to an `obfuscation` field on streaming
+ delta events to normalize payload sizes as a mitigation to certain side-channel
+ attacks. These obfuscation fields are included by default, but add a small
+ amount of overhead to the data stream. You can set `include_obfuscation` to
+ false to optimize for bandwidth if you trust the network links between your
+ application and the OpenAI API.
+ """
+
+
+ToolChoice: TypeAlias = Union[
+ ToolChoiceOptions,
+ ToolChoiceAllowed,
+ ToolChoiceTypes,
+ ToolChoiceFunction,
+ ToolChoiceMcp,
+ ToolChoiceCustom,
+ ToolChoiceApplyPatch,
+ ToolChoiceShell,
+]
+
+
+class ResponsesClientEvent(BaseModel):
+ type: Literal["response.create"]
+ """The type of the client event. Always `response.create`."""
+
+ background: Optional[bool] = None
+ """
+ Whether to run the model response in the background.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+ """
+
+ context_management: Optional[List[ContextManagement]] = None
+ """Context management configuration for this request."""
+
+ conversation: Optional[Conversation] = None
+ """The conversation that this response belongs to.
+
+ Items from this conversation are prepended to `input_items` for this response
+ request. Input items and output items from this response are automatically added
+ to this conversation after this response completes.
+ """
+
+ include: Optional[List[ResponseIncludable]] = None
+ """Specify additional output data to include in the model response.
+
+ Currently supported values are:
+
+ - `web_search_call.action.sources`: Include the sources of the web search tool
+ call.
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+ """
+
+ input: Union[str, ResponseInput, None] = None
+ """Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ """
+
+ instructions: Optional[str] = None
+ """A system (or developer) message inserted into the model's context.
+
+ When using along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple to
+ swap out system (or developer) messages in new responses.
+ """
+
+ max_output_tokens: Optional[int] = None
+ """
+ An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ max_tool_calls: Optional[int] = None
+ """
+ The maximum number of total calls to built-in tools that can be processed in a
+ response. This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: Optional[ResponsesModel] = None
+ """Model ID used to generate the response, like `gpt-4o` or `o3`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ parallel_tool_calls: Optional[bool] = None
+ """Whether to allow the model to run tool calls in parallel."""
+
+ previous_response_id: Optional[str] = None
+ """The unique ID of the previous response to the model.
+
+ Use this to create multi-turn conversations. Learn more about
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
+ """
+
+ prompt: Optional[ResponsePrompt] = None
+ """
+ Reference to a prompt template and its variables.
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ """
+
+ prompt_cache_key: Optional[str] = None
+ """
+ Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ """
+
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
+ """The retention policy for the prompt cache.
+
+ Set to `24h` to enable extended prompt caching, which keeps cached prefixes
+ active for longer, up to a maximum of 24 hours.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ """
+
+ reasoning: Optional[Reasoning] = None
+ """**gpt-5 and o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ safety_identifier: Optional[str] = None
+ """
+ A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
+
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
+ """Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+ """
+
+ store: Optional[bool] = None
+ """Whether to store the generated model response for later retrieval via API."""
+
+ stream: Optional[bool] = None
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+ stream_options: Optional[StreamOptions] = None
+ """Options for streaming responses. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float] = None
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ text: Optional[ResponseTextConfig] = None
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
+
+ tool_choice: Optional[ToolChoice] = None
+ """
+ How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+ """
+
+ tools: Optional[List[Tool]] = None
+ """An array of tools the model may call while generating a response.
+
+ You can specify which tool to use by setting the `tool_choice` parameter.
+
+ We support the following categories of tools:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+ predefined connectors such as Google Drive and SharePoint. Learn more about
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code with strongly typed arguments and outputs.
+ Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ You can also use custom tools to call your own code.
+ """
+
+ top_logprobs: Optional[int] = None
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ """
+
+ top_p: Optional[float] = None
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ truncation: Optional[Literal["auto", "disabled"]] = None
+ """The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size will exceed the context window size
+ for a model, the request will fail with a 400 error.
+ """
+
+ user: Optional[str] = None
+ """This field is being replaced by `safety_identifier` and `prompt_cache_key`.
+
+ Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/responses_client_event_param.py b/portkey_ai/_vendor/openai/types/responses/responses_client_event_param.py
new file mode 100644
index 00000000..08596ef9
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/responses_client_event_param.py
@@ -0,0 +1,327 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .tool_param import ToolParam
+from .response_includable import ResponseIncludable
+from .tool_choice_options import ToolChoiceOptions
+from .response_input_param import ResponseInputParam
+from .response_prompt_param import ResponsePromptParam
+from .tool_choice_mcp_param import ToolChoiceMcpParam
+from ..shared_params.metadata import Metadata
+from .tool_choice_shell_param import ToolChoiceShellParam
+from .tool_choice_types_param import ToolChoiceTypesParam
+from ..shared_params.reasoning import Reasoning
+from .tool_choice_custom_param import ToolChoiceCustomParam
+from .tool_choice_allowed_param import ToolChoiceAllowedParam
+from .response_text_config_param import ResponseTextConfigParam
+from .tool_choice_function_param import ToolChoiceFunctionParam
+from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam
+from ..shared_params.responses_model import ResponsesModel
+from .response_conversation_param_param import ResponseConversationParamParam
+
+__all__ = ["ResponsesClientEventParam", "ContextManagement", "Conversation", "StreamOptions", "ToolChoice"]
+
+
+class ContextManagement(TypedDict, total=False):
+ type: Required[str]
+ """The context management entry type. Currently only 'compaction' is supported."""
+
+ compact_threshold: Optional[int]
+ """Token threshold at which compaction should be triggered for this entry."""
+
+
+Conversation: TypeAlias = Union[str, ResponseConversationParamParam]
+
+
+class StreamOptions(TypedDict, total=False):
+ """Options for streaming responses. Only set this when you set `stream: true`."""
+
+ include_obfuscation: bool
+ """When true, stream obfuscation will be enabled.
+
+ Stream obfuscation adds random characters to an `obfuscation` field on streaming
+ delta events to normalize payload sizes as a mitigation to certain side-channel
+ attacks. These obfuscation fields are included by default, but add a small
+ amount of overhead to the data stream. You can set `include_obfuscation` to
+ false to optimize for bandwidth if you trust the network links between your
+ application and the OpenAI API.
+ """
+
+
+ToolChoice: TypeAlias = Union[
+ ToolChoiceOptions,
+ ToolChoiceAllowedParam,
+ ToolChoiceTypesParam,
+ ToolChoiceFunctionParam,
+ ToolChoiceMcpParam,
+ ToolChoiceCustomParam,
+ ToolChoiceApplyPatchParam,
+ ToolChoiceShellParam,
+]
+
+
+class ResponsesClientEventParam(TypedDict, total=False):
+ type: Required[Literal["response.create"]]
+ """The type of the client event. Always `response.create`."""
+
+ background: Optional[bool]
+ """
+ Whether to run the model response in the background.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+ """
+
+ context_management: Optional[Iterable[ContextManagement]]
+ """Context management configuration for this request."""
+
+ conversation: Optional[Conversation]
+ """The conversation that this response belongs to.
+
+ Items from this conversation are prepended to `input_items` for this response
+ request. Input items and output items from this response are automatically added
+ to this conversation after this response completes.
+ """
+
+ include: Optional[List[ResponseIncludable]]
+ """Specify additional output data to include in the model response.
+
+ Currently supported values are:
+
+ - `web_search_call.action.sources`: Include the sources of the web search tool
+ call.
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+ """
+
+ input: Union[str, ResponseInputParam]
+ """Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ """
+
+ instructions: Optional[str]
+ """A system (or developer) message inserted into the model's context.
+
+ When using along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple to
+ swap out system (or developer) messages in new responses.
+ """
+
+ max_output_tokens: Optional[int]
+ """
+ An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ max_tool_calls: Optional[int]
+ """
+ The maximum number of total calls to built-in tools that can be processed in a
+ response. This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ model: ResponsesModel
+ """Model ID used to generate the response, like `gpt-4o` or `o3`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ parallel_tool_calls: Optional[bool]
+ """Whether to allow the model to run tool calls in parallel."""
+
+ previous_response_id: Optional[str]
+ """The unique ID of the previous response to the model.
+
+ Use this to create multi-turn conversations. Learn more about
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
+ """
+
+ prompt: Optional[ResponsePromptParam]
+ """
+ Reference to a prompt template and its variables.
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ """
+
+ prompt_cache_key: str
+ """
+ Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ """
+
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
+ """The retention policy for the prompt cache.
+
+ Set to `24h` to enable extended prompt caching, which keeps cached prefixes
+ active for longer, up to a maximum of 24 hours.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ """
+
+ reasoning: Optional[Reasoning]
+ """**gpt-5 and o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ safety_identifier: str
+ """
+ A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
+ identifies each user, with a maximum length of 64 characters. We recommend
+ hashing their username or email address, in order to avoid sending us any
+ identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
+
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]]
+ """Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+ """
+
+ store: Optional[bool]
+ """Whether to store the generated model response for later retrieval via API."""
+
+ stream: Optional[bool]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+ stream_options: Optional[StreamOptions]
+ """Options for streaming responses. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ text: ResponseTextConfigParam
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
+
+ tool_choice: ToolChoice
+ """
+ How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+ """
+
+ tools: Iterable[ToolParam]
+ """An array of tools the model may call while generating a response.
+
+ You can specify which tool to use by setting the `tool_choice` parameter.
+
+ We support the following categories of tools:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+ predefined connectors such as Google Drive and SharePoint. Learn more about
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code with strongly typed arguments and outputs.
+ Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ You can also use custom tools to call your own code.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ truncation: Optional[Literal["auto", "disabled"]]
+ """The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size will exceed the context window size
+ for a model, the request will fail with a 400 error.
+ """
+
+ user: str
+ """This field is being replaced by `safety_identifier` and `prompt_cache_key`.
+
+ Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
diff --git a/portkey_ai/_vendor/openai/types/responses/responses_server_event.py b/portkey_ai/_vendor/openai/types/responses/responses_server_event.py
new file mode 100644
index 00000000..c543587a
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/responses_server_event.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_error_event import ResponseErrorEvent
+from .response_failed_event import ResponseFailedEvent
+from .response_queued_event import ResponseQueuedEvent
+from .response_created_event import ResponseCreatedEvent
+from .response_completed_event import ResponseCompletedEvent
+from .response_text_done_event import ResponseTextDoneEvent
+from .response_audio_done_event import ResponseAudioDoneEvent
+from .response_incomplete_event import ResponseIncompleteEvent
+from .response_text_delta_event import ResponseTextDeltaEvent
+from .response_audio_delta_event import ResponseAudioDeltaEvent
+from .response_in_progress_event import ResponseInProgressEvent
+from .response_refusal_done_event import ResponseRefusalDoneEvent
+from .response_refusal_delta_event import ResponseRefusalDeltaEvent
+from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent
+from .response_output_item_done_event import ResponseOutputItemDoneEvent
+from .response_content_part_done_event import ResponseContentPartDoneEvent
+from .response_output_item_added_event import ResponseOutputItemAddedEvent
+from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent
+from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent
+from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent
+from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent
+from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent
+from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent
+from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent
+from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent
+from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent
+from .response_image_gen_call_generating_event import ResponseImageGenCallGeneratingEvent
+from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
+from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
+from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
+from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent
+from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent
+from .response_custom_tool_call_input_done_event import ResponseCustomToolCallInputDoneEvent
+from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
+from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
+from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
+from .response_custom_tool_call_input_delta_event import ResponseCustomToolCallInputDeltaEvent
+from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
+from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent
+from .response_output_text_annotation_added_event import ResponseOutputTextAnnotationAddedEvent
+from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent
+from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent
+from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
+from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
+from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
+from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent
+from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent
+from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent
+
+__all__ = ["ResponsesServerEvent"]
+
+ResponsesServerEvent: TypeAlias = Annotated[
+ Union[
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseCodeInterpreterCallCodeDeltaEvent,
+ ResponseCodeInterpreterCallCodeDoneEvent,
+ ResponseCodeInterpreterCallCompletedEvent,
+ ResponseCodeInterpreterCallInProgressEvent,
+ ResponseCodeInterpreterCallInterpretingEvent,
+ ResponseCompletedEvent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseCreatedEvent,
+ ResponseErrorEvent,
+ ResponseFileSearchCallCompletedEvent,
+ ResponseFileSearchCallInProgressEvent,
+ ResponseFileSearchCallSearchingEvent,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseInProgressEvent,
+ ResponseFailedEvent,
+ ResponseIncompleteEvent,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartDoneEvent,
+ ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent,
+ ResponseReasoningTextDeltaEvent,
+ ResponseReasoningTextDoneEvent,
+ ResponseRefusalDeltaEvent,
+ ResponseRefusalDoneEvent,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ ResponseWebSearchCallCompletedEvent,
+ ResponseWebSearchCallInProgressEvent,
+ ResponseWebSearchCallSearchingEvent,
+ ResponseImageGenCallCompletedEvent,
+ ResponseImageGenCallGeneratingEvent,
+ ResponseImageGenCallInProgressEvent,
+ ResponseImageGenCallPartialImageEvent,
+ ResponseMcpCallArgumentsDeltaEvent,
+ ResponseMcpCallArgumentsDoneEvent,
+ ResponseMcpCallCompletedEvent,
+ ResponseMcpCallFailedEvent,
+ ResponseMcpCallInProgressEvent,
+ ResponseMcpListToolsCompletedEvent,
+ ResponseMcpListToolsFailedEvent,
+ ResponseMcpListToolsInProgressEvent,
+ ResponseOutputTextAnnotationAddedEvent,
+ ResponseQueuedEvent,
+ ResponseCustomToolCallInputDeltaEvent,
+ ResponseCustomToolCallInputDoneEvent,
+ ],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/portkey_ai/_vendor/openai/types/responses/skill_reference.py b/portkey_ai/_vendor/openai/types/responses/skill_reference.py
new file mode 100644
index 00000000..76e614ab
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/skill_reference.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SkillReference"]
+
+
+class SkillReference(BaseModel):
+ skill_id: str
+ """The ID of the referenced skill."""
+
+ type: Literal["skill_reference"]
+ """References a skill created with the /v1/skills endpoint."""
+
+ version: Optional[str] = None
+ """Optional skill version. Use a positive integer or 'latest'. Omit for default."""
diff --git a/portkey_ai/_vendor/openai/types/responses/skill_reference_param.py b/portkey_ai/_vendor/openai/types/responses/skill_reference_param.py
new file mode 100644
index 00000000..33b57285
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/skill_reference_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["SkillReferenceParam"]
+
+
+class SkillReferenceParam(TypedDict, total=False):
+ skill_id: Required[str]
+ """The ID of the referenced skill."""
+
+ type: Required[Literal["skill_reference"]]
+ """References a skill created with the /v1/skills endpoint."""
+
+ version: str
+ """Optional skill version. Use a positive integer or 'latest'. Omit for default."""
diff --git a/portkey_ai/_vendor/openai/types/responses/tool.py b/portkey_ai/_vendor/openai/types/responses/tool.py
index 019962a0..34120a28 100644
--- a/portkey_ai/_vendor/openai/types/responses/tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/tool.py
@@ -9,11 +9,16 @@
from .custom_tool import CustomTool
from .computer_tool import ComputerTool
from .function_tool import FunctionTool
+from .namespace_tool import NamespaceTool
from .web_search_tool import WebSearchTool
from .apply_patch_tool import ApplyPatchTool
from .file_search_tool import FileSearchTool
+from .tool_search_tool import ToolSearchTool
from .function_shell_tool import FunctionShellTool
from .web_search_preview_tool import WebSearchPreviewTool
+from .computer_use_preview_tool import ComputerUsePreviewTool
+from .container_network_policy_disabled import ContainerNetworkPolicyDisabled
+from .container_network_policy_allowlist import ContainerNetworkPolicyAllowlist
__all__ = [
"Tool",
@@ -28,6 +33,7 @@
"CodeInterpreter",
"CodeInterpreterContainer",
"CodeInterpreterContainerCodeInterpreterToolAuto",
+ "CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy",
"ImageGeneration",
"ImageGenerationInputImageMask",
"LocalShell",
@@ -155,6 +161,9 @@ class Mcp(BaseModel):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: Optional[bool] = None
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
@@ -174,6 +183,11 @@ class Mcp(BaseModel):
"""
+CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy: TypeAlias = Annotated[
+ Union[ContainerNetworkPolicyDisabled, ContainerNetworkPolicyAllowlist], PropertyInfo(discriminator="type")
+]
+
+
class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
"""Configuration for a code interpreter container.
@@ -189,6 +203,9 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
"""The memory limit for the code interpreter container."""
+ network_policy: Optional[CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy] = None
+ """Network access policy for the container."""
+
CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto]
@@ -227,6 +244,9 @@ class ImageGeneration(BaseModel):
type: Literal["image_generation"]
"""The type of the image generation tool. Always `image_generation`."""
+ action: Optional[Literal["generate", "edit", "auto"]] = None
+ """Whether to generate a new image or edit an existing image. Default: `auto`."""
+
background: Optional[Literal["transparent", "opaque", "auto"]] = None
"""Background type for the generated image.
@@ -237,8 +257,8 @@ class ImageGeneration(BaseModel):
"""
Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
"""
input_image_mask: Optional[ImageGenerationInputImageMask] = None
@@ -247,7 +267,7 @@ class ImageGeneration(BaseModel):
Contains `image_url` (string, optional) and `file_id` (string, optional).
"""
- model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini"], None] = None
+ model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini", "gpt-image-1.5"], None] = None
"""The image generation model to use. Default: `gpt-image-1`."""
moderation: Optional[Literal["auto", "low"]] = None
@@ -293,6 +313,7 @@ class LocalShell(BaseModel):
FunctionTool,
FileSearchTool,
ComputerTool,
+ ComputerUsePreviewTool,
WebSearchTool,
Mcp,
CodeInterpreter,
@@ -300,6 +321,8 @@ class LocalShell(BaseModel):
LocalShell,
FunctionShellTool,
CustomTool,
+ NamespaceTool,
+ ToolSearchTool,
WebSearchPreviewTool,
ApplyPatchTool,
],
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py
index 044c014b..3cd1d3f6 100644
--- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py
+++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py
@@ -16,7 +16,9 @@ class ToolChoiceTypes(BaseModel):
type: Literal[
"file_search",
"web_search_preview",
+ "computer",
"computer_use_preview",
+ "computer_use",
"web_search_preview_2025_03_11",
"image_generation",
"code_interpreter",
@@ -30,7 +32,9 @@ class ToolChoiceTypes(BaseModel):
- `file_search`
- `web_search_preview`
+ - `computer`
- `computer_use_preview`
+ - `computer_use`
- `code_interpreter`
- `image_generation`
"""
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py
index 9bf02dbf..5d08380a 100644
--- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py
@@ -17,7 +17,9 @@ class ToolChoiceTypesParam(TypedDict, total=False):
Literal[
"file_search",
"web_search_preview",
+ "computer",
"computer_use_preview",
+ "computer_use",
"web_search_preview_2025_03_11",
"image_generation",
"code_interpreter",
@@ -32,7 +34,9 @@ class ToolChoiceTypesParam(TypedDict, total=False):
- `file_search`
- `web_search_preview`
+ - `computer`
- `computer_use_preview`
+ - `computer_use`
- `code_interpreter`
- `image_generation`
"""
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_param.py b/portkey_ai/_vendor/openai/types/responses/tool_param.py
index 37d3dde0..c0f33c45 100644
--- a/portkey_ai/_vendor/openai/types/responses/tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/tool_param.py
@@ -11,11 +11,16 @@
from .custom_tool_param import CustomToolParam
from .computer_tool_param import ComputerToolParam
from .function_tool_param import FunctionToolParam
+from .namespace_tool_param import NamespaceToolParam
from .web_search_tool_param import WebSearchToolParam
from .apply_patch_tool_param import ApplyPatchToolParam
from .file_search_tool_param import FileSearchToolParam
+from .tool_search_tool_param import ToolSearchToolParam
from .function_shell_tool_param import FunctionShellToolParam
from .web_search_preview_tool_param import WebSearchPreviewToolParam
+from .computer_use_preview_tool_param import ComputerUsePreviewToolParam
+from .container_network_policy_disabled_param import ContainerNetworkPolicyDisabledParam
+from .container_network_policy_allowlist_param import ContainerNetworkPolicyAllowlistParam
__all__ = [
"ToolParam",
@@ -29,6 +34,7 @@
"CodeInterpreter",
"CodeInterpreterContainer",
"CodeInterpreterContainerCodeInterpreterToolAuto",
+ "CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy",
"ImageGeneration",
"ImageGenerationInputImageMask",
"LocalShell",
@@ -155,6 +161,9 @@ class Mcp(TypedDict, total=False):
- SharePoint: `connector_sharepoint`
"""
+ defer_loading: bool
+ """Whether this MCP tool is deferred and discovered via tool search."""
+
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
@@ -174,6 +183,11 @@ class Mcp(TypedDict, total=False):
"""
+CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy: TypeAlias = Union[
+ ContainerNetworkPolicyDisabledParam, ContainerNetworkPolicyAllowlistParam
+]
+
+
class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
"""Configuration for a code interpreter container.
@@ -189,6 +203,9 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]]
"""The memory limit for the code interpreter container."""
+ network_policy: CodeInterpreterContainerCodeInterpreterToolAutoNetworkPolicy
+ """Network access policy for the container."""
+
CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto]
@@ -227,6 +244,9 @@ class ImageGeneration(TypedDict, total=False):
type: Required[Literal["image_generation"]]
"""The type of the image generation tool. Always `image_generation`."""
+ action: Literal["generate", "edit", "auto"]
+ """Whether to generate a new image or edit an existing image. Default: `auto`."""
+
background: Literal["transparent", "opaque", "auto"]
"""Background type for the generated image.
@@ -237,8 +257,8 @@ class ImageGeneration(TypedDict, total=False):
"""
Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
+ `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
"""
input_image_mask: ImageGenerationInputImageMask
@@ -247,7 +267,7 @@ class ImageGeneration(TypedDict, total=False):
Contains `image_url` (string, optional) and `file_id` (string, optional).
"""
- model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini"]]
+ model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini", "gpt-image-1.5"]]
"""The image generation model to use. Default: `gpt-image-1`."""
moderation: Literal["auto", "low"]
@@ -292,6 +312,7 @@ class LocalShell(TypedDict, total=False):
FunctionToolParam,
FileSearchToolParam,
ComputerToolParam,
+ ComputerUsePreviewToolParam,
WebSearchToolParam,
Mcp,
CodeInterpreter,
@@ -299,6 +320,8 @@ class LocalShell(TypedDict, total=False):
LocalShell,
FunctionShellToolParam,
CustomToolParam,
+ NamespaceToolParam,
+ ToolSearchToolParam,
WebSearchPreviewToolParam,
ApplyPatchToolParam,
]
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_search_tool.py b/portkey_ai/_vendor/openai/types/responses/tool_search_tool.py
new file mode 100644
index 00000000..44a741a1
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/tool_search_tool.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolSearchTool"]
+
+
+class ToolSearchTool(BaseModel):
+ """Hosted or BYOT tool search configuration for deferred tools."""
+
+ type: Literal["tool_search"]
+ """The type of the tool. Always `tool_search`."""
+
+ description: Optional[str] = None
+ """Description shown to the model for a client-executed tool search tool."""
+
+ execution: Optional[Literal["server", "client"]] = None
+ """Whether tool search is executed by the server or by the client."""
+
+ parameters: Optional[object] = None
+ """Parameter schema for a client-executed tool search tool."""
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_search_tool_param.py b/portkey_ai/_vendor/openai/types/responses/tool_search_tool_param.py
new file mode 100644
index 00000000..3063da27
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/tool_search_tool_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolSearchToolParam"]
+
+
+class ToolSearchToolParam(TypedDict, total=False):
+ """Hosted or BYOT tool search configuration for deferred tools."""
+
+ type: Required[Literal["tool_search"]]
+ """The type of the tool. Always `tool_search`."""
+
+ description: Optional[str]
+ """Description shown to the model for a client-executed tool search tool."""
+
+ execution: Literal["server", "client"]
+ """Whether tool search is executed by the server or by the client."""
+
+ parameters: Optional[object]
+ """Parameter schema for a client-executed tool search tool."""
diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py
index 12478e89..bdf092f1 100644
--- a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py
+++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
@@ -45,6 +45,8 @@ class WebSearchPreviewTool(BaseModel):
One of `web_search_preview` or `web_search_preview_2025_03_11`.
"""
+ search_content_types: Optional[List[Literal["text", "image"]]] = None
+
search_context_size: Optional[Literal["low", "medium", "high"]] = None
"""High level guidance for the amount of context window space to use for the
search.
diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py
index 09619a33..b81f95e3 100644
--- a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["WebSearchPreviewToolParam", "UserLocation"]
@@ -45,6 +45,8 @@ class WebSearchPreviewToolParam(TypedDict, total=False):
One of `web_search_preview` or `web_search_preview_2025_03_11`.
"""
+ search_content_types: List[Literal["text", "image"]]
+
search_context_size: Literal["low", "medium", "high"]
"""High level guidance for the amount of context window space to use for the
search.
diff --git a/portkey_ai/_vendor/openai/types/shared/chat_model.py b/portkey_ai/_vendor/openai/types/shared/chat_model.py
index 8223b81b..501a22a8 100644
--- a/portkey_ai/_vendor/openai/types/shared/chat_model.py
+++ b/portkey_ai/_vendor/openai/types/shared/chat_model.py
@@ -5,6 +5,12 @@
__all__ = ["ChatModel"]
ChatModel: TypeAlias = Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
diff --git a/portkey_ai/_vendor/openai/types/shared/comparison_filter.py b/portkey_ai/_vendor/openai/types/shared/comparison_filter.py
index 852cac17..57c26cd0 100644
--- a/portkey_ai/_vendor/openai/types/shared/comparison_filter.py
+++ b/portkey_ai/_vendor/openai/types/shared/comparison_filter.py
@@ -16,7 +16,7 @@ class ComparisonFilter(BaseModel):
key: str
"""The key to compare against the value."""
- type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
+ type: Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]
"""
Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
`nin`.
diff --git a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py
index c1937a83..17eaacd9 100644
--- a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py
+++ b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py
@@ -7,6 +7,12 @@
__all__ = ["ChatModel"]
ChatModel: TypeAlias = Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
diff --git a/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py b/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py
index 363688e4..005f4d1f 100644
--- a/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py
+++ b/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py
@@ -18,7 +18,7 @@ class ComparisonFilter(TypedDict, total=False):
key: Required[str]
"""The key to compare against the value."""
- type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
+ type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]]
"""
Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
`nin`.
diff --git a/portkey_ai/_vendor/openai/types/skill.py b/portkey_ai/_vendor/openai/types/skill.py
new file mode 100644
index 00000000..25d74978
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skill.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Skill"]
+
+
+class Skill(BaseModel):
+ id: str
+ """Unique identifier for the skill."""
+
+ created_at: int
+ """Unix timestamp (seconds) for when the skill was created."""
+
+ default_version: str
+ """Default version for the skill."""
+
+ description: str
+ """Description of the skill."""
+
+ latest_version: str
+ """Latest version for the skill."""
+
+ name: str
+ """Name of the skill."""
+
+ object: Literal["skill"]
+ """The object type, which is `skill`."""
diff --git a/portkey_ai/_vendor/openai/types/skill_create_params.py b/portkey_ai/_vendor/openai/types/skill_create_params.py
new file mode 100644
index 00000000..5a709286
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skill_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypedDict
+
+from .._types import FileTypes, SequenceNotStr
+
+__all__ = ["SkillCreateParams"]
+
+
+class SkillCreateParams(TypedDict, total=False):
+ files: Union[SequenceNotStr[FileTypes], FileTypes]
+ """Skill files to upload (directory upload) or a single zip file."""
diff --git a/portkey_ai/_vendor/openai/types/skill_list.py b/portkey_ai/_vendor/openai/types/skill_list.py
new file mode 100644
index 00000000..fc5b57bc
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skill_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .skill import Skill
+from .._models import BaseModel
+
+__all__ = ["SkillList"]
+
+
+class SkillList(BaseModel):
+ data: List[Skill]
+ """A list of items"""
+
+ first_id: Optional[str] = None
+ """The ID of the first item in the list."""
+
+ has_more: bool
+ """Whether there are more items available."""
+
+ last_id: Optional[str] = None
+ """The ID of the last item in the list."""
+
+ object: Literal["list"]
+ """The type of object returned, must be `list`."""
diff --git a/portkey_ai/_vendor/openai/types/skill_list_params.py b/portkey_ai/_vendor/openai/types/skill_list_params.py
new file mode 100644
index 00000000..ffe79679
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skill_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["SkillListParams"]
+
+
+class SkillListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last item from the previous pagination request"""
+
+ limit: int
+ """Number of items to retrieve"""
+
+ order: Literal["asc", "desc"]
+ """Sort order of results by timestamp.
+
+ Use `asc` for ascending order or `desc` for descending order.
+ """
diff --git a/portkey_ai/_vendor/openai/types/skill_update_params.py b/portkey_ai/_vendor/openai/types/skill_update_params.py
new file mode 100644
index 00000000..48a790e1
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skill_update_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["SkillUpdateParams"]
+
+
+class SkillUpdateParams(TypedDict, total=False):
+ default_version: Required[str]
+ """The skill version number to set as default."""
diff --git a/portkey_ai/_vendor/openai/types/skills/__init__.py b/portkey_ai/_vendor/openai/types/skills/__init__.py
new file mode 100644
index 00000000..b0605fb8
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .skill_version import SkillVersion as SkillVersion
+from .skill_version_list import SkillVersionList as SkillVersionList
+from .version_list_params import VersionListParams as VersionListParams
+from .deleted_skill_version import DeletedSkillVersion as DeletedSkillVersion
+from .version_create_params import VersionCreateParams as VersionCreateParams
diff --git a/portkey_ai/_vendor/openai/types/skills/deleted_skill_version.py b/portkey_ai/_vendor/openai/types/skills/deleted_skill_version.py
new file mode 100644
index 00000000..e1fd8c49
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/deleted_skill_version.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["DeletedSkillVersion"]
+
+
+class DeletedSkillVersion(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["skill.version.deleted"]
+
+ version: str
+ """The deleted skill version."""
diff --git a/portkey_ai/_vendor/openai/types/skills/skill_version.py b/portkey_ai/_vendor/openai/types/skills/skill_version.py
new file mode 100644
index 00000000..74b47000
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/skill_version.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SkillVersion"]
+
+
+class SkillVersion(BaseModel):
+ id: str
+ """Unique identifier for the skill version."""
+
+ created_at: int
+ """Unix timestamp (seconds) for when the version was created."""
+
+ description: str
+ """Description of the skill version."""
+
+ name: str
+ """Name of the skill version."""
+
+ object: Literal["skill.version"]
+ """The object type, which is `skill.version`."""
+
+ skill_id: str
+ """Identifier of the skill for this version."""
+
+ version: str
+ """Version number for this skill."""
diff --git a/portkey_ai/_vendor/openai/types/skills/skill_version_list.py b/portkey_ai/_vendor/openai/types/skills/skill_version_list.py
new file mode 100644
index 00000000..7d10082f
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/skill_version_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .skill_version import SkillVersion
+
+__all__ = ["SkillVersionList"]
+
+
+class SkillVersionList(BaseModel):
+ data: List[SkillVersion]
+ """A list of items"""
+
+ first_id: Optional[str] = None
+ """The ID of the first item in the list."""
+
+ has_more: bool
+ """Whether there are more items available."""
+
+ last_id: Optional[str] = None
+ """The ID of the last item in the list."""
+
+ object: Literal["list"]
+ """The type of object returned, must be `list`."""
diff --git a/portkey_ai/_vendor/openai/types/skills/version_create_params.py b/portkey_ai/_vendor/openai/types/skills/version_create_params.py
new file mode 100644
index 00000000..043b43a0
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/version_create_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypedDict
+
+from ..._types import FileTypes, SequenceNotStr
+
+__all__ = ["VersionCreateParams"]
+
+
+class VersionCreateParams(TypedDict, total=False):
+ default: bool
+ """Whether to set this version as the default."""
+
+ files: Union[SequenceNotStr[FileTypes], FileTypes]
+ """Skill files to upload (directory upload) or a single zip file."""
diff --git a/portkey_ai/_vendor/openai/types/skills/version_list_params.py b/portkey_ai/_vendor/openai/types/skills/version_list_params.py
new file mode 100644
index 00000000..0638f400
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/version_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VersionListParams"]
+
+
+class VersionListParams(TypedDict, total=False):
+ after: str
+ """The skill version ID to start after."""
+
+ limit: int
+ """Number of versions to retrieve."""
+
+ order: Literal["asc", "desc"]
+ """Sort order of results by version number."""
diff --git a/portkey_ai/_vendor/openai/types/skills/versions/__init__.py b/portkey_ai/_vendor/openai/types/skills/versions/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/skills/versions/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py b/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py
index 2ab98a83..7ca0de81 100644
--- a/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py
+++ b/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py
@@ -33,7 +33,8 @@ class FileBatchCreateParams(TypedDict, total=False):
A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
the vector store should use. Useful for tools like `file_search` that can access
files. If `attributes` or `chunking_strategy` are provided, they will be applied
- to all files in the batch. Mutually exclusive with `files`.
+ to all files in the batch. The maximum batch size is 2000 files. Mutually
+ exclusive with `files`.
"""
files: Iterable[File]
@@ -41,7 +42,8 @@ class FileBatchCreateParams(TypedDict, total=False):
A list of objects that each include a `file_id` plus optional `attributes` or
`chunking_strategy`. Use this when you need to override metadata for specific
files. The global `attributes` or `chunking_strategy` will be ignored and must
- be specified for each file. Mutually exclusive with `file_ids`.
+ be specified for each file. The maximum batch size is 2000 files. Mutually
+ exclusive with `file_ids`.
"""
diff --git a/portkey_ai/_vendor/openai/types/video.py b/portkey_ai/_vendor/openai/types/video.py
index e732ea54..051b951e 100644
--- a/portkey_ai/_vendor/openai/types/video.py
+++ b/portkey_ai/_vendor/openai/types/video.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import Union, Optional
from typing_extensions import Literal
from .._models import BaseModel
@@ -45,8 +45,11 @@ class Video(BaseModel):
remixed_from_video_id: Optional[str] = None
"""Identifier of the source video if this video is a remix."""
- seconds: VideoSeconds
- """Duration of the generated clip in seconds."""
+ seconds: Union[str, VideoSeconds]
+ """Duration of the generated clip in seconds.
+
+ For extensions, this is the stitched total duration.
+ """
size: VideoSize
"""The resolution of the generated video."""
diff --git a/portkey_ai/_vendor/openai/types/video_create_character_params.py b/portkey_ai/_vendor/openai/types/video_create_character_params.py
new file mode 100644
index 00000000..ef671e59
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/video_create_character_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import FileTypes
+
+__all__ = ["VideoCreateCharacterParams"]
+
+
+class VideoCreateCharacterParams(TypedDict, total=False):
+ name: Required[str]
+ """Display name for this API character."""
+
+ video: Required[FileTypes]
+ """Video file used to create a character."""
diff --git a/portkey_ai/_vendor/openai/types/video_create_character_response.py b/portkey_ai/_vendor/openai/types/video_create_character_response.py
new file mode 100644
index 00000000..e3a65a02
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/video_create_character_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["VideoCreateCharacterResponse"]
+
+
+class VideoCreateCharacterResponse(BaseModel):
+ id: Optional[str] = None
+ """Identifier for the character creation cameo."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the character was created."""
+
+ name: Optional[str] = None
+ """Display name for the character."""
diff --git a/portkey_ai/_vendor/openai/types/video_create_params.py b/portkey_ai/_vendor/openai/types/video_create_params.py
index d787aaed..641ac7db 100644
--- a/portkey_ai/_vendor/openai/types/video_create_params.py
+++ b/portkey_ai/_vendor/openai/types/video_create_params.py
@@ -2,22 +2,24 @@
from __future__ import annotations
-from typing_extensions import Required, TypedDict
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
from .._types import FileTypes
from .video_size import VideoSize
from .video_seconds import VideoSeconds
from .video_model_param import VideoModelParam
+from .image_input_reference_param import ImageInputReferenceParam
-__all__ = ["VideoCreateParams"]
+__all__ = ["VideoCreateParams", "InputReference"]
class VideoCreateParams(TypedDict, total=False):
prompt: Required[str]
"""Text prompt that describes the video to generate."""
- input_reference: FileTypes
- """Optional image reference that guides generation."""
+ input_reference: InputReference
+ """Optional reference asset upload or reference object that guides generation."""
model: VideoModelParam
"""The video generation model to use (allowed values: sora-2, sora-2-pro).
@@ -33,3 +35,6 @@ class VideoCreateParams(TypedDict, total=False):
Output resolution formatted as width x height (allowed values: 720x1280,
1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
"""
+
+
+InputReference: TypeAlias = Union[FileTypes, ImageInputReferenceParam]
diff --git a/portkey_ai/_vendor/openai/types/video_edit_params.py b/portkey_ai/_vendor/openai/types/video_edit_params.py
new file mode 100644
index 00000000..8d3b15fc
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/video_edit_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .._types import FileTypes
+
+__all__ = ["VideoEditParams", "Video", "VideoVideoReferenceInputParam"]
+
+
+class VideoEditParams(TypedDict, total=False):
+ prompt: Required[str]
+ """Text prompt that describes how to edit the source video."""
+
+ video: Required[Video]
+ """Reference to the completed video to edit."""
+
+
+class VideoVideoReferenceInputParam(TypedDict, total=False):
+ """Reference to the completed video."""
+
+ id: Required[str]
+ """The identifier of the completed video."""
+
+
+Video: TypeAlias = Union[FileTypes, VideoVideoReferenceInputParam]
diff --git a/portkey_ai/_vendor/openai/types/video_extend_params.py b/portkey_ai/_vendor/openai/types/video_extend_params.py
new file mode 100644
index 00000000..65be4b52
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/video_extend_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .._types import FileTypes
+from .video_seconds import VideoSeconds
+
+__all__ = ["VideoExtendParams", "Video", "VideoVideoReferenceInputParam"]
+
+
+class VideoExtendParams(TypedDict, total=False):
+ prompt: Required[str]
+ """Updated text prompt that directs the extension generation."""
+
+ seconds: Required[VideoSeconds]
+ """
+ Length of the newly generated extension segment in seconds (allowed values: 4,
+ 8, 12, 16, 20).
+ """
+
+ video: Required[Video]
+ """Reference to the completed video to extend."""
+
+
+class VideoVideoReferenceInputParam(TypedDict, total=False):
+ """Reference to the completed video."""
+
+ id: Required[str]
+ """The identifier of the completed video."""
+
+
+Video: TypeAlias = Union[FileTypes, VideoVideoReferenceInputParam]
diff --git a/portkey_ai/_vendor/openai/types/video_get_character_response.py b/portkey_ai/_vendor/openai/types/video_get_character_response.py
new file mode 100644
index 00000000..df6202ed
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/video_get_character_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["VideoGetCharacterResponse"]
+
+
+class VideoGetCharacterResponse(BaseModel):
+ id: Optional[str] = None
+ """Identifier for the character creation cameo."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the character was created."""
+
+ name: Optional[str] = None
+ """Display name for the character."""
diff --git a/portkey_ai/_vendor/openai/types/websocket_connection_options.py b/portkey_ai/_vendor/openai/types/websocket_connection_options.py
index 40fd24ab..519e4341 100644
--- a/portkey_ai/_vendor/openai/types/websocket_connection_options.py
+++ b/portkey_ai/_vendor/openai/types/websocket_connection_options.py
@@ -3,15 +3,17 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-from typing_extensions import Sequence, TypedDict
+from typing_extensions import Sequence, TypeAlias, TypedDict
+
+__all__ = ["WebSocketConnectionOptions", "WebsocketConnectionOptions"]
if TYPE_CHECKING:
from websockets import Subprotocol
from websockets.extensions import ClientExtensionFactory
-class WebsocketConnectionOptions(TypedDict, total=False):
- """Websocket connection options copied from `websockets`.
+class WebSocketConnectionOptions(TypedDict, total=False):
+ """WebSocket connection options copied from `websockets`.
For example: https://websockets.readthedocs.io/en/stable/reference/asyncio/client.html#websockets.asyncio.client.connect
"""
@@ -34,3 +36,7 @@ class WebsocketConnectionOptions(TypedDict, total=False):
write_limit: int | tuple[int, int | None]
"""High-water mark of write buffer in bytes. It is passed to set_write_buffer_limits(). It defaults to 32 KiB. You may pass a (high, low) tuple to set the high-water and low-water marks."""
+
+
+# Backward compatibility for pre-rename imports.
+WebsocketConnectionOptions: TypeAlias = WebSocketConnectionOptions
diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py
index 0cab7a03..059dc6ef 100644
--- a/portkey_ai/api_resources/__init__.py
+++ b/portkey_ai/api_resources/__init__.py
@@ -135,6 +135,14 @@
AsyncConversationsItems,
Videos,
AsyncVideos,
+ Skills,
+ AsyncSkills,
+ SkillsContent,
+ AsyncSkillsContent,
+ SkillsVersions,
+ AsyncSkillsVersions,
+ SkillsVersionsContent,
+ AsyncSkillsVersionsContent,
ChatKit,
AsyncChatKit,
ChatKitSessions,
@@ -339,6 +347,14 @@
"AsyncConversationsItems",
"Videos",
"AsyncVideos",
+ "Skills",
+ "AsyncSkills",
+ "SkillsContent",
+ "AsyncSkillsContent",
+ "SkillsVersions",
+ "AsyncSkillsVersions",
+ "SkillsVersionsContent",
+ "AsyncSkillsVersionsContent",
"ChatKit",
"AsyncChatKit",
"ChatKitSessions",
diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py
index 6bf39cdb..2ea60e46 100644
--- a/portkey_ai/api_resources/apis/__init__.py
+++ b/portkey_ai/api_resources/apis/__init__.py
@@ -175,6 +175,17 @@
from .videos import Videos, AsyncVideos
+from .skills import (
+ Skills,
+ AsyncSkills,
+ SkillsContent,
+ AsyncSkillsContent,
+ SkillsVersions,
+ AsyncSkillsVersions,
+ SkillsVersionsContent,
+ AsyncSkillsVersionsContent,
+)
+
from .chatkit import (
ChatKit,
AsyncChatKit,
@@ -373,6 +384,14 @@
"AsyncCalls",
"InputTokens",
"AsyncInputTokens",
+ "Skills",
+ "AsyncSkills",
+ "SkillsContent",
+ "AsyncSkillsContent",
+ "SkillsVersions",
+ "AsyncSkillsVersions",
+ "SkillsVersionsContent",
+ "AsyncSkillsVersionsContent",
"Analytics",
"AsyncAnalytics",
"AnalyticsGraphs",
diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py
index 052614fb..cb090344 100644
--- a/portkey_ai/api_resources/apis/audio.py
+++ b/portkey_ai/api_resources/apis/audio.py
@@ -4,6 +4,7 @@
from portkey_ai.api_resources.global_constants import AUDIO_FILE_DURATION_HEADER
from portkey_ai.api_resources.get_audio_duration import get_audio_file_duration
from ..._vendor.openai._types import Omit, omit, FileTypes
+from ..._vendor.openai.types.audio import speech_create_params
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
import typing
@@ -137,7 +138,7 @@ def create(
*,
input: str,
model: str,
- voice: str,
+ voice: speech_create_params.Voice,
response_format: Union[str, Omit] = omit,
speed: Union[float, Omit] = omit,
stream: Union[bool, Omit] = omit,
@@ -283,7 +284,7 @@ async def create(
*,
input: str,
model: str,
- voice: str,
+ voice: speech_create_params.Voice,
response_format: Union[str, Omit] = omit,
speed: Union[float, Omit] = omit,
stream: Union[bool, Omit] = omit,
diff --git a/portkey_ai/api_resources/apis/containers.py b/portkey_ai/api_resources/apis/containers.py
index 1a0909e1..ce405100 100644
--- a/portkey_ai/api_resources/apis/containers.py
+++ b/portkey_ai/api_resources/apis/containers.py
@@ -1,5 +1,5 @@
import json
-from typing import Any, List, Literal, Union
+from typing import Any, Iterable, List, Literal, Union
from portkey_ai._vendor.openai.types import container_create_params
from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
@@ -29,6 +29,8 @@ def create(
expires_after: Union[container_create_params.ExpiresAfter, Omit] = omit,
file_ids: Union[List[str], Omit] = omit,
memory_limit: Union[Literal["1g", "4g", "16g", "64g"], Omit] = omit,
+ network_policy: Union[container_create_params.NetworkPolicy, Omit] = omit,
+ skills: Union[Iterable[container_create_params.Skill], Omit] = omit,
**kwargs,
) -> ContainerCreateResponse:
extra_headers = kwargs.pop("extra_headers", None)
@@ -40,6 +42,8 @@ def create(
expires_after=expires_after,
file_ids=file_ids,
memory_limit=memory_limit,
+ network_policy=network_policy,
+ skills=skills,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
@@ -85,6 +89,7 @@ def list(
*,
after: Union[str, Omit] = omit,
limit: Union[int, Omit] = omit,
+ name: Union[str, Omit] = omit,
order: Union[Literal["asc", "desc"], Omit] = omit,
**kwargs,
) -> ContainerListResponse:
@@ -95,6 +100,7 @@ def list(
response = self.openai_client.with_raw_response.containers.list(
after=after,
limit=limit,
+ name=name,
order=order,
extra_headers=extra_headers,
extra_query=extra_query,
@@ -289,6 +295,8 @@ async def create(
expires_after: Union[container_create_params.ExpiresAfter, Omit] = omit,
file_ids: Union[List[str], Omit] = omit,
memory_limit: Union[Literal["1g", "4g", "16g", "64g"], Omit] = omit,
+ network_policy: Union[container_create_params.NetworkPolicy, Omit] = omit,
+ skills: Union[Iterable[container_create_params.Skill], Omit] = omit,
**kwargs,
) -> ContainerCreateResponse:
extra_headers = kwargs.pop("extra_headers", None)
@@ -300,6 +308,8 @@ async def create(
expires_after=expires_after,
file_ids=file_ids,
memory_limit=memory_limit,
+ network_policy=network_policy,
+ skills=skills,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
@@ -345,6 +355,7 @@ async def list(
*,
after: Union[str, Omit] = omit,
limit: Union[int, Omit] = omit,
+ name: Union[str, Omit] = omit,
order: Union[Literal["asc", "desc"], Omit] = omit,
**kwargs,
) -> ContainerListResponse:
@@ -355,6 +366,7 @@ async def list(
response = await self.openai_client.with_raw_response.containers.list(
after=after,
limit=limit,
+ name=name,
order=order,
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py
index a52a556e..0d3679e1 100644
--- a/portkey_ai/api_resources/apis/fine_tuning.py
+++ b/portkey_ai/api_resources/apis/fine_tuning.py
@@ -1,5 +1,6 @@
import json
from typing import Iterable, List, Literal, Optional, Union
+import typing_extensions
from portkey_ai._vendor.openai.types.fine_tuning.alpha import grader_run_params
from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
@@ -10,6 +11,7 @@
from portkey_ai.api_resources.types.finetuning_checkpoint_permissions import (
PermissionCreateResponse,
PermissionDeleteResponse,
+ PermissionListPage,
PermissionRetrieveResponse,
)
from ..._vendor.openai._types import Omit, omit
@@ -184,6 +186,9 @@ def create(
return data
+ @typing_extensions.deprecated(
+ "Retrieve is deprecated. Please swap to the paginated list method instead."
+ )
def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -207,6 +212,29 @@ def retrieve(
return data
+ def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["ascending", "descending"], Omit] = omit,
+ project_id: Union[str, Omit] = omit,
+ **kwargs,
+ ) -> PermissionListPage:
+ response = self.openai_client.with_raw_response.fine_tuning.checkpoints.permissions.list( # noqa: E501
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ after=after,
+ limit=limit,
+ order=order,
+ project_id=project_id,
+ extra_body=kwargs,
+ )
+ data = PermissionListPage(**json.loads(response.text))
+ data._headers = response.headers
+
+ return data
+
def delete(
self,
permission_id: str,
@@ -440,6 +468,9 @@ async def create(
return data
+ @typing_extensions.deprecated(
+ "Retrieve is deprecated. Please swap to the paginated list method instead."
+ )
async def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -463,6 +494,29 @@ async def retrieve(
return data
+ async def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["ascending", "descending"], Omit] = omit,
+ project_id: Union[str, Omit] = omit,
+ **kwargs,
+ ) -> PermissionListPage:
+ response = await self.openai_client.with_raw_response.fine_tuning.checkpoints.permissions.list( # noqa: E501
+ fine_tuned_model_checkpoint=fine_tuned_model_checkpoint,
+ after=after,
+ limit=limit,
+ order=order,
+ project_id=project_id,
+ extra_body=kwargs,
+ )
+ data = PermissionListPage(**json.loads(response.text))
+ data._headers = response.headers
+
+ return data
+
async def delete(
self,
permission_id: str,
diff --git a/portkey_ai/api_resources/apis/main_realtime.py b/portkey_ai/api_resources/apis/main_realtime.py
index 0e7b3dfb..b60952e5 100644
--- a/portkey_ai/api_resources/apis/main_realtime.py
+++ b/portkey_ai/api_resources/apis/main_realtime.py
@@ -28,7 +28,7 @@
ResponsePromptParam,
)
from portkey_ai._vendor.openai.types.websocket_connection_options import (
- WebsocketConnectionOptions,
+ WebSocketConnectionOptions,
)
from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
@@ -48,7 +48,7 @@ def connect(
model: str,
extra_query: Query = {},
extra_headers: Headers = {},
- websocket_connection_options: WebsocketConnectionOptions = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
) -> RealtimeConnectionManager:
return self.openai_client.realtime.connect(
model=model,
@@ -71,7 +71,7 @@ def connect(
model: str,
extra_query: Query = {},
extra_headers: Headers = {},
- websocket_connection_options: WebsocketConnectionOptions = {},
+ websocket_connection_options: WebSocketConnectionOptions = {},
) -> AsyncRealtimeConnectionManager:
return self.openai_client.realtime.connect(
model=model,
@@ -174,6 +174,7 @@ def accept(
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -183,8 +184,11 @@ def accept(
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
+ "gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
+ "gpt-audio-mini-2025-12-15",
],
Omit,
] = omit,
@@ -314,6 +318,7 @@ async def accept(
str,
Literal[
"gpt-realtime",
+ "gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
@@ -323,8 +328,11 @@ async def accept(
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
+ "gpt-realtime-mini-2025-12-15",
+ "gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
+ "gpt-audio-mini-2025-12-15",
],
Omit,
] = omit,
diff --git a/portkey_ai/api_resources/apis/responses.py b/portkey_ai/api_resources/apis/responses.py
index 547536c5..efa52863 100644
--- a/portkey_ai/api_resources/apis/responses.py
+++ b/portkey_ai/api_resources/apis/responses.py
@@ -1,11 +1,15 @@
import json
-from typing import Iterable, List, Literal, Optional, Union
+from typing import Iterable, List, Literal, Optional, Type, Union
from portkey_ai._vendor.openai._streaming import AsyncStream, Stream
from portkey_ai._vendor.openai.lib._parsing._responses import TextFormatT
from portkey_ai._vendor.openai.lib.streaming.responses._responses import (
AsyncResponseStreamManager,
ResponseStreamManager,
)
+from portkey_ai._vendor.openai.resources.responses.responses import (
+ AsyncResponsesConnectionManager,
+ ResponsesConnectionManager,
+)
from portkey_ai._vendor.openai.types.responses import (
input_token_count_params,
response_create_params,
@@ -20,6 +24,9 @@
from portkey_ai._vendor.openai.types.responses.response_input_param import (
ResponseInputParam,
)
+from portkey_ai._vendor.openai.types.responses.response_prompt_param import (
+ ResponsePromptParam,
+)
from portkey_ai._vendor.openai.types.responses.response_stream_event import (
ResponseStreamEvent,
)
@@ -34,6 +41,9 @@
from portkey_ai._vendor.openai.types.shared.chat_model import ChatModel
from portkey_ai._vendor.openai.types.shared.responses_model import ResponsesModel
from portkey_ai._vendor.openai.types.shared_params.reasoning import Reasoning
+from portkey_ai._vendor.openai.types.websocket_connection_options import (
+ WebSocketConnectionOptions,
+)
from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
from portkey_ai.api_resources.types.response_type import (
@@ -60,24 +70,40 @@ def __init__(self, client: Portkey) -> None:
def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
- stream: Union[Literal[False], Omit] = omit,
+ stream: Union[Optional[Literal[False]], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -89,24 +115,40 @@ def create(
def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
stream: Literal[True],
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -118,24 +160,40 @@ def create(
def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
stream: bool,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -146,24 +204,40 @@ def create(
def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -175,22 +249,32 @@ def create(
timeout = kwargs.pop("timeout", None)
return self.openai_client.responses.create( # type: ignore[misc]
- input=input,
- model=model,
+ background=background,
+ context_management=context_management,
+ conversation=conversation,
include=include,
+ input=input,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
+ model=model,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream=stream, # type: ignore[arg-type]
+ stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
tools=tools,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
@@ -200,6 +284,7 @@ def create(
timeout=timeout,
)
+ @overload
def retrieve(
self,
response_id: str,
@@ -207,18 +292,71 @@ def retrieve(
include: Union[List[ResponseIncludable], Omit] = omit,
include_obfuscation: Union[bool, Omit] = omit,
starting_after: Union[int, Omit] = omit,
+ stream: Union[Literal[False], Omit] = omit,
**kwargs,
) -> ResponseType:
+ ...
+
+ @overload
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: Literal[True],
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ **kwargs,
+ ) -> Stream[ResponseStreamEvent]:
+ ...
+
+ @overload
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: bool,
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ **kwargs,
+ ) -> Union[ResponseType, Stream[ResponseStreamEvent]]:
+ ...
+
+ def retrieve(
+ self,
+ response_id: str,
+ *,
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ stream: Union[Literal[False], Literal[True], Omit] = omit,
+ **kwargs,
+ ) -> Union[ResponseType, Stream[ResponseStreamEvent]]:
extra_headers = kwargs.pop("extra_headers", None)
extra_query = kwargs.pop("extra_query", None)
extra_body = kwargs.pop("extra_body", None)
timeout = kwargs.pop("timeout", None)
+ if stream is True:
+ return self.openai_client.responses.retrieve(
+ response_id=response_id,
+ stream=stream,
+ include=include,
+ include_obfuscation=include_obfuscation,
+ starting_after=starting_after,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
response = self.openai_client.with_raw_response.responses.retrieve(
response_id=response_id,
include=include,
include_obfuscation=include_obfuscation,
starting_after=starting_after,
+ stream=stream,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -244,58 +382,143 @@ def delete(self, response_id: str, **kwargs) -> None:
timeout=timeout,
)
+ @overload
+ def stream(
+ self,
+ *,
+ response_id: str,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ tools: Union[Iterable[ParseableToolParam], Omit] = omit,
+ **kwargs,
+ ) -> ResponseStreamManager[TextFormatT]:
+ ...
+
+ @overload
def stream(
self,
*,
input: Union[str, ResponseInputParam],
- model: Union[str, ChatModel],
- text_format: Union[type[TextFormatT], Omit] = omit, # type: ignore[type-arg]
+ model: ResponsesModel,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
tools: Union[Iterable[ParseableToolParam], Omit] = omit,
- include: Union[List[ResponseIncludable], Omit] = omit,
- instructions: Union[str, Omit] = omit,
- max_output_tokens: Union[int, Omit] = omit,
- metadata: Union[Metadata, Omit] = omit,
- parallel_tool_calls: Union[bool, Omit] = omit,
- previous_response_id: Union[str, Omit] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
- reasoning: Union[Reasoning, Omit] = omit,
- store: Union[bool, Omit] = omit,
- stream_options: Union[response_create_params.StreamOptions, Omit] = omit,
- temperature: Union[float, Omit] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
- top_p: Union[float, Omit] = omit,
- truncation: Union[Literal["auto", "disabled"], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
+ user: Union[str, Omit] = omit,
+ **kwargs,
+ ) -> ResponseStreamManager[TextFormatT]:
+ ...
+
+ def stream(
+ self,
+ *,
+ response_id: Union[str, Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
+ tools: Union[Iterable[ParseableToolParam], Omit] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
+ prompt_cache_retention: Union[
+ Optional[Literal["in-memory", "24h"]], Omit
+ ] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
+ text: Union[ResponseTextConfigParam, Omit] = omit,
+ tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
**kwargs,
) -> ResponseStreamManager[TextFormatT]:
extra_headers = kwargs.pop("extra_headers", None)
extra_query = kwargs.pop("extra_query", None)
extra_body = kwargs.pop("extra_body", None)
timeout = kwargs.pop("timeout", None)
- return self.openai_client.responses.stream(
+ return self.openai_client.responses.stream( # type: ignore[call-overload, misc]
+ response_id=response_id,
input=input,
model=model,
+ background=background,
+ context_management=context_management,
text_format=text_format,
tools=tools,
+ conversation=conversation,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
+ starting_after=starting_after,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -305,28 +528,45 @@ def stream(
def parse(
self,
*,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[str, ChatModel, Omit] = omit,
- text_format: Union[type[TextFormatT], Omit] = omit, # type: ignore[type-arg]
- tools: Union[Iterable[ParseableToolParam], Omit] = omit,
- include: Union[List[ResponseIncludable], Omit] = omit,
- instructions: Union[str, Omit] = omit,
- max_output_tokens: Union[int, Omit] = omit,
- metadata: Union[Metadata, Omit] = omit,
- parallel_tool_calls: Union[bool, Omit] = omit,
- previous_response_id: Union[str, Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
- reasoning: Union[Reasoning, Omit] = omit,
- store: Union[bool, Omit] = omit,
- stream: Union[Literal[False], Literal[True], Omit] = omit,
- temperature: Union[float, Omit] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
- top_p: Union[float, Omit] = omit,
- truncation: Union[Literal["auto", "disabled"], Omit] = omit,
+ tools: Union[Iterable[ParseableToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
+ verbosity: Union[Optional[Literal["low", "medium", "high"]], Omit] = omit,
**kwargs,
) -> ParsedResponse[TextFormatT]:
extra_headers = kwargs.pop("extra_headers", None)
@@ -335,26 +575,37 @@ def parse(
timeout = kwargs.pop("timeout", None)
return self.openai_client.responses.parse(
+ text_format=text_format,
+ background=background,
+ context_management=context_management,
+ conversation=conversation,
input=input,
model=model,
- text_format=text_format,
tools=tools,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream=stream,
+ stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
+ verbosity=verbosity,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -381,10 +632,108 @@ def cancel(
def compact(
self,
*,
- model: Union[str, Omit] = omit,
+ model: Union[
+ Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
+ "gpt-5.2",
+ "gpt-5.2-2025-12-11",
+ "gpt-5.2-chat-latest",
+ "gpt-5.2-pro",
+ "gpt-5.2-pro-2025-12-11",
+ "gpt-5.1",
+ "gpt-5.1-2025-11-13",
+ "gpt-5.1-codex",
+ "gpt-5.1-mini",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-5-2025-08-07",
+ "gpt-5-mini-2025-08-07",
+ "gpt-5-nano-2025-08-07",
+ "gpt-5-chat-latest",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "o4-mini",
+ "o4-mini-2025-04-16",
+ "o3",
+ "o3-2025-04-16",
+ "o3-mini",
+ "o3-mini-2025-01-31",
+ "o1",
+ "o1-2024-12-17",
+ "o1-preview",
+ "o1-preview-2024-09-12",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-audio-preview",
+ "gpt-4o-audio-preview-2024-10-01",
+ "gpt-4o-audio-preview-2024-12-17",
+ "gpt-4o-audio-preview-2025-06-03",
+ "gpt-4o-mini-audio-preview",
+ "gpt-4o-mini-audio-preview-2024-12-17",
+ "gpt-4o-search-preview",
+ "gpt-4o-mini-search-preview",
+ "gpt-4o-search-preview-2025-03-11",
+ "gpt-4o-mini-search-preview-2025-03-11",
+ "chatgpt-4o-latest",
+ "codex-mini-latest",
+ "gpt-4o-mini",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-turbo-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-16k-0613",
+ "o1-pro",
+ "o1-pro-2025-03-19",
+ "o3-pro",
+ "o3-pro-2025-06-10",
+ "o3-deep-research",
+ "o3-deep-research-2025-06-26",
+ "o4-mini-deep-research",
+ "o4-mini-deep-research-2025-06-26",
+ "computer-use-preview",
+ "computer-use-preview-2025-03-11",
+ "gpt-5-codex",
+ "gpt-5-pro",
+ "gpt-5-pro-2025-10-06",
+ "gpt-5.1-codex-max",
+ ],
+ str,
+ None,
+ ],
input: Union[str, Iterable[ResponseInputItemParam], None, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt_cache_key: Union[Optional[str], Omit] = omit,
**kwargs,
) -> CompactedResponse:
import json
@@ -398,6 +747,7 @@ def compact(
input=input,
instructions=instructions,
previous_response_id=previous_response_id,
+ prompt_cache_key=prompt_cache_key,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -407,6 +757,18 @@ def compact(
data._headers = response.headers
return data
+ def connect(
+ self,
+ *,
+ websocket_connection_options: WebSocketConnectionOptions = {},
+ **kwargs,
+ ) -> ResponsesConnectionManager:
+ return self.openai_client.responses.connect(
+ websocket_connection_options=websocket_connection_options,
+ extra_headers=self.openai_client.default_headers,
+ **kwargs,
+ )
+
class InputItems(APIResource):
def __init__(self, client: Portkey) -> None:
@@ -503,24 +865,40 @@ def __init__(self, client: AsyncPortkey) -> None:
async def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
stream: Union[Literal[False], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -532,24 +910,40 @@ async def create(
async def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
stream: Literal[True],
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -561,24 +955,40 @@ async def create(
async def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
stream: bool,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -589,24 +999,40 @@ async def create(
async def create(
self,
*,
- input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[ResponsesModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
metadata: Union[Optional[Metadata], Omit] = omit,
+ model: Union[ResponsesModel, Omit] = omit,
parallel_tool_calls: Union[Optional[bool], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
store: Union[Optional[bool], Omit] = omit,
stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
tools: Union[Iterable[ToolParam], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
top_p: Union[Optional[float], Omit] = omit,
truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
@@ -618,22 +1044,32 @@ async def create(
timeout = kwargs.pop("timeout", None)
return await self.openai_client.responses.create( # type: ignore[misc]
- input=input,
- model=model,
+ background=background,
+ context_management=context_management,
+ conversation=conversation,
include=include,
+ input=input,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
+ model=model,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream=stream, # type: ignore[arg-type]
+ stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
tools=tools,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
@@ -643,6 +1079,7 @@ async def create(
timeout=timeout,
)
+ @overload
async def retrieve(
self,
response_id: str,
@@ -650,18 +1087,71 @@ async def retrieve(
include: Union[List[ResponseIncludable], Omit] = omit,
include_obfuscation: Union[bool, Omit] = omit,
starting_after: Union[int, Omit] = omit,
+ stream: Union[Literal[False], Omit] = omit,
**kwargs,
) -> ResponseType:
+ ...
+
+ @overload
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: Literal[True],
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ **kwargs,
+ ) -> AsyncStream[ResponseStreamEvent]:
+ ...
+
+ @overload
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ stream: bool,
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ **kwargs,
+ ) -> Union[ResponseType, AsyncStream[ResponseStreamEvent]]:
+ ...
+
+ async def retrieve(
+ self,
+ response_id: str,
+ *,
+ include: Union[List[ResponseIncludable], Omit] = omit,
+ include_obfuscation: Union[bool, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ stream: Union[Literal[False], Literal[True], Omit] = omit,
+ **kwargs,
+ ) -> Union[ResponseType, AsyncStream[ResponseStreamEvent]]:
extra_headers = kwargs.pop("extra_headers", None)
extra_query = kwargs.pop("extra_query", None)
extra_body = kwargs.pop("extra_body", None)
timeout = kwargs.pop("timeout", None)
+ if stream is True:
+ return await self.openai_client.responses.retrieve(
+ response_id=response_id,
+ stream=stream,
+ include=include,
+ include_obfuscation=include_obfuscation,
+ starting_after=starting_after,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
response = await self.openai_client.with_raw_response.responses.retrieve(
response_id=response_id,
include=include,
include_obfuscation=include_obfuscation,
starting_after=starting_after,
+ stream=stream,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -687,58 +1177,143 @@ async def delete(self, response_id: str, **kwargs) -> None:
timeout=timeout,
)
+ @overload
+ def stream(
+ self,
+ *,
+ response_id: str,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
+ tools: Union[Iterable[ParseableToolParam], Omit] = omit,
+ **kwargs,
+ ) -> AsyncResponseStreamManager[TextFormatT]:
+ ...
+
+ @overload
def stream(
self,
*,
input: Union[str, ResponseInputParam],
model: Union[str, ChatModel],
- text_format: Union[type[TextFormatT], Omit] = omit, # type: ignore[type-arg]
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
tools: Union[Iterable[ParseableToolParam], Omit] = omit,
- include: Union[List[ResponseIncludable], Omit] = omit,
- instructions: Union[str, Omit] = omit,
- max_output_tokens: Union[int, Omit] = omit,
- metadata: Union[Metadata, Omit] = omit,
- parallel_tool_calls: Union[bool, Omit] = omit,
- previous_response_id: Union[str, Omit] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
- reasoning: Union[Reasoning, Omit] = omit,
- store: Union[bool, Omit] = omit,
- stream_options: Union[response_create_params.StreamOptions, Omit] = omit,
- temperature: Union[float, Omit] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
- top_p: Union[float, Omit] = omit,
- truncation: Union[Literal["auto", "disabled"], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
+ user: Union[str, Omit] = omit,
+ **kwargs,
+ ) -> AsyncResponseStreamManager[TextFormatT]:
+ ...
+
+ def stream(
+ self,
+ *,
+ response_id: Union[str, Omit] = omit,
+ input: Union[str, ResponseInputParam, Omit] = omit,
+ model: Union[str, ChatModel, Omit] = omit,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
+ tools: Union[Iterable[ParseableToolParam], Omit] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
+ prompt_cache_retention: Union[
+ Optional[Literal["in-memory", "24h"]], Omit
+ ] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
+ text: Union[ResponseTextConfigParam, Omit] = omit,
+ tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
+ starting_after: Union[int, Omit] = omit,
**kwargs,
) -> AsyncResponseStreamManager[TextFormatT]:
extra_headers = kwargs.pop("extra_headers", None)
extra_query = kwargs.pop("extra_query", None)
extra_body = kwargs.pop("extra_body", None)
timeout = kwargs.pop("timeout", None)
- return self.openai_client.responses.stream(
+ return self.openai_client.responses.stream( # type: ignore[call-overload, misc]
+ response_id=response_id,
input=input,
model=model,
+ background=background,
+ context_management=context_management,
text_format=text_format,
tools=tools,
+ conversation=conversation,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
+ starting_after=starting_after,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -748,28 +1323,45 @@ def stream(
async def parse(
self,
*,
+ background: Union[Optional[bool], Omit] = omit,
+ context_management: Union[
+ Optional[Iterable[response_create_params.ContextManagement]], Omit
+ ] = omit,
+ conversation: Union[Optional[response_create_params.Conversation], Omit] = omit,
input: Union[str, ResponseInputParam, Omit] = omit,
- model: Union[str, ChatModel, Omit] = omit,
- text_format: Union[type[TextFormatT], Omit] = omit, # type: ignore[type-arg]
+ model: Union[ResponsesModel, Omit] = omit,
+ text_format: Union[Type[TextFormatT], Omit] = omit,
tools: Union[Iterable[ParseableToolParam], Omit] = omit,
- include: Union[List[ResponseIncludable], Omit] = omit,
- instructions: Union[str, Omit] = omit,
- max_output_tokens: Union[int, Omit] = omit,
- metadata: Union[Metadata, Omit] = omit,
- parallel_tool_calls: Union[bool, Omit] = omit,
- previous_response_id: Union[str, Omit] = omit,
+ include: Union[Optional[List[ResponseIncludable]], Omit] = omit,
+ instructions: Union[Optional[str], Omit] = omit,
+ max_output_tokens: Union[Optional[int], Omit] = omit,
+ max_tool_calls: Union[Optional[int], Omit] = omit,
+ metadata: Union[Optional[Metadata], Omit] = omit,
+ parallel_tool_calls: Union[Optional[bool], Omit] = omit,
+ previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt: Union[Optional[ResponsePromptParam], Omit] = omit,
+ prompt_cache_key: Union[str, Omit] = omit,
prompt_cache_retention: Union[
Optional[Literal["in-memory", "24h"]], Omit
] = omit,
- reasoning: Union[Reasoning, Omit] = omit,
- store: Union[bool, Omit] = omit,
- stream: Union[Literal[False], Literal[True], Omit] = omit,
- temperature: Union[float, Omit] = omit,
+ reasoning: Union[Optional[Reasoning], Omit] = omit,
+ safety_identifier: Union[str, Omit] = omit,
+ service_tier: Union[
+ Optional[Literal["auto", "default", "flex", "scale", "priority"]], Omit
+ ] = omit,
+ store: Union[Optional[bool], Omit] = omit,
+ stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit,
+ stream_options: Union[
+ Optional[response_create_params.StreamOptions], Omit
+ ] = omit,
+ temperature: Union[Optional[float], Omit] = omit,
text: Union[ResponseTextConfigParam, Omit] = omit,
tool_choice: Union[response_create_params.ToolChoice, Omit] = omit,
- top_p: Union[float, Omit] = omit,
- truncation: Union[Literal["auto", "disabled"], Omit] = omit,
+ top_logprobs: Union[Optional[int], Omit] = omit,
+ top_p: Union[Optional[float], Omit] = omit,
+ truncation: Union[Optional[Literal["auto", "disabled"]], Omit] = omit,
user: Union[str, Omit] = omit,
+ verbosity: Union[Optional[Literal["low", "medium", "high"]], Omit] = omit,
**kwargs,
) -> ParsedResponse[TextFormatT]:
extra_headers = kwargs.pop("extra_headers", None)
@@ -778,6 +1370,9 @@ async def parse(
timeout = kwargs.pop("timeout", None)
return await self.openai_client.responses.parse(
+ background=background,
+ context_management=context_management,
+ conversation=conversation,
input=input,
model=model,
text_format=text_format,
@@ -785,19 +1380,27 @@ async def parse(
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
store=store,
stream=stream,
+ stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
+ verbosity=verbosity,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -824,10 +1427,108 @@ async def cancel(
async def compact(
self,
*,
- model: Union[str, Omit] = omit,
+ model: Union[
+ Literal[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.4-mini-2026-03-17",
+ "gpt-5.4-nano-2026-03-17",
+ "gpt-5.3-chat-latest",
+ "gpt-5.2",
+ "gpt-5.2-2025-12-11",
+ "gpt-5.2-chat-latest",
+ "gpt-5.2-pro",
+ "gpt-5.2-pro-2025-12-11",
+ "gpt-5.1",
+ "gpt-5.1-2025-11-13",
+ "gpt-5.1-codex",
+ "gpt-5.1-mini",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-5-2025-08-07",
+ "gpt-5-mini-2025-08-07",
+ "gpt-5-nano-2025-08-07",
+ "gpt-5-chat-latest",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "o4-mini",
+ "o4-mini-2025-04-16",
+ "o3",
+ "o3-2025-04-16",
+ "o3-mini",
+ "o3-mini-2025-01-31",
+ "o1",
+ "o1-2024-12-17",
+ "o1-preview",
+ "o1-preview-2024-09-12",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-audio-preview",
+ "gpt-4o-audio-preview-2024-10-01",
+ "gpt-4o-audio-preview-2024-12-17",
+ "gpt-4o-audio-preview-2025-06-03",
+ "gpt-4o-mini-audio-preview",
+ "gpt-4o-mini-audio-preview-2024-12-17",
+ "gpt-4o-search-preview",
+ "gpt-4o-mini-search-preview",
+ "gpt-4o-search-preview-2025-03-11",
+ "gpt-4o-mini-search-preview-2025-03-11",
+ "chatgpt-4o-latest",
+ "codex-mini-latest",
+ "gpt-4o-mini",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-turbo-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-16k-0613",
+ "o1-pro",
+ "o1-pro-2025-03-19",
+ "o3-pro",
+ "o3-pro-2025-06-10",
+ "o3-deep-research",
+ "o3-deep-research-2025-06-26",
+ "o4-mini-deep-research",
+ "o4-mini-deep-research-2025-06-26",
+ "computer-use-preview",
+ "computer-use-preview-2025-03-11",
+ "gpt-5-codex",
+ "gpt-5-pro",
+ "gpt-5-pro-2025-10-06",
+ "gpt-5.1-codex-max",
+ ],
+ str,
+ None,
+ ],
input: Union[str, Iterable[ResponseInputItemParam], None, Omit] = omit,
instructions: Union[Optional[str], Omit] = omit,
previous_response_id: Union[Optional[str], Omit] = omit,
+ prompt_cache_key: Union[Optional[str], Omit] = omit,
**kwargs,
) -> CompactedResponse:
import json
@@ -841,6 +1542,7 @@ async def compact(
input=input,
instructions=instructions,
previous_response_id=previous_response_id,
+ prompt_cache_key=prompt_cache_key,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body={**(extra_body or {}), **kwargs},
@@ -850,6 +1552,18 @@ async def compact(
data._headers = response.headers
return data
+ def connect(
+ self,
+ *,
+ websocket_connection_options: WebSocketConnectionOptions = {},
+ **kwargs,
+ ) -> AsyncResponsesConnectionManager:
+ return self.openai_client.responses.connect(
+ websocket_connection_options=websocket_connection_options,
+ extra_headers=self.openai_client.default_headers,
+ **kwargs,
+ )
+
class AsyncInputItems(AsyncAPIResource):
def __init__(self, client: AsyncPortkey) -> None:
diff --git a/portkey_ai/api_resources/apis/skills.py b/portkey_ai/api_resources/apis/skills.py
new file mode 100644
index 00000000..b94d64ab
--- /dev/null
+++ b/portkey_ai/api_resources/apis/skills.py
@@ -0,0 +1,574 @@
+import json
+from typing import List, Union
+from typing_extensions import Literal
+
+from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
+from portkey_ai.api_resources.client import AsyncPortkey, Portkey
+from portkey_ai.api_resources.types.skills_type import (
+ Skill,
+ SkillDeleted,
+ SkillList,
+ SkillVersion,
+ SkillVersionDeleted,
+ SkillVersionList,
+)
+from ..._vendor.openai._types import NOT_GIVEN, FileTypes, NotGiven, Omit, omit
+from ..._vendor.openai._legacy_response import HttpxBinaryResponseContent
+
+
+class SkillsContent(APIResource):
+ def __init__(self, client: Portkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+
+ def retrieve(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> HttpxBinaryResponseContent:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ return self.openai_client.skills.content.retrieve(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
+
+class AsyncSkillsContent(AsyncAPIResource):
+ def __init__(self, client: AsyncPortkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+
+ async def retrieve(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> HttpxBinaryResponseContent:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ return await self.openai_client.skills.content.retrieve(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
+
+class SkillsVersionsContent(APIResource):
+ def __init__(self, client: Portkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+
+ def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> HttpxBinaryResponseContent:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ return self.openai_client.skills.versions.content.retrieve(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
+
+class AsyncSkillsVersionsContent(AsyncAPIResource):
+ def __init__(self, client: AsyncPortkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+
+ async def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> HttpxBinaryResponseContent:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ return await self.openai_client.skills.versions.content.retrieve(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+
+
+class SkillsVersions(APIResource):
+ content: SkillsVersionsContent
+
+ def __init__(self, client: Portkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+ self.content = SkillsVersionsContent(client)
+
+ def create(
+ self,
+ skill_id: str,
+ *,
+ default: Union[bool, Omit] = omit,
+ files: Union[List[FileTypes], FileTypes, Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersion:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.versions.create(
+ skill_id,
+ default=default,
+ files=files, # type: ignore[arg-type]
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersion(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersion:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.versions.retrieve(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersion(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def list(
+ self,
+ skill_id: str,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["asc", "desc"], Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersionList:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.versions.list(
+ skill_id,
+ after=after,
+ limit=limit,
+ order=order,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersionList(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def delete(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersionDeleted:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.versions.delete(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersionDeleted(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+
+class AsyncSkillsVersions(AsyncAPIResource):
+ content: AsyncSkillsVersionsContent
+
+ def __init__(self, client: AsyncPortkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+ self.content = AsyncSkillsVersionsContent(client)
+
+ async def create(
+ self,
+ skill_id: str,
+ *,
+ default: Union[bool, Omit] = omit,
+ files: Union[List[FileTypes], FileTypes, Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersion:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.versions.create(
+ skill_id,
+ default=default,
+ files=files, # type: ignore[arg-type]
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersion(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def retrieve(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersion:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.versions.retrieve(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersion(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def list(
+ self,
+ skill_id: str,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["asc", "desc"], Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersionList:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.versions.list(
+ skill_id,
+ after=after,
+ limit=limit,
+ order=order,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersionList(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def delete(
+ self,
+ version: str,
+ *,
+ skill_id: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillVersionDeleted:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.versions.delete(
+ version,
+ skill_id=skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillVersionDeleted(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+
+class Skills(APIResource):
+ content: SkillsContent
+ versions: SkillsVersions
+
+ def __init__(self, client: Portkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+ self.content = SkillsContent(client)
+ self.versions = SkillsVersions(client)
+
+ def create(
+ self,
+ *,
+ files: Union[List[FileTypes], FileTypes, Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.create(
+ files=files, # type: ignore[arg-type]
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def retrieve(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.retrieve(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def update(
+ self,
+ skill_id: str,
+ *,
+ default_version: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.update(
+ skill_id,
+ default_version=default_version,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def list(
+ self,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["asc", "desc"], Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillList:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.list(
+ after=after,
+ limit=limit,
+ order=order,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillList(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def delete(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillDeleted:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = self.openai_client.with_raw_response.skills.delete(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillDeleted(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+
+class AsyncSkills(AsyncAPIResource):
+ content: AsyncSkillsContent
+ versions: AsyncSkillsVersions
+
+ def __init__(self, client: AsyncPortkey) -> None:
+ super().__init__(client)
+ self.openai_client = client.openai_client
+ self.content = AsyncSkillsContent(client)
+ self.versions = AsyncSkillsVersions(client)
+
+ async def create(
+ self,
+ *,
+ files: Union[List[FileTypes], FileTypes, Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.create(
+ files=files, # type: ignore[arg-type]
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def retrieve(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.retrieve(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def update(
+ self,
+ skill_id: str,
+ *,
+ default_version: str,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> Skill:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.update(
+ skill_id,
+ default_version=default_version,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = Skill(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def list(
+ self,
+ *,
+ after: Union[str, Omit] = omit,
+ limit: Union[int, Omit] = omit,
+ order: Union[Literal["asc", "desc"], Omit] = omit,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillList:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.list(
+ after=after,
+ limit=limit,
+ order=order,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillList(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def delete(
+ self,
+ skill_id: str,
+ *,
+ timeout: Union[float, NotGiven] = NOT_GIVEN,
+ **kwargs,
+ ) -> SkillDeleted:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ response = await self.openai_client.with_raw_response.skills.delete(
+ skill_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
+ )
+ data = SkillDeleted(**json.loads(response.text))
+ data._headers = response.headers
+ return data
diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py
index 80fddc13..ed51717d 100644
--- a/portkey_ai/api_resources/apis/vector_stores.py
+++ b/portkey_ai/api_resources/apis/vector_stores.py
@@ -5,8 +5,9 @@
from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
from portkey_ai.api_resources.client import AsyncPortkey, Portkey
from portkey_ai.api_resources.types.shared_types import Metadata
-from ..._vendor.openai._types import Omit, omit, FileTypes
+from ..._vendor.openai._types import Omit, SequenceNotStr, omit, FileTypes
from ..._vendor.openai.types import (
+ FileChunkingStrategyParam,
vector_store_create_params,
vector_store_update_params,
)
@@ -233,17 +234,23 @@ def create_and_poll(
chunking_strategy: Union[Any, Omit] = omit,
**kwargs,
) -> VectorStoreFile:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
response = self.openai_client.vector_stores.files.create_and_poll(
file_id=file_id,
vector_store_id=vector_store_id,
attributes=attributes,
poll_interval_ms=poll_interval_ms,
chunking_strategy=chunking_strategy,
- **kwargs,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {}), **kwargs},
+ timeout=timeout,
)
- data = response
-
- return data # type: ignore[return-value]
+ return response # type: ignore[return-value]
def poll(
self,
@@ -368,20 +375,31 @@ def create_and_poll(
self,
vector_store_id: str,
*,
- file_ids: List[str],
+ attributes: Union[Optional[Dict[str, Union[str, float, bool]]], Omit] = omit,
+ chunking_strategy: Union[FileChunkingStrategyParam, Omit] = omit,
+ file_ids: Union[SequenceNotStr[str], Omit] = omit,
+ files: Union[Iterable[file_batch_create_params.File], Omit] = omit,
poll_interval_ms: Union[int, Omit] = omit,
- chunking_strategy: Union[Any, Omit] = omit,
**kwargs,
) -> VectorStoreFileBatch:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
response = self.openai_client.vector_stores.file_batches.create_and_poll(
vector_store_id=vector_store_id,
+ attributes=attributes,
+ chunking_strategy=chunking_strategy,
file_ids=file_ids,
+ files=files,
poll_interval_ms=poll_interval_ms,
- chunking_strategy=chunking_strategy,
- **kwargs,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
)
- data = response
- return data # type: ignore[return-value]
+ return response # type: ignore[return-value]
@typing.no_type_check
def list_files(
@@ -668,11 +686,20 @@ async def create_and_poll(
chunking_strategy: Union[Any, Omit] = omit,
**kwargs,
) -> VectorStoreFile:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
response = await self.openai_client.vector_stores.files.create_and_poll(
file_id=file_id,
vector_store_id=vector_store_id,
poll_interval_ms=poll_interval_ms,
chunking_strategy=chunking_strategy,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
**kwargs,
)
data = response
@@ -796,17 +823,29 @@ async def create_and_poll(
self,
vector_store_id: str,
*,
- file_ids: List[str],
+ attributes: Union[Optional[Dict[str, Union[str, float, bool]]], Omit] = omit,
+ chunking_strategy: Union[FileChunkingStrategyParam, Omit] = omit,
+ file_ids: Union[SequenceNotStr[str], Omit] = omit,
+ files: Union[Iterable[file_batch_create_params.File], Omit] = omit,
poll_interval_ms: Union[int, Omit] = omit,
- chunking_strategy: Union[Any, Omit] = omit,
**kwargs,
) -> VectorStoreFileBatch:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
response = await self.openai_client.vector_stores.file_batches.create_and_poll(
vector_store_id=vector_store_id,
+ attributes=attributes,
+ chunking_strategy=chunking_strategy,
file_ids=file_ids,
+ files=files,
poll_interval_ms=poll_interval_ms,
- chunking_strategy=chunking_strategy,
- **kwargs,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
)
data = response
diff --git a/portkey_ai/api_resources/apis/videos.py b/portkey_ai/api_resources/apis/videos.py
index ef810cc4..aed3b368 100644
--- a/portkey_ai/api_resources/apis/videos.py
+++ b/portkey_ai/api_resources/apis/videos.py
@@ -2,6 +2,11 @@
from typing import Any, Literal, Optional, Union
import httpx
+from portkey_ai._vendor.openai.types import (
+ video_create_params,
+ video_edit_params,
+ video_extend_params,
+)
from portkey_ai._vendor.openai.types.video_model_param import VideoModelParam
from portkey_ai._vendor.openai.types.video_seconds import VideoSeconds
from portkey_ai._vendor.openai.types.video_size import VideoSize
@@ -10,7 +15,9 @@
from portkey_ai.api_resources.types.shared_types import Body, Headers, Query
from portkey_ai.api_resources.types.videos_type import (
Video,
+ VideoCreateCharacterResponse,
VideoDeleteResponse,
+ VideoGetCharacterResponse,
VideoList,
)
from ..._vendor.openai._types import FileTypes, NotGiven, Omit, not_given, omit
@@ -25,7 +32,7 @@ def create(
self,
*,
prompt: str,
- input_reference: Union[FileTypes, Omit] = omit,
+ input_reference: Union[video_create_params.InputReference, Omit] = omit,
model: Union[VideoModelParam, Omit] = omit,
seconds: Union[VideoSeconds, Omit] = omit,
size: Union[VideoSize, Omit] = omit,
@@ -54,7 +61,7 @@ def create_and_poll(
self,
*,
prompt: str,
- input_reference: Union[FileTypes, Omit] = omit,
+ input_reference: Union[video_create_params.InputReference, Omit] = omit,
model: Union[VideoModelParam, Omit] = omit,
seconds: Union[VideoSeconds, Omit] = omit,
size: Union[VideoSize, Omit] = omit,
@@ -155,6 +162,30 @@ def delete(
data._headers = response.headers
return data
+ def create_character(
+ self,
+ *,
+ name: str,
+ video: FileTypes,
+ **kwargs,
+ ) -> VideoCreateCharacterResponse:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = self.openai_client.with_raw_response.videos.create_character(
+ name=name,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = VideoCreateCharacterResponse(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
def download_content(
self,
video_id: str,
@@ -175,6 +206,77 @@ def download_content(
)
return response
+ def edit(
+ self,
+ *,
+ prompt: str,
+ video: video_edit_params.Video,
+ **kwargs,
+ ) -> Video:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = self.openai_client.with_raw_response.videos.edit(
+ prompt=prompt,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = Video(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def extend(
+ self,
+ *,
+ prompt: str,
+ seconds: VideoSeconds,
+        video: video_extend_params.Video,
+ **kwargs,
+ ) -> Video:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = self.openai_client.with_raw_response.videos.extend(
+ prompt=prompt,
+ seconds=seconds,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = Video(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ def get_character(
+ self,
+ character_id: str,
+ **kwargs,
+ ) -> VideoGetCharacterResponse:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = self.openai_client.with_raw_response.videos.get_character(
+ character_id=character_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = VideoGetCharacterResponse(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
def remix(
self,
video_id: str,
@@ -207,7 +309,7 @@ async def create(
self,
*,
prompt: str,
- input_reference: Union[FileTypes, Omit] = omit,
+ input_reference: Union[video_create_params.InputReference, Omit] = omit,
model: Union[VideoModelParam, Omit] = omit,
seconds: Union[VideoSeconds, Omit] = omit,
size: Union[VideoSize, Omit] = omit,
@@ -236,7 +338,7 @@ async def create_and_poll(
self,
*,
prompt: str,
- input_reference: Union[FileTypes, Omit] = omit,
+ input_reference: Union[video_create_params.InputReference, Omit] = omit,
model: Union[VideoModelParam, Omit] = omit,
seconds: Union[VideoSeconds, Omit] = omit,
size: Union[VideoSize, Omit] = omit,
@@ -337,6 +439,30 @@ async def delete(
data._headers = response.headers
return data
+ async def create_character(
+ self,
+ *,
+ name: str,
+ video: FileTypes,
+ **kwargs,
+ ) -> VideoCreateCharacterResponse:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = await self.openai_client.with_raw_response.videos.create_character(
+ name=name,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = VideoCreateCharacterResponse(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
async def download_content(
self,
video_id: str,
@@ -357,6 +483,77 @@ async def download_content(
)
return response
+ async def edit(
+ self,
+ *,
+ prompt: str,
+ video: video_edit_params.Video,
+ **kwargs,
+ ) -> Video:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = await self.openai_client.with_raw_response.videos.edit(
+ prompt=prompt,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = Video(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def extend(
+ self,
+ *,
+ prompt: str,
+ seconds: VideoSeconds,
+ video: video_extend_params.Video,
+ **kwargs,
+ ) -> Video:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = await self.openai_client.with_raw_response.videos.extend(
+ prompt=prompt,
+ seconds=seconds,
+ video=video,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = Video(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
+ async def get_character(
+ self,
+ character_id: str,
+ **kwargs,
+ ) -> VideoGetCharacterResponse:
+ extra_headers = kwargs.pop("extra_headers", None)
+ extra_query = kwargs.pop("extra_query", None)
+ extra_body = kwargs.pop("extra_body", None)
+ timeout = kwargs.pop("timeout", None)
+
+ response = await self.openai_client.with_raw_response.videos.get_character(
+ character_id=character_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body={**(extra_body or {})},
+ timeout=timeout,
+ )
+ data = VideoGetCharacterResponse(**json.loads(response.text))
+ data._headers = response.headers
+ return data
+
async def remix(
self,
video_id: str,
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index 9d5b6ea9..4558d2a3 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -42,6 +42,7 @@ class Portkey(APIClient):
realtime: apis.MainRealtime
conversations: apis.Conversations
videos: apis.Videos
+ skills: apis.Skills
analytics: apis.Analytics
mcp_servers: apis.McpServers
mcp_integrations: apis.McpIntegrations
@@ -197,6 +198,7 @@ def __init__(
self.realtime = apis.MainRealtime(self)
self.conversations = apis.Conversations(self)
self.videos = apis.Videos(self)
+ self.skills = apis.Skills(self)
self.analytics = apis.Analytics(self)
self.mcp_servers = apis.McpServers(self)
self.mcp_integrations = apis.McpIntegrations(self)
@@ -386,6 +388,7 @@ class AsyncPortkey(AsyncAPIClient):
realtime: apis.AsyncMainRealtime
conversations: apis.AsyncConversations
videos: apis.AsyncVideos
+ skills: apis.AsyncSkills
analytics: apis.AsyncAnalytics
mcp_servers: apis.AsyncMcpServers
mcp_integrations: apis.AsyncMcpIntegrations
@@ -541,6 +544,7 @@ def __init__(
self.realtime = apis.AsyncMainRealtime(self)
self.conversations = apis.AsyncConversations(self)
self.videos = apis.AsyncVideos(self)
+ self.skills = apis.AsyncSkills(self)
self.analytics = apis.AsyncAnalytics(self)
self.mcp_servers = apis.AsyncMcpServers(self)
self.mcp_integrations = apis.AsyncMcpIntegrations(self)
diff --git a/portkey_ai/api_resources/types/finetuning_checkpoint_permissions.py b/portkey_ai/api_resources/types/finetuning_checkpoint_permissions.py
index 9ef1f6cb..4fb131ed 100644
--- a/portkey_ai/api_resources/types/finetuning_checkpoint_permissions.py
+++ b/portkey_ai/api_resources/types/finetuning_checkpoint_permissions.py
@@ -56,6 +56,27 @@ def get_headers(self) -> Optional[Dict[str, str]]:
return parse_headers(self._headers)
+class PermissionListResponse(BaseModel, extra="allow"):
+ id: str
+ created_at: int
+ object: str
+ project_id: str
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
class PermissionDeleteResponse(BaseModel, extra="allow"):
id: Optional[str] = None
deleted: Optional[bool] = None
@@ -74,3 +95,25 @@ def get(self, key: str, default: Optional[Any] = None):
def get_headers(self) -> Optional[Dict[str, str]]:
return parse_headers(self._headers)
+
+
+class PermissionListPage(BaseModel, extra="allow"):
+ data: Optional[List[PermissionListResponse]] = None
+ has_more: Optional[bool] = None
+ object: Optional[str] = None
+ first_id: Optional[str] = None
+ last_id: Optional[str] = None
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
diff --git a/portkey_ai/api_resources/types/skills_type.py b/portkey_ai/api_resources/types/skills_type.py
new file mode 100644
index 00000000..296c14c2
--- /dev/null
+++ b/portkey_ai/api_resources/types/skills_type.py
@@ -0,0 +1,143 @@
+import json
+from typing import Any, Dict, List, Literal, Optional
+
+import httpx
+from pydantic import BaseModel, PrivateAttr
+
+from .utils import parse_headers
+
+__all__ = [
+ "Skill",
+ "SkillDeleted",
+ "SkillList",
+ "SkillVersion",
+ "SkillVersionDeleted",
+ "SkillVersionList",
+]
+
+
+class Skill(BaseModel, extra="allow"):
+ id: str
+ created_at: int
+ default_version: str
+ description: str
+ latest_version: str
+ name: str
+ object: Literal["skill"]
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class SkillDeleted(BaseModel, extra="allow"):
+ id: str
+ deleted: bool
+ object: Literal["skill.deleted"]
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class SkillList(BaseModel, extra="allow"):
+ object: Optional[str] = None
+ data: Optional[List[Skill]] = None
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class SkillVersion(BaseModel, extra="allow"):
+ id: str
+ created_at: int
+ description: str
+ name: str
+ object: Literal["skill.version"]
+ skill_id: str
+ version: str
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class SkillVersionDeleted(BaseModel, extra="allow"):
+ id: str
+ deleted: bool
+ object: Literal["skill.version.deleted"]
+ version: str
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class SkillVersionList(BaseModel, extra="allow"):
+ object: Optional[str] = None
+ data: Optional[List[SkillVersion]] = None
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
diff --git a/portkey_ai/api_resources/types/videos_type.py b/portkey_ai/api_resources/types/videos_type.py
index 3962faf9..7c768b04 100644
--- a/portkey_ai/api_resources/types/videos_type.py
+++ b/portkey_ai/api_resources/types/videos_type.py
@@ -77,3 +77,43 @@ def get(self, key: str, default: Optional[Any] = None):
def get_headers(self) -> Optional[Dict[str, str]]:
return parse_headers(self._headers)
+
+
+class VideoCreateCharacterResponse(BaseModel, extra="allow"):
+ id: str
+ created_at: int
+ name: str
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
+
+
+class VideoGetCharacterResponse(BaseModel, extra="allow"):
+ id: str
+ created_at: int
+ name: str
+ _headers: Optional[httpx.Headers] = PrivateAttr()
+
+ def __str__(self):
+ del self._headers
+ return json.dumps(self.dict(), indent=4)
+
+ def __getitem__(self, key):
+ return getattr(self, key, None)
+
+ def get(self, key: str, default: Optional[Any] = None):
+ return getattr(self, key, None) or default
+
+ def get_headers(self) -> Optional[Dict[str, str]]:
+ return parse_headers(self._headers)
diff --git a/portkey_ai/integrations/strands.py b/portkey_ai/integrations/strands.py
index e07c81c8..3d31117b 100644
--- a/portkey_ai/integrations/strands.py
+++ b/portkey_ai/integrations/strands.py
@@ -27,7 +27,6 @@
Optional,
TYPE_CHECKING,
cast,
- List,
Type,
TypeVar,
Union,
@@ -124,13 +123,17 @@ def format_messages(
):
formatted.append({"role": role, "content": part["text"]})
elif isinstance(part, dict) and "toolUse" in part:
- formatted.append(self._format_tool_use_part(part))
+ formatted.append(
+ self._format_tool_use_part(part) # type: ignore[arg-type]
+ )
elif (
isinstance(part, dict)
and "toolResult" in part
and self._current_tool_use_id is not None
):
- formatted.append(self._format_tool_result_part(part))
+ formatted.append(
+ self._format_tool_result_part(part) # type: ignore[arg-type]
+ )
return formatted
@@ -339,13 +342,13 @@ async def stream(
state["tool_use_id"] = None
state["tool_name"] = None
- async def structured_output(
+ async def structured_output( # type: ignore[override]
self,
output_model: "Type[T]",
- prompt: List[dict[str, Any]],
+ prompt: Any,
system_prompt: Optional[str] = None,
**kwargs: Any,
- ) -> AsyncGenerator[dict[str, Union["T", Any]], None]: # type: ignore[override]
+ ) -> AsyncGenerator[dict[str, Union["T", Any]], None]:
"""Placeholder to satisfy Strands Model abstract requirements.
Note: PortkeyStrands currently focuses on streaming text/tool events via `stream`.
@@ -356,6 +359,7 @@ class to be instantiated by Strands without raising an abstract class error.
"PortkeyStrands.structured_output is not implemented yet. "
"Use model.stream(...) for streaming responses."
)
+ yield {} # Make this a generator (never reached due to raise above)
__all__ = ["PortkeyStrands"]
diff --git a/portkey_ai/version.py b/portkey_ai/version.py
index 3c00bb49..707faadc 100644
--- a/portkey_ai/version.py
+++ b/portkey_ai/version.py
@@ -1 +1 @@
-VERSION = "2.2.0"
+VERSION = "2.3.0"
diff --git a/vendorize.toml b/vendorize.toml
index 356956c6..9e1a1473 100644
--- a/vendorize.toml
+++ b/vendorize.toml
@@ -1,4 +1,4 @@
target = "portkey_ai/_vendor"
packages = [
- "openai==2.16.0"
+ "openai==2.30.0"
]
\ No newline at end of file