From 0a977fa36b6eedc9fe3deef21cf1713c6cf954c6 Mon Sep 17 00:00:00 2001 From: kamilbenkirane Date: Wed, 22 Apr 2026 11:35:51 +0200 Subject: [PATCH 1/3] feat(models): register 7 new provider models Add gpt-image-2 (OpenAI), mistral-medium-3-5 (Mistral), command-a-translate-08-2025 and command-r-plus-08-2024 (Cohere), claude-opus-4-7 (Anthropic), gemini-3.1-flash-tts-preview (Google TTS), and veo-3.1-lite-generate-preview (Google Veo). --- .../audio/providers/google/models.py | 12 ++++++++++ .../images/providers/openai/models.py | 14 +++++++++++ .../text/providers/anthropic/models.py | 16 +++++++++++++ .../text/providers/cohere/models.py | 24 +++++++++++++++++++ .../text/providers/mistral/models.py | 16 +++++++++++++ .../videos/providers/google/models.py | 17 +++++++++++++ 6 files changed, 99 insertions(+) diff --git a/src/celeste/modalities/audio/providers/google/models.py b/src/celeste/modalities/audio/providers/google/models.py index 6185b73b..234f92a2 100644 --- a/src/celeste/modalities/audio/providers/google/models.py +++ b/src/celeste/modalities/audio/providers/google/models.py @@ -67,4 +67,16 @@ AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS), }, ), + Model( + id="gemini-3.1-flash-tts-preview", + provider=Provider.GOOGLE, + display_name="Google TTS Gemini 3.1 Flash (Preview)", + streaming=False, + operations={Modality.AUDIO: {Operation.SPEAK}}, + parameter_constraints={ + AudioParameter.VOICE: VoiceConstraint(voices=GOOGLE_VOICES), + AudioParameter.LANGUAGE: Choice(options=GOOGLE_SUPPORTED_LANGUAGES), + AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS), + }, + ), ] diff --git a/src/celeste/modalities/images/providers/openai/models.py b/src/celeste/modalities/images/providers/openai/models.py index f89705d2..e9bf33a3 100644 --- a/src/celeste/modalities/images/providers/openai/models.py +++ b/src/celeste/modalities/images/providers/openai/models.py @@ -71,4 +71,18 @@ ImageParameter.QUALITY: Choice(options=["low", 
"medium", "high", "auto"]), }, ), + Model( + id="gpt-image-2", + provider=Provider.OPENAI, + display_name="GPT Image 2", + operations={Modality.IMAGES: {Operation.GENERATE, Operation.EDIT}}, + streaming=True, + parameter_constraints={ + ImageParameter.PARTIAL_IMAGES: Range(min=0, max=3), + ImageParameter.ASPECT_RATIO: Choice( + options=["1024x1024", "1536x1024", "1024x1536", "2048x2048", "auto"] + ), + ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + }, + ), ] diff --git a/src/celeste/modalities/text/providers/anthropic/models.py b/src/celeste/modalities/text/providers/anthropic/models.py index e4c277ed..5f50cf97 100644 --- a/src/celeste/modalities/text/providers/anthropic/models.py +++ b/src/celeste/modalities/text/providers/anthropic/models.py @@ -95,6 +95,22 @@ TextParameter.DOCUMENT: DocumentsConstraint(), }, ), + Model( + id="claude-opus-4-7", + provider=Provider.ANTHROPIC, + display_name="Claude Opus 4.7", + operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}}, + streaming=True, + parameter_constraints={ + Parameter.MAX_TOKENS: Range(min=1, max=128000), + TextParameter.THINKING_BUDGET: Range(min=-1, max=32000), + TextParameter.OUTPUT_SCHEMA: Schema(), + TextParameter.TOOLS: ToolSupport(tools=[WebSearch]), + TextParameter.TOOL_CHOICE: ToolChoiceSupport(), + TextParameter.IMAGE: ImagesConstraint(), + TextParameter.DOCUMENT: DocumentsConstraint(), + }, + ), Model( id="claude-sonnet-4-6", provider=Provider.ANTHROPIC, diff --git a/src/celeste/modalities/text/providers/cohere/models.py b/src/celeste/modalities/text/providers/cohere/models.py index c36b2b34..111f0b32 100644 --- a/src/celeste/modalities/text/providers/cohere/models.py +++ b/src/celeste/modalities/text/providers/cohere/models.py @@ -46,4 +46,28 @@ # thinking_budget: Support unclear, omit constraint for now }, ), + Model( + id="command-a-translate-08-2025", + provider=Provider.COHERE, + display_name="Command A Translate 08-2025", + operations={Modality.TEXT: 
{Operation.GENERATE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=8000, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + }, + ), + Model( + id="command-r-plus-08-2024", + provider=Provider.COHERE, + display_name="Command R+ 08-2024", + operations={Modality.TEXT: {Operation.GENERATE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=4000, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + }, + ), ] diff --git a/src/celeste/modalities/text/providers/mistral/models.py b/src/celeste/modalities/text/providers/mistral/models.py index 0106367b..90136e8d 100644 --- a/src/celeste/modalities/text/providers/mistral/models.py +++ b/src/celeste/modalities/text/providers/mistral/models.py @@ -44,6 +44,22 @@ TextParameter.TOOL_CHOICE: ToolChoiceSupport(), }, ), + Model( + id="mistral-medium-3-5", + provider=Provider.MISTRAL, + display_name="Mistral Medium 3.5", + operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=2.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=32768, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + TextParameter.IMAGE: ImagesConstraint(), + TextParameter.DOCUMENT: DocumentsConstraint(), + TextParameter.TOOLS: ToolSupport(tools=[]), + TextParameter.TOOL_CHOICE: ToolChoiceSupport(), + }, + ), Model( id="mistral-small-latest", provider=Provider.MISTRAL, diff --git a/src/celeste/modalities/videos/providers/google/models.py b/src/celeste/modalities/videos/providers/google/models.py index e9741d13..2fb02e67 100644 --- a/src/celeste/modalities/videos/providers/google/models.py +++ b/src/celeste/modalities/videos/providers/google/models.py @@ -81,4 +81,21 @@ ), }, ), + Model( + id="veo-3.1-lite-generate-preview", + provider=Provider.GOOGLE, + 
display_name="Veo 3.1 Lite (Preview)", + operations={Modality.VIDEOS: {Operation.GENERATE}}, + parameter_constraints={ + VideoParameter.ASPECT_RATIO: Choice(options=["16:9", "9:16"]), + VideoParameter.RESOLUTION: Choice(options=["720p", "1080p"]), + VideoParameter.DURATION: Choice(options=[4, 6, 8]), + VideoParameter.FIRST_FRAME: ImageConstraint( + supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + ), + VideoParameter.LAST_FRAME: ImageConstraint( + supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + ), + }, + ), ] From c9c7c9e4c7ed739e6080838707d1ab81ebf9baee Mon Sep 17 00:00:00 2001 From: kamilbenkirane Date: Wed, 22 Apr 2026 13:21:34 +0200 Subject: [PATCH 2/3] feat(models): wire OpenAI image extended params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend ImageParameter enum with BACKGROUND, MODERATION, OUTPUT_COMPRESSION. Add NumImagesMapper base + wire 5 new modality-layer wrappers (num_images, output_format, background, moderation, output_compression) for OpenAI images. Register constraints on gpt-image-1, gpt-image-1-mini, gpt-image-1.5, gpt-image-2 — transparent background limited to the gpt-image-1 generation per OpenAI docs. 
--- src/celeste/modalities/images/parameters.py | 10 ++++ .../images/providers/openai/models.py | 35 ++++++++++++- .../images/providers/openai/parameters.py | 50 +++++++++++++++++++ .../providers/openai/images/parameters.py | 7 +++ 4 files changed, 101 insertions(+), 1 deletion(-) diff --git a/src/celeste/modalities/images/parameters.py b/src/celeste/modalities/images/parameters.py index 858677f9..b0bb8596 100644 --- a/src/celeste/modalities/images/parameters.py +++ b/src/celeste/modalities/images/parameters.py @@ -27,6 +27,9 @@ class ImageParameter(StrEnum): GUIDANCE = "guidance" MASK = "mask" THINKING_LEVEL = "thinking_level" + BACKGROUND = "background" + MODERATION = "moderation" + OUTPUT_COMPRESSION = "output_compression" class ImageParameters(Parameters, total=False): @@ -60,6 +63,13 @@ class ImageParameters(Parameters, total=False): ImageArtifact, Field(description="Mask image for inpainting a region.") ] thinking_level: Annotated[str, Field(description="Model reasoning depth.")] + background: Annotated[ + str, Field(description="Background handling (e.g. 
transparent, opaque, auto).") + ] + moderation: Annotated[str, Field(description="Content moderation strength tier.")] + output_compression: Annotated[ + int, Field(description="Output compression level (0-100) for jpeg/webp.") + ] __all__ = [ diff --git a/src/celeste/modalities/images/providers/openai/models.py b/src/celeste/modalities/images/providers/openai/models.py index e9bf33a3..5d141741 100644 --- a/src/celeste/modalities/images/providers/openai/models.py +++ b/src/celeste/modalities/images/providers/openai/models.py @@ -42,6 +42,13 @@ options=["1024x1024", "1536x1024", "1024x1536", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), Model( @@ -56,6 +63,13 @@ options=["1024x1024", "1024x1536", "1536x1024", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), Model( @@ -69,6 +83,11 @@ options=["1024x1024", "1536x1024", "1024x1536", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]), + ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), 
Model( @@ -80,9 +99,23 @@ parameter_constraints={ ImageParameter.PARTIAL_IMAGES: Range(min=0, max=3), ImageParameter.ASPECT_RATIO: Choice( - options=["1024x1024", "1536x1024", "1024x1536", "2048x2048", "auto"] + options=[ + "1024x1024", + "1536x1024", + "1024x1536", + "2048x2048", + "2048x1152", + "3840x2160", + "2160x3840", + "auto", + ] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]), + ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), ] diff --git a/src/celeste/modalities/images/providers/openai/parameters.py b/src/celeste/modalities/images/providers/openai/parameters.py index 4896f16c..7fde2bed 100644 --- a/src/celeste/modalities/images/providers/openai/parameters.py +++ b/src/celeste/modalities/images/providers/openai/parameters.py @@ -1,6 +1,21 @@ """OpenAI parameter mappers for images.""" from celeste.parameters import ParameterMapper +from celeste.providers.openai.images.parameters import ( + BackgroundMapper as _BackgroundMapper, +) +from celeste.providers.openai.images.parameters import ( + ModerationMapper as _ModerationMapper, +) +from celeste.providers.openai.images.parameters import ( + NumImagesMapper as _NumImagesMapper, +) +from celeste.providers.openai.images.parameters import ( + OutputCompressionMapper as _OutputCompressionMapper, +) +from celeste.providers.openai.images.parameters import ( + OutputFormatMapper as _OutputFormatMapper, +) from celeste.providers.openai.images.parameters import ( PartialImagesMapper as _PartialImagesMapper, ) @@ -33,10 +48,45 @@ class QualityMapper(_QualityMapper): name = ImageParameter.QUALITY +class NumImagesMapper(_NumImagesMapper): + """Map num_images to OpenAI's n parameter.""" + + name = ImageParameter.NUM_IMAGES + + 
+class OutputFormatMapper(_OutputFormatMapper): + """Map output_format to OpenAI's output_format parameter.""" + + name = ImageParameter.OUTPUT_FORMAT + + +class BackgroundMapper(_BackgroundMapper): + """Map background to OpenAI's background parameter.""" + + name = ImageParameter.BACKGROUND + + +class ModerationMapper(_ModerationMapper): + """Map moderation to OpenAI's moderation parameter.""" + + name = ImageParameter.MODERATION + + +class OutputCompressionMapper(_OutputCompressionMapper): + """Map output_compression to OpenAI's output_compression parameter.""" + + name = ImageParameter.OUTPUT_COMPRESSION + + OPENAI_PARAMETER_MAPPERS: list[ParameterMapper[ImageContent]] = [ AspectRatioMapper(), PartialImagesMapper(), QualityMapper(), + NumImagesMapper(), + OutputFormatMapper(), + BackgroundMapper(), + ModerationMapper(), + OutputCompressionMapper(), ] __all__ = ["OPENAI_PARAMETER_MAPPERS"] diff --git a/src/celeste/providers/openai/images/parameters.py b/src/celeste/providers/openai/images/parameters.py index 465522f7..c5f494dc 100644 --- a/src/celeste/providers/openai/images/parameters.py +++ b/src/celeste/providers/openai/images/parameters.py @@ -52,9 +52,16 @@ class OutputCompressionMapper(FieldMapper[ImageContent]): field = "output_compression" +class NumImagesMapper(FieldMapper[ImageContent]): + """Map num_images to OpenAI n field.""" + + field = "n" + + __all__ = [ "BackgroundMapper", "ModerationMapper", + "NumImagesMapper", "OutputCompressionMapper", "OutputFormatMapper", "PartialImagesMapper", From aba8554a011e6b9b1756fd07616b7e574c35e3c2 Mon Sep 17 00:00:00 2001 From: kamilbenkirane Date: Wed, 22 Apr 2026 13:31:03 +0200 Subject: [PATCH 3/3] fix(models): adaptive thinking, veo reference images, SAFETY_TOLERANCE consolidation - claude-opus-4-7: swap THINKING_BUDGET (Range) for THINKING_LEVEL (Choice) to route to Anthropic's adaptive-thinking effort preset instead of the extended-thinking budget_tokens shape the model rejects. 
Add a new ThinkingLevelMapper base class that emits {type: "adaptive", effort: ...} and a modality wrapper in the Anthropic provider registry. - veo-3.1-lite-generate-preview: add REFERENCE_IMAGES constraint (max 3, JPEG/PNG/WebP) to match the -generate-preview sibling. - OpenAI images: use existing ImageParameter.SAFETY_TOLERANCE instead of introducing a duplicate MODERATION enum value (BFL already uses it with an integer range; widen its TypedDict type to int | str so the OpenAI 'auto'/'low' string tier fits). Rename the modality wrapper to SafetyToleranceMapper. - gpt-image-1.5: restore transparent background option; docs confirm only gpt-image-2 dropped transparent. --- src/celeste/modalities/images/parameters.py | 9 +++++--- .../images/providers/openai/models.py | 12 +++++----- .../images/providers/openai/parameters.py | 8 +++---- .../text/providers/anthropic/models.py | 5 ++++- .../text/providers/anthropic/parameters.py | 10 +++++++++ .../videos/providers/google/models.py | 4 ++++ .../anthropic/messages/parameters.py | 22 +++++++++++++++++++ 7 files changed, 57 insertions(+), 13 deletions(-) diff --git a/src/celeste/modalities/images/parameters.py b/src/celeste/modalities/images/parameters.py index b0bb8596..e8bd694d 100644 --- a/src/celeste/modalities/images/parameters.py +++ b/src/celeste/modalities/images/parameters.py @@ -28,7 +28,6 @@ class ImageParameter(StrEnum): MASK = "mask" THINKING_LEVEL = "thinking_level" BACKGROUND = "background" - MODERATION = "moderation" OUTPUT_COMPRESSION = "output_compression" @@ -55,7 +54,12 @@ class ImageParameters(Parameters, total=False): str, Field(description="Concepts to avoid in the output.") ] seed: Annotated[int, Field(description="Seed for deterministic output.")] - safety_tolerance: Annotated[int, Field(description="Safety filter threshold.")] + safety_tolerance: Annotated[ + int | str, + Field( + description="Safety filter threshold — integer tier (BFL) or preset name (OpenAI 'auto'/'low')." 
+ ), + ] output_format: Annotated[str, Field(description="Output file format.")] steps: Annotated[int, Field(description="Number of denoising steps.")] guidance: Annotated[float, Field(description="Prompt-adherence strength.")] @@ -66,7 +70,6 @@ class ImageParameters(Parameters, total=False): background: Annotated[ str, Field(description="Background handling (e.g. transparent, opaque, auto).") ] - moderation: Annotated[str, Field(description="Content moderation strength tier.")] output_compression: Annotated[ int, Field(description="Output compression level (0-100) for jpeg/webp.") ] diff --git a/src/celeste/modalities/images/providers/openai/models.py b/src/celeste/modalities/images/providers/openai/models.py index 5d141741..a52c6e09 100644 --- a/src/celeste/modalities/images/providers/openai/models.py +++ b/src/celeste/modalities/images/providers/openai/models.py @@ -47,7 +47,7 @@ ImageParameter.BACKGROUND: Choice( options=["transparent", "opaque", "auto"] ), - ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), @@ -68,7 +68,7 @@ ImageParameter.BACKGROUND: Choice( options=["transparent", "opaque", "auto"] ), - ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), @@ -85,8 +85,10 @@ ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), ImageParameter.NUM_IMAGES: Range(min=1, max=10), ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), - ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]), - ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), ImageParameter.OUTPUT_COMPRESSION: 
Range(min=0, max=100), }, ), @@ -114,7 +116,7 @@ ImageParameter.NUM_IMAGES: Range(min=1, max=10), ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]), - ImageParameter.MODERATION: Choice(options=["auto", "low"]), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), diff --git a/src/celeste/modalities/images/providers/openai/parameters.py b/src/celeste/modalities/images/providers/openai/parameters.py index 7fde2bed..c782b5e8 100644 --- a/src/celeste/modalities/images/providers/openai/parameters.py +++ b/src/celeste/modalities/images/providers/openai/parameters.py @@ -66,10 +66,10 @@ class BackgroundMapper(_BackgroundMapper): name = ImageParameter.BACKGROUND -class ModerationMapper(_ModerationMapper): - """Map moderation to OpenAI's moderation parameter.""" +class SafetyToleranceMapper(_ModerationMapper): + """Map safety_tolerance to OpenAI's moderation parameter.""" - name = ImageParameter.MODERATION + name = ImageParameter.SAFETY_TOLERANCE class OutputCompressionMapper(_OutputCompressionMapper): @@ -85,7 +85,7 @@ class OutputCompressionMapper(_OutputCompressionMapper): NumImagesMapper(), OutputFormatMapper(), BackgroundMapper(), - ModerationMapper(), + SafetyToleranceMapper(), OutputCompressionMapper(), ] diff --git a/src/celeste/modalities/text/providers/anthropic/models.py b/src/celeste/modalities/text/providers/anthropic/models.py index 5f50cf97..4909fea7 100644 --- a/src/celeste/modalities/text/providers/anthropic/models.py +++ b/src/celeste/modalities/text/providers/anthropic/models.py @@ -1,6 +1,7 @@ """Anthropic models for text modality.""" from celeste.constraints import ( + Choice, DocumentsConstraint, ImagesConstraint, Range, @@ -103,7 +104,9 @@ streaming=True, parameter_constraints={ Parameter.MAX_TOKENS: Range(min=1, max=128000), - TextParameter.THINKING_BUDGET: Range(min=-1, max=32000), + 
TextParameter.THINKING_LEVEL: Choice( + options=["low", "medium", "high", "xhigh", "max"] + ), TextParameter.OUTPUT_SCHEMA: Schema(), TextParameter.TOOLS: ToolSupport(tools=[WebSearch]), TextParameter.TOOL_CHOICE: ToolChoiceSupport(), diff --git a/src/celeste/modalities/text/providers/anthropic/parameters.py b/src/celeste/modalities/text/providers/anthropic/parameters.py index 7b4761ff..c19b138a 100644 --- a/src/celeste/modalities/text/providers/anthropic/parameters.py +++ b/src/celeste/modalities/text/providers/anthropic/parameters.py @@ -13,6 +13,9 @@ from celeste.providers.anthropic.messages.parameters import ( TemperatureMapper as _TemperatureMapper, ) +from celeste.providers.anthropic.messages.parameters import ( + ThinkingLevelMapper as _ThinkingLevelMapper, +) from celeste.providers.anthropic.messages.parameters import ( ThinkingMapper as _ThinkingMapper, ) @@ -64,6 +67,12 @@ def map( return super().map(request, provider_value, model) +class ThinkingLevelMapper(_ThinkingLevelMapper): + """Map thinking_level to Anthropic's thinking.effort parameter (adaptive).""" + + name = TextParameter.THINKING_LEVEL + + class OutputSchemaMapper(_OutputFormatMapper): """Map output_schema to Anthropic's output_format parameter.""" @@ -86,6 +95,7 @@ class ToolChoiceMapper(_ToolChoiceMapper): TemperatureMapper(), MaxTokensMapper(), ThinkingBudgetMapper(), + ThinkingLevelMapper(), OutputSchemaMapper(), ToolsMapper(), ToolChoiceMapper(), diff --git a/src/celeste/modalities/videos/providers/google/models.py b/src/celeste/modalities/videos/providers/google/models.py index 2fb02e67..499c09b6 100644 --- a/src/celeste/modalities/videos/providers/google/models.py +++ b/src/celeste/modalities/videos/providers/google/models.py @@ -90,6 +90,10 @@ VideoParameter.ASPECT_RATIO: Choice(options=["16:9", "9:16"]), VideoParameter.RESOLUTION: Choice(options=["720p", "1080p"]), VideoParameter.DURATION: Choice(options=[4, 6, 8]), + VideoParameter.REFERENCE_IMAGES: ImagesConstraint( + 
supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + max_count=3, + ), VideoParameter.FIRST_FRAME: ImageConstraint( supported_mime_types=VEO_SUPPORTED_MIME_TYPES, ), diff --git a/src/celeste/providers/anthropic/messages/parameters.py b/src/celeste/providers/anthropic/messages/parameters.py index c6a59b17..f9c2aad2 100644 --- a/src/celeste/providers/anthropic/messages/parameters.py +++ b/src/celeste/providers/anthropic/messages/parameters.py @@ -71,6 +71,27 @@ def map( return request +class ThinkingLevelMapper(ParameterMapper[TextContent]): + """Map thinking_level to Anthropic thinking field (adaptive mode). + + Emits {"type": "adaptive", "effort": <effort>} where <effort> is one of + "low" | "medium" | "high" | "xhigh" | "max". + """ + + def map( + self, + request: dict[str, Any], + value: object, + model: Model, + ) -> dict[str, Any]: + """Transform thinking_level into provider request.""" + validated_value = self._validate_value(value, model) + if validated_value is None: + return request + request["thinking"] = {"type": "adaptive", "effort": validated_value} + return request + + class ToolsMapper(ParameterMapper[TextContent]): """Map tools list to Anthropic tools field.""" @@ -223,6 +244,7 @@ def parse_output(self, content: TextContent, value: object | None) -> TextConten "OutputFormatMapper", "StopSequencesMapper", "TemperatureMapper", + "ThinkingLevelMapper", "ThinkingMapper", "ToolChoiceMapper", "ToolsMapper",