diff --git a/src/celeste/modalities/audio/providers/google/models.py b/src/celeste/modalities/audio/providers/google/models.py index 6185b73b..234f92a2 100644 --- a/src/celeste/modalities/audio/providers/google/models.py +++ b/src/celeste/modalities/audio/providers/google/models.py @@ -67,4 +67,16 @@ AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS), }, ), + Model( + id="gemini-3.1-flash-tts-preview", + provider=Provider.GOOGLE, + display_name="Google TTS Gemini 3.1 Flash (Preview)", + streaming=False, + operations={Modality.AUDIO: {Operation.SPEAK}}, + parameter_constraints={ + AudioParameter.VOICE: VoiceConstraint(voices=GOOGLE_VOICES), + AudioParameter.LANGUAGE: Choice(options=GOOGLE_SUPPORTED_LANGUAGES), + AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS), + }, + ), ] diff --git a/src/celeste/modalities/images/parameters.py b/src/celeste/modalities/images/parameters.py index 858677f9..e8bd694d 100644 --- a/src/celeste/modalities/images/parameters.py +++ b/src/celeste/modalities/images/parameters.py @@ -27,6 +27,8 @@ class ImageParameter(StrEnum): GUIDANCE = "guidance" MASK = "mask" THINKING_LEVEL = "thinking_level" + BACKGROUND = "background" + OUTPUT_COMPRESSION = "output_compression" class ImageParameters(Parameters, total=False): @@ -52,7 +54,12 @@ class ImageParameters(Parameters, total=False): str, Field(description="Concepts to avoid in the output.") ] seed: Annotated[int, Field(description="Seed for deterministic output.")] - safety_tolerance: Annotated[int, Field(description="Safety filter threshold.")] + safety_tolerance: Annotated[ + int | str, + Field( + description="Safety filter threshold — integer tier (BFL) or preset name (OpenAI 'auto'/'low')." 
+ ), + ] output_format: Annotated[str, Field(description="Output file format.")] steps: Annotated[int, Field(description="Number of denoising steps.")] guidance: Annotated[float, Field(description="Prompt-adherence strength.")] @@ -60,6 +67,12 @@ class ImageParameters(Parameters, total=False): ImageArtifact, Field(description="Mask image for inpainting a region.") ] thinking_level: Annotated[str, Field(description="Model reasoning depth.")] + background: Annotated[ + str, Field(description="Background handling (e.g. transparent, opaque, auto).") + ] + output_compression: Annotated[ + int, Field(description="Output compression level (0-100) for jpeg/webp.") + ] __all__ = [ diff --git a/src/celeste/modalities/images/providers/openai/models.py b/src/celeste/modalities/images/providers/openai/models.py index f89705d2..a52c6e09 100644 --- a/src/celeste/modalities/images/providers/openai/models.py +++ b/src/celeste/modalities/images/providers/openai/models.py @@ -42,6 +42,13 @@ options=["1024x1024", "1536x1024", "1024x1536", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), Model( @@ -56,6 +63,13 @@ options=["1024x1024", "1024x1536", "1536x1024", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), Model( @@ -69,6 +83,41 @@ 
options=["1024x1024", "1536x1024", "1024x1536", "auto"] ), ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice( + options=["transparent", "opaque", "auto"] + ), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), + }, + ), + Model( + id="gpt-image-2", + provider=Provider.OPENAI, + display_name="GPT Image 2", + operations={Modality.IMAGES: {Operation.GENERATE, Operation.EDIT}}, + streaming=True, + parameter_constraints={ + ImageParameter.PARTIAL_IMAGES: Range(min=0, max=3), + ImageParameter.ASPECT_RATIO: Choice( + options=[ + "1024x1024", + "1536x1024", + "1024x1536", + "2048x2048", + "2048x1152", + "3840x2160", + "2160x3840", + "auto", + ] + ), + ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]), + ImageParameter.NUM_IMAGES: Range(min=1, max=10), + ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]), + ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]), + ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]), + ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100), }, ), ] diff --git a/src/celeste/modalities/images/providers/openai/parameters.py b/src/celeste/modalities/images/providers/openai/parameters.py index 4896f16c..c782b5e8 100644 --- a/src/celeste/modalities/images/providers/openai/parameters.py +++ b/src/celeste/modalities/images/providers/openai/parameters.py @@ -1,6 +1,21 @@ """OpenAI parameter mappers for images.""" from celeste.parameters import ParameterMapper +from celeste.providers.openai.images.parameters import ( + BackgroundMapper as _BackgroundMapper, +) +from celeste.providers.openai.images.parameters import ( + ModerationMapper as _ModerationMapper, +) +from celeste.providers.openai.images.parameters import ( + 
NumImagesMapper as _NumImagesMapper, +) +from celeste.providers.openai.images.parameters import ( + OutputCompressionMapper as _OutputCompressionMapper, +) +from celeste.providers.openai.images.parameters import ( + OutputFormatMapper as _OutputFormatMapper, +) from celeste.providers.openai.images.parameters import ( PartialImagesMapper as _PartialImagesMapper, ) @@ -33,10 +48,45 @@ class QualityMapper(_QualityMapper): name = ImageParameter.QUALITY +class NumImagesMapper(_NumImagesMapper): + """Map num_images to OpenAI's n parameter.""" + + name = ImageParameter.NUM_IMAGES + + +class OutputFormatMapper(_OutputFormatMapper): + """Map output_format to OpenAI's output_format parameter.""" + + name = ImageParameter.OUTPUT_FORMAT + + +class BackgroundMapper(_BackgroundMapper): + """Map background to OpenAI's background parameter.""" + + name = ImageParameter.BACKGROUND + + +class SafetyToleranceMapper(_ModerationMapper): + """Map safety_tolerance to OpenAI's moderation parameter.""" + + name = ImageParameter.SAFETY_TOLERANCE + + +class OutputCompressionMapper(_OutputCompressionMapper): + """Map output_compression to OpenAI's output_compression parameter.""" + + name = ImageParameter.OUTPUT_COMPRESSION + + OPENAI_PARAMETER_MAPPERS: list[ParameterMapper[ImageContent]] = [ AspectRatioMapper(), PartialImagesMapper(), QualityMapper(), + NumImagesMapper(), + OutputFormatMapper(), + BackgroundMapper(), + SafetyToleranceMapper(), + OutputCompressionMapper(), ] __all__ = ["OPENAI_PARAMETER_MAPPERS"] diff --git a/src/celeste/modalities/text/providers/anthropic/models.py b/src/celeste/modalities/text/providers/anthropic/models.py index e4c277ed..4909fea7 100644 --- a/src/celeste/modalities/text/providers/anthropic/models.py +++ b/src/celeste/modalities/text/providers/anthropic/models.py @@ -1,6 +1,7 @@ """Anthropic models for text modality.""" from celeste.constraints import ( + Choice, DocumentsConstraint, ImagesConstraint, Range, @@ -95,6 +96,24 @@ TextParameter.DOCUMENT: 
DocumentsConstraint(), }, ), + Model( + id="claude-opus-4-7", + provider=Provider.ANTHROPIC, + display_name="Claude Opus 4.7", + operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}}, + streaming=True, + parameter_constraints={ + Parameter.MAX_TOKENS: Range(min=1, max=128000), + TextParameter.THINKING_LEVEL: Choice( + options=["low", "medium", "high", "xhigh", "max"] + ), + TextParameter.OUTPUT_SCHEMA: Schema(), + TextParameter.TOOLS: ToolSupport(tools=[WebSearch]), + TextParameter.TOOL_CHOICE: ToolChoiceSupport(), + TextParameter.IMAGE: ImagesConstraint(), + TextParameter.DOCUMENT: DocumentsConstraint(), + }, + ), Model( id="claude-sonnet-4-6", provider=Provider.ANTHROPIC, diff --git a/src/celeste/modalities/text/providers/anthropic/parameters.py b/src/celeste/modalities/text/providers/anthropic/parameters.py index 7b4761ff..c19b138a 100644 --- a/src/celeste/modalities/text/providers/anthropic/parameters.py +++ b/src/celeste/modalities/text/providers/anthropic/parameters.py @@ -13,6 +13,9 @@ from celeste.providers.anthropic.messages.parameters import ( TemperatureMapper as _TemperatureMapper, ) +from celeste.providers.anthropic.messages.parameters import ( + ThinkingLevelMapper as _ThinkingLevelMapper, +) from celeste.providers.anthropic.messages.parameters import ( ThinkingMapper as _ThinkingMapper, ) @@ -64,6 +67,12 @@ def map( return super().map(request, provider_value, model) +class ThinkingLevelMapper(_ThinkingLevelMapper): + """Map thinking_level to Anthropic's thinking.effort parameter (adaptive).""" + + name = TextParameter.THINKING_LEVEL + + class OutputSchemaMapper(_OutputFormatMapper): """Map output_schema to Anthropic's output_format parameter.""" @@ -86,6 +95,7 @@ class ToolChoiceMapper(_ToolChoiceMapper): TemperatureMapper(), MaxTokensMapper(), ThinkingBudgetMapper(), + ThinkingLevelMapper(), OutputSchemaMapper(), ToolsMapper(), ToolChoiceMapper(), diff --git a/src/celeste/modalities/text/providers/cohere/models.py 
b/src/celeste/modalities/text/providers/cohere/models.py index c36b2b34..111f0b32 100644 --- a/src/celeste/modalities/text/providers/cohere/models.py +++ b/src/celeste/modalities/text/providers/cohere/models.py @@ -46,4 +46,28 @@ # thinking_budget: Support unclear, omit constraint for now }, ), + Model( + id="command-a-translate-08-2025", + provider=Provider.COHERE, + display_name="Command A Translate 08-2025", + operations={Modality.TEXT: {Operation.GENERATE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=8000, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + }, + ), + Model( + id="command-r-plus-08-2024", + provider=Provider.COHERE, + display_name="Command R+ 08-2024", + operations={Modality.TEXT: {Operation.GENERATE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=4000, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + }, + ), ] diff --git a/src/celeste/modalities/text/providers/mistral/models.py b/src/celeste/modalities/text/providers/mistral/models.py index 0106367b..90136e8d 100644 --- a/src/celeste/modalities/text/providers/mistral/models.py +++ b/src/celeste/modalities/text/providers/mistral/models.py @@ -44,6 +44,22 @@ TextParameter.TOOL_CHOICE: ToolChoiceSupport(), }, ), + Model( + id="mistral-medium-3-5", + provider=Provider.MISTRAL, + display_name="Mistral Medium 3.5", + operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}}, + streaming=True, + parameter_constraints={ + Parameter.TEMPERATURE: Range(min=0.0, max=2.0, step=0.01), + Parameter.MAX_TOKENS: Range(min=1, max=32768, step=1), + TextParameter.OUTPUT_SCHEMA: Schema(), + TextParameter.IMAGE: ImagesConstraint(), + TextParameter.DOCUMENT: DocumentsConstraint(), + TextParameter.TOOLS: ToolSupport(tools=[]), + TextParameter.TOOL_CHOICE: ToolChoiceSupport(), + }, + ), Model( 
id="mistral-small-latest", provider=Provider.MISTRAL, diff --git a/src/celeste/modalities/videos/providers/google/models.py b/src/celeste/modalities/videos/providers/google/models.py index e9741d13..499c09b6 100644 --- a/src/celeste/modalities/videos/providers/google/models.py +++ b/src/celeste/modalities/videos/providers/google/models.py @@ -81,4 +81,25 @@ ), }, ), + Model( + id="veo-3.1-lite-generate-preview", + provider=Provider.GOOGLE, + display_name="Veo 3.1 Lite (Preview)", + operations={Modality.VIDEOS: {Operation.GENERATE}}, + parameter_constraints={ + VideoParameter.ASPECT_RATIO: Choice(options=["16:9", "9:16"]), + VideoParameter.RESOLUTION: Choice(options=["720p", "1080p"]), + VideoParameter.DURATION: Choice(options=[4, 6, 8]), + VideoParameter.REFERENCE_IMAGES: ImagesConstraint( + supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + max_count=3, + ), + VideoParameter.FIRST_FRAME: ImageConstraint( + supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + ), + VideoParameter.LAST_FRAME: ImageConstraint( + supported_mime_types=VEO_SUPPORTED_MIME_TYPES, + ), + }, + ), ] diff --git a/src/celeste/providers/anthropic/messages/parameters.py b/src/celeste/providers/anthropic/messages/parameters.py index c6a59b17..f9c2aad2 100644 --- a/src/celeste/providers/anthropic/messages/parameters.py +++ b/src/celeste/providers/anthropic/messages/parameters.py @@ -71,6 +71,27 @@ def map( return request +class ThinkingLevelMapper(ParameterMapper[TextContent]): + """Map thinking_level to Anthropic thinking field (adaptive mode). + + Emits {"type": "adaptive", "effort": ...} where the effort value is one of + "low" | "medium" | "high" | "xhigh" | "max". 
+ """ + + def map( + self, + request: dict[str, Any], + value: object, + model: Model, + ) -> dict[str, Any]: + """Transform thinking_level into provider request.""" + validated_value = self._validate_value(value, model) + if validated_value is None: + return request + request["thinking"] = {"type": "adaptive", "effort": validated_value} + return request + + class ToolsMapper(ParameterMapper[TextContent]): """Map tools list to Anthropic tools field.""" @@ -223,6 +244,7 @@ def parse_output(self, content: TextContent, value: object | None) -> TextConten "OutputFormatMapper", "StopSequencesMapper", "TemperatureMapper", + "ThinkingLevelMapper", "ThinkingMapper", "ToolChoiceMapper", "ToolsMapper", diff --git a/src/celeste/providers/openai/images/parameters.py b/src/celeste/providers/openai/images/parameters.py index 465522f7..c5f494dc 100644 --- a/src/celeste/providers/openai/images/parameters.py +++ b/src/celeste/providers/openai/images/parameters.py @@ -52,9 +52,16 @@ class OutputCompressionMapper(FieldMapper[ImageContent]): field = "output_compression" +class NumImagesMapper(FieldMapper[ImageContent]): + """Map num_images to OpenAI n field.""" + + field = "n" + + __all__ = [ "BackgroundMapper", "ModerationMapper", + "NumImagesMapper", "OutputCompressionMapper", "OutputFormatMapper", "PartialImagesMapper",