Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions src/celeste/modalities/audio/providers/google/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,4 +67,16 @@
AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS),
},
),
Model(
id="gemini-3.1-flash-tts-preview",
provider=Provider.GOOGLE,
display_name="Google TTS Gemini 3.1 Flash (Preview)",
streaming=False,
operations={Modality.AUDIO: {Operation.SPEAK}},
parameter_constraints={
AudioParameter.VOICE: VoiceConstraint(voices=GOOGLE_VOICES),
AudioParameter.LANGUAGE: Choice(options=GOOGLE_SUPPORTED_LANGUAGES),
AudioParameter.OUTPUT_FORMAT: Choice(options=GOOGLE_SUPPORTED_FORMATS),
},
),
]
15 changes: 14 additions & 1 deletion src/celeste/modalities/images/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ class ImageParameter(StrEnum):
GUIDANCE = "guidance"
MASK = "mask"
THINKING_LEVEL = "thinking_level"
BACKGROUND = "background"
OUTPUT_COMPRESSION = "output_compression"


class ImageParameters(Parameters, total=False):
Expand All @@ -52,14 +54,25 @@ class ImageParameters(Parameters, total=False):
str, Field(description="Concepts to avoid in the output.")
]
seed: Annotated[int, Field(description="Seed for deterministic output.")]
safety_tolerance: Annotated[int, Field(description="Safety filter threshold.")]
safety_tolerance: Annotated[
int | str,
Field(
description="Safety filter threshold — integer tier (BFL) or preset name (OpenAI 'auto'/'low')."
),
]
output_format: Annotated[str, Field(description="Output file format.")]
steps: Annotated[int, Field(description="Number of denoising steps.")]
guidance: Annotated[float, Field(description="Prompt-adherence strength.")]
mask: Annotated[
ImageArtifact, Field(description="Mask image for inpainting a region.")
]
thinking_level: Annotated[str, Field(description="Model reasoning depth.")]
background: Annotated[
str, Field(description="Background handling (e.g. transparent, opaque, auto).")
]
output_compression: Annotated[
int, Field(description="Output compression level (0-100) for jpeg/webp.")
]


__all__ = [
Expand Down
49 changes: 49 additions & 0 deletions src/celeste/modalities/images/providers/openai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,13 @@
options=["1024x1024", "1536x1024", "1024x1536", "auto"]
),
ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]),
ImageParameter.NUM_IMAGES: Range(min=1, max=10),
ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]),
ImageParameter.BACKGROUND: Choice(
options=["transparent", "opaque", "auto"]
),
ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]),
ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100),
},
),
Model(
Expand All @@ -56,6 +63,13 @@
options=["1024x1024", "1024x1536", "1536x1024", "auto"]
),
ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]),
ImageParameter.NUM_IMAGES: Range(min=1, max=10),
ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]),
ImageParameter.BACKGROUND: Choice(
options=["transparent", "opaque", "auto"]
),
ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]),
ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100),
},
),
Model(
Expand All @@ -69,6 +83,41 @@
options=["1024x1024", "1536x1024", "1024x1536", "auto"]
),
ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]),
ImageParameter.NUM_IMAGES: Range(min=1, max=10),
ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]),
ImageParameter.BACKGROUND: Choice(
options=["transparent", "opaque", "auto"]
),
ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]),
ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100),
},
),
Model(
id="gpt-image-2",
provider=Provider.OPENAI,
display_name="GPT Image 2",
operations={Modality.IMAGES: {Operation.GENERATE, Operation.EDIT}},
streaming=True,
parameter_constraints={
ImageParameter.PARTIAL_IMAGES: Range(min=0, max=3),
ImageParameter.ASPECT_RATIO: Choice(
options=[
"1024x1024",
"1536x1024",
"1024x1536",
"2048x2048",
"2048x1152",
"3840x2160",
"2160x3840",
"auto",
]
),
ImageParameter.QUALITY: Choice(options=["low", "medium", "high", "auto"]),
ImageParameter.NUM_IMAGES: Range(min=1, max=10),
ImageParameter.OUTPUT_FORMAT: Choice(options=["png", "jpeg", "webp"]),
ImageParameter.BACKGROUND: Choice(options=["opaque", "auto"]),
ImageParameter.SAFETY_TOLERANCE: Choice(options=["auto", "low"]),
ImageParameter.OUTPUT_COMPRESSION: Range(min=0, max=100),
},
),
]
50 changes: 50 additions & 0 deletions src/celeste/modalities/images/providers/openai/parameters.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,21 @@
"""OpenAI parameter mappers for images."""

from celeste.parameters import ParameterMapper
from celeste.providers.openai.images.parameters import (
BackgroundMapper as _BackgroundMapper,
)
from celeste.providers.openai.images.parameters import (
ModerationMapper as _ModerationMapper,
)
from celeste.providers.openai.images.parameters import (
NumImagesMapper as _NumImagesMapper,
)
from celeste.providers.openai.images.parameters import (
OutputCompressionMapper as _OutputCompressionMapper,
)
from celeste.providers.openai.images.parameters import (
OutputFormatMapper as _OutputFormatMapper,
)
from celeste.providers.openai.images.parameters import (
PartialImagesMapper as _PartialImagesMapper,
)
Expand Down Expand Up @@ -33,10 +48,45 @@ class QualityMapper(_QualityMapper):
name = ImageParameter.QUALITY


class NumImagesMapper(_NumImagesMapper):
    """Expose the shared OpenAI ``n`` field mapper under the images-modality
    parameter name ``num_images``."""

    # Modality-level parameter key this mapper answers to.
    name = ImageParameter.NUM_IMAGES


class OutputFormatMapper(_OutputFormatMapper):
    """Expose the shared OpenAI ``output_format`` mapper under the
    images-modality parameter name ``output_format``."""

    # Modality-level parameter key this mapper answers to.
    name = ImageParameter.OUTPUT_FORMAT


class BackgroundMapper(_BackgroundMapper):
    """Expose the shared OpenAI ``background`` mapper under the
    images-modality parameter name ``background``."""

    # Modality-level parameter key this mapper answers to.
    name = ImageParameter.BACKGROUND


class SafetyToleranceMapper(_ModerationMapper):
    """Route the modality-level ``safety_tolerance`` parameter through the
    shared OpenAI ``moderation`` field mapper.

    NOTE(review): unlike the other mappers here, the modality name
    (``safety_tolerance``) and the provider field (``moderation``) differ —
    intentional, since OpenAI calls this knob ``moderation``.
    """

    # Modality-level parameter key this mapper answers to.
    name = ImageParameter.SAFETY_TOLERANCE


class OutputCompressionMapper(_OutputCompressionMapper):
    """Expose the shared OpenAI ``output_compression`` mapper under the
    images-modality parameter name ``output_compression``."""

    # Modality-level parameter key this mapper answers to.
    name = ImageParameter.OUTPUT_COMPRESSION


# Registry of image-parameter mappers applied to OpenAI image requests.
# NOTE(review): mappers are presumably applied in list order — confirm
# whether ordering is significant before rearranging entries.
OPENAI_PARAMETER_MAPPERS: list[ParameterMapper[ImageContent]] = [
    AspectRatioMapper(),
    PartialImagesMapper(),
    QualityMapper(),
    NumImagesMapper(),
    OutputFormatMapper(),
    BackgroundMapper(),
    SafetyToleranceMapper(),
    OutputCompressionMapper(),
]

__all__ = ["OPENAI_PARAMETER_MAPPERS"]
19 changes: 19 additions & 0 deletions src/celeste/modalities/text/providers/anthropic/models.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""Anthropic models for text modality."""

from celeste.constraints import (
Choice,
DocumentsConstraint,
ImagesConstraint,
Range,
Expand Down Expand Up @@ -95,6 +96,24 @@
TextParameter.DOCUMENT: DocumentsConstraint(),
},
),
Model(
id="claude-opus-4-7",
provider=Provider.ANTHROPIC,
display_name="Claude Opus 4.7",
operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}},
streaming=True,
parameter_constraints={
Parameter.MAX_TOKENS: Range(min=1, max=128000),
TextParameter.THINKING_LEVEL: Choice(
options=["low", "medium", "high", "xhigh", "max"]
),
TextParameter.OUTPUT_SCHEMA: Schema(),
TextParameter.TOOLS: ToolSupport(tools=[WebSearch]),
TextParameter.TOOL_CHOICE: ToolChoiceSupport(),
TextParameter.IMAGE: ImagesConstraint(),
TextParameter.DOCUMENT: DocumentsConstraint(),
},
),
Model(
id="claude-sonnet-4-6",
provider=Provider.ANTHROPIC,
Expand Down
10 changes: 10 additions & 0 deletions src/celeste/modalities/text/providers/anthropic/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@
from celeste.providers.anthropic.messages.parameters import (
TemperatureMapper as _TemperatureMapper,
)
from celeste.providers.anthropic.messages.parameters import (
ThinkingLevelMapper as _ThinkingLevelMapper,
)
from celeste.providers.anthropic.messages.parameters import (
ThinkingMapper as _ThinkingMapper,
)
Expand Down Expand Up @@ -64,6 +67,12 @@ def map(
return super().map(request, provider_value, model)


class ThinkingLevelMapper(_ThinkingLevelMapper):
    """Expose the shared Anthropic adaptive-thinking mapper under the
    text-modality parameter name ``thinking_level``."""

    # Modality-level parameter key this mapper answers to.
    name = TextParameter.THINKING_LEVEL


class OutputSchemaMapper(_OutputFormatMapper):
"""Map output_schema to Anthropic's output_format parameter."""

Expand All @@ -86,6 +95,7 @@ class ToolChoiceMapper(_ToolChoiceMapper):
TemperatureMapper(),
MaxTokensMapper(),
ThinkingBudgetMapper(),
ThinkingLevelMapper(),
OutputSchemaMapper(),
ToolsMapper(),
ToolChoiceMapper(),
Expand Down
24 changes: 24 additions & 0 deletions src/celeste/modalities/text/providers/cohere/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,4 +46,28 @@
# thinking_budget: Support unclear, omit constraint for now
},
),
Model(
id="command-a-translate-08-2025",
provider=Provider.COHERE,
display_name="Command A Translate 08-2025",
operations={Modality.TEXT: {Operation.GENERATE}},
streaming=True,
parameter_constraints={
Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01),
Parameter.MAX_TOKENS: Range(min=1, max=8000, step=1),
TextParameter.OUTPUT_SCHEMA: Schema(),
},
),
Model(
id="command-r-plus-08-2024",
provider=Provider.COHERE,
display_name="Command R+ 08-2024",
operations={Modality.TEXT: {Operation.GENERATE}},
streaming=True,
parameter_constraints={
Parameter.TEMPERATURE: Range(min=0.0, max=1.0, step=0.01),
Parameter.MAX_TOKENS: Range(min=1, max=4000, step=1),
TextParameter.OUTPUT_SCHEMA: Schema(),
},
),
]
16 changes: 16 additions & 0 deletions src/celeste/modalities/text/providers/mistral/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,22 @@
TextParameter.TOOL_CHOICE: ToolChoiceSupport(),
},
),
Model(
id="mistral-medium-3-5",
provider=Provider.MISTRAL,
display_name="Mistral Medium 3.5",
operations={Modality.TEXT: {Operation.GENERATE, Operation.ANALYZE}},
streaming=True,
parameter_constraints={
Parameter.TEMPERATURE: Range(min=0.0, max=2.0, step=0.01),
Parameter.MAX_TOKENS: Range(min=1, max=32768, step=1),
TextParameter.OUTPUT_SCHEMA: Schema(),
TextParameter.IMAGE: ImagesConstraint(),
TextParameter.DOCUMENT: DocumentsConstraint(),
TextParameter.TOOLS: ToolSupport(tools=[]),
TextParameter.TOOL_CHOICE: ToolChoiceSupport(),
},
),
Model(
id="mistral-small-latest",
provider=Provider.MISTRAL,
Expand Down
21 changes: 21 additions & 0 deletions src/celeste/modalities/videos/providers/google/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,4 +81,25 @@
),
},
),
Model(
id="veo-3.1-lite-generate-preview",
provider=Provider.GOOGLE,
display_name="Veo 3.1 Lite (Preview)",
operations={Modality.VIDEOS: {Operation.GENERATE}},
parameter_constraints={
VideoParameter.ASPECT_RATIO: Choice(options=["16:9", "9:16"]),
VideoParameter.RESOLUTION: Choice(options=["720p", "1080p"]),
VideoParameter.DURATION: Choice(options=[4, 6, 8]),
VideoParameter.REFERENCE_IMAGES: ImagesConstraint(
supported_mime_types=VEO_SUPPORTED_MIME_TYPES,
max_count=3,
),
VideoParameter.FIRST_FRAME: ImageConstraint(
supported_mime_types=VEO_SUPPORTED_MIME_TYPES,
),
VideoParameter.LAST_FRAME: ImageConstraint(
supported_mime_types=VEO_SUPPORTED_MIME_TYPES,
),
},
),
]
22 changes: 22 additions & 0 deletions src/celeste/providers/anthropic/messages/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,27 @@ def map(
return request


class ThinkingLevelMapper(ParameterMapper[TextContent]):
    """Translate ``thinking_level`` into Anthropic's adaptive thinking field.

    When a level is supplied, the request gains
    ``{"type": "adaptive", "effort": <level>}`` where ``<level>`` is one of
    "low" | "medium" | "high" | "xhigh" | "max".
    """

    def map(
        self,
        request: dict[str, Any],
        value: object,
        model: Model,
    ) -> dict[str, Any]:
        """Attach the adaptive thinking payload when a level is provided.

        Leaves *request* untouched when validation yields ``None``.
        """
        level = self._validate_value(value, model)
        if level is not None:
            request["thinking"] = {"type": "adaptive", "effort": level}
        return request


class ToolsMapper(ParameterMapper[TextContent]):
"""Map tools list to Anthropic tools field."""

Expand Down Expand Up @@ -223,6 +244,7 @@ def parse_output(self, content: TextContent, value: object | None) -> TextConten
"OutputFormatMapper",
"StopSequencesMapper",
"TemperatureMapper",
"ThinkingLevelMapper",
"ThinkingMapper",
"ToolChoiceMapper",
"ToolsMapper",
Expand Down
7 changes: 7 additions & 0 deletions src/celeste/providers/openai/images/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,16 @@ class OutputCompressionMapper(FieldMapper[ImageContent]):
field = "output_compression"


class NumImagesMapper(FieldMapper[ImageContent]):
    """Forward the portable ``num_images`` parameter to OpenAI's ``n`` field."""

    # Provider request field written by the shared FieldMapper machinery.
    field = "n"


__all__ = [
"BackgroundMapper",
"ModerationMapper",
"NumImagesMapper",
"OutputCompressionMapper",
"OutputFormatMapper",
"PartialImagesMapper",
Expand Down
Loading