"""
This example demonstrates how to ask the same question to multiple different LLMs
and then combine their responses into a single coherent answer using another LLM.
The example uses three different models for answering:
- GPT-4O Mini
- Gemini 2.0 Flash
- Llama 3.3 70B
Then uses O3 Mini (with medium reasoning effort) to analyze and combine their responses.
"""
import asyncio
from pydantic import BaseModel, Field
import workflowai
from workflowai import Model
class MultiModelInput(BaseModel):
    """Input model containing the question to ask all models."""

    # Field descriptions are kept verbatim: they surface in the generated
    # schema that workflowai sends alongside the prompt.
    question: str = Field(description="The question to ask all models")
    model_name: str = Field(description="Name of the model providing the response")
class ModelResponse(BaseModel):
    """Response from an individual model."""

    # One answer from one model; descriptions preserved verbatim because
    # they feed the agent's structured-output schema.
    model_name: str = Field(
        description="Name of the model that provided this response",
    )
    response: str = Field(
        description="The model's response to the question",
    )
class CombinerInput(BaseModel):
    """Input for the response combiner."""

    # The collected per-model answers that the combiner agent will merge.
    responses: list[ModelResponse] = Field(
        description="List of responses to combine",
    )
class CombinedOutput(BaseModel):
    """Final output combining responses from all models."""

    # Description strings are unchanged; only the Field call layout differs.
    combined_answer: str = Field(description="Synthesized answer combining insights from all models")
    explanation: str = Field(description="Explanation of how the responses were combined and why")
@workflowai.agent(
    id="question-answerer",
)
async def get_model_response(query: MultiModelInput) -> ModelResponse:
    """Get response from the specified model."""
    # NOTE: the "..." body is intentional — the @workflowai.agent decorator
    # supplies the implementation (a remote LLM call). No model is pinned
    # here; the caller selects one per run via `run(..., model=...)`.
    # The docstring above is used as the agent's prompt, so do not reword it
    # casually: changing it changes the agent's behavior.
    ...
@workflowai.agent(
    id="response-combiner",
    model=Model.O3_MINI_2025_01_31_MEDIUM_REASONING_EFFORT,
)
async def combine_responses(responses_input: CombinerInput) -> CombinedOutput:
    """
    Analyze and combine responses from multiple models into a single coherent answer.
    You are an expert at analyzing and synthesizing information from multiple sources.
    Your task is to:
    1. Review the responses from different models
    2. Identify key insights and unique perspectives from each
    3. Create a comprehensive answer that combines the best elements
    4. Explain your synthesis process
    Please ensure the combined answer is:
    - Accurate and well-reasoned
    - Incorporates unique insights from each model
    - Clear and coherent
    - Properly attributed when using specific insights
    """
    # NOTE: the "..." body is intentional — @workflowai.agent generates the
    # implementation, and the docstring above IS the LLM instruction prompt
    # (hence its imperative, second-person phrasing). Editing it alters the
    # combiner's behavior, so it is left byte-for-byte as written.
    ...
async def main() -> None:
    """Ask one question to several models concurrently, then merge the answers.

    Queries each configured model with the same question, collects their
    structured responses, and feeds them to the combiner agent, printing the
    synthesized result.
    """
    # Example: Scientific explanation
    print("\nExample: Scientific Concept")
    print("-" * 50)

    question = "What is dark matter and why is it important for our understanding of the universe?"

    # (model identifier, human-readable name) pairs to query.
    models = [
        (Model.GPT_4O_MINI_LATEST, "GPT-4O Mini"),
        (Model.GEMINI_2_0_FLASH_LATEST, "Gemini 2.0 Flash"),
        (Model.LLAMA_3_3_70B, "Llama 3.3 70B"),
    ]

    # The per-model calls are independent network I/O, so issue them
    # concurrently rather than awaiting each in turn. asyncio.gather()
    # preserves input order, so `responses` lines up with `models`.
    runs = await asyncio.gather(
        *(
            get_model_response.run(
                MultiModelInput(question=question, model_name=model_name),
                model=model,
            )
            for model, model_name in models
        ),
    )
    responses = [run.output for run in runs]

    # Combine responses into a single synthesized answer.
    combined = await combine_responses.run(CombinerInput(responses=responses))
    print(combined)


if __name__ == "__main__":
    asyncio.run(main())