"""
This example demonstrates how to ask the same question to multiple different LLMs
and then combine their responses into a single coherent answer using another LLM.

The example uses three different models for answering:
- GPT-4o Mini
- Gemini 2.0 Flash
- Llama 3.3 70B

Then uses O3 Mini (with medium reasoning effort) to analyze and combine their responses.
"""

import asyncio
from typing import List

from pydantic import BaseModel, Field

import workflowai
from workflowai import Model, Run


class MultiModelInput(BaseModel):
    """Input model containing the question to ask all models."""
    question: str = Field(
        description="The question to ask all models",
    )


class ModelResponse(BaseModel):
    """Response from an individual model."""
    model_name: str = Field(description="Name of the model that provided this response")
    response: str = Field(description="The model's response to the question")


class CombinerInput(BaseModel):
    """Input for the response combiner."""
    responses: List[ModelResponse] = Field(description="List of responses to combine")


class CombinedOutput(BaseModel):
    """Final output combining responses from all models."""
    individual_responses: List[ModelResponse] = Field(
        description="List of responses from each individual model",
    )
    combined_answer: str = Field(
        description="Synthesized answer combining insights from all models",
    )
    explanation: str = Field(
        description="Explanation of how the responses were combined and why",
    )

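# Each answering agent below is a stub: the `...` body is intentional. The
# @workflowai.agent decorator provides the implementation, sending the input
# model to the configured LLM and parsing its reply into the declared output
# type. The function docstring serves as the agent's instructions, as the
# combiner agent further down illustrates.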
@workflowai.agent(
    id="gpt4o-mini-agent",
    model=Model.GPT_4O_MINI_LATEST,
)
async def get_gpt4_response(query: MultiModelInput) -> Run[ModelResponse]:
    """Get a response from the GPT-4o Mini model."""
    ...


@workflowai.agent(
    id="gemini-agent",
    model=Model.GEMINI_2_0_FLASH_LATEST,
)
async def get_gemini_response(query: MultiModelInput) -> Run[ModelResponse]:
    """Get a response from the Gemini 2.0 Flash model."""
    ...


@workflowai.agent(
    id="llama-agent",
    model=Model.LLAMA_3_3_70B,
)
async def get_llama_response(query: MultiModelInput) -> Run[ModelResponse]:
    """Get a response from the Llama 3.3 70B model."""
    ...


@workflowai.agent(
    id="response-combiner",
    model=Model.O3_MINI_2025_01_31_MEDIUM_REASONING_EFFORT,
)
async def combine_responses(responses_input: CombinerInput) -> Run[CombinedOutput]:
    """
    Analyze and combine responses from multiple models into a single coherent answer.

    You are an expert at analyzing and synthesizing information from multiple sources.
    Your task is to:
    1. Review the responses from the different models
    2. Identify key insights and unique perspectives from each
    3. Create a comprehensive answer that combines the best elements
    4. Explain your synthesis process

    Please ensure the combined answer:
    - Is accurate and well-reasoned
    - Incorporates unique insights from each model
    - Is clear and coherent
    - Attributes specific insights to their source model
    """
    ...

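# Orchestration: fan the question out to each answering agent, then fan the
# answers back in through the combiner for a single synthesized reply.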
async def main():
    # Example: Scientific explanation
    print("\nExample: Scientific Concept")
    print("-" * 50)
    question = "What is dark matter and why is it important for our understanding of the universe?"

    # Get responses from all models
    gpt4_run = await get_gpt4_response(MultiModelInput(question=question))
    gemini_run = await get_gemini_response(MultiModelInput(question=question))
    llama_run = await get_llama_response(MultiModelInput(question=question))
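    # The agents are awaited sequentially here for readability. Since the three
    # calls are independent, they could also run concurrently, e.g.:
    #   gpt4_run, gemini_run, llama_run = await asyncio.gather(
    #       get_gpt4_response(MultiModelInput(question=question)),
    #       get_gemini_response(MultiModelInput(question=question)),
    #       get_llama_response(MultiModelInput(question=question)),
    #   )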

    # Combine responses
    combined = await combine_responses(CombinerInput(
        responses=[
            gpt4_run.output,
            gemini_run.output,
            llama_run.output,
        ],
    ))
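    # `combined` is a Run[CombinedOutput]; the parsed fields are available via
    # `combined.output` (e.g., `combined.output.combined_answer`).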
    print(combined)


if __name__ == "__main__":
    asyncio.run(main())