|
| 1 | +"""Fan-out/fan-in con agregación de ranking usando LLM como juez. |
| 2 | +
|
| 3 | +Tres agentes creativos con diferentes personalidades (audaz, minimalista, |
| 4 | +emocional) proponen cada uno un eslogan de marketing. Un ejecutor ranker |
| 5 | +recopila las opciones, las formatea y usa un agente juez interno para |
| 6 | +calificarlas y ordenarlas — dejando que el LLM evalúe creatividad, |
| 7 | +memorabilidad y encaje con la marca. |
| 8 | +
|
| 9 | +Técnica de agregación: LLM como juez (generar N candidatos y rankear el mejor). |
| 10 | +
|
| 11 | +Ejecutar: |
| 12 | + uv run examples/spanish/workflow_aggregator_ranked.py |
| 13 | + uv run examples/spanish/workflow_aggregator_ranked.py --devui (abre DevUI en http://localhost:8104) |
| 14 | +""" |
| 15 | + |
| 16 | +import asyncio |
| 17 | +import os |
| 18 | +import sys |
| 19 | + |
| 20 | +from typing import Never |
| 21 | + |
| 22 | +from agent_framework import Agent, AgentExecutorResponse, Executor, Message, WorkflowBuilder, WorkflowContext, handler |
| 23 | +from agent_framework.openai import OpenAIChatClient |
| 24 | +from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider |
| 25 | +from dotenv import load_dotenv |
| 26 | +from pydantic import BaseModel, Field |
| 27 | + |
| 28 | + |
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

# Configure the chat client according to the API provider.
# async_credential stays None unless the Azure branch runs; main() closes it on exit.
async_credential = None
if API_HOST == "azure":
    # Keyless auth: a bearer-token provider is handed to the client as api_key.
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
    client = OpenAIChatClient(
        base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
        api_key=token_provider,
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
    )
elif API_HOST == "github":
    # GitHub Models inference endpoint, authenticated with a GitHub token.
    client = OpenAIChatClient(
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-5-mini"),
    )
else:
    # Default: plain OpenAI API.
    client = OpenAIChatClient(
        api_key=os.environ["OPENAI_API_KEY"], model_id=os.environ.get("OPENAI_MODEL", "gpt-5-mini")
    )
| 52 | + |
| 53 | + |
class RankedSlogan(BaseModel):
    """A single ranked slogan entry produced by the LLM judge."""

    # NOTE: Field descriptions are runtime strings (presumably surfaced to the
    # model via the structured-output schema), so they are left exactly as written.
    rank: int = Field(description="Rank position, 1 = best.")
    agent_name: str = Field(description="Name of the agent that produced the slogan.")
    slogan: str = Field(description="The marketing slogan text.")
    score: int = Field(description="Score from 1 to 10.")
    justification: str = Field(description="One-sentence justification for the score.")
| 62 | + |
| 63 | + |
class RankedSlogans(BaseModel):
    """Typed workflow output: the full list of slogans ordered by the judge."""

    rankings: list[RankedSlogan] = Field(description="Slogans ranked from best to worst.")
| 68 | + |
| 69 | + |
class DispatchPrompt(Executor):
    """Emits the product brief downstream for the fan-out broadcast."""

    @handler
    async def dispatch(self, prompt: str, ctx: WorkflowContext[str]) -> None:
        """Forward the incoming prompt unchanged to the downstream executors."""
        await ctx.send_message(prompt)
| 76 | + |
| 77 | + |
class RankerExecutor(Executor):
    """Fan-in aggregator: formats candidate slogans and ranks them via the LLM client."""

    def __init__(self, *, client: OpenAIChatClient, id: str = "Ranker") -> None:
        """Keep a reference to the chat client used for judging."""
        super().__init__(id=id)
        self._client = client

    @staticmethod
    def _extract_slogan(raw: str) -> str:
        """Reduce an agent reply to its first line, with surrounding quotes removed."""
        first_line = raw.strip().strip("\"'").split("\n")[0]
        return first_line.strip().strip("\"'")

    @handler
    async def run(
        self,
        results: list[AgentExecutorResponse],
        ctx: WorkflowContext[Never, RankedSlogans],
    ) -> None:
        """Collect the candidate slogans and ask the LLM judge for a ranking."""
        candidates = [
            f"- [{item.executor_id}]: \"{self._extract_slogan(item.agent_response.text)}\""
            for item in results
        ]

        # Judge prompt (runtime strings, intentionally left in Spanish).
        system_text = (
            "Eres un director creativo senior evaluando eslóganes de marketing. "
            "Dada una lista de eslóganes candidatos, ordénalos del mejor al peor. "
            "Para cada eslogan, da una puntuación de 1 a 10 y una justificación de una sola oración "
            "evaluando creatividad, memorabilidad, claridad y encaje con la marca."
        )
        conversation = [
            Message(role="system", text=system_text),
            Message(role="user", text="Eslóganes candidatos:\n" + "\n".join(candidates)),
        ]

        # Structured output: the judge must answer with a RankedSlogans payload.
        judged = await self._client.get_response(conversation, options={"response_format": RankedSlogans})
        await ctx.yield_output(judged.value)
| 111 | + |
| 112 | + |
dispatcher = DispatchPrompt(id="dispatcher")

# Three creative agents with distinct personas; each proposes exactly one slogan.
# (Agent instructions are runtime prompt strings and are intentionally left in Spanish.)
bold_writer = Agent(
    client=client,
    name="BoldWriter",
    instructions=(
        "Eres un copywriter audaz y dramático. "
        "Dado el brief del producto, propone UN eslogan de marketing contundente (máx. 10 palabras). "
        "Hazlo llamativo y con mucha confianza. Responde SOLO con el eslogan."
    ),
)

minimalist_writer = Agent(
    client=client,
    name="MinimalistWriter",
    instructions=(
        "Eres un copywriter minimalista que valora la brevedad por encima de todo. "
        "Dado el brief del producto, propone UN eslogan de marketing ultra-corto (máx. 6 palabras). "
        "Menos es más. Responde SOLO con el eslogan."
    ),
)

emotional_writer = Agent(
    client=client,
    name="EmotionalWriter",
    instructions=(
        "Eres un copywriter con enfoque empático. "
        "Dado el brief del producto, propone UN eslogan de marketing (máx. 10 palabras) "
        "que conecte emocionalmente con la audiencia. Responde SOLO con el eslogan."
    ),
)

# The ranker executor calls the LLM client directly to handle the fan-in —
# it formats the collected slogans and has the LLM rank them.
ranker = RankerExecutor(client=client)

# Graph: dispatcher broadcasts to the three writers (fan-out), whose responses
# are gathered into the ranker (fan-in), which yields the final RankedSlogans.
workflow = (
    WorkflowBuilder(
        name="FanOutFanInRanked",
        description="Generate slogans in parallel, then LLM-judge ranks them.",
        start_executor=dispatcher,
        output_executors=[ranker],
    )
    .add_fan_out_edges(dispatcher, [bold_writer, minimalist_writer, emotional_writer])
    .add_fan_in_edges([bold_writer, minimalist_writer, emotional_writer], ranker)
    .build()
)
| 160 | + |
| 161 | + |
async def main() -> None:
    """Run the slogan pipeline and print the ranked results.

    Fix: the async Azure credential (created at import time when API_HOST is
    "azure") is now closed in a ``finally`` block, so its underlying session is
    released even when the workflow run or output printing raises.
    """
    prompt = "Bicicleta eléctrica económica para commuters urbanos. Confiable, accesible y verde."
    print(f"Brief del producto: {prompt}\n")

    try:
        events = await workflow.run(prompt)
        for output in events.get_outputs():
            # Each output is the RankedSlogans payload yielded by the ranker executor.
            for entry in output.rankings:
                print(f"#{entry.rank} (puntaje {entry.score}) [{entry.agent_name}]: \"{entry.slogan}\"")
                print(f" {entry.justification}\n")
    finally:
        # Only set for the Azure branch; closing releases its internal transport.
        if async_credential:
            await async_credential.close()
| 175 | + |
| 176 | + |
if __name__ == "__main__":
    if "--devui" in sys.argv:
        # Serve the workflow in the DevUI instead of running it once in the terminal.
        from agent_framework.devui import serve

        # auto_open launches the browser at http://localhost:8104.
        serve(entities=[workflow], port=8104, auto_open=True)
    else:
        asyncio.run(main())
0 commit comments