Skip to content

Commit e7b6714

Browse files
committed
Add new workflow examples
1 parent a13c53d commit e7b6714

19 files changed

Lines changed: 1491 additions & 50 deletions

examples/spanish/workflow_conditional_state.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
from agent_framework.openai import OpenAIChatClient
2323
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
2424
from dotenv import load_dotenv
25-
from pydantic import BaseModel, ValidationError
25+
from pydantic import BaseModel
2626
from typing_extensions import Never
2727

2828
load_dotenv(override=True)
@@ -62,10 +62,7 @@ def parse_review_decision(message: Any) -> ReviewDecision | None:
6262
if not isinstance(message, AgentExecutorResponse):
6363
return None
6464

65-
try:
66-
return ReviewDecision.model_validate_json(message.agent_response.text)
67-
except ValidationError:
68-
return None
65+
return message.agent_response.value
6966

7067

7168
# Funciones de condición — reciben el mensaje del ejecutor anterior.
@@ -103,7 +100,7 @@ def needs_revision(message: Any) -> bool:
103100
"Define decision=REVISION_NEEDED si necesita mejoras.\n"
104101
"En feedback, explica brevemente tu razonamiento y da cambios accionables cuando aplique."
105102
),
106-
response_format=ReviewDecision,
103+
default_options={"response_format": ReviewDecision},
107104
)
108105

109106
editor = Agent(

examples/spanish/workflow_conditional_state_isolated.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
from agent_framework.openai import OpenAIChatClient
2828
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
2929
from dotenv import load_dotenv
30-
from pydantic import BaseModel, ValidationError
30+
from pydantic import BaseModel
3131
from typing_extensions import Never
3232

3333
load_dotenv(override=True)
@@ -67,10 +67,7 @@ def parse_review_decision(message: Any) -> ReviewDecision | None:
6767
if not isinstance(message, AgentExecutorResponse):
6868
return None
6969

70-
try:
71-
return ReviewDecision.model_validate_json(message.agent_response.text)
72-
except ValidationError:
73-
return None
70+
return message.agent_response.value
7471

7572

7673
# Funciones de condición — reciben el mensaje del ejecutor anterior.
@@ -109,7 +106,7 @@ def create_workflow(model_client: OpenAIChatClient):
109106
"Define decision=REVISION_NEEDED si necesita mejoras.\n"
110107
"En feedback, explica brevemente tu razonamiento y da cambios accionables cuando aplique."
111108
),
112-
response_format=ReviewDecision,
109+
default_options={"response_format": ReviewDecision},
113110
)
114111

115112
editor = Agent(

examples/spanish/workflow_conditional_structured.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from agent_framework.openai import OpenAIChatClient
2525
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
2626
from dotenv import load_dotenv
27-
from pydantic import BaseModel, ValidationError
27+
from pydantic import BaseModel
2828
from typing_extensions import Never
2929

3030
load_dotenv(override=True)
@@ -66,10 +66,7 @@ def parse_review_decision(message: Any) -> ReviewDecision | None:
6666
if not isinstance(message, AgentExecutorResponse):
6767
return None
6868

69-
try:
70-
return ReviewDecision.model_validate_json(message.agent_response.text)
71-
except ValidationError:
72-
return None
69+
return message.agent_response.value
7370

7471

7572
def is_approved(message: Any) -> bool:
@@ -101,7 +98,7 @@ def needs_revision(message: Any) -> bool:
10198
"Si el borrador está listo, define decision=APPROVED e incluye la publicación lista para publicar en post_text. "
10299
"Si necesita cambios, define decision=REVISION_NEEDED y entrega feedback accionable."
103100
),
104-
response_format=ReviewDecision,
101+
default_options={"response_format": ReviewDecision},
105102
)
106103

107104
editor = Agent(

examples/spanish/workflow_converge.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,7 @@ def parse_review_result(message: Any) -> ReviewResult | None:
6060
if not isinstance(message, AgentExecutorResponse):
6161
return None
6262

63-
try:
64-
return ReviewResult.model_validate_json(message.agent_response.text)
65-
except Exception:
66-
return None
63+
return message.agent_response.value
6764

6865

6966
def esta_aprobado(message: Any) -> bool:
@@ -104,7 +101,7 @@ def necesita_edicion(message: Any) -> bool:
104101
"- feedback: retroalimentación concisa y accionable\n"
105102
"- clarity, completeness, accuracy, structure: puntajes individuales (0-100)"
106103
),
107-
response_format=ReviewResult,
104+
default_options={"response_format": ReviewResult},
108105
)
109106

110107

examples/spanish/workflow_switch_case.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ class ClassifyResult(BaseModel):
7878
"Question, Complaint o Feedback. "
7979
"Devuelve un objeto JSON con category, original_message y reasoning."
8080
),
81-
response_format=ClassifyResult,
81+
default_options={"response_format": ClassifyResult},
8282
)
8383

8484

@@ -87,7 +87,7 @@ class ClassifyResult(BaseModel):
8787
@executor(id="extract_category")
8888
async def extract_category(response: AgentExecutorResponse, ctx: WorkflowContext[ClassifyResult]) -> None:
8989
"""Analiza la salida JSON estructurada del clasificador y la envía aguas abajo."""
90-
result = ClassifyResult.model_validate_json(response.agent_response.text)
90+
result: ClassifyResult = response.agent_response.value
9191
print(f"→ Clasificado como: {result.category}{result.reasoning}")
9292
await ctx.send_message(result)
9393

Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,152 @@
1+
"""Fan-out/fan-in with LLM summarization aggregation.
2+
3+
Same 3 expert branches as workflow_fan_out_fan_in_edges.py, but instead
4+
of a hand-coded template, a summarizer Agent synthesizes all branch
5+
outputs into a concise executive brief.
6+
7+
Aggregation technique: LLM synthesis (Agent as post-processor).
8+
9+
Run:
10+
uv run examples/workflow_aggregator_llm_summary.py
11+
uv run examples/workflow_aggregator_llm_summary.py --devui (opens DevUI at http://localhost:8101)
12+
"""
13+
14+
import asyncio
15+
import os
16+
import sys
17+
18+
from agent_framework import Agent, AgentExecutorResponse, Executor, WorkflowBuilder, WorkflowContext, handler
19+
from agent_framework.openai import OpenAIChatClient
20+
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
21+
from dotenv import load_dotenv
22+
23+
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

# Kept at module scope so main() can close it during shutdown when the
# Azure branch created one; stays None for the other hosts.
async_credential = None

# Select the chat-client settings for the configured API host, then build
# the client once from the chosen keyword arguments.
if API_HOST == "azure":
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(
        async_credential, "https://cognitiveservices.azure.com/.default"
    )
    client_kwargs = dict(
        base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
        # NOTE(review): a bearer-token provider is passed where an API key is
        # expected — presumably supported by OpenAIChatClient; confirm upstream.
        api_key=token_provider,
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
    )
elif API_HOST == "github":
    client_kwargs = dict(
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-5-mini"),
    )
else:
    client_kwargs = dict(
        api_key=os.environ["OPENAI_API_KEY"],
        model_id=os.environ.get("OPENAI_MODEL", "gpt-5-mini"),
    )

client = OpenAIChatClient(**client_kwargs)
46+
47+
48+
class DispatchPrompt(Executor):
    """Entry node: forwards the incoming prompt unchanged.

    The fan-out edges attached to this executor broadcast the single
    emitted message to every expert branch.
    """

    @handler
    async def dispatch(self, prompt: str, ctx: WorkflowContext[str]) -> None:
        """Re-emit *prompt* untouched so downstream edges can fan it out."""
        await ctx.send_message(prompt)
54+
55+
56+
class FormatBranchResults(Executor):
    """Fan-in collector: merges all expert branch outputs into one prompt."""

    @handler
    async def format(
        self,
        results: list[AgentExecutorResponse],
        ctx: WorkflowContext[str],
    ) -> None:
        """Label each branch's text with its executor id and forward the
        sections joined by a horizontal-rule separator to the summarizer."""
        labeled = [
            f"[{branch.executor_id}]\n{branch.agent_response.text}"
            for branch in results
        ]
        await ctx.send_message("\n\n---\n\n".join(labeled))
70+
71+
72+
dispatcher = DispatchPrompt(id="dispatcher")


def _branch_agent(name: str, instructions: str) -> Agent:
    """Build one agent on the shared chat client (keeps definitions DRY)."""
    return Agent(client=client, name=name, instructions=instructions)


# Three expert branches that each analyze the same prompt independently.
researcher = _branch_agent(
    "Researcher",
    "You are an expert market researcher. "
    "Given the prompt, provide concise factual insights, opportunities, and risks. "
    "Use short bullet points.",
)

marketer = _branch_agent(
    "Marketer",
    "You are a marketing strategist. "
    "Given the prompt, propose clear value proposition and audience messaging. "
    "Use short bullet points.",
)

legal = _branch_agent(
    "Legal",
    "You are a legal and compliance reviewer. "
    "Given the prompt, list constraints, disclaimers, and policy concerns. "
    "Use short bullet points.",
)

formatter = FormatBranchResults(id="formatter")

# The summarizer Agent is the final node — it receives the formatted expert
# outputs and synthesizes them into a concise executive brief.
summarizer = _branch_agent(
    "Summarizer",
    "You receive analysis from three domain experts (researcher, marketer, legal). "
    "Synthesize their combined insights into a concise 3-sentence executive brief "
    "that a CEO could read in 30 seconds. Do not repeat the raw analysis.",
)

# Wire the graph: dispatcher fans out to the experts, the experts fan in to
# the formatter, and the formatter feeds the summarizer.
_builder = WorkflowBuilder(
    name="FanOutFanInLLMSummary",
    description="Fan-out/fan-in with LLM summarization aggregation.",
    start_executor=dispatcher,
    output_executors=[summarizer],
)
_builder = _builder.add_fan_out_edges(dispatcher, [researcher, marketer, legal])
_builder = _builder.add_fan_in_edges([researcher, marketer, legal], formatter)
_builder = _builder.add_edge(formatter, summarizer)
workflow = _builder.build()
130+
131+
132+
async def main() -> None:
    """Execute the workflow once and print the LLM-synthesized brief."""
    prompt = "We are launching a budget-friendly electric bike for urban commuters."
    print(f"Prompt: {prompt}\n")

    events = await workflow.run(prompt)
    for output in events.get_outputs():
        print("=== Executive Brief (LLM-synthesized) ===")
        print(output)

    # Release the AAD credential when the Azure branch created one.
    if async_credential is not None:
        await async_credential.close()
144+
145+
146+
if __name__ == "__main__":
    # With --devui, host the workflow in the local DevUI instead of running it
    # once; otherwise execute a single run of the sample.
    if "--devui" not in sys.argv:
        asyncio.run(main())
    else:
        from agent_framework.devui import serve

        serve(entities=[workflow], port=8101, auto_open=True)

0 commit comments

Comments
 (0)