Skip to content

Commit 2aa9e39

Browse files
authored
Merge pull request #30 from pamelafox/restore-magenticone
Restore workflow_magenticone.py and README mentions
2 parents 1b668ab + 228d202 commit 2aa9e39

3 files changed

Lines changed: 203 additions & 0 deletions

File tree

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -194,6 +194,7 @@ You can run the examples in this repository by executing the scripts in the `exa
194194
| [agent_with_subagent.py](examples/agent_with_subagent.py) | Context isolation with sub-agents to keep prompts focused on relevant tools. |
195195
| [agent_without_subagent.py](examples/agent_without_subagent.py) | Context bloat example where one agent carries all tool schemas in a single prompt. |
196196
| [agent_summarization.py](examples/agent_summarization.py) | Context compaction via summarization middleware to reduce token usage in long conversations. |
197+
| [workflow_magenticone.py](examples/workflow_magenticone.py) | A MagenticOne multi-agent workflow. |
197198
| [agent_tool_approval.py](examples/agent_tool_approval.py) | Standalone agent with tool approval — gates sensitive operations before execution. |
198199
| [agent_middleware.py](examples/agent_middleware.py) | Agent, chat, and function middleware for logging, timing, and blocking. |
199200
| [agent_knowledge_aisearch.py](examples/agent_knowledge_aisearch.py) | Knowledge retrieval (RAG) using Azure AI Search with AgentFrameworkAzureAISearchRAG. |

examples/spanish/README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ Puedes ejecutar los ejemplos en este repositorio ejecutando los scripts en el di
189189
| [agent_with_subagent.py](agent_with_subagent.py) | Aislamiento de contexto con subagentes para mantener los prompts enfocados en herramientas relevantes. |
190190
| [agent_without_subagent.py](agent_without_subagent.py) | Ejemplo de inflado de contexto cuando un solo agente carga todos los esquemas de herramientas en un mismo prompt. |
191191
| [agent_summarization.py](agent_summarization.py) | Compactación de contexto mediante middleware de resumen para reducir el uso de tokens en conversaciones largas. |
192+
| [workflow_magenticone.py](workflow_magenticone.py) | Un workflow multi-agente MagenticOne. |
192193
| [agent_tool_approval.py](agent_tool_approval.py) | Agente independiente con aprobación de herramientas — controla operaciones sensibles antes de ejecutarlas. |
193194
| [agent_middleware.py](agent_middleware.py) | Middleware de agente, chat y funciones para logging, timing y bloqueo. |
194195
| [agent_knowledge_aisearch.py](agent_knowledge_aisearch.py) | Recuperación de conocimiento (RAG) usando Azure AI Search con AgentFrameworkAzureAISearchRAG. |

examples/workflow_magenticone.py

Lines changed: 201 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,201 @@
1+
"""MagenticOne orchestration example with OpenAIChatClient setup used in this repo.
2+
3+
This sample demonstrates a Magentic manager coordinating three specialists to
4+
produce a travel plan, with streaming output and orchestration ledger events.
5+
6+
Run:
7+
uv run examples/workflow_magenticone.py
8+
uv run examples/workflow_magenticone.py --devui
9+
"""
10+
11+
import asyncio
12+
import json
13+
import os
14+
import sys
15+
from typing import cast
16+
17+
from agent_framework import Agent, AgentResponseUpdate, Message, WorkflowEvent
18+
from agent_framework.openai import OpenAIChatClient
19+
from agent_framework.orchestrations import MagenticBuilder, MagenticProgressLedger
20+
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
21+
from dotenv import load_dotenv
22+
from rich.console import Console
23+
from rich.markdown import Markdown
24+
from rich.panel import Panel
25+
26+
# Select and configure the chat client from environment variables.
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

# Held at module level so main() can close it after the workflow finishes.
async_credential = None

if API_HOST == "azure":
    # Azure OpenAI: authenticate with Entra ID (keyless) via a bearer-token provider.
    async_credential = DefaultAzureCredential()
    bearer_token_provider = get_bearer_token_provider(
        async_credential, "https://cognitiveservices.azure.com/.default"
    )
    client = OpenAIChatClient(
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
        base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
        api_key=bearer_token_provider,
    )
elif API_HOST == "github":
    # GitHub Models: free-tier inference endpoint authenticated with a GitHub token.
    client = OpenAIChatClient(
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-4.1-mini"),
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
    )
else:
    # Default: openai.com with an API key.
    client = OpenAIChatClient(
        model_id=os.environ.get("OPENAI_MODEL", "gpt-4.1-mini"),
        api_key=os.environ["OPENAI_API_KEY"],
    )

console = Console()
51+
52+
53+
# Specialist agent: proposes activities and places at the destination.
local_agent = Agent(
    name="local_agent",
    description="Specialist in local activities and places.",
    client=client,
    instructions=(
        "You suggest authentic and interesting local activities or places to visit, "
        "using any context provided by the user or other agents."
    ),
)
62+
63+
# Specialist agent: reviews the plan for language/communication concerns.
language_agent = Agent(
    name="language_agent",
    description="Specialist in language and communication advice.",
    client=client,
    instructions=(
        "You review travel plans and provide practical tips for language and communication "
        "challenges at the destination. If coverage is already good, acknowledge that with rationale."
    ),
)
72+
73+
# Specialist agent: merges the other agents' output into the final plan.
travel_summary_agent = Agent(
    name="travel_summary_agent",
    description="Specialist in travel-plan synthesis.",
    client=client,
    instructions=(
        "You synthesize suggestions and advice from other agents into a complete travel plan. "
        "Make reasonable assumptions when details are missing. "
        "Do not ask the user follow-up questions. "
        "Do not ask for confirmations or permissions. "
        "YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN."
    ),
)
85+
86+
# Orchestrator agent: the Magentic manager that routes work to the specialists.
manager_agent = Agent(
    name="manager_agent",
    description="Magentic manager that coordinates specialists.",
    client=client,
    instructions=(
        "You coordinate specialists to solve complex tasks efficiently. "
        "The user is not available for follow-up questions. "
        "If information is missing, choose sensible assumptions and continue. "
        "Ensure the workflow ends with a complete final plan."
    ),
)
97+
98+
# Assemble the MagenticOne workflow: a manager plus three specialist participants,
# with caps on rounds/stalls/resets so a confused run terminates.
_workflow_builder = MagenticBuilder(
    manager_agent=manager_agent,
    participants=[local_agent, language_agent, travel_summary_agent],
    max_round_count=10,
    max_stall_count=1,
    max_reset_count=1,
)
magentic_workflow = _workflow_builder.build()
105+
106+
107+
def _orchestrator_panel(title: str, body, border_style: str) -> Panel:
    """Build a uniformly padded rich Panel for an orchestrator event."""
    return Panel(body, title=title, border_style=border_style, padding=(1, 2))


def handle_stream_event(event: WorkflowEvent, last_message_id: str | None) -> str | None:
    """Render a workflow stream event and return the updated message id.

    Agent token updates are streamed inline; a header line with the executor id
    is printed whenever the message id changes. Orchestrator events are rendered
    as panels: progress-ledger snapshots as pretty-printed JSON, everything else
    as Markdown.

    Args:
        event: The event emitted by the streaming workflow run.
        last_message_id: Message id of the previous streamed update, used to
            detect when a new agent message starts.

    Returns:
        The message id to pass into the next call (unchanged for
        non-agent-update events).
    """
    if event.type == "output" and isinstance(event.data, AgentResponseUpdate):
        message_id = event.data.message_id
        if message_id != last_message_id:
            if last_message_id is not None:
                console.print()  # finish the previous agent's streamed line
            console.print(f"🤖 {event.executor_id}:", end=" ")
            last_message_id = message_id
        console.print(event.data, end="")
        return last_message_id

    if event.type == "magentic_orchestrator":
        console.print()
        emoji = "✅" if event.data.event_type.name == "PROGRESS_LEDGER_UPDATED" else "🧭"
        title = f"{emoji} Orchestrator: {event.data.event_type.name}"
        content = event.data.content

        if isinstance(content, MagenticProgressLedger):
            # Ledger snapshots are structured data; render as indented JSON.
            rendered = json.dumps(content.to_dict(), indent=2)
            console.print(_orchestrator_panel(title, rendered, "bold yellow"))
        elif hasattr(content, "text"):
            console.print(_orchestrator_panel(title, Markdown(content.text), "bold green"))
        else:
            # Fallback for content without a .text attribute.
            console.print(_orchestrator_panel(title, Markdown(str(content)), "bold green"))

    return last_message_id
153+
154+
155+
def print_final_result(output_event: WorkflowEvent | None) -> None:
    """Print the final plan from the workflow output event.

    Args:
        output_event: The non-streaming "output" event captured during the run,
            or None if the workflow produced no final output (nothing is
            printed in that case).
    """
    if output_event is None:
        return

    output_messages = cast(list[Message], output_event.data)
    if not output_messages:
        # Defensive guard: an empty message list would make [-1] raise IndexError.
        return

    console.print(
        Panel(
            Markdown(output_messages[-1].text),
            title="🌍 Final Travel Plan",
            border_style="bold green",
            padding=(1, 2),
        )
    )
169+
170+
171+
async def main() -> None:
    """Run the Magentic workflow on a fixed travel-planning task with streaming output."""
    task = (
        "Plan a half-day trip in Costa Rica for a family with two children ages 6 and 9, "
        "staying in San José, with a mid-range budget. "
        "Provide a complete itinerary with timing, transport assumptions, estimated costs, "
        "food recommendation, and practical language tips."
    )
    console.print(f"[bold]Task:[/bold] {task}\n")

    streamed_message_id: str | None = None
    final_output_event: WorkflowEvent | None = None

    async for event in magentic_workflow.run(task, stream=True):
        streamed_message_id = handle_stream_event(event, streamed_message_id)
        # The final result arrives as an "output" event that is NOT a token update.
        is_final_output = event.type == "output" and not isinstance(event.data, AgentResponseUpdate)
        if is_final_output:
            final_output_event = event

    print_final_result(final_output_event)

    # Release the Azure credential opened during client configuration, if any.
    if async_credential:
        await async_credential.close()
193+
194+
195+
if __name__ == "__main__":
    if "--devui" not in sys.argv:
        asyncio.run(main())
    else:
        # Serve the workflow in the interactive DevUI instead of a one-shot run.
        from agent_framework.devui import serve

        serve(entities=[magentic_workflow], auto_open=True)

0 commit comments

Comments
 (0)