|
| 1 | +""" |
| 2 | +OpenTelemetry + Aspire Dashboard example. |
| 3 | +
|
| 4 | +Demonstrates a tool-calling agent that exports OpenTelemetry traces, metrics, |
| 5 | +and structured logs to the .NET Aspire Dashboard via OTLP/gRPC. |
| 6 | +
|
| 7 | +Telemetry is only exported when the OTEL_EXPORTER_OTLP_ENDPOINT environment |
| 8 | +variable is set. Without it, the agent runs normally with no telemetry export. |
| 9 | +
|
| 10 | +To start the Aspire Dashboard: |
| 11 | +
|
| 12 | + docker run --rm -it -d \ |
| 13 | + -p 18888:18888 \ |
| 14 | + -p 4317:18889 \ |
| 15 | + --name aspire-dashboard \ |
| 16 | + mcr.microsoft.com/dotnet/aspire-dashboard:latest |
| 17 | +
|
| 18 | +The dashboard UI is at http://localhost:18888. |
| 19 | +Get the login token from the container logs: |
| 20 | +
|
| 21 | + docker logs aspire-dashboard |
| 22 | +
|
| 23 | +Look for: "Login to the dashboard at http://localhost:18888/login?t=<TOKEN>" |
| 24 | +
|
| 25 | +Then run this example with telemetry export enabled: |
| 26 | +
|
| 27 | + OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 python examples/agent_otel_aspire.py |
| 28 | +
|
| 29 | +In the Aspire Dashboard you will see: |
| 30 | + - Traces: agent -> chat completion -> tool execution spans |
| 31 | + - Metrics: token usage and operation duration histograms |
| 32 | + - Structured Logs: conversation messages (system, user, assistant, tool) |
| 33 | + - GenAI telemetry visualizer: full conversation view on chat spans |
| 34 | +
|
| 35 | +To stop the dashboard: |
| 36 | +
|
| 37 | + docker stop aspire-dashboard |
| 38 | +
|
| 39 | +For the full Python + Aspire guide, see: |
| 40 | + https://aspire.dev/dashboard/standalone-for-python/ |
| 41 | +""" |
| 42 | + |
| 43 | +import asyncio |
| 44 | +import logging |
| 45 | +import os |
| 46 | +import random |
| 47 | +from datetime import datetime, timezone |
| 48 | +from typing import Annotated |
| 49 | + |
| 50 | +from agent_framework import ChatAgent |
| 51 | +from agent_framework.observability import configure_otel_providers |
| 52 | +from agent_framework.openai import OpenAIChatClient |
| 53 | +from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider |
| 54 | +from dotenv import load_dotenv |
| 55 | +from pydantic import Field |
| 56 | +from rich import print |
| 57 | +from rich.logging import RichHandler |
| 58 | + |
# Logging configuration: rich console output, root logger at WARNING,
# this example's logger at INFO so tool-call messages are visible.
rich_handler = RichHandler(rich_tracebacks=True, show_level=False, show_path=False)
logging.basicConfig(format="%(message)s", level=logging.WARNING, handlers=[rich_handler], force=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| 64 | + |
# Enable OpenTelemetry export to the Aspire Dashboard only when an OTLP
# endpoint is configured; otherwise the agent runs with no telemetry export.
otlp_endpoint = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
if not otlp_endpoint:
    logger.info("Set OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 to export telemetry to the Aspire Dashboard")
else:
    # Default to gRPC transport and a recognizable service name; both are
    # overridable via the environment because setdefault never clobbers.
    os.environ.setdefault("OTEL_EXPORTER_OTLP_PROTOCOL", "grpc")
    os.environ.setdefault("OTEL_SERVICE_NAME", "agent-framework-demo")
    # enable_sensitive_data=True records message content on spans/logs.
    configure_otel_providers(enable_sensitive_data=True)
    logger.info("OpenTelemetry export enabled — sending to %s", otlp_endpoint)
| 74 | + |
# Pick an OpenAI-compatible backend from the API_HOST environment variable:
# "azure" (Entra ID auth), "github" (GitHub Models), or anything else for openai.com.
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

async_credential = None
if API_HOST == "azure":
    # Azure OpenAI via Entra ID: a bearer-token provider stands in for an API key.
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
    azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
    client = OpenAIChatClient(
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
        base_url=f"{azure_endpoint}/openai/v1/",
        api_key=token_provider,
    )
elif API_HOST == "github":
    # GitHub Models inference endpoint, authenticated with a GitHub token.
    client = OpenAIChatClient(
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-5-mini"),
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
    )
else:
    # Plain openai.com with an API key.
    client = OpenAIChatClient(
        model_id=os.environ.get("OPENAI_MODEL", "gpt-5-mini"),
        api_key=os.environ["OPENAI_API_KEY"],
    )
| 98 | + |
| 99 | + |
def get_weather(
    city: Annotated[str, Field(description="City name, spelled out fully")],
) -> dict:
    """Return mock weather for *city* as a dict with temperature and description.

    One of four fixed conditions is chosen uniformly at random; no real
    weather service is contacted.
    """
    logger.info(f"Getting weather for {city}")
    conditions = [(72, "Sunny"), (60, "Rainy"), (55, "Cloudy"), (45, "Windy")]
    temperature, description = random.choice(conditions)
    return {"temperature": temperature, "description": description}
| 112 | + |
| 113 | + |
def get_current_time(
    timezone_name: Annotated[str, Field(description="Timezone name, e.g. 'US/Eastern', 'Asia/Tokyo', 'UTC'")],
) -> str:
    """Return the current date and time in the requested IANA timezone.

    Previously this always reported UTC while labeling it with the requested
    zone; it now converts via ``zoneinfo``. An unrecognized or malformed
    timezone name falls back to UTC instead of raising, so a bad
    model-supplied argument never crashes the tool call.
    """
    from zoneinfo import ZoneInfo  # local import: only this tool needs it

    logger.info(f"Getting current time for {timezone_name}")
    try:
        now = datetime.now(ZoneInfo(timezone_name))
    except (KeyError, ValueError):  # ZoneInfoNotFoundError subclasses KeyError
        now = datetime.now(timezone.utc)
        return f"The current time in {timezone_name} is approximately {now.strftime('%Y-%m-%d %H:%M:%S')} UTC"
    return f"The current time in {timezone_name} is {now.strftime('%Y-%m-%d %H:%M:%S %Z')}"
| 121 | + |
| 122 | + |
# The tool-calling agent under observation: the selected chat client plus
# the two local tools the model may invoke.
agent = ChatAgent(
    chat_client=client,
    name="weather-time-agent",
    instructions="You are a helpful assistant that can look up weather and time information.",
    tools=[get_weather, get_current_time],
)
| 129 | + |
| 130 | + |
async def main():
    """Run a single agent turn, print the reply, and release credentials.

    The credential cleanup is in a ``finally`` block so the async Azure
    credential (and its underlying HTTP session) is closed even when
    ``agent.run`` raises — the original only closed it on success.
    """
    try:
        response = await agent.run("What's the weather in Seattle and what time is it in Tokyo?")
        print(response.text)
    finally:
        if async_credential:
            await async_credential.close()
| 137 | + |
| 138 | + |
# Script entry point: drive the async demo once on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
0 commit comments