"""
|
|
AgentLens Multi-Agent Example — Nested traces for orchestrated agent workflows.
|
|
|
|
Demonstrates:
|
|
- A "planner" agent that delegates to sub-agents
|
|
- Nested trace contexts that create parent-child span relationships automatically
|
|
- Multiple decision types: ROUTING, PLANNING, TOOL_SELECTION
|
|
- How the dashboard shows the full agent call tree
|
|
|
|
Usage:
|
|
pip install vectry-agentlens
|
|
python multi_agent.py
|
|
"""

import time

import agentlens

# Initialize
agentlens.init(
    api_key="your-api-key-here",
    endpoint="http://localhost:4200",
)
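# Both values above are placeholders: use your real API key and point the
# endpoint at your own AgentLens deployment. init() is paired with the
# agentlens.shutdown() call at the bottom of this script.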


def simulate_llm_call(prompt: str, delay: float = 0.2) -> str:
    """Fake LLM call — replace with real model calls in production."""
    time.sleep(delay)
    return f"[LLM response to: {prompt[:50]}...]"
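
# In production, swap simulate_llm_call for a real model call. A minimal sketch,
# assuming the OpenAI Python SDK (the client, model name, and helper name below
# are illustrative; any LLM provider works and none of this is part of AgentLens):
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     def real_llm_call(prompt: str) -> str:
#         response = client.chat.completions.create(
#             model="gpt-4o-mini",
#             messages=[{"role": "user", "content": prompt}],
#         )
#         return response.choices[0].message.content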


# Top-level planner agent trace
with agentlens.trace("planner-agent", tags=["multi-agent", "blog-pipeline"]):
    # Planner decides the workflow
    agentlens.log_decision(
        type="PLANNING",
        chosen={
            "name": "research_then_write",
            "confidence": 0.93,
            "params": {
                "steps": ["research", "outline", "draft", "review"],
                "topic": "AI agents in production",
            },
        },
        alternatives=[
            {
                "name": "write_directly",
                "confidence": 0.4,
                "reason_rejected": "Topic requires research for factual accuracy",
            },
        ],
        reasoning="Complex topic — research phase needed before writing.",
    )

    # Planner routes to researcher agent
    agentlens.log_decision(
        type="ROUTING",
        chosen={
            "name": "researcher-agent",
            "confidence": 0.95,
            "params": {"query": "AI agents in production best practices 2025"},
        },
        alternatives=[
            {
                "name": "writer-agent",
                "confidence": 0.3,
                "reason_rejected": "Need facts before drafting",
            },
        ],
        reasoning="Researcher goes first to gather source material.",
    )
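
    # Entering trace() while the planner's trace is still active automatically
    # records researcher-agent as a child span of planner-agent in the call tree.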
    # --- Nested: Researcher Agent ---
    with agentlens.trace("researcher-agent", tags=["research"]):
        agentlens.log_decision(
            type="TOOL_SELECTION",
            chosen={
                "name": "web_search",
                "confidence": 0.88,
                "params": {
                    "query": "AI agents production deployment 2025",
                    "limit": 10,
                },
            },
            alternatives=[
                {
                    "name": "arxiv_search",
                    "confidence": 0.72,
                    "reason_rejected": "Need industry examples, not just papers",
                },
            ],
            reasoning="Web search covers blog posts, case studies, and papers.",
        )

        research_results = simulate_llm_call(
            "Summarize findings about AI agents in production"
        )

        agentlens.log_decision(
            type="MEMORY_RETRIEVAL",
            chosen={
                "name": "store_research_context",
                "confidence": 0.9,
                "params": {"key": "research_findings", "chunks": 5},
            },
            alternatives=[],
            reasoning="Store condensed findings for the writer agent to consume.",
        )
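
    # Back at the planner level: leaving the nested block closes the researcher
    # span, so subsequent decisions are logged under planner-agent again.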

    # Planner routes to writer agent
    agentlens.log_decision(
        type="ROUTING",
        chosen={
            "name": "writer-agent",
            "confidence": 0.97,
            "params": {"style": "technical-blog", "word_count": 1500},
        },
        alternatives=[
            {
                "name": "researcher-agent",
                "confidence": 0.15,
                "reason_rejected": "Research phase complete, enough material gathered",
            },
        ],
        reasoning="Research complete — hand off to writer with gathered material.",
    )

    # --- Nested: Writer Agent ---
    with agentlens.trace("writer-agent", tags=["writing"]):
        agentlens.log_decision(
            type="PLANNING",
            chosen={
                "name": "structured_outline_first",
                "confidence": 0.91,
                "params": {
                    "sections": ["intro", "challenges", "solutions", "conclusion"]
                },
            },
            alternatives=[
                {
                    "name": "stream_of_consciousness",
                    "confidence": 0.3,
                    "reason_rejected": "Technical blog needs clear structure",
                },
            ],
            reasoning="Outline-first approach produces better organized blog posts.",
        )

        outline = simulate_llm_call("Create blog outline for AI agents in production")
        draft = simulate_llm_call("Write full blog draft from outline", delay=0.5)
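
        # The editor trace below nests a third level deep: it appears as a child
        # of writer-agent, which is itself a child of planner-agent.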
        # --- Nested deeper: Editor sub-agent within writer ---
        with agentlens.trace("editor-agent", tags=["editing"]):
            agentlens.log_decision(
                type="TOOL_SELECTION",
                chosen={
                    "name": "grammar_check",
                    "confidence": 0.85,
                    "params": {"text_length": 1500, "style_guide": "technical"},
                },
                alternatives=[
                    {
                        "name": "skip_editing",
                        "confidence": 0.1,
                        "reason_rejected": "Always edit before publishing",
                    },
                ],
                reasoning="Run grammar and style check on the draft.",
            )

            edited = simulate_llm_call("Edit and polish the blog draft", delay=0.3)

print("Blog pipeline complete!")
print(f"Research: {research_results}")
print(f"Final draft: {edited}")

# Shutdown
agentlens.shutdown()

print("\nDone! Check AgentLens dashboard — you'll see nested spans:")
print("  planner-agent")
print("    -> researcher-agent")
print("    -> writer-agent")
print("       -> editor-agent")