diff --git a/apps/web/src/app/docs/api-reference/page.tsx b/apps/web/src/app/docs/api-reference/page.tsx
new file mode 100644
index 0000000..bd66981
--- /dev/null
+++ b/apps/web/src/app/docs/api-reference/page.tsx
@@ -0,0 +1,629 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "REST API Reference",
+ description:
+ "Complete API contract for AgentLens trace ingestion and retrieval endpoints.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/getting-started/page.tsx b/apps/web/src/app/docs/getting-started/page.tsx
new file mode 100644
index 0000000..4e1b8ca
--- /dev/null
+++ b/apps/web/src/app/docs/getting-started/page.tsx
@@ -0,0 +1,226 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "Getting Started",
+ description:
+ "Install AgentLens, initialize the SDK, and send your first trace in under five minutes.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function GettingStartedPage() {
+ return (
+
+
+ Getting Started
+
+
+ Go from zero to full agent observability in under five minutes. This
+ guide walks you through installing the SDK, initializing it, and sending
+ your first trace.
+
+
+
+
+
+
+ Step 1: Install the SDK
+
+
+ Python
+ pip install vectry-agentlens
+
+
+ TypeScript / Node.js
+
+ npm install agentlens-sdk
+
+
+
+
+ Step 2: Initialize AgentLens
+
+
+ Python
+ {`import agentlens
+
+agentlens.init(
+ api_key="your-api-key",
+ endpoint="https://agentlens.vectry.tech"
+)`}
+
+
+ TypeScript
+
+ {`import { init } from "agentlens-sdk";
+
+init({
+ apiKey: "your-api-key",
+ endpoint: "https://agentlens.vectry.tech",
+});`}
+
+
+
+
+ Step 3: Trace your first agent
+
+
+ Python
+ {`import agentlens
+from agentlens import trace
+
+agentlens.init(
+ api_key="your-api-key",
+ endpoint="https://agentlens.vectry.tech"
+)
+
+@trace(name="my-first-agent")
+def my_agent(prompt: str) -> str:
+ # Your agent logic here
+ response = call_llm(prompt)
+ return response
+
+# Run it — the trace is sent automatically
+result = my_agent("What is the capital of France?")`}
+
+
+ TypeScript
+
+ {`import { init, TraceBuilder } from "agentlens-sdk";
+
+init({
+ apiKey: "your-api-key",
+ endpoint: "https://agentlens.vectry.tech",
+});
+
+const trace = new TraceBuilder("my-first-agent");
+
+trace.addSpan({
+ name: "llm-call",
+ type: "LLM_CALL",
+ input: { prompt: "What is the capital of France?" },
+ output: { response: "Paris" },
+ status: "COMPLETED",
+});
+
+await trace.end();`}
+
+
+
+
+ Step 4: View in the dashboard
+
+
+ Open your AgentLens dashboard to see the trace you just sent. You will
+ see the trace name, its status, timing information, and any spans or
+ decision points you recorded.
+
+
+ Open Dashboard
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/integrations/anthropic/page.tsx b/apps/web/src/app/docs/integrations/anthropic/page.tsx
new file mode 100644
index 0000000..260588e
--- /dev/null
+++ b/apps/web/src/app/docs/integrations/anthropic/page.tsx
@@ -0,0 +1,191 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "Anthropic Integration",
+ description:
+ "Wrap the Anthropic client to automatically trace Claude API calls with full metadata capture.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function AnthropicIntegrationPage() {
+ return (
+
+
+ Anthropic Integration
+
+
+ Wrap the Anthropic Python client to automatically trace all Claude API
+ calls. AgentLens captures model, token usage, cost, latency, and the
+ full message exchange.
+
+
+
+ Installation
+ {`pip install vectry-agentlens anthropic`}
+
+
+
+ Quick setup
+ {`import agentlens
+from agentlens.integrations.anthropic import wrap_anthropic
+import anthropic
+
+agentlens.init(
+ api_key="your-api-key",
+ endpoint="https://agentlens.vectry.tech",
+)
+
+client = wrap_anthropic(anthropic.Anthropic())
+
+response = client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=1024,
+ messages=[
+ {"role": "user", "content": "Explain the halting problem."},
+ ],
+)`}
+
+
+
+ What gets captured
+
+
+
+
+ Field
+ Description
+
+
+
+
+ input.model
+ Model name (claude-sonnet-4-20250514, claude-haiku, etc.)
+
+
+ input.messages
+ Full message array sent to the API
+
+
+ input.system
+ System prompt if provided
+
+
+ output.content
+ Response content blocks
+
+
+ tokenCount
+ Input tokens + output tokens
+
+
+ costUsd
+ Estimated cost based on model pricing
+
+
+ durationMs
+ Wall-clock request time
+
+
+ metadata.stop_reason
+ How generation ended (end_turn, max_tokens, tool_use)
+
+
+
+
+
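+
+ As a worked example of costUsd: a claude-sonnet-4 call that uses 1,000
+ input tokens and 500 output tokens is estimated at 1,000 × $3/M + 500 × $15/M,
+ roughly $0.0105, using the per-token prices bundled with the SDK.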
+
+
+ Async client
+ {`from agentlens.integrations.anthropic import wrap_anthropic
+import anthropic
+
+async_client = wrap_anthropic(anthropic.AsyncAnthropic())
+
+response = await async_client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=1024,
+ messages=[{"role": "user", "content": "Hello!"}],
+)`}
+
+
+
+
+ Combining with @trace
+
+ {`import agentlens
+from agentlens import trace
+from agentlens.integrations.anthropic import wrap_anthropic
+import anthropic
+
+agentlens.init(api_key="...", endpoint="...")
+client = wrap_anthropic(anthropic.Anthropic())
+
+@trace(name="analysis-agent")
+async def analyze(document: str) -> str:
+ response = client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=2048,
+ system="You are a document analysis expert.",
+ messages=[{"role": "user", "content": f"Analyze: {document}"}],
+ )
+ return response.content[0].text`}
+
+
+
+ Tool use
+
+ When Claude invokes tools, AgentLens captures each tool use as a
+ TOOL_SELECTION decision point automatically:
+
+ {`@trace(name="claude-tool-agent")
+async def tool_agent(prompt: str):
+ response = client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=1024,
+ tools=[{
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a ticker symbol",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "Stock ticker symbol"
+ }
+ },
+ "required": ["ticker"]
+ }
+ }],
+ messages=[{"role": "user", "content": prompt}],
+ )
+ return response`}
+
+
+
+ Supported API methods
+
+
+ messages.create() — Message creation (including streaming; see the sketch below)
+
+
+ messages.count_tokens() — Token counting
+
+
+
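+
+
+ Streaming
+
+ Streaming responses are traced as well. A minimal sketch, assuming the
+ wrapper preserves the standard Anthropic streaming interface:
+
+ {`with client.messages.stream(
+    model="claude-sonnet-4-20250514",
+    max_tokens=1024,
+    messages=[{"role": "user", "content": "Write a haiku about tracing."}],
+) as stream:
+    for text in stream.text_stream:
+        print(text, end="", flush=True)
+
+# The LLM_CALL span is finalized when the stream context exits`}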
+
+ );
+}
diff --git a/apps/web/src/app/docs/integrations/langchain/page.tsx b/apps/web/src/app/docs/integrations/langchain/page.tsx
new file mode 100644
index 0000000..9e76b4d
--- /dev/null
+++ b/apps/web/src/app/docs/integrations/langchain/page.tsx
@@ -0,0 +1,213 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "LangChain Integration",
+ description:
+ "Use the AgentLensCallbackHandler to trace LangChain chains, agents, and tool invocations.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function LangChainIntegrationPage() {
+ return (
+
+
+ LangChain Integration
+
+
+ The AgentLensCallbackHandler plugs into LangChain's callback system
+ to automatically trace chains, agents, LLM calls, and tool invocations
+ without changing your existing code.
+
+
+
+ Installation
+ {`pip install vectry-agentlens langchain langchain-openai`}
+
+
+
+ Quick setup
+ {`import agentlens
+from agentlens.integrations.langchain import AgentLensCallbackHandler
+
+agentlens.init(
+ api_key="your-api-key",
+ endpoint="https://agentlens.vectry.tech",
+)
+
+handler = AgentLensCallbackHandler()`}
+
+
+
+ Using with chains
+
+ Pass the handler in the callbacks config:
+
+ {`from langchain_openai import ChatOpenAI
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+llm = ChatOpenAI(model="gpt-4o")
+prompt = ChatPromptTemplate.from_messages([
+ ("system", "You are a helpful assistant."),
+ ("user", "{input}"),
+])
+
+chain = prompt | llm | StrOutputParser()
+
+result = chain.invoke(
+ {"input": "Explain recursion"},
+ config={"callbacks": [handler]},
+)`}
+
+
+
+ Using with agents
+ {`from langchain_openai import ChatOpenAI
+from langchain.agents import AgentExecutor, create_tool_calling_agent
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.tools import tool
+
+@tool
+def calculator(expression: str) -> str:
+ """Evaluate a math expression."""
+ # eval() keeps this demo short; never call it on untrusted input
+ return str(eval(expression))
+
+llm = ChatOpenAI(model="gpt-4o")
+prompt = ChatPromptTemplate.from_messages([
+ ("system", "You are a helpful math assistant."),
+ ("user", "{input}"),
+ ("placeholder", "{agent_scratchpad}"),
+])
+
+agent = create_tool_calling_agent(llm, [calculator], prompt)
+executor = AgentExecutor(agent=agent, tools=[calculator])
+
+result = executor.invoke(
+ {"input": "What is 42 * 17 + 3?"},
+ config={"callbacks": [handler]},
+)`}
+
+
+
+ What gets captured
+
+ The callback handler maps LangChain events to AgentLens concepts:
+
+
+
+
+
+ LangChain Event
+ AgentLens Type
+ Captured Data
+
+
+
+
+ Chain start/end
+ CHAIN span
+ Input/output, duration
+
+
+ LLM start/end
+ LLM_CALL span
+ Model, messages, tokens, cost, duration
+
+
+ Tool start/end
+ TOOL_CALL span
+ Tool name, input args, output, duration
+
+
+ Agent action
+ TOOL_SELECTION decision
+ Selected tool, reasoning
+
+
+ Retry
+ RETRY event
+ Error message, attempt count
+
+
+ Error
+ ERROR event
+ Exception type, message, traceback
+
+
+
+
+
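+
+ For example, a single executor.invoke() that triggers one calculator call
+ produces a CHAIN span containing an LLM_CALL span, a TOOL_SELECTION
+ decision for the chosen tool, and a TOOL_CALL span for the calculator
+ execution.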
+
+
+ Global callbacks
+
+ To trace all LangChain operations without passing callbacks
+ individually, set the handler globally:
+
+ {`from langchain.callbacks.manager import set_handler
+
+set_handler(handler)
+
+# Now all chains and agents are traced automatically
+result = chain.invoke({"input": "Hello"})
+# No need to pass config={"callbacks": [handler]}`}
+
+
+
+ Handler options
+
+
+
+
+ Parameter
+ Type
+ Default
+ Description
+
+
+
+
+ trace_name
+ str | None
+ None
+ Override the default trace name
+
+
+ tags
+ list[str]
+ []
+ Tags to attach to all traces
+
+
+ capture_io
+ bool
+ True
+ Capture input/output payloads
+
+
+
+
+ {`handler = AgentLensCallbackHandler(
+ trace_name="my-langchain-app",
+ tags=["production", "langchain"],
+ capture_io=True,
+)`}
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/integrations/openai/page.tsx b/apps/web/src/app/docs/integrations/openai/page.tsx
new file mode 100644
index 0000000..5eccc93
--- /dev/null
+++ b/apps/web/src/app/docs/integrations/openai/page.tsx
@@ -0,0 +1,202 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "OpenAI Integration",
+ description:
+ "Auto-trace all OpenAI API calls with a single wrapper. Captures model, tokens, cost, and latency.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function OpenAIIntegrationPage() {
+ return (
+
+
+ OpenAI Integration
+
+
+ Wrap the OpenAI client once and every API call is automatically traced.
+ AgentLens captures the model name, token usage, cost, latency, input
+ messages, and output completions.
+
+
+
+ Installation
+ {`pip install vectry-agentlens openai`}
+
+
+
+ Quick setup
+ {`import agentlens
+from agentlens.integrations.openai import wrap_openai
+import openai
+
+agentlens.init(
+ api_key="your-api-key",
+ endpoint="https://agentlens.vectry.tech",
+)
+
+client = wrap_openai(openai.OpenAI())
+
+# All calls are now auto-traced
+response = client.chat.completions.create(
+ model="gpt-4o",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Explain quantum computing in one paragraph."},
+ ],
+)`}
+
+
+
+ What gets captured
+
+ Each OpenAI API call creates an LLM_CALL span with the following data:
+
+
+
+
+
+ Field
+ Description
+
+
+
+
+ input.model
+ Model name (gpt-4o, gpt-4o-mini, etc.)
+
+
+ input.messages
+ Full message array sent to the API
+
+
+ output.content
+ Response content from the model
+
+
+ tokenCount
+ Total tokens (prompt + completion)
+
+
+ costUsd
+ Estimated cost based on model pricing
+
+
+ durationMs
+ Wall-clock time for the request
+
+
+ metadata.finish_reason
+ How the model stopped (stop, length, tool_calls)
+
+
+
+
+
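+
+ As a worked example of costUsd: a gpt-4o call with 1,000 prompt tokens and
+ 500 completion tokens is estimated at 1,000 × $2.50/M + 500 × $10/M,
+ roughly $0.0075.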
+
+
+ Async client
+
+ The wrapper works with both sync and async OpenAI clients:
+
+ {`from agentlens.integrations.openai import wrap_openai
+import openai
+
+async_client = wrap_openai(openai.AsyncOpenAI())
+
+response = await async_client.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": "Hello!"}],
+)`}
+
+
+
+
+ Combining with @trace
+
+
+ When used inside a @trace-decorated
+ function, OpenAI calls appear as child spans of the trace:
+
+ {`import agentlens
+from agentlens import trace
+from agentlens.integrations.openai import wrap_openai
+import openai
+
+agentlens.init(api_key="...", endpoint="...")
+client = wrap_openai(openai.OpenAI())
+
+@trace(name="research-agent")
+async def research(topic: str) -> str:
+ # This LLM call becomes a child span of "research-agent"
+ response = client.chat.completions.create(
+ model="gpt-4o",
+ messages=[
+ {"role": "system", "content": "Summarize the following topic."},
+ {"role": "user", "content": topic},
+ ],
+ )
+ return response.choices[0].message.content`}
+
+
+
+ Tool calls
+
+ When the model invokes tools (function calling), AgentLens
+ automatically captures each tool call as a TOOL_SELECTION decision
+ point and the tool execution as a TOOL_CALL span:
+
+ {`@trace(name="tool-agent")
+async def agent_with_tools(prompt: str):
+ response = client.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": prompt}],
+ tools=[{
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "description": "Get weather for a city",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "city": {"type": "string"}
+ },
+ },
+ },
+ }],
+ )
+ # AgentLens captures the tool selection decision automatically
+ return response`}
+
+
+
+ Supported API methods
+
+
+ chat.completions.create() — Chat completions (including streaming)
+
+
+ completions.create() — Legacy completions
+
+
+ embeddings.create() — Embedding generation (see the sketch below)
+
+
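+
+
+ Embeddings
+
+ Embedding calls go through the same wrapper. A minimal sketch, assuming
+ wrap_openai leaves the embeddings interface unchanged:
+
+ {`# Traced like any other call, with model name and token usage captured
+embedding = client.embeddings.create(
+    model="text-embedding-3-small",
+    input="AgentLens traces decisions, not just API calls.",
+)
+
+vector = embedding.data[0].embedding`}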
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/layout.tsx b/apps/web/src/app/docs/layout.tsx
new file mode 100644
index 0000000..f155a15
--- /dev/null
+++ b/apps/web/src/app/docs/layout.tsx
@@ -0,0 +1,63 @@
+import type { Metadata } from "next";
+import { DocsSidebar } from "./docs-sidebar";
+
+export const metadata: Metadata = {
+ title: {
+ default: "Documentation",
+ template: "%s | AgentLens Docs",
+ },
+ description:
+ "AgentLens documentation — learn how to instrument, trace, and observe your AI agents.",
+};
+
+export default function DocsLayout({
+ children,
+}: Readonly<{
+ children: React.ReactNode;
+}>) {
+ return (
+
+
+ {children}
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/opencode-plugin/page.tsx b/apps/web/src/app/docs/opencode-plugin/page.tsx
new file mode 100644
index 0000000..0195146
--- /dev/null
+++ b/apps/web/src/app/docs/opencode-plugin/page.tsx
@@ -0,0 +1,230 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "OpenCode Plugin",
+ description:
+ "Capture OpenCode sessions including tool calls, LLM calls, file edits, and git diffs with the AgentLens OpenCode plugin.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function OpenCodePluginPage() {
+ return (
+
+
+ OpenCode Plugin
+
+
+ The AgentLens OpenCode plugin captures everything that happens during an
+ OpenCode coding session and sends it as structured traces to your
+ AgentLens instance.
+
+
+
+ Installation
+ {`npm install opencode-agentlens`}
+
+
+
+ Configuration
+
+ Add the plugin to your opencode.json configuration file:
+
+ {`{
+ "plugin": ["opencode-agentlens"]
+}`}
+
+ Set the required environment variables:
+
+ {`export AGENTLENS_API_KEY="your-api-key"
+export AGENTLENS_ENDPOINT="https://agentlens.vectry.tech"`}
+
+
+ You can also add these to a .env file in your project root.
+
+
+
+
+ What gets captured
+
+ Every OpenCode session becomes a trace with nested spans and events
+ for each action taken during the session:
+
+
+
+
+
+
+ AGENT span
+
+
+ Sessions
+
+
+
+ Each OpenCode session is captured as a top-level AGENT span.
+ Includes session ID, start time, end time, and overall status.
+
+
+
+
+
+
+ LLM_CALL span
+
+
+ LLM calls
+
+
+
+ Every call to an LLM provider (Claude, GPT, etc.) is recorded with
+ the full prompt, response, token counts, and cost.
+
+
+
+
+
+
+ TOOL_CALL span
+
+
+ Tool calls
+
+
+
+ Tool invocations including file reads, writes, shell commands,
+ search operations, and MCP tool calls. Captures input arguments
+ and outputs.
+
+
+
+
+
+
+ TOOL_SELECTION decision
+
+
+ Permissions
+
+
+
+ Permission requests and grants are captured as decision points,
+ showing what the agent asked to do and whether it was allowed.
+
+
+
+
+
+
+ CUSTOM span
+
+
+ File edits
+
+
+
+ Every file creation, modification, and deletion is tracked with
+ before/after content diffs.
+
+
+
+
+
+
+ CUSTOM event
+
+
+ Git diffs
+
+
+
+ Git operations (commits, diffs, branch changes) are captured as
+ events with the full diff content.
+
+
+
+
+
+
+ Trace structure
+
+ A typical OpenCode session trace looks like this:
+
+ {`Trace: "opencode-session-abc123"
+ |
+ +-- Span: "session" (AGENT)
+ | +-- Span: "read-file: src/main.ts" (TOOL_CALL)
+ | +-- Span: "llm-call: claude-sonnet" (LLM_CALL)
+ | | Decision: TOOL_SELECTION -> chose "edit-file" over "write-file"
+ | +-- Span: "edit-file: src/main.ts" (TOOL_CALL)
+ | +-- Span: "llm-call: claude-sonnet" (LLM_CALL)
+ | +-- Span: "bash: npm test" (TOOL_CALL)
+ | +-- Event: "git-diff" (CUSTOM)
+ | +-- Span: "bash: git commit" (TOOL_CALL)`}
+
+
+
+ Environment variables
+
+
+
+
+ Variable
+ Required
+ Description
+
+
+
+
+ AGENTLENS_API_KEY
+ Yes
+ API key for authentication
+
+
+ AGENTLENS_ENDPOINT
+ Yes
+ AgentLens server URL
+
+
+ AGENTLENS_ENABLED
+ No
+ Set to "false" to disable (default: "true")
+
+
+ AGENTLENS_SESSION_TAGS
+ No
+ Comma-separated tags to add to all session traces
+
+
+
+
+
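+
+ For example, to tag every session from a nightly CI runner with
+ illustrative values:
+
+ {`export AGENTLENS_SESSION_TAGS="ci,nightly"`}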
+
+
+ Filtering sensitive data
+
+ By default, the plugin captures full file contents and command outputs.
+ To filter sensitive data, set the AGENTLENS_REDACT_PATTERNS environment variable with a comma-separated list of regex patterns:
+
+ {`export AGENTLENS_REDACT_PATTERNS="password=.*,API_KEY=.*,Bearer .*"`}
+
+ Matched content is replaced with [REDACTED] before
+ being sent to the server.
+
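+
+ For example, with the Bearer .* pattern above, a captured line such as
+ Authorization: Bearer sk-live-abc123 is stored as
+ Authorization: [REDACTED].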
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/page.tsx b/apps/web/src/app/docs/page.tsx
new file mode 100644
index 0000000..e49d144
--- /dev/null
+++ b/apps/web/src/app/docs/page.tsx
@@ -0,0 +1,131 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "Documentation",
+ description:
+ "AgentLens documentation — instrument, trace, and observe your AI agents with full decision visibility.",
+};
+
+const sections = [
+ {
+ heading: "Getting Started",
+ items: [
+ {
+ title: "Quick Start",
+ href: "/docs/getting-started",
+ description:
+ "Install the SDK, initialize AgentLens, and send your first trace in under five minutes.",
+ },
+ {
+ title: "Core Concepts",
+ href: "/docs/concepts",
+ description:
+ "Understand Traces, Spans, Decision Points, and Events — the four building blocks of AgentLens.",
+ },
+ ],
+ },
+ {
+ heading: "SDKs",
+ items: [
+ {
+ title: "Python SDK",
+ href: "/docs/python-sdk",
+ description:
+ "Full reference for the Python SDK: init(), @trace decorator, log_decision(), TraceContext, and configuration.",
+ },
+ {
+ title: "TypeScript SDK",
+ href: "/docs/typescript-sdk",
+ description:
+ "Full reference for the TypeScript SDK: init(), TraceBuilder API, createDecision(), and shutdown().",
+ },
+ ],
+ },
+ {
+ heading: "Integrations",
+ items: [
+ {
+ title: "OpenAI",
+ href: "/docs/integrations/openai",
+ description:
+ "Auto-trace all OpenAI API calls with a single wrapper. Captures model, tokens, cost, and latency.",
+ },
+ {
+ title: "Anthropic",
+ href: "/docs/integrations/anthropic",
+ description:
+ "Wrap the Anthropic client to automatically trace Claude API calls with full metadata capture.",
+ },
+ {
+ title: "LangChain",
+ href: "/docs/integrations/langchain",
+ description:
+ "Use the AgentLensCallbackHandler to trace LangChain chains, agents, and tool invocations.",
+ },
+ ],
+ },
+ {
+ heading: "Tools & Deployment",
+ items: [
+ {
+ title: "OpenCode Plugin",
+ href: "/docs/opencode-plugin",
+ description:
+ "Capture OpenCode sessions including tool calls, LLM calls, file edits, and git diffs automatically.",
+ },
+ {
+ title: "REST API Reference",
+ href: "/docs/api-reference",
+ description:
+ "Complete contract for POST /api/traces and GET /api/traces including payload shapes and error codes.",
+ },
+ {
+ title: "Self-Hosting",
+ href: "/docs/self-hosting",
+ description:
+ "Deploy AgentLens with Docker or from source. Configure database, API keys, and environment variables.",
+ },
+ ],
+ },
+];
+
+export default function DocsPage() {
+ return (
+
+
+ AgentLens Documentation
+
+
+ AgentLens is an open-source agent observability platform that traces
+ decisions, not just API calls. These docs cover everything from initial
+ setup to advanced self-hosting.
+
+
+
+ {sections.map((section) => (
+
+
+ {section.heading}
+
+
+
+ ))}
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/python-sdk/page.tsx b/apps/web/src/app/docs/python-sdk/page.tsx
new file mode 100644
index 0000000..b0c0938
--- /dev/null
+++ b/apps/web/src/app/docs/python-sdk/page.tsx
@@ -0,0 +1,327 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "Python SDK",
+ description:
+ "Full reference for the AgentLens Python SDK: init(), @trace decorator, log_decision(), TraceContext, and configuration.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+function ApiSection({
+ name,
+ signature,
+ description,
+ children,
+}: {
+ name: string;
+ signature: string;
+ description: string;
+ children?: React.ReactNode;
+}) {
+ return (
+
+
{name}
+
+ {signature}
+
+
{description}
+ {children}
+
+ );
+}
+
+export default function PythonSdkPage() {
+ return (
+
+
Python SDK
+
+ The AgentLens Python SDK provides decorators, context managers, and
+ helper functions to instrument your AI agents.
+
+
+
+
+ pip install vectry-agentlens
+
+
+
+
API Reference
+
+
+
+ Parameters
+
+
+
+
+
+ Parameter
+ Type
+ Default
+ Description
+
+
+
+
+ api_key
+ str
+ required
+ Your AgentLens API key
+
+
+ endpoint
+ str
+ required
+ AgentLens server URL
+
+
+ flush_interval
+ float
+ 5.0
+ Seconds between automatic flushes
+
+
+ max_batch_size
+ int
+ 100
+ Max traces per batch request
+
+
+ enabled
+ bool
+ True
+ Set to False to disable tracing globally
+
+
+
+
+ {`import agentlens
+
+agentlens.init(
+ api_key="al_key_abc123",
+ endpoint="https://agentlens.vectry.tech",
+ flush_interval=10.0,
+ max_batch_size=50,
+)`}
+
+
+
+
+ Parameters
+
+
+
+
+
+ Parameter
+ Type
+ Description
+
+
+
+
+ name
+ str | None
+ Trace name. Defaults to the function name.
+
+
+ tags
+ list[str] | None
+ Tags to attach to the trace
+
+
+ metadata
+ dict | None
+ Arbitrary metadata dict
+
+
+
+
+ {`from agentlens import trace
+
+@trace(name="research-agent", tags=["research", "v2"])
+async def research(topic: str) -> str:
+ result = await search(topic)
+ summary = await summarize(result)
+ return summary
+
+# Can also be used without arguments
+@trace
+def simple_agent(prompt: str) -> str:
+ return call_llm(prompt)`}
+
+
+
+
+ Parameters
+
+
+
+
+
+ Parameter
+ Type
+ Description
+
+
+
+
+ type
+ str
+ One of: TOOL_SELECTION, ROUTING, RETRY, ESCALATION, MEMORY_RETRIEVAL, PLANNING, CUSTOM
+
+
+ chosen
+ dict
+ What was chosen
+
+
+ alternatives
+ list[dict]
+ What else was considered
+
+
+ reasoning
+ str | None
+ Why this choice was made
+
+
+ context_snapshot
+ dict | None
+ Snapshot of context at decision time
+
+
+
+
+ {`import agentlens
+from agentlens import trace
+
+@trace(name="routing-agent")
+async def route_request(user_input: str):
+ intent = classify_intent(user_input)
+
+ agentlens.log_decision(
+ type="ROUTING",
+ chosen={"handler": "refund", "confidence": 0.92},
+ alternatives=[
+ {"handler": "faq", "confidence": 0.65},
+ {"handler": "escalate", "confidence": 0.23},
+ ],
+ reasoning="High confidence refund intent detected",
+ context_snapshot={"intent": intent, "input_length": len(user_input)},
+ )
+
+ return await handle_refund(user_input)`}
+
+
+
+ {`import agentlens
+
+async def process_batch(items: list[str]):
+ for item in items:
+ ctx = agentlens.TraceContext(
+ name=f"process-{item}",
+ tags=["batch"],
+ )
+ ctx.start()
+
+ try:
+ result = await process(item)
+ ctx.add_span(
+ name="process",
+ type="CUSTOM",
+ input={"item": item},
+ output={"result": result},
+ status="COMPLETED",
+ )
+ ctx.end(status="COMPLETED")
+ except Exception as e:
+ ctx.add_event(type="ERROR", name=str(e))
+ ctx.end(status="ERROR")`}
+
+
+
+ {`import agentlens
+import atexit
+
+agentlens.init(api_key="...", endpoint="...")
+
+# Register shutdown hook
+atexit.register(agentlens.shutdown)
+
+# Or call manually
+agentlens.shutdown(timeout=30.0)`}
+
+
+
+ Configuration
+
+ The SDK can also be configured via environment variables. These take
+ precedence over values passed to init().
+
+
+
+
+
+ Variable
+ Description
+
+
+
+
+ AGENTLENS_API_KEY
+ API key for authentication
+
+
+ AGENTLENS_ENDPOINT
+ Server URL
+
+
+ AGENTLENS_ENABLED
+ Set to "false" to disable tracing
+
+
+ AGENTLENS_FLUSH_INTERVAL
+ Flush interval in seconds
+
+
+
+
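+
+ A sketch of the precedence rule, with an illustrative staging endpoint:
+ an exported variable wins over the matching init() argument.
+
+ {`import os
+
+import agentlens
+
+os.environ["AGENTLENS_ENDPOINT"] = "https://staging.agentlens.vectry.tech"
+
+agentlens.init(
+    api_key="al_key_abc123",
+    endpoint="https://agentlens.vectry.tech",  # overridden by AGENTLENS_ENDPOINT
+)`}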
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/self-hosting/page.tsx b/apps/web/src/app/docs/self-hosting/page.tsx
new file mode 100644
index 0000000..959de3b
--- /dev/null
+++ b/apps/web/src/app/docs/self-hosting/page.tsx
@@ -0,0 +1,255 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "Self-Hosting",
+ description:
+ "Deploy AgentLens with Docker or from source. Configure database, API keys, and environment variables.",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+export default function SelfHostingPage() {
+ return (
+
+
Self-Hosting
+
+ AgentLens is open source and designed to be self-hosted. You can deploy
+ it with Docker in minutes, or run from source for development.
+
+
+
+ Quick start with Docker
+ {`git clone https://gitea.repi.fun/repi/agentlens
+cd agentlens
+docker build -t agentlens .
+docker run -p 3000:3000 \\
+ -e DATABASE_URL="postgresql://user:pass@host:5432/agentlens" \\
+ -e AGENTLENS_API_KEY="your-secret-key" \\
+ agentlens`}
+
+ The dashboard will be available at{" "}
+
+ http://localhost:3000
+ {" "}
+ and the API at{" "}
+
+ http://localhost:3000/api/traces
+
+ .
+
+
+
+
+ Docker Compose
+
+ For a complete setup with PostgreSQL included:
+
+ {`version: "3.8"
+
+services:
+ db:
+ image: postgres:16-alpine
+ environment:
+ POSTGRES_USER: agentlens
+ POSTGRES_PASSWORD: agentlens
+ POSTGRES_DB: agentlens
+ volumes:
+ - pgdata:/var/lib/postgresql/data
+ ports:
+ - "5432:5432"
+
+ app:
+ build: .
+ ports:
+ - "3000:3000"
+ environment:
+ DATABASE_URL: "postgresql://agentlens:agentlens@db:5432/agentlens"
+ AGENTLENS_API_KEY: "your-secret-key"
+ PORT: "3000"
+ depends_on:
+ - db
+
+volumes:
+ pgdata:`}
+ {`docker compose up -d`}
+
+
+
+ Running from source
+
+ For development or when you need to customize AgentLens:
+
+ {`git clone https://gitea.repi.fun/repi/agentlens
+cd agentlens
+
+# Install dependencies (uses npm workspaces)
+npm install
+
+# Set up the database
+cp apps/web/.env.example apps/web/.env
+# Edit .env with your DATABASE_URL
+
+# Generate Prisma client and push schema
+npm run db:generate --workspace=@agentlens/web
+npm run db:push --workspace=@agentlens/web
+
+# Start the development server
+npm run dev --workspace=@agentlens/web`}
+
+
+
+ Environment variables
+
+
+
+
+ Variable
+ Required
+ Default
+ Description
+
+
+
+
+ DATABASE_URL
+ Yes
+ -
+ PostgreSQL connection string
+
+
+ AGENTLENS_API_KEY
+ Yes
+ -
+ API key that SDKs must present to ingest traces
+
+
+ PORT
+ No
+ 3000
+ HTTP port the server listens on
+
+
+ NODE_ENV
+ No
+ production
+ Set to "development" for dev mode
+
+
+ NEXTAUTH_SECRET
+ No
+ -
+ Secret for session signing (if auth is enabled)
+
+
+
+
+
+
+
+ Database setup
+
+ AgentLens uses PostgreSQL with Prisma ORM. The database schema is
+ managed via Prisma migrations.
+
+
+ Connection string format
+
+ {`postgresql://USER:PASSWORD@HOST:PORT/DATABASE`}
+
+
+ Running migrations
+
+ {`# Push schema to database (development)
+npm run db:push --workspace=@agentlens/web
+
+# Run migrations (production)
+npm run db:migrate --workspace=@agentlens/web`}
+
+
+
+ Reverse proxy setup
+
+ For production deployments behind nginx or Caddy:
+
+ {`agentlens.yourdomain.com {
+ reverse_proxy localhost:3000
+}`}
+ {`server {
+ listen 443 ssl;
+ server_name agentlens.yourdomain.com;
+
+ location / {
+ proxy_pass http://localhost:3000;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+}`}
+
+
+
+ Updating
+ {`# Pull latest changes
+cd agentlens
+git pull origin main
+
+# Rebuild
+docker build -t agentlens .
+
+# Restart with new image
+docker compose up -d`}
+
+
+
+ Resource requirements
+
+
+
+
+ Component
+ Minimum
+ Recommended
+
+
+
+
+ CPU
+ 1 core
+ 2+ cores
+
+
+ Memory
+ 512 MB
+ 1 GB+
+
+
+ Disk
+ 1 GB
+ 10 GB+ (depends on trace volume)
+
+
+ PostgreSQL
+ 14+
+ 16+
+
+
+
+
+
+
+ );
+}
diff --git a/apps/web/src/app/docs/typescript-sdk/page.tsx b/apps/web/src/app/docs/typescript-sdk/page.tsx
new file mode 100644
index 0000000..dd6b762
--- /dev/null
+++ b/apps/web/src/app/docs/typescript-sdk/page.tsx
@@ -0,0 +1,301 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+ title: "TypeScript SDK",
+ description:
+ "Full reference for the AgentLens TypeScript SDK: init(), TraceBuilder, createDecision(), and shutdown().",
+};
+
+function CodeBlock({ children, title }: { children: string; title?: string }) {
+ return (
+
+ {title && (
+
+ {title}
+
+ )}
+
+ {children}
+
+
+ );
+}
+
+function ApiSection({
+ name,
+ signature,
+ description,
+ children,
+}: {
+ name: string;
+ signature: string;
+ description: string;
+ children?: React.ReactNode;
+}) {
+ return (
+
+
{name}
+
+ {signature}
+
+
{description}
+ {children}
+
+ );
+}
+
+export default function TypeScriptSdkPage() {
+ return (
+
+
+ TypeScript SDK
+
+
+ The AgentLens TypeScript SDK provides a builder-based API for
+ constructing and sending traces from Node.js and edge runtimes.
+
+
+
+
+ npm install agentlens-sdk
+
+
+
+
API Reference
+
+
+
+ Options
+
+
+
+
+
+ Property
+ Type
+ Default
+ Description
+
+
+
+
+ apiKey
+ string
+ required
+ Your AgentLens API key
+
+
+ endpoint
+ string
+ required
+ AgentLens server URL
+
+
+ flushInterval
+ number
+ 5000
+ Milliseconds between flushes
+
+
+ maxBatchSize
+ number
+ 100
+ Max traces per batch
+
+
+ enabled
+ boolean
+ true
+ Toggle tracing on/off
+
+
+
+
+ {`import { init } from "agentlens-sdk";
+
+init({
+ apiKey: process.env.AGENTLENS_API_KEY!,
+ endpoint: "https://agentlens.vectry.tech",
+ flushInterval: 10000,
+});`}
+
+
+
+
+ Constructor options
+
+
+
+
+
+ Property
+ Type
+ Description
+
+
+
+
+ tags
+ string[]
+ Tags for this trace
+
+
+ sessionId
+ string
+ Group traces into a session
+
+
+ metadata
+ Record<string, unknown>
+ Arbitrary metadata
+
+
+
+
+
+
+ Methods
+
+
+
+
+
+ addSpan(span: SpanInput): string
+
+
+ Add a span to the trace. Returns the generated span ID. Pass parentSpanId to nest spans.
+
+
+
+
+ addDecision(decision: DecisionInput): string
+
+
+ Add a decision point. Returns the generated decision ID.
+
+
+
+
+ addEvent(event: EventInput): string
+
+
+ Add an event to the trace. Returns the generated event ID.
+
+
+
+
+ end(status?: "COMPLETED" | "ERROR"): Promise<void>
+
+
+ Finalize and send the trace. Defaults to COMPLETED.
+
+
+
+
+ {`import { TraceBuilder } from "agentlens-sdk";
+
+const trace = new TraceBuilder("customer-support", {
+ tags: ["support", "v2"],
+ sessionId: "session-abc",
+});
+
+const agentSpan = trace.addSpan({
+ name: "classify-intent",
+ type: "LLM_CALL",
+ input: { messages: [{ role: "user", content: "I need a refund" }] },
+ output: { intent: "refund", confidence: 0.95 },
+ status: "COMPLETED",
+ tokenCount: 150,
+ costUsd: 0.002,
+ durationMs: 340,
+});
+
+trace.addDecision({
+ type: "ROUTING",
+ chosen: { handler: "refund-flow" },
+ alternatives: [{ handler: "faq-flow" }, { handler: "escalate" }],
+ reasoning: "High confidence refund intent",
+ parentSpanId: agentSpan,
+});
+
+trace.addSpan({
+ name: "process-refund",
+ type: "TOOL_CALL",
+ input: { orderId: "ord-123" },
+ output: { success: true },
+ status: "COMPLETED",
+ parentSpanId: agentSpan,
+});
+
+await trace.end();`}
+
+
+
+ {`import { createDecision } from "agentlens-sdk";
+
+const decision = createDecision(
+ "TOOL_SELECTION",
+ { tool: "calculator", confidence: 0.88 },
+ [
+ { tool: "web_search", confidence: 0.52 },
+ { tool: "code_exec", confidence: 0.34 },
+ ],
+ { reasoning: "Math expression detected in input" }
+);`}
+
+
+
+ {`import { shutdown } from "agentlens-sdk";
+
+process.on("SIGTERM", async () => {
+ await shutdown(30000);
+ process.exit(0);
+});`}
+
+
+
+ Environment Variables
+
+
+
+
+ Variable
+ Description
+
+
+
+
+ AGENTLENS_API_KEY
+ API key (overrides init param)
+
+
+ AGENTLENS_ENDPOINT
+ Server URL (overrides init param)
+
+
+ AGENTLENS_ENABLED
+ Set to "false" to disable
+
+
+
+
+
+
+ );
+}
diff --git a/apps/web/src/components/trace-analytics.tsx b/apps/web/src/components/trace-analytics.tsx
index 4f43351..4c7b075 100644
--- a/apps/web/src/components/trace-analytics.tsx
+++ b/apps/web/src/components/trace-analytics.tsx
@@ -467,12 +467,27 @@ function TokenUsageGauge({ trace }: { trace: Trace }) {
null;
const modelContextWindows: Record<string, number> = {
+ "gpt-5.2": 128000,
+ "gpt-5.1": 128000,
+ "gpt-5": 128000,
+ "gpt-5-mini": 128000,
+ "gpt-5-nano": 128000,
+ "gpt-4.1": 1047576,
+ "gpt-4.1-mini": 1047576,
+ "gpt-4.1-nano": 1047576,
+ "o3": 200000,
+ "o3-mini": 200000,
+ "o4-mini": 200000,
"gpt-4": 8192,
"gpt-4-32k": 32768,
"gpt-4-turbo": 128000,
"gpt-4o": 128000,
"gpt-4o-mini": 128000,
"gpt-3.5-turbo": 16385,
+ "claude-opus-4-6": 200000,
+ "claude-4.5-opus": 200000,
+ "claude-4.5-sonnet": 200000,
+ "claude-4.5-haiku": 200000,
"claude-3-opus": 200000,
"claude-3-sonnet": 200000,
"claude-3-haiku": 200000,
diff --git a/packages/opencode-plugin/src/utils.ts b/packages/opencode-plugin/src/utils.ts
index c745564..050b826 100644
--- a/packages/opencode-plugin/src/utils.ts
+++ b/packages/opencode-plugin/src/utils.ts
@@ -52,13 +52,32 @@ export function extractToolMetadata(
}
const MODEL_COSTS: Record<string, { input: number; output: number }> = {
- "claude-opus-4-20250514": { input: 15, output: 75 },
- "claude-sonnet-4-20250514": { input: 3, output: 15 },
- "claude-haiku-3-20250307": { input: 0.25, output: 1.25 },
+ "gpt-5.2": { input: 1.75, output: 14 },
+ "gpt-5.1": { input: 1.25, output: 10 },
+ "gpt-5": { input: 1.25, output: 10 },
+ "gpt-5-mini": { input: 0.25, output: 2 },
+ "gpt-5-nano": { input: 0.05, output: 0.4 },
+ "gpt-4.1": { input: 2, output: 8 },
+ "gpt-4.1-mini": { input: 0.4, output: 1.6 },
+ "gpt-4.1-nano": { input: 0.1, output: 0.4 },
+ "o3": { input: 2, output: 8 },
+ "o3-mini": { input: 1.1, output: 4.4 },
+ "o4-mini": { input: 1.1, output: 4.4 },
+ "o1": { input: 15, output: 60 },
"gpt-4o": { input: 2.5, output: 10 },
"gpt-4o-mini": { input: 0.15, output: 0.6 },
"gpt-4-turbo": { input: 10, output: 30 },
- "o3-mini": { input: 1.1, output: 4.4 },
+ "gpt-4": { input: 30, output: 60 },
+ "claude-opus-4-6": { input: 5, output: 25 },
+ "claude-opus-4-20250514": { input: 15, output: 75 },
+ "claude-sonnet-4-20250514": { input: 3, output: 15 },
+ "claude-4.5-opus": { input: 5, output: 25 },
+ "claude-4.5-sonnet": { input: 3, output: 15 },
+ "claude-4.5-haiku": { input: 1, output: 5 },
+ "claude-3-5-sonnet": { input: 3, output: 15 },
+ "claude-3-5-haiku": { input: 0.8, output: 4 },
+ "claude-3-opus": { input: 15, output: 75 },
+ "claude-3-haiku": { input: 0.25, output: 1.25 },
};
export function getModelCost(
diff --git a/packages/sdk-python/agentlens/integrations/anthropic.py b/packages/sdk-python/agentlens/integrations/anthropic.py
index cc1e505..0313e3a 100644
--- a/packages/sdk-python/agentlens/integrations/anthropic.py
+++ b/packages/sdk-python/agentlens/integrations/anthropic.py
@@ -26,17 +26,22 @@ logger = logging.getLogger("agentlens")
# Cost per 1K tokens (input/output) for common Claude models
_MODEL_COSTS: Dict[str, tuple] = {
- # Claude 3 family
- "claude-3-opus-20240229": (0.015, 0.075),
- "claude-3-sonnet-20240229": (0.003, 0.015),
- "claude-3-haiku-20240307": (0.00025, 0.00125),
+ # Claude 4.5 family
+ "claude-opus-4-6": (0.005, 0.025),
+ "claude-4.5-opus": (0.005, 0.025),
+ "claude-4.5-sonnet": (0.003, 0.015),
+ "claude-4.5-haiku": (0.001, 0.005),
+ # Claude 4 family
+ "claude-sonnet-4-20250514": (0.003, 0.015),
+ "claude-opus-4-20250514": (0.015, 0.075),
# Claude 3.5 family
"claude-3-5-sonnet-20240620": (0.003, 0.015),
"claude-3-5-sonnet-20241022": (0.003, 0.015),
"claude-3-5-haiku-20241022": (0.0008, 0.004),
- # Claude 4 family
- "claude-sonnet-4-20250514": (0.003, 0.015),
- "claude-opus-4-20250514": (0.015, 0.075),
+ # Claude 3 family
+ "claude-3-opus-20240229": (0.015, 0.075),
+ "claude-3-sonnet-20240229": (0.003, 0.015),
+ "claude-3-haiku-20240307": (0.00025, 0.00125),
# Short aliases for prefix matching
"claude-3-opus": (0.015, 0.075),
"claude-3-sonnet": (0.003, 0.015),
@@ -46,9 +51,9 @@ _MODEL_COSTS: Dict[str, tuple] = {
"claude-3.5-sonnet": (0.003, 0.015),
"claude-3.5-haiku": (0.0008, 0.004),
"claude-sonnet-4": (0.003, 0.015),
- "claude-opus-4": (0.015, 0.075),
+ "claude-opus-4": (0.005, 0.025),
"claude-4-sonnet": (0.003, 0.015),
- "claude-4-opus": (0.015, 0.075),
+ "claude-4-opus": (0.005, 0.025),
}
diff --git a/packages/sdk-python/agentlens/integrations/openai.py b/packages/sdk-python/agentlens/integrations/openai.py
index c9f184b..0146ee1 100644
--- a/packages/sdk-python/agentlens/integrations/openai.py
+++ b/packages/sdk-python/agentlens/integrations/openai.py
@@ -26,16 +26,34 @@ logger = logging.getLogger("agentlens")
# Cost per 1K tokens (input/output) for common models
_MODEL_COSTS: Dict[str, tuple] = {
+ # GPT-5 family
+ "gpt-5.2": (0.00175, 0.014),
+ "gpt-5.1": (0.00125, 0.01),
+ "gpt-5": (0.00125, 0.01),
+ "gpt-5-mini": (0.00025, 0.002),
+ "gpt-5-nano": (0.00005, 0.0004),
+ # GPT-4.1 family
+ "gpt-4.1": (0.002, 0.008),
+ "gpt-4.1-mini": (0.0004, 0.0016),
+ "gpt-4.1-nano": (0.0001, 0.0004),
+ # o-series reasoning models
+ "o3": (0.002, 0.008),
+ "o3-mini": (0.0011, 0.0044),
+ "o4-mini": (0.0011, 0.0044),
+ "o1": (0.015, 0.06),
+ # GPT-4o family
+ "gpt-4o": (0.0025, 0.01),
+ "gpt-4o-2024-05-13": (0.005, 0.015),
+ "gpt-4o-2024-08-06": (0.0025, 0.01),
+ "gpt-4o-mini": (0.00015, 0.0006),
+ "gpt-4o-mini-2024-07-18": (0.00015, 0.0006),
+ # GPT-4 family
"gpt-4": (0.03, 0.06),
"gpt-4-32k": (0.06, 0.12),
"gpt-4-turbo": (0.01, 0.03),
"gpt-4-turbo-2024-04-09": (0.01, 0.03),
"gpt-4-turbo-preview": (0.01, 0.03),
- "gpt-4o": (0.005, 0.015),
- "gpt-4o-2024-05-13": (0.005, 0.015),
- "gpt-4o-2024-08-06": (0.0025, 0.01),
- "gpt-4o-mini": (0.00015, 0.0006),
- "gpt-4o-mini-2024-07-18": (0.00015, 0.0006),
+ # GPT-3.5 family
"gpt-3.5-turbo": (0.0005, 0.0015),
"gpt-3.5-turbo-0125": (0.0005, 0.0015),
"gpt-3.5-turbo-1106": (0.001, 0.002),