feat: initial monorepo scaffold - Next.js 15 + Prisma + Python SDK stubs

- Turborepo monorepo with apps/web and packages/database, sdk-python
- Next.js 15 app with professional landing page (dark theme, emerald accent)
- Prisma schema: Trace, DecisionPoint, Span, Event models with full indexing
- Docker Compose: web (port 4200), postgres:16, redis:7, migrate service
- Python SDK package stubs: init, trace decorator, log_decision, integrations
- Multi-stage Dockerfile for standalone Next.js production build
This commit is contained in:
Vectry
2026-02-09 22:46:16 +00:00
parent 572fd7e234
commit 9264866d1f
31 changed files with 3244 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
# AgentLens Python SDK
AgentLens provides observability for AI agents by tracing decisions, not just API calls.
## Installation
```bash
pip install agentlens
```
## Quick Start
```python
from agentlens import init, trace
# Initialize AgentLens
init(api_key="your-api-key", endpoint="https://agentlens.vectry.tech")
# Trace your agent functions
@trace(name="research-agent")
async def research(topic: str) -> str:
return f"Researching: {topic}"
```
## Features
- **Decision Tracing**: Log and visualize agent decisions with alternatives
- **Context Awareness**: Monitor context window utilization
- **Cost Intelligence**: Track token usage and costs per operation
- **Integrations**: Native support for LangChain and OpenAI
## Documentation
Full documentation available at [https://agentlens.vectry.tech/docs](https://agentlens.vectry.tech/docs)
## License
MIT © 2026 Vectry

View File

@@ -0,0 +1,8 @@
"""AgentLens - Agent observability that traces decisions, not just API calls."""
from agentlens.client import init, shutdown
from agentlens.trace import trace
from agentlens.decision import log_decision
__version__ = "0.1.0"
__all__ = ["init", "shutdown", "trace", "log_decision"]

View File

@@ -0,0 +1,43 @@
"""Client initialization and management for AgentLens."""
from typing import Optional
_client: Optional["_Client"] = None
class _Client:
"""Internal client class for managing AgentLens connection."""
def __init__(self, api_key: str, endpoint: str) -> None:
self.api_key = api_key
self.endpoint = endpoint
self.is_shutdown = False
def shutdown(self) -> None:
"""Shutdown the client."""
self.is_shutdown = True
def init(api_key: str, endpoint: str = "https://agentlens.vectry.tech") -> None:
    """Create the module-level AgentLens client.

    Args:
        api_key: API key used to authenticate with the AgentLens backend.
        endpoint: Base URL of the backend; defaults to the hosted service
            at https://agentlens.vectry.tech.
    """
    global _client
    # Re-initializing simply replaces any previously configured client.
    _client = _Client(api_key=api_key, endpoint=endpoint)
def shutdown() -> None:
    """Shut down and discard the module-level client, if any exists."""
    global _client
    # Nothing to do when init() was never called (or shutdown already ran).
    if _client is None:
        return
    _client.shutdown()
    _client = None
def get_client() -> Optional[_Client]:
    """Return the active client, or None when init() has not been called."""
    return _client

View File

@@ -0,0 +1,32 @@
"""Decision logging for tracking agent decision points."""
# Fix: `Optional` is used in the signature below but was not imported,
# which made this module raise NameError on import.
from typing import Any, Dict, List, Optional


def log_decision(
    type: str,
    chosen: Any,
    alternatives: List[Any],
    reasoning: Optional[str] = None,
) -> None:
    """Log a decision point in the agent's reasoning.

    Args:
        type: Type of decision (e.g., "tool_selection", "routing", "retry").
            (The name shadows the builtin but is kept for caller
            compatibility — callers pass it as a keyword.)
        chosen: The option that was selected.
        alternatives: List of alternatives that were considered.
        reasoning: Optional explanation for the decision.

    Example:
        log_decision(
            type="tool_selection",
            chosen="search",
            alternatives=["search", "calculate", "browse"],
            reasoning="Search is most appropriate for finding information"
        )
    """
    # Stub implementation: prints instead of shipping the event to the
    # backend transport.
    print(f"[AgentLens] Decision logged: {type}")
    print(f"[AgentLens] Chosen: {chosen}")
    print(f"[AgentLens] Alternatives: {alternatives}")
    if reasoning:
        print(f"[AgentLens] Reasoning: {reasoning}")

View File

@@ -0,0 +1 @@
"""Integration packages for AgentLens."""

View File

@@ -0,0 +1,55 @@
"""LangChain integration for AgentLens."""
from typing import Any, Dict, Optional, Sequence
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.messages import BaseMessage
class AgentLensCallbackHandler(BaseCallbackHandler):
    """Callback handler for LangChain integration with AgentLens.

    This handler captures LLM calls, tool calls, and agent actions
    to provide observability for LangChain-based agents.

    All hooks are stubs that print; a full implementation would forward
    events to the AgentLens transport.
    """

    def __init__(self) -> None:
        # Populated once a trace is opened for the current run.
        self.trace_id: Optional[str] = None

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: list[str],
        **kwargs: Any,
    ) -> None:
        """Called when an LLM starts processing."""
        print(f"[AgentLens] LLM started: {serialized.get('name', 'unknown')}")

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Called when an LLM finishes processing."""
        # Fix: dropped the pointless f-prefix (no placeholders).
        print("[AgentLens] LLM completed")

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
        """Called when an LLM encounters an error."""
        print(f"[AgentLens] LLM error: {error}")

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Called when a tool starts executing."""
        print(f"[AgentLens] Tool started: {serialized.get('name', 'unknown')}")

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Called when a tool finishes executing."""
        # Fix: dropped the pointless f-prefix (no placeholders).
        print("[AgentLens] Tool completed")

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
        """Called when a tool encounters an error."""
        print(f"[AgentLens] Tool error: {error}")

    def on_agent_action(self, action: Any, **kwargs: Any) -> None:
        """Called when an agent performs an action."""
        # Assumes `action` carries a `.tool` attribute (LangChain AgentAction).
        print(f"[AgentLens] Agent action: {action.tool}")

View File

@@ -0,0 +1,39 @@
"""OpenAI integration for AgentLens."""
from typing import Any, Optional
from functools import wraps
def wrap_openai(client: Any) -> Any:
    """Patch an OpenAI client in place so chat completions are traced.

    Args:
        client: The OpenAI client to wrap.

    Returns:
        Wrapped OpenAI client with AgentLens tracing enabled.

    Example:
        import openai
        from agentlens.integrations.openai import wrap_openai
        client = openai.OpenAI(api_key="sk-...")
        traced_client = wrap_openai(client)
        response = traced_client.chat.completions.create(...)
    """
    inner_create = client.chat.completions.create

    @wraps(inner_create)
    def _create_with_tracing(*args: Any, **kwargs: Any) -> Any:
        print("[AgentLens] OpenAI chat completion started")
        try:
            result = inner_create(*args, **kwargs)
        except Exception as exc:
            print(f"[AgentLens] OpenAI error: {exc}")
            raise
        # Only reached on success; mirrors the start message above.
        print("[AgentLens] OpenAI chat completion completed")
        return result

    # Monkey-patch the bound method so existing references to `client`
    # pick up the traced version.
    client.chat.completions.create = _create_with_tracing
    return client

View File

@@ -0,0 +1,76 @@
"""Trace decorator and context manager for instrumenting agent functions."""
import inspect
from functools import wraps
from typing import Any, Callable, Optional
def trace(name: Optional[str] = None) -> Callable[..., Any]:
    """Decorator to trace a function or method.

    Works for both sync and async callables: async functions get an async
    wrapper so the trace brackets the actual awaited execution, not just
    the creation of the coroutine.

    Args:
        name: Name for the trace. If not provided, uses the function name.

    Returns:
        Decorated function with tracing enabled.

    Example:
        @trace(name="research-agent")
        async def research(topic: str) -> str:
            return f"Researching: {topic}"
    """
    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
            trace_name = name or func.__name__
            print(f"[AgentLens] Starting trace: {trace_name}")
            try:
                result = await func(*args, **kwargs)
                print(f"[AgentLens] Completed trace: {trace_name}")
                return result
            except Exception as e:
                print(f"[AgentLens] Error in trace {trace_name}: {e}")
                raise

        @wraps(func)
        def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
            trace_name = name or func.__name__
            print(f"[AgentLens] Starting trace: {trace_name}")
            try:
                result = func(*args, **kwargs)
                print(f"[AgentLens] Completed trace: {trace_name}")
                return result
            except Exception as e:
                print(f"[AgentLens] Error in trace {trace_name}: {e}")
                raise

        # BUG FIX: the original tested hasattr(func, "__await__"), which is
        # False for `async def` functions (only coroutine *objects* define
        # __await__), so async functions were handed the sync wrapper and
        # their execution was never awaited inside the trace.
        if inspect.iscoroutinefunction(func):
            return async_wrapper
        return sync_wrapper

    return decorator
class Tracer:
    """Context manager that brackets a code region with trace logging.

    Example:
        with Tracer(name="custom-operation"):
            # Your code here
            pass
    """

    def __init__(self, name: str) -> None:
        # Label that appears in the start/complete/error log lines.
        self.name = name

    def __enter__(self) -> "Tracer":
        print(f"[AgentLens] Starting trace: {self.name}")
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        # Log the outcome; always return False so exceptions propagate.
        if exc_type is not None:
            print(f"[AgentLens] Error in trace {self.name}: {exc_val}")
            return False
        print(f"[AgentLens] Completed trace: {self.name}")
        return False

View File

@@ -0,0 +1,38 @@
"""Batch transport for sending data to AgentLens API."""
from typing import List, Dict, Any
class BatchTransport:
    """Transport layer that batches events for efficient API calls.

    Events accumulate in an in-memory buffer until ``max_batch_size`` is
    reached, at which point the buffer is flushed automatically; callers
    may also flush or shut down explicitly.
    """

    def __init__(self, max_batch_size: int = 100, flush_interval: float = 1.0) -> None:
        # Flush automatically once this many events are buffered.
        self.max_batch_size = max_batch_size
        # Intended seconds between flushes (not wired to a timer in this stub).
        self.flush_interval = flush_interval
        self._batch: List[Dict[str, Any]] = []

    def add(self, event: Dict[str, Any]) -> None:
        """Queue one event, flushing when the batch reaches capacity.

        Args:
            event: Event data to be sent.
        """
        self._batch.append(event)
        if len(self._batch) < self.max_batch_size:
            return
        self.flush()

    def flush(self) -> None:
        """Send all buffered events and clear the buffer (no-op when empty)."""
        if not self._batch:
            return
        print(f"[AgentLens] Flushing batch of {len(self._batch)} events")
        self._batch.clear()

    def shutdown(self) -> None:
        """Flush any remaining events before shutting the transport down."""
        self.flush()

View File

@@ -0,0 +1,33 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "agentlens"
version = "0.1.0"
description = "Agent observability that traces decisions, not just API calls"
readme = "README.md"
license = "MIT"
requires-python = ">=3.9"
authors = [{ name = "Vectry", email = "hunter@repi.fun" }]
keywords = ["ai", "agents", "observability", "tracing", "llm"]
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
]
dependencies = [
"httpx>=0.25.0",
]
[project.optional-dependencies]
langchain = ["langchain-core>=0.1.0"]
openai = ["openai>=1.0.0"]
all = ["agentlens[langchain,openai]"]
[project.urls]
Homepage = "https://agentlens.vectry.tech"
Repository = "https://gitea.repi.fun/repi/agentlens"
Documentation = "https://agentlens.vectry.tech/docs"