feat: initial monorepo scaffold - Next.js 15 + Prisma + Python SDK stubs
- Turborepo monorepo with apps/web and packages/database, sdk-python
- Next.js 15 app with professional landing page (dark theme, emerald accent)
- Prisma schema: Trace, DecisionPoint, Span, Event models with full indexing
- Docker Compose: web (port 4200), postgres:16, redis:7, migrate service
- Python SDK package stubs: init, trace decorator, log_decision, integrations
- Multi-stage Dockerfile for standalone Next.js production build
packages/database/package.json (new file)
@@ -0,0 +1,21 @@
{
  "name": "@agentlens/database",
  "version": "0.0.1",
  "private": true,
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "scripts": {
    "build": "tsc",
    "clean": "rm -rf dist",
    "db:generate": "prisma generate",
    "db:push": "prisma db push",
    "db:migrate": "prisma migrate dev"
  },
  "dependencies": {
    "@prisma/client": "^6.3.0"
  },
  "devDependencies": {
    "prisma": "^6.3.0",
    "typescript": "^5.7"
  }
}
packages/database/prisma/schema.prisma (new file)
@@ -0,0 +1,154 @@
datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

generator client {
  provider = "prisma-client-js"
}

model Trace {
  id        String      @id @default(cuid())
  sessionId String?
  name      String
  status    TraceStatus @default(RUNNING)
  tags      String[]    @default([])
  metadata  Json?

  totalCost     Float?
  totalTokens   Int?
  totalDuration Int?

  startedAt DateTime  @default(now())
  endedAt   DateTime?
  createdAt DateTime  @default(now())
  updatedAt DateTime  @updatedAt

  decisionPoints DecisionPoint[]
  spans          Span[]
  events         Event[]

  @@index([sessionId])
  @@index([status])
  @@index([createdAt])
  @@index([name])
}

model DecisionPoint {
  id      String @id @default(cuid())
  traceId String
  trace   Trace  @relation(fields: [traceId], references: [id], onDelete: Cascade)

  type      DecisionType
  reasoning String?

  chosen       Json
  alternatives Json[]

  contextSnapshot Json?

  durationMs Int?
  costUsd    Float?

  parentSpanId String?
  span         Span?   @relation(fields: [parentSpanId], references: [id])

  timestamp DateTime @default(now())

  @@index([traceId])
  @@index([type])
  @@index([timestamp])
}

model Span {
  id      String @id @default(cuid())
  traceId String
  trace   Trace  @relation(fields: [traceId], references: [id], onDelete: Cascade)

  parentSpanId String?
  parentSpan   Span?   @relation("SpanTree", fields: [parentSpanId], references: [id])
  childSpans   Span[]  @relation("SpanTree")

  name String
  type SpanType

  input  Json?
  output Json?

  tokenCount Int?
  costUsd    Float?
  durationMs Int?

  status        SpanStatus @default(RUNNING)
  statusMessage String?

  startedAt DateTime  @default(now())
  endedAt   DateTime?

  metadata Json?

  decisionPoints DecisionPoint[]

  @@index([traceId])
  @@index([parentSpanId])
  @@index([type])
  @@index([startedAt])
}

model Event {
  id      String @id @default(cuid())
  traceId String
  trace   Trace  @relation(fields: [traceId], references: [id], onDelete: Cascade)

  spanId String?
  type   EventType
  name   String

  metadata Json?

  timestamp DateTime @default(now())

  @@index([traceId])
  @@index([type])
  @@index([timestamp])
}

enum TraceStatus {
  RUNNING
  COMPLETED
  ERROR
}

enum DecisionType {
  TOOL_SELECTION
  ROUTING
  RETRY
  ESCALATION
  MEMORY_RETRIEVAL
  PLANNING
  CUSTOM
}

enum SpanType {
  LLM_CALL
  TOOL_CALL
  MEMORY_OP
  CHAIN
  AGENT
  CUSTOM
}

enum SpanStatus {
  RUNNING
  COMPLETED
  ERROR
}

enum EventType {
  ERROR
  RETRY
  FALLBACK
  CONTEXT_OVERFLOW
  USER_FEEDBACK
  CUSTOM
}
packages/database/src/index.ts (new file)
@@ -0,0 +1,2 @@
export { PrismaClient } from "@prisma/client";
export type * from "@prisma/client";
packages/database/tsconfig.json (new file)
@@ -0,0 +1,18 @@
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "lib": ["ES2022"],
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist"]
}
packages/sdk-python/README.md (new file)
@@ -0,0 +1,38 @@
# AgentLens Python SDK

AgentLens provides observability for AI agents by tracing decisions, not just API calls.

## Installation

```bash
pip install agentlens
```

## Quick Start

```python
from agentlens import init, trace

# Initialize AgentLens
init(api_key="your-api-key", endpoint="https://agentlens.vectry.tech")

# Trace your agent functions
@trace(name="research-agent")
async def research(topic: str) -> str:
    return f"Researching: {topic}"
```

## Features

- **Decision Tracing**: Log and visualize agent decisions with alternatives
- **Context Awareness**: Monitor context window utilization
- **Cost Intelligence**: Track token usage and costs per operation
- **Integrations**: Native support for LangChain and OpenAI

## Documentation

Full documentation available at [https://agentlens.vectry.tech/docs](https://agentlens.vectry.tech/docs)

## License

MIT © 2026 Vectry
packages/sdk-python/agentlens/__init__.py (new file)
@@ -0,0 +1,8 @@
"""AgentLens - Agent observability that traces decisions, not just API calls."""

from agentlens.client import init, shutdown
from agentlens.trace import trace
from agentlens.decision import log_decision

__version__ = "0.1.0"
__all__ = ["init", "shutdown", "trace", "log_decision"]
packages/sdk-python/agentlens/client.py (new file)
@@ -0,0 +1,43 @@
"""Client initialization and management for AgentLens."""

from typing import Optional


_client: Optional["_Client"] = None


class _Client:
    """Internal client class for managing AgentLens connection."""

    def __init__(self, api_key: str, endpoint: str) -> None:
        self.api_key = api_key
        self.endpoint = endpoint
        self.is_shutdown = False

    def shutdown(self) -> None:
        """Shutdown the client."""
        self.is_shutdown = True


def init(api_key: str, endpoint: str = "https://agentlens.vectry.tech") -> None:
    """Initialize the AgentLens client.

    Args:
        api_key: Your AgentLens API key.
        endpoint: The AgentLens API endpoint (default: https://agentlens.vectry.tech).
    """
    global _client
    _client = _Client(api_key=api_key, endpoint=endpoint)


def shutdown() -> None:
    """Shutdown the AgentLens client."""
    global _client
    if _client:
        _client.shutdown()
        _client = None


def get_client() -> Optional[_Client]:
    """Get the current client instance."""
    return _client
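Putting the client, trace decorator, and decision logger together, a minimal end-to-end sketch of the stub SDK as committed here (function and ticket names are illustrative; the stubs only print, so nothing is sent over the network yet):

```python
import asyncio

from agentlens import init, shutdown, trace, log_decision


@trace(name="support-agent")
async def handle_ticket(question: str) -> str:
    # Record which tool the agent picked and why (stub prints to stdout).
    log_decision(
        type="tool_selection",
        chosen="search",
        alternatives=["search", "escalate"],
        reasoning="Question looks answerable from the knowledge base",
    )
    return f"Answer for: {question}"


init(api_key="your-api-key")  # endpoint defaults to https://agentlens.vectry.tech
print(asyncio.run(handle_ticket("How do I reset my password?")))
shutdown()
```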
packages/sdk-python/agentlens/decision.py (new file)
@@ -0,0 +1,32 @@
"""Decision logging for tracking agent decision points."""

from typing import Any, List, Optional


def log_decision(
    type: str,
    chosen: Any,
    alternatives: List[Any],
    reasoning: Optional[str] = None,
) -> None:
    """Log a decision point in the agent's reasoning.

    Args:
        type: Type of decision (e.g., "tool_selection", "routing", "retry").
        chosen: The option that was selected.
        alternatives: List of alternatives that were considered.
        reasoning: Optional explanation for the decision.

    Example:
        log_decision(
            type="tool_selection",
            chosen="search",
            alternatives=["search", "calculate", "browse"],
            reasoning="Search is most appropriate for finding information"
        )
    """
    print(f"[AgentLens] Decision logged: {type}")
    print(f"[AgentLens] Chosen: {chosen}")
    print(f"[AgentLens] Alternatives: {alternatives}")
    if reasoning:
        print(f"[AgentLens] Reasoning: {reasoning}")
packages/sdk-python/agentlens/integrations/__init__.py (new file)
@@ -0,0 +1 @@
"""Integration packages for AgentLens."""
packages/sdk-python/agentlens/integrations/langchain.py (new file)
@@ -0,0 +1,55 @@
"""LangChain integration for AgentLens."""

from typing import Any, Dict, Optional

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult


class AgentLensCallbackHandler(BaseCallbackHandler):
    """Callback handler for LangChain integration with AgentLens.

    This handler captures LLM calls, tool calls, and agent actions
    to provide observability for LangChain-based agents.
    """

    def __init__(self) -> None:
        self.trace_id: Optional[str] = None

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: list[str],
        **kwargs: Any,
    ) -> None:
        """Called when an LLM starts processing."""
        print(f"[AgentLens] LLM started: {serialized.get('name', 'unknown')}")

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Called when an LLM finishes processing."""
        print("[AgentLens] LLM completed")

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
        """Called when an LLM encounters an error."""
        print(f"[AgentLens] LLM error: {error}")

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Called when a tool starts executing."""
        print(f"[AgentLens] Tool started: {serialized.get('name', 'unknown')}")

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Called when a tool finishes executing."""
        print("[AgentLens] Tool completed")

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
        """Called when a tool encounters an error."""
        print(f"[AgentLens] Tool error: {error}")

    def on_agent_action(self, action: Any, **kwargs: Any) -> None:
        """Called when an agent performs an action."""
        print(f"[AgentLens] Agent action: {action.tool}")
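A rough usage sketch for attaching the handler, assuming langchain-core plus the separate langchain-openai package are installed; ChatOpenAI and the model name are illustrative and not part of this commit:

```python
from langchain_openai import ChatOpenAI

from agentlens.integrations.langchain import AgentLensCallbackHandler

# Callbacks can be passed per call via the standard RunnableConfig, so every
# LLM start/end/error event flows through the handler defined above.
handler = AgentLensCallbackHandler()
llm = ChatOpenAI(model="gpt-4o-mini")
response = llm.invoke(
    "Summarize what agent observability means in one sentence.",
    config={"callbacks": [handler]},
)
print(response.content)
```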
packages/sdk-python/agentlens/integrations/openai.py (new file)
@@ -0,0 +1,39 @@
"""OpenAI integration for AgentLens."""

from typing import Any
from functools import wraps


def wrap_openai(client: Any) -> Any:
    """Wrap an OpenAI client to add AgentLens tracing.

    Args:
        client: The OpenAI client to wrap.

    Returns:
        Wrapped OpenAI client with AgentLens tracing enabled.

    Example:
        import openai
        from agentlens.integrations.openai import wrap_openai

        client = openai.OpenAI(api_key="sk-...")
        traced_client = wrap_openai(client)

        response = traced_client.chat.completions.create(...)
    """
    original_create = client.chat.completions.create

    @wraps(original_create)
    def traced_create(*args: Any, **kwargs: Any) -> Any:
        print("[AgentLens] OpenAI chat completion started")
        try:
            response = original_create(*args, **kwargs)
            print("[AgentLens] OpenAI chat completion completed")
            return response
        except Exception as e:
            print(f"[AgentLens] OpenAI error: {e}")
            raise

    client.chat.completions.create = traced_create
    return client
packages/sdk-python/agentlens/trace.py (new file)
@@ -0,0 +1,76 @@
"""Trace decorator and context manager for instrumenting agent functions."""

import inspect
from functools import wraps
from typing import Any, Callable, Optional


def trace(name: Optional[str] = None) -> Callable[..., Any]:
    """Decorator to trace a function or method.

    Args:
        name: Name for the trace. If not provided, uses the function name.

    Returns:
        Decorated function with tracing enabled.

    Example:
        @trace(name="research-agent")
        async def research(topic: str) -> str:
            return f"Researching: {topic}"
    """

    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
            trace_name = name or func.__name__
            print(f"[AgentLens] Starting trace: {trace_name}")
            try:
                result = await func(*args, **kwargs)
                print(f"[AgentLens] Completed trace: {trace_name}")
                return result
            except Exception as e:
                print(f"[AgentLens] Error in trace {trace_name}: {e}")
                raise

        @wraps(func)
        def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
            trace_name = name or func.__name__
            print(f"[AgentLens] Starting trace: {trace_name}")
            try:
                result = func(*args, **kwargs)
                print(f"[AgentLens] Completed trace: {trace_name}")
                return result
            except Exception as e:
                print(f"[AgentLens] Error in trace {trace_name}: {e}")
                raise

        # Coroutine functions get the awaiting wrapper; plain functions get the sync one.
        if inspect.iscoroutinefunction(func):
            return async_wrapper
        return sync_wrapper

    return decorator


class Tracer:
    """Context manager for creating traces.

    Example:
        with Tracer(name="custom-operation"):
            # Your code here
            pass
    """

    def __init__(self, name: str) -> None:
        self.name = name

    def __enter__(self) -> "Tracer":
        print(f"[AgentLens] Starting trace: {self.name}")
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        if exc_type is None:
            print(f"[AgentLens] Completed trace: {self.name}")
        else:
            print(f"[AgentLens] Error in trace {self.name}: {exc_val}")
        return False
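A small sketch exercising the sync wrapper path and the Tracer context manager (function and trace names are illustrative):

```python
from agentlens.trace import trace, Tracer


@trace()  # no name given, so the function name is used
def plan_step(goal: str) -> str:
    return f"Plan for: {goal}"


plan_step("summarize the weekly report")

# The context manager covers code that isn't a single function call.
with Tracer(name="vector-store-lookup"):
    results = ["doc-1", "doc-3"]
```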
packages/sdk-python/agentlens/transport.py (new file)
@@ -0,0 +1,38 @@
"""Batch transport for sending data to AgentLens API."""

from typing import List, Dict, Any


class BatchTransport:
    """Transport layer that batches events for efficient API calls.

    This class handles batching and sending of traces, decisions, and other
    events to the AgentLens backend.
    """

    def __init__(self, max_batch_size: int = 100, flush_interval: float = 1.0) -> None:
        self.max_batch_size = max_batch_size
        self.flush_interval = flush_interval
        self._batch: List[Dict[str, Any]] = []

    def add(self, event: Dict[str, Any]) -> None:
        """Add an event to the batch.

        Args:
            event: Event data to be sent.
        """
        self._batch.append(event)
        if len(self._batch) >= self.max_batch_size:
            self.flush()

    def flush(self) -> None:
        """Flush the batch by sending all pending events."""
        if not self._batch:
            return

        print(f"[AgentLens] Flushing batch of {len(self._batch)} events")
        self._batch.clear()

    def shutdown(self) -> None:
        """Shutdown the transport, flushing any remaining events."""
        self.flush()
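A quick sanity-check sketch of the batching behaviour (note that flush_interval is stored but not yet acted on in this stub; there is no background timer):

```python
from agentlens.transport import BatchTransport

transport = BatchTransport(max_batch_size=3)

# The third add() reaches max_batch_size and triggers an automatic flush.
for i in range(3):
    transport.add({"type": "event", "name": f"step-{i}"})

transport.add({"type": "event", "name": "tail"})
transport.shutdown()  # flushes the one remaining event
```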
packages/sdk-python/pyproject.toml (new file)
@@ -0,0 +1,33 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "agentlens"
version = "0.1.0"
description = "Agent observability that traces decisions, not just API calls"
readme = "README.md"
license = "MIT"
requires-python = ">=3.9"
authors = [{ name = "Vectry", email = "hunter@repi.fun" }]
keywords = ["ai", "agents", "observability", "tracing", "llm"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Topic :: Software Development :: Libraries",
]
dependencies = [
    "httpx>=0.25.0",
]

[project.optional-dependencies]
langchain = ["langchain-core>=0.1.0"]
openai = ["openai>=1.0.0"]
all = ["agentlens[langchain,openai]"]

[project.urls]
Homepage = "https://agentlens.vectry.tech"
Repository = "https://gitea.repi.fun/repi/agentlens"
Documentation = "https://agentlens.vectry.tech/docs"