deepagents-implementation by existential-birds/beagle
npx skills add https://github.com/existential-birds/beagle --skill deepagents-implementation

Deep Agents 提供了一个基于 LangGraph 构建的、开箱即用的智能体框架:
create_deep_agent:用于创建已配置智能体的工厂函数;task 工具用于进行隔离的任务执行。返回的智能体是一个已编译的 LangGraph StateGraph,兼容流式处理、检查点和 LangGraph Studio。
# 核心
from deepagents import create_deep_agent
# 子智能体
from deepagents import CompiledSubAgent
# 后端
from deepagents.backends import (
StateBackend, # 临时性(默认)
FilesystemBackend, # 真实磁盘
StoreBackend, # 跨线程持久化
CompositeBackend, # 将路径路由到后端
)
# LangGraph(用于检查点、存储、流式处理)
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.postgres import PostgresSaver
from langgraph.store.memory import InMemoryStore
# LangChain(用于自定义模型、工具)
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
from deepagents import create_deep_agent
# 默认使用 Claude Sonnet 4
agent = create_deep_agent()
result = agent.invoke({"messages": [{"role": "user", "content": "Hello!"}]})
from langchain_core.tools import tool
from deepagents import create_deep_agent
@tool
def web_search(query: str) -> str:
    """Search the web for information."""
    # NOTE(review): relies on a module-level `tavily_client` that is not
    # defined in this snippet — the host application must construct it.
    return tavily_client.search(query)
agent = create_deep_agent(
tools=[web_search],
system_prompt="你是一个研究助手。搜索网络来回答问题。",
)
result = agent.invoke({"messages": [{"role": "user", "content": "什么是 LangGraph?"}]})
from langchain.chat_models import init_chat_model
from deepagents import create_deep_agent
# OpenAI
model = init_chat_model("openai:gpt-4o")
# 或使用自定义设置的 Anthropic
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929", max_tokens=8192)
agent = create_deep_agent(model=model)
from langgraph.checkpoint.memory import InMemorySaver
from deepagents import create_deep_agent
agent = create_deep_agent(checkpointer=InMemorySaver())
# 使用检查点时必须提供 thread_id
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
# 恢复对话
result = agent.invoke({"messages": [{"role": "user", "content": "后续跟进"}]}, config)
该智能体支持所有 LangGraph 流模式。
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "写一份报告"}]},
stream_mode="updates"
):
print(chunk) # {"node_name": {"key": "value"}}
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "解释量子计算"}]},
stream_mode="messages"
):
# 实时词元流
print(chunk.content, end="", flush=True)
async for chunk in agent.astream(
{"messages": [...]},
stream_mode="updates"
):
print(chunk)
for mode, chunk in agent.stream(
{"messages": [...]},
stream_mode=["updates", "messages"]
):
if mode == "messages":
print("词元:", chunk.content)
else:
print("更新:", chunk)
文件存储在智能体状态中,仅在会话线程内持久化。
# 隐式 - 这是默认设置
agent = create_deep_agent()
# 显式
from deepagents.backends import StateBackend
agent = create_deep_agent(backend=lambda rt: StateBackend(rt))
在磁盘上读写实际文件。启用用于 shell 命令的 execute 工具。
from deepagents.backends import FilesystemBackend
agent = create_deep_agent(
backend=FilesystemBackend(root_dir="/path/to/project"),
)
使用 LangGraph Store 实现跨对话的持久化。
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=lambda rt: StoreBackend(rt),
store=store, # StoreBackend 必需
)
将不同路径路由到不同的后端。
from langgraph.store.memory import InMemoryStore
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=CompositeBackend(
default=StateBackend(), # /workspace/* → 临时性
routes={
"/memories/": StoreBackend(store=store), # 持久化
"/preferences/": StoreBackend(store=store), # 持久化
},
),
store=store,
)
# /memories/ 下的文件在所有对话中持久化
# /workspace/ 下的文件在每个线程中是临时性的
默认情况下,一个具有所有主智能体工具的 general-purpose 子智能体可用。
agent = create_deep_agent(tools=[web_search])
# 现在智能体可以通过 `task` 工具进行委派:
# task(subagent_type="general-purpose", prompt="深入研究主题 X")
from deepagents import create_deep_agent
research_agent = {
"name": "researcher",
"description": "使用网络搜索对复杂主题进行深入研究",
"system_prompt": """你是一位专家研究员。
彻底搜索、交叉引用来源并综合发现。""",
"tools": [web_search, document_reader],
}
code_agent = {
"name": "coder",
"description": "编写、审查和调试代码",
"system_prompt": "你是一位专家程序员。编写干净、经过测试的代码。",
"tools": [code_executor, linter],
"model": "openai:gpt-4o", # 可选:每个子智能体使用不同的模型
}
agent = create_deep_agent(
subagents=[research_agent, code_agent],
system_prompt="将研究任务委派给研究员,将编码任务委派给程序员。",
)
将现有的 LangGraph 图用作子智能体。
from deepagents import CompiledSubAgent, create_deep_agent
from langgraph.prebuilt import create_react_agent
# 现有图
custom_graph = create_react_agent(
model="anthropic:claude-sonnet-4-5-20250929",
tools=[specialized_tool],
prompt="自定义工作流指令",
)
agent = create_deep_agent(
subagents=[CompiledSubAgent(
name="custom-workflow",
description="运行我的专业分析工作流",
runnable=custom_graph,
)]
)
from langchain.agents.middleware import AgentMiddleware
class LoggingMiddleware(AgentMiddleware):
    """Print every subagent response, then pass it through unchanged."""

    def transform_response(self, response):
        # The printed text is runtime output and is left as-is.
        print(f"子智能体响应:{response}")
        return response
agent_spec = {
"name": "logged-agent",
"description": "带有额外日志记录的智能体",
"system_prompt": "你很有帮助。",
"tools": [],
"middleware": [LoggingMiddleware()], # 在默认中间件之后添加
}
在执行特定工具前暂停,等待人工批准。
from deepagents import create_deep_agent
agent = create_deep_agent(
tools=[send_email, delete_file, web_search],
interrupt_on={
"send_email": True, # 简单中断
"delete_file": True, # 删除前需要批准
# web_search 未列出 - 无需批准即可运行
},
checkpointer=checkpointer, # 中断必需
)
agent = create_deep_agent(
tools=[send_email],
interrupt_on={
"send_email": {
"allowed_decisions": ["approve", "edit", "reject"]
},
},
checkpointer=checkpointer,
)
# 调用 - 将在 send_email 处暂停
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
# 检查状态
state = agent.get_state(config)
if state.next: # 有待处理的中断
# 批准后恢复
from langgraph.types import Command
agent.invoke(Command(resume={"approved": True}), config)
# 或编辑后恢复
agent.invoke(Command(resume={"edited_args": {"to": "new@email.com"}}), config)
# 或拒绝
agent.invoke(Command(resume={"rejected": True}), config)
# 中断也适用于子智能体
agent = create_deep_agent(
subagents=[research_agent],
interrupt_on={
"web_search": True, # 即使子智能体调用它也会中断
},
checkpointer=checkpointer,
)
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelRequest,
ModelResponse,
)
from langchain_core.tools import tool
class MyMiddleware(AgentMiddleware):
    """Minimal middleware skeleton showing the two extension points."""

    # Tools to inject into the agent.
    # NOTE(review): a mutable class-level list is shared across instances;
    # fine for a stateless example, but verify before mutating it.
    tools = []
    # System prompt content to inject.
    system_prompt = ""

    def transform_request(self, request: ModelRequest) -> ModelRequest:
        """Modify the request before it is sent to the model."""
        return request

    def transform_response(self, response: ModelResponse) -> ModelResponse:
        """Modify the response after it is received from the model."""
        return response
from langchain_core.tools import tool
@tool
def get_current_time() -> str:
    """Get the current time."""
    # Function-scope import keeps the example snippet self-contained.
    from datetime import datetime
    return datetime.now().isoformat()
class TimeMiddleware(AgentMiddleware):
    """Inject the get_current_time tool plus a prompt hint into any agent."""

    # Tool made available to the model.
    tools = [get_current_time]
    # Appended to the agent's system prompt so the model knows the tool exists.
    system_prompt = "你可以访问 get_current_time 来处理时间敏感的任务。"
agent = create_deep_agent(middleware=[TimeMiddleware()])
class UserContextMiddleware(AgentMiddleware):
    """Surface per-user preferences to the model via a dynamic system prompt."""

    def __init__(self, user_preferences: dict):
        # Stored by reference; later edits to the dict show up in the prompt.
        self.user_preferences = user_preferences

    @property
    def system_prompt(self):
        # Computed on each access instead of a static class attribute.
        return f"用户偏好:{self.user_preferences}"
agent = create_deep_agent(
middleware=[UserContextMiddleware({"theme": "dark", "language": "en"})]
)
import logging
class LoggingMiddleware(AgentMiddleware):
    """Log a truncated preview of each model response at INFO level."""

    def transform_response(self, response: ModelResponse) -> ModelResponse:
        # Lazy %-style args: the preview string is only built when INFO
        # logging is enabled (idiomatic for the logging module; the emitted
        # message is identical to the previous f-string version).
        logging.info("智能体响应:%s...", response.messages[-1].content[:100])
        return response
agent = create_deep_agent(middleware=[LoggingMiddleware()])
连接 MCP(模型上下文协议)服务器以提供额外的工具。
from langchain_mcp_adapters.client import MultiServerMCPClient
from deepagents import create_deep_agent
async def main():
    """Build a deep agent whose tools come from MCP servers, then stream a reply."""
    # Each entry spawns one MCP server subprocess via npx.
    mcp_client = MultiServerMCPClient({
        "filesystem": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path"],
        },
        "github": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-github"],
            # NOTE(review): assumes GITHUB_TOKEN is set and `os` is imported
            # by the surrounding script — confirm before running.
            "env": {"GITHUB_TOKEN": os.environ["GITHUB_TOKEN"]},
        },
    })
    # MCP tools are fetched asynchronously, then passed like ordinary tools.
    mcp_tools = await mcp_client.get_tools()
    agent = create_deep_agent(tools=mcp_tools)
    async for chunk in agent.astream(
        {"messages": [{"role": "user", "content": "列出我的仓库"}]}
    ):
        print(chunk)
有关详细的参考文档,请参阅:
每周安装量
87
代码仓库
GitHub 星标数
42
首次出现
2026年1月20日
安全审计
安装于
opencode: 72
gemini-cli: 72
codex: 70
claude-code: 64
cursor: 61
github-copilot: 59
Deep Agents provides a batteries-included agent harness built on LangGraph:
create_deep_agent: factory function that creates a configured agent; the task tool provides isolated task execution. The agent returned is a compiled LangGraph StateGraph, compatible with streaming, checkpointing, and LangGraph Studio.
# Core
from deepagents import create_deep_agent
# Subagents
from deepagents import CompiledSubAgent
# Backends
from deepagents.backends import (
StateBackend, # Ephemeral (default)
FilesystemBackend, # Real disk
StoreBackend, # Persistent cross-thread
CompositeBackend, # Route paths to backends
)
# LangGraph (for checkpointing, store, streaming)
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.postgres import PostgresSaver
from langgraph.store.memory import InMemoryStore
# LangChain (for custom models, tools)
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from deepagents import create_deep_agent
# Uses Claude Sonnet 4 by default
agent = create_deep_agent()
result = agent.invoke({"messages": [{"role": "user", "content": "Hello!"}]})
from langchain_core.tools import tool
from deepagents import create_deep_agent
@tool
def web_search(query: str) -> str:
    """Search the web for information."""
    # Delegate to the Tavily client supplied by the host application.
    result = tavily_client.search(query)
    return result
agent = create_deep_agent(
tools=[web_search],
system_prompt="You are a research assistant. Search the web to answer questions.",
)
result = agent.invoke({"messages": [{"role": "user", "content": "What is LangGraph?"}]})
from langchain.chat_models import init_chat_model
from deepagents import create_deep_agent
# OpenAI
model = init_chat_model("openai:gpt-4o")
# Or Anthropic with custom settings
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929", max_tokens=8192)
agent = create_deep_agent(model=model)
from langgraph.checkpoint.memory import InMemorySaver
from deepagents import create_deep_agent
agent = create_deep_agent(checkpointer=InMemorySaver())
# Must provide thread_id with checkpointer
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
# Resume conversation
result = agent.invoke({"messages": [{"role": "user", "content": "Follow up"}]}, config)
The agent supports all LangGraph stream modes.
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "Write a report"}]},
stream_mode="updates"
):
print(chunk) # {"node_name": {"key": "value"}}
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "Explain quantum computing"}]},
stream_mode="messages"
):
# Real-time token streaming
print(chunk.content, end="", flush=True)
async for chunk in agent.astream(
{"messages": [...]},
stream_mode="updates"
):
print(chunk)
for mode, chunk in agent.stream(
{"messages": [...]},
stream_mode=["updates", "messages"]
):
if mode == "messages":
print("Token:", chunk.content)
else:
print("Update:", chunk)
Files stored in agent state, persist within thread only.
# Implicit - this is the default
agent = create_deep_agent()
# Explicit
from deepagents.backends import StateBackend
agent = create_deep_agent(backend=lambda rt: StateBackend(rt))
Read/write actual files on disk. Enables execute tool for shell commands.
from deepagents.backends import FilesystemBackend
agent = create_deep_agent(
backend=FilesystemBackend(root_dir="/path/to/project"),
)
Uses LangGraph Store for persistence across conversations.
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=lambda rt: StoreBackend(rt),
store=store, # Required for StoreBackend
)
Route different paths to different backends.
from langgraph.store.memory import InMemoryStore
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=CompositeBackend(
default=StateBackend(), # /workspace/* → ephemeral
routes={
"/memories/": StoreBackend(store=store), # persistent
"/preferences/": StoreBackend(store=store), # persistent
},
),
store=store,
)
# Files under /memories/ persist across all conversations
# Files under /workspace/ are ephemeral per-thread
By default, a general-purpose subagent is available with all main agent tools.
agent = create_deep_agent(tools=[web_search])
# The agent can now delegate via the `task` tool:
# task(subagent_type="general-purpose", prompt="Research topic X in depth")
from deepagents import create_deep_agent
research_agent = {
"name": "researcher",
"description": "Conducts deep research on complex topics with web search",
"system_prompt": """You are an expert researcher.
Search thoroughly, cross-reference sources, and synthesize findings.""",
"tools": [web_search, document_reader],
}
code_agent = {
"name": "coder",
"description": "Writes, reviews, and debugs code",
"system_prompt": "You are an expert programmer. Write clean, tested code.",
"tools": [code_executor, linter],
"model": "openai:gpt-4o", # Optional: different model per subagent
}
agent = create_deep_agent(
subagents=[research_agent, code_agent],
system_prompt="Delegate research to the researcher and coding to the coder.",
)
Use existing LangGraph graphs as subagents.
from deepagents import CompiledSubAgent, create_deep_agent
from langgraph.prebuilt import create_react_agent
# Existing graph
custom_graph = create_react_agent(
model="anthropic:claude-sonnet-4-5-20250929",
tools=[specialized_tool],
prompt="Custom workflow instructions",
)
agent = create_deep_agent(
subagents=[CompiledSubAgent(
name="custom-workflow",
description="Runs my specialized analysis workflow",
runnable=custom_graph,
)]
)
from langchain.agents.middleware import AgentMiddleware
class LoggingMiddleware(AgentMiddleware):
    """Print every subagent response, then pass it through unchanged."""

    def transform_response(self, response):
        # Log first, return the response unmodified so downstream hooks see it.
        print(f"Subagent response: {response}")
        return response
agent_spec = {
"name": "logged-agent",
"description": "Agent with extra logging",
"system_prompt": "You are helpful.",
"tools": [],
"middleware": [LoggingMiddleware()], # Added after default middleware
}
Pause execution before specific tools for human approval.
from deepagents import create_deep_agent
agent = create_deep_agent(
tools=[send_email, delete_file, web_search],
interrupt_on={
"send_email": True, # Simple interrupt
"delete_file": True, # Require approval before delete
# web_search not listed - runs without approval
},
checkpointer=checkpointer, # Required for interrupts
)
agent = create_deep_agent(
tools=[send_email],
interrupt_on={
"send_email": {
"allowed_decisions": ["approve", "edit", "reject"]
},
},
checkpointer=checkpointer,
)
# Invoke - will pause at send_email
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
# Check state
state = agent.get_state(config)
if state.next: # Has pending interrupt
# Resume with approval
from langgraph.types import Command
agent.invoke(Command(resume={"approved": True}), config)
# Or resume with edit
agent.invoke(Command(resume={"edited_args": {"to": "new@email.com"}}), config)
# Or reject
agent.invoke(Command(resume={"rejected": True}), config)
# Interrupts apply to subagents too
agent = create_deep_agent(
subagents=[research_agent],
interrupt_on={
"web_search": True, # Interrupt even when subagent calls it
},
checkpointer=checkpointer,
)
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelRequest,
ModelResponse,
)
from langchain_core.tools import tool
class MyMiddleware(AgentMiddleware):
    """Minimal middleware skeleton showing the two extension points."""

    # Tools to inject into the agent.
    # NOTE(review): a mutable class-level list is shared across instances;
    # fine for a stateless example, but verify before mutating it.
    tools = []
    # System prompt content to inject.
    system_prompt = ""

    def transform_request(self, request: ModelRequest) -> ModelRequest:
        """Modify request before sending to model."""
        return request

    def transform_response(self, response: ModelResponse) -> ModelResponse:
        """Modify response after receiving from model."""
        return response
from langchain_core.tools import tool
@tool
def get_current_time() -> str:
    """Get the current time."""
    # Function-scope import keeps the example snippet self-contained.
    from datetime import datetime
    now = datetime.now()
    return now.isoformat()
class TimeMiddleware(AgentMiddleware):
    """Inject the get_current_time tool plus a prompt hint into any agent."""

    # Tool made available to the model.
    tools = [get_current_time]
    # Appended to the agent's system prompt so the model knows the tool exists.
    system_prompt = "You have access to get_current_time for time-sensitive tasks."
agent = create_deep_agent(middleware=[TimeMiddleware()])
class UserContextMiddleware(AgentMiddleware):
    """Surface per-user preferences to the model via a dynamic system prompt."""

    def __init__(self, user_preferences: dict):
        # Stored by reference; later edits to the dict show up in the prompt.
        self.user_preferences = user_preferences

    @property
    def system_prompt(self):
        # Computed on each access instead of a static class attribute.
        return f"User preferences: {self.user_preferences}"
agent = create_deep_agent(
middleware=[UserContextMiddleware({"theme": "dark", "language": "en"})]
)
import logging
class LoggingMiddleware(AgentMiddleware):
    """Log a truncated preview of each model response at INFO level."""

    def transform_response(self, response: ModelResponse) -> ModelResponse:
        # Lazy %-style args: the preview string is only built when INFO
        # logging is enabled (idiomatic for the logging module; the emitted
        # message is identical to the previous f-string version).
        logging.info("Agent response: %s...", response.messages[-1].content[:100])
        return response
agent = create_deep_agent(middleware=[LoggingMiddleware()])
Connect MCP (Model Context Protocol) servers to provide additional tools.
from langchain_mcp_adapters.client import MultiServerMCPClient
from deepagents import create_deep_agent
async def main():
    """Build a deep agent whose tools come from MCP servers, then stream a reply."""
    # Each entry spawns one MCP server subprocess via npx.
    mcp_client = MultiServerMCPClient({
        "filesystem": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path"],
        },
        "github": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-github"],
            # NOTE(review): assumes GITHUB_TOKEN is set and `os` is imported
            # by the surrounding script — confirm before running.
            "env": {"GITHUB_TOKEN": os.environ["GITHUB_TOKEN"]},
        },
    })
    # MCP tools are fetched asynchronously, then passed like ordinary tools.
    mcp_tools = await mcp_client.get_tools()
    agent = create_deep_agent(tools=mcp_tools)
    async for chunk in agent.astream(
        {"messages": [{"role": "user", "content": "List my repos"}]}
    ):
        print(chunk)
For detailed reference documentation, see:
Weekly Installs
87
Repository
GitHub Stars
42
First Seen
Jan 20, 2026
Security Audits
Gen Agent Trust Hub: Fail · Socket: Warn · Snyk: Warn
Installed on
opencode: 72
gemini-cli: 72
codex: 70
claude-code: 64
cursor: 61
github-copilot: 59
AI Elements:基于shadcn/ui的AI原生应用组件库,快速构建对话界面
67,500 周安装