langgraph-code-review by existential-birds/beagle
npx skills add https://github.com/existential-birds/beagle --skill langgraph-code-review

审查 LangGraph 代码时,请检查以下几类问题。
# 错误 - 直接修改状态
def my_node(state: State) -> None:
state["messages"].append(new_message) # 直接修改!
# 正确 - 返回部分更新
def my_node(state: State) -> dict:
return {"messages": [new_message]} # 让 reducer 处理
# 错误 - 没有 reducer,每个节点都会覆盖
class State(TypedDict):
messages: list # 将被覆盖,而非追加!
# 正确 - 使用 reducer 追加
class State(TypedDict):
messages: Annotated[list, operator.add]
# 或者对于聊天使用 add_messages:
messages: Annotated[list, add_messages]
# 错误 - 返回无效的节点名称
def router(state) -> str:
return "nonexistent_node" # 运行时错误!
# 正确 - 使用 Literal 类型提示以确保安全
def router(state) -> Literal["agent", "tools", "__end__"]:
if condition:
return "agent"
return END # 使用常量,而非字符串
# 错误 - 使用中断但没有检查点管理器
def my_node(state):
answer = interrupt("question") # 将会失败!
return {"answer": answer}
graph = builder.compile() # 没有检查点管理器!
# 正确 - 使用中断时需要检查点管理器
graph = builder.compile(checkpointer=InMemorySaver())
# 错误 - 没有 thread_id
graph.invoke({"messages": [...]}) # 使用检查点管理器时会出错!
# 正确 - 始终提供 thread_id
config = {"configurable": {"thread_id": "user-123"}}
graph.invoke({"messages": [...]}, config)
# 错误 - add_messages 期望类消息对象
class State(TypedDict):
messages: Annotated[list, add_messages]
def node(state):
return {"messages": ["plain string"]} # 可能失败!
# 正确 - 使用正确的消息类型或元组
def node(state):
return {"messages": [("assistant", "response")]}
# 或者:[AIMessage(content="response")]
# 错误 - 返回整个状态(可能重置其他字段)
def my_node(state: State) -> State:
return {
"counter": state["counter"] + 1,
"messages": state["messages"], # 不必要!
"other": state["other"] # 不必要!
}
# 正确 - 只返回更改的字段
def my_node(state: State) -> dict:
return {"counter": state["counter"] + 1}
# 错误 - 没有 reducer 的 Pydantic 模型会失去追加行为
class State(BaseModel):
messages: list # 没有 reducer!
# 正确 - 即使使用 Pydantic 也要使用 Annotated
class State(BaseModel):
messages: Annotated[list, add_messages]
# 错误 - 没有从 START 出发的边
builder.add_node("process", process_fn)
builder.add_edge("process", END)
graph = builder.compile() # 错误:没有入口点!
# 正确 - 连接 START
builder.add_edge(START, "process")
# 错误 - 孤立节点
builder.add_node("main", main_fn)
builder.add_node("orphan", orphan_fn) # 永远无法到达!
builder.add_edge(START, "main")
builder.add_edge("main", END)
# 使用可视化检查
print(graph.get_graph().draw_mermaid())
# 错误 - 条件分支中缺少路径
def router(state) -> Literal["a", "b", "c"]:
...
builder.add_conditional_edges("node", router, {"a": "a", "b": "b"})
# 缺少 "c" 路径!
# 正确 - 包含所有可能的返回值
builder.add_conditional_edges("node", router, {"a": "a", "b": "b", "c": "c"})
# 或者省略 path_map,直接使用返回值作为节点名称
# 错误 - Command 返回未声明目标(破坏可视化)
def dynamic(state) -> Command[Literal["next", "__end__"]]:
return Command(goto="next")
builder.add_node("dynamic", dynamic) # 图可视化不会显示边
# 正确 - 声明目标
builder.add_node("dynamic", dynamic, destinations=["next", END])
# 错误 - 使用同步 invoke 调用异步节点
async def my_node(state):
result = await async_operation()
return {"result": result}
graph.invoke(input) # 可能无法正确等待!
# 正确 - 对于异步图使用 ainvoke
await graph.ainvoke(input)
# 或者同时提供同步和异步版本
# 错误 - 在异步节点中使用阻塞调用
async def my_node(state):
result = requests.get(url) # 阻塞事件循环!
return {"result": result}
# 正确 - 使用异步 HTTP 客户端
async def my_node(state):
async with httpx.AsyncClient() as client:
result = await client.get(url)
return {"result": result}
# 错误 - AI 消息包含 tool_calls 但没有工具执行
messages = [
HumanMessage(content="search for X"),
AIMessage(content="", tool_calls=[{"id": "1", "name": "search", ...}])
# 缺少 ToolMessage!下一次 LLM 调用将失败
]
# 正确 - 始终将 tool_calls 与 ToolMessage 配对
messages = [
HumanMessage(content="search for X"),
AIMessage(content="", tool_calls=[{"id": "1", "name": "search", ...}]),
ToolMessage(content="results", tool_call_id="1")
]
# 错误 - 模型可能并行调用多个工具,包括中断工具
model = ChatOpenAI().bind_tools([interrupt_tool, other_tool])
# 如果两者被并行调用,中断行为是未定义的
# 正确 - 在中断前禁用并行工具调用
model = ChatOpenAI().bind_tools(
[interrupt_tool, other_tool],
parallel_tool_calls=False
)
# 错误 - 内存检查点管理器在重启时会丢失状态
graph = builder.compile(checkpointer=InMemorySaver()) # 仅用于测试!
# 正确 - 在生产环境中使用持久化存储
from langgraph.checkpoint.postgres import PostgresSaver
checkpointer = PostgresSaver.from_conn_string(conn_string)
graph = builder.compile(checkpointer=checkpointer)
# 错误 - 子图显式设置为 False 会阻止持久化
subgraph = sub_builder.compile(checkpointer=False)
# 正确 - 使用 None 以继承父图的检查点管理器
subgraph = sub_builder.compile(checkpointer=None) # 从父图继承
# 或者使用 True 进行独立的检查点管理
subgraph = sub_builder.compile(checkpointer=True)
# 错误 - 每个节点都返回大量数据
def node(state):
large_data = fetch_large_data()
return {"large_field": large_data} # 每一步都被检查点保存!
# 正确 - 使用引用或存储
from langgraph.store.memory import InMemoryStore
def node(state, *, store: BaseStore):
store.put(namespace, key, large_data)
return {"data_ref": f"{namespace}/{key}"}
# 错误 - 没有防止无限循环的保护
def router(state):
return "agent" # 总是循环!
# 正确 - 检查剩余步数或使用 RemainingSteps
from langgraph.managed import RemainingSteps
class State(TypedDict):
messages: Annotated[list, add_messages]
remaining_steps: RemainingSteps
def check_limit(state):
if state["remaining_steps"] < 2:
return END
return "continue"
每周安装量
674
代码仓库
GitHub 星标数
40
首次出现
2026年1月20日
安全审计
安装于
opencode: 638
gemini-cli: 219
codex: 218
github-copilot: 202
cursor: 194
claude-code: 165
When reviewing LangGraph code, check for these categories of issues.
# BAD - mutates state directly
def my_node(state: State) -> None:
state["messages"].append(new_message) # Mutation!
# GOOD - returns partial update
def my_node(state: State) -> dict:
return {"messages": [new_message]} # Let reducer handle it
# BAD - no reducer, each node overwrites
class State(TypedDict):
messages: list # Will be overwritten, not appended!
# GOOD - reducer appends
class State(TypedDict):
messages: Annotated[list, operator.add]
# Or use add_messages for chat:
messages: Annotated[list, add_messages]
# BAD - returns invalid node name
def router(state) -> str:
return "nonexistent_node" # Runtime error!
# GOOD - use Literal type hint for safety
def router(state) -> Literal["agent", "tools", "__end__"]:
if condition:
return "agent"
return END # Use constant, not string
# BAD - interrupt without checkpointer
def my_node(state):
answer = interrupt("question") # Will fail!
return {"answer": answer}
graph = builder.compile() # No checkpointer!
# GOOD - checkpointer required for interrupts
graph = builder.compile(checkpointer=InMemorySaver())
# BAD - no thread_id
graph.invoke({"messages": [...]}) # Error with checkpointer!
# GOOD - always provide thread_id
config = {"configurable": {"thread_id": "user-123"}}
graph.invoke({"messages": [...]}, config)
# BAD - add_messages expects message-like objects
class State(TypedDict):
messages: Annotated[list, add_messages]
def node(state):
return {"messages": ["plain string"]} # May fail!
# GOOD - use proper message types or tuples
def node(state):
return {"messages": [("assistant", "response")]}
# Or: [AIMessage(content="response")]
# BAD - returns entire state (may reset other fields)
def my_node(state: State) -> State:
return {
"counter": state["counter"] + 1,
"messages": state["messages"], # Unnecessary!
"other": state["other"] # Unnecessary!
}
# GOOD - return only changed fields
def my_node(state: State) -> dict:
return {"counter": state["counter"] + 1}
# BAD - Pydantic model without reducer loses append behavior
class State(BaseModel):
messages: list # No reducer!
# GOOD - use Annotated even with Pydantic
class State(BaseModel):
messages: Annotated[list, add_messages]
# BAD - no edge from START
builder.add_node("process", process_fn)
builder.add_edge("process", END)
graph = builder.compile() # Error: no entrypoint!
# GOOD - connect START
builder.add_edge(START, "process")
# BAD - orphan node
builder.add_node("main", main_fn)
builder.add_node("orphan", orphan_fn) # Never reached!
builder.add_edge(START, "main")
builder.add_edge("main", END)
# Check with visualization
print(graph.get_graph().draw_mermaid())
# BAD - missing path in conditional
def router(state) -> Literal["a", "b", "c"]:
...
builder.add_conditional_edges("node", router, {"a": "a", "b": "b"})
# "c" path missing!
# GOOD - include all possible returns
builder.add_conditional_edges("node", router, {"a": "a", "b": "b", "c": "c"})
# Or omit path_map to use return values as node names
# BAD - Command return without destinations (breaks visualization)
def dynamic(state) -> Command[Literal["next", "__end__"]]:
return Command(goto="next")
builder.add_node("dynamic", dynamic) # Graph viz won't show edges
# GOOD - declare destinations
builder.add_node("dynamic", dynamic, destinations=["next", END])
# BAD - async node called with sync invoke
async def my_node(state):
result = await async_operation()
return {"result": result}
graph.invoke(input) # May not await properly!
# GOOD - use ainvoke for async graphs
await graph.ainvoke(input)
# Or provide both sync and async versions
# BAD - blocking call in async node
async def my_node(state):
result = requests.get(url) # Blocks event loop!
return {"result": result}
# GOOD - use async HTTP client
async def my_node(state):
async with httpx.AsyncClient() as client:
result = await client.get(url)
return {"result": result}
# BAD - AI message with tool_calls but no tool execution
messages = [
HumanMessage(content="search for X"),
AIMessage(content="", tool_calls=[{"id": "1", "name": "search", ...}])
# Missing ToolMessage! Next LLM call will fail
]
# GOOD - always pair tool_calls with ToolMessage
messages = [
HumanMessage(content="search for X"),
AIMessage(content="", tool_calls=[{"id": "1", "name": "search", ...}]),
ToolMessage(content="results", tool_call_id="1")
]
# BAD - model may call multiple tools including interrupt
model = ChatOpenAI().bind_tools([interrupt_tool, other_tool])
# If both called in parallel, interrupt behavior is undefined
# GOOD - disable parallel tool calls before interrupt
model = ChatOpenAI().bind_tools(
[interrupt_tool, other_tool],
parallel_tool_calls=False
)
# BAD - in-memory checkpointer loses state on restart
graph = builder.compile(checkpointer=InMemorySaver()) # Testing only!
# GOOD - use persistent storage in production
from langgraph.checkpoint.postgres import PostgresSaver
checkpointer = PostgresSaver.from_conn_string(conn_string)
graph = builder.compile(checkpointer=checkpointer)
# BAD - subgraph with explicit False prevents persistence
subgraph = sub_builder.compile(checkpointer=False)
# GOOD - use None to inherit parent's checkpointer
subgraph = sub_builder.compile(checkpointer=None) # Inherits from parent
# Or True for independent checkpointing
subgraph = sub_builder.compile(checkpointer=True)
# BAD - returning large data in every node
def node(state):
large_data = fetch_large_data()
return {"large_field": large_data} # Checkpointed every step!
# GOOD - use references or store
from langgraph.store.memory import InMemoryStore
def node(state, *, store: BaseStore):
store.put(namespace, key, large_data)
return {"data_ref": f"{namespace}/{key}"}
# BAD - no protection against infinite loops
def router(state):
return "agent" # Always loops!
# GOOD - check remaining steps or use RemainingSteps
from langgraph.managed import RemainingSteps
class State(TypedDict):
messages: Annotated[list, add_messages]
remaining_steps: RemainingSteps
def check_limit(state):
if state["remaining_steps"] < 2:
return END
return "continue"
Weekly Installs
674
Repository
GitHub Stars
40
First Seen
Jan 20, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
opencode638
gemini-cli219
codex218
github-copilot202
cursor194
claude-code165
agent-browser 浏览器自动化工具 - Vercel Labs 命令行网页操作与测试
136,300 周安装