llm-application-dev-langchain-agent by sickn33/antigravity-awesome-skills
npx skills add https://github.com/sickn33/antigravity-awesome-skills --skill llm-application-dev-langchain-agent

您是一位专业的 LangChain 智能体开发专家,专注于使用 LangChain 0.1+ 和 LangGraph 构建生产级 AI 系统。
resources/implementation-playbook.md
构建用于以下目标的复杂 AI 智能体系统:$ARGUMENTS
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic
class AgentState(TypedDict):
    """Shared state passed between LangGraph nodes.

    NOTE(review): relies on `TypedDict` and `Annotated` being imported
    (e.g. from `typing`) elsewhere in the file — confirm.
    """

    # Accumulated conversation history (LangChain message objects).
    messages: Annotated[list, "conversation history"]
    # Context retrieved for the current turn (e.g. RAG results).
    context: Annotated[dict, "retrieved context"]
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
- 模型:claude-sonnet-4-5
- 嵌入:voyage-3-large —— Anthropic 官方推荐用于 Claude;领域变体:voyage-code-3(代码)、voyage-finance-2(金融)、voyage-law-2(法律)
- ReAct 智能体:create_react_agent(llm, tools, state_modifier)
- 多智能体:使用 Command[Literal["agent1", "agent2", END]] 进行路由
- 记忆:ConversationTokenBufferMemory(基于令牌的窗口)、ConversationSummaryMemory(压缩长历史记录)、ConversationEntityMemory(追踪人物、地点、事实)、VectorStoreRetrieverMemory 结合语义搜索

from langchain_voyageai import VoyageAIEmbeddings
from langchain_pinecone import PineconeVectorStore
# Embedding model setup (voyage-3-large recommended for pairing with Claude).
embeddings = VoyageAIEmbeddings(model="voyage-3-large")

# Vector store with hybrid (dense + sparse) search support.
# NOTE(review): assumes `index` (a Pinecone index handle) is created earlier
# in the file — confirm.
vectorstore = PineconeVectorStore(
    index=index,
    embedding=embeddings
)

# Retriever intended to feed a reranker: over-fetch 20 candidates;
# `alpha` presumably balances dense vs. sparse scores (0.5 = equal weight) —
# verify against the installed langchain-pinecone version.
base_retriever = vectorstore.as_retriever(
    search_type="hybrid",
    search_kwargs={"k": 20, "alpha": 0.5}
)
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, Field
class ToolInput(BaseModel):
    """Argument schema for the structured tool below."""

    # Free-text query forwarded to the external service.
    query: str = Field(description="Query to process")
async def tool_function(query: str) -> str:
    """Forward *query* to the external service and return its result.

    On any failure, returns an "Error: ..." string instead of raising —
    tools report failures as text so the agent can react to them.
    """
    try:
        outcome = await external_call(query)
    except Exception as e:
        # Surface the failure to the model instead of crashing the run.
        return f"Error: {str(e)}"
    return outcome
# Build the structured tool. `tool_function` is async, so it must be wired
# via `coroutine=` only: passing it as `func=` would make a synchronous
# `.invoke()`/`.run()` return an un-awaited coroutine object instead of a
# result string.
tool = StructuredTool.from_function(
    name="tool_name",
    description="What this tool does",
    args_schema=ToolInput,
    coroutine=tool_function
)
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
@app.post("/agent/invoke")
async def invoke_agent(request: AgentRequest):
    """Run the agent for one request; stream SSE events when asked to."""
    if not request.stream:
        # Non-streaming path: block until the full agent result is ready.
        return await agent.ainvoke({"messages": [...]})
    # Streaming path: hand the generator to the framework as server-sent events.
    return StreamingResponse(
        stream_response(request),
        media_type="text/event-stream"
    )
使用 structlog 保持日志一致性。

from langsmith.evaluation import evaluate
# Run the evaluation suite against a LangSmith dataset.
eval_config = RunEvalConfig(
    # Built-in LLM-as-judge evaluators: answer quality, groundedness in
    # retrieved context, and chain-of-thought QA.
    evaluators=["qa", "context_qa", "cot_qa"],
    eval_llm=ChatAnthropic(model="claude-sonnet-4-5")
)
# NOTE(review): `RunEvalConfig` belongs to the legacy run_on_dataset API,
# while `langsmith.evaluation.evaluate` typically expects a list of evaluator
# callables — confirm this combination against the installed langsmith version.
results = await evaluate(
    agent_function,
    data=dataset_name,
    evaluators=eval_config
)
# Assemble the two-node graph: START -> node1 -(router)-> node2 | END.
builder = StateGraph(MessagesState)
builder.add_node("node1", node1_func)
builder.add_node("node2", node2_func)
builder.add_edge(START, "node1")
# `router` returns "a" or "b": "a" continues to node2, "b" ends the run.
builder.add_conditional_edges("node1", router, {"a": "node2", "b": END})
builder.add_edge("node2", END)
# The checkpointer persists per-thread state so conversations can resume.
agent = builder.compile(checkpointer=checkpointer)
async def process_request(message: str, session_id: str):
    """Send one user message through the agent on the session's thread.

    `session_id` maps to the checkpointer's `thread_id`, so repeated calls
    with the same id continue the same conversation.
    """
    final_state = await agent.ainvoke(
        {"messages": [HumanMessage(content=message)]},
        config={"configurable": {"thread_id": session_id}}
    )
    # The last message in the returned state is the agent's reply.
    return final_state["messages"][-1].content
from tenacity import retry, stop_after_attempt, wait_exponential
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
async def call_with_retry():
    """Invoke the LLM with up to 3 attempts and 4-10s exponential backoff.

    Logs each failure, then re-raises so tenacity can schedule the retry
    (and so callers see the final exception once attempts are exhausted).
    """
    try:
        return await llm.ainvoke(prompt)
    except Exception as e:
        # Log before re-raising; tenacity owns the retry/backoff policy.
        logger.error(f"LLM error: {e}")
        raise
ainvoke、astream、aget_relevant_documents
遵循这些模式,构建生产就绪、可扩展且可观测的 LangChain 智能体。
每周安装量
140
代码仓库
GitHub 星标数
27.4K
首次出现
2026 年 1 月 28 日
安全审计
安装于
gemini-cli135
opencode134
codex128
github-copilot127
cursor127
kimi-cli117
You are an expert LangChain agent developer specializing in production-grade AI systems using LangChain 0.1+ and LangGraph.
resources/implementation-playbook.md
Build a sophisticated AI agent system for: $ARGUMENTS
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic
class AgentState(TypedDict):
    """Shared state passed between LangGraph nodes.

    NOTE(review): relies on `TypedDict` and `Annotated` being imported
    (e.g. from `typing`) elsewhere in the file — confirm.
    """

    # Accumulated conversation history (LangChain message objects).
    messages: Annotated[list, "conversation history"]
    # Context retrieved for the current turn (e.g. RAG results).
    context: Annotated[dict, "retrieved context"]
- Model: claude-sonnet-4-5
- Embeddings: voyage-3-large — officially recommended by Anthropic for Claude; domain variants: voyage-code-3 (code), voyage-finance-2 (finance), voyage-law-2 (legal)
- ReAct Agents: multi-step reasoning with tool usage, via create_react_agent(llm, tools, state_modifier)
- Plan-and-Execute: complex tasks requiring upfront planning
- Multi-Agent Orchestration: specialized agents with supervisor routing, using Command[Literal["agent1", "agent2", END]] for routing
- Memory: ConversationTokenBufferMemory (token-based windowing), ConversationSummaryMemory (compress long histories), ConversationEntityMemory (track people, places, facts), VectorStoreRetrieverMemory with semantic search

from langchain_voyageai import VoyageAIEmbeddings
from langchain_pinecone import PineconeVectorStore
# Embedding model setup (voyage-3-large recommended for pairing with Claude).
embeddings = VoyageAIEmbeddings(model="voyage-3-large")

# Vector store with hybrid (dense + sparse) search support.
# NOTE(review): assumes `index` (a Pinecone index handle) is created earlier
# in the file — confirm.
vectorstore = PineconeVectorStore(
    index=index,
    embedding=embeddings
)

# Retriever intended to feed a reranker: over-fetch 20 candidates;
# `alpha` presumably balances dense vs. sparse scores (0.5 = equal weight) —
# verify against the installed langchain-pinecone version.
base_retriever = vectorstore.as_retriever(
    search_type="hybrid",
    search_kwargs={"k": 20, "alpha": 0.5}
)
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, Field
class ToolInput(BaseModel):
    """Argument schema for the structured tool below."""

    # Free-text query forwarded to the external service.
    query: str = Field(description="Query to process")
async def tool_function(query: str) -> str:
    """Forward *query* to the external service and return its result.

    On any failure, returns an "Error: ..." string instead of raising —
    tools report failures as text so the agent can react to them.
    """
    try:
        outcome = await external_call(query)
    except Exception as e:
        # Surface the failure to the model instead of crashing the run.
        return f"Error: {str(e)}"
    return outcome
# Build the structured tool. `tool_function` is async, so it must be wired
# via `coroutine=` only: passing it as `func=` would make a synchronous
# `.invoke()`/`.run()` return an un-awaited coroutine object instead of a
# result string.
tool = StructuredTool.from_function(
    name="tool_name",
    description="What this tool does",
    args_schema=ToolInput,
    coroutine=tool_function
)
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
@app.post("/agent/invoke")
async def invoke_agent(request: AgentRequest):
    """Run the agent for one request; stream SSE events when asked to."""
    if not request.stream:
        # Non-streaming path: block until the full agent result is ready.
        return await agent.ainvoke({"messages": [...]})
    # Streaming path: hand the generator to the framework as server-sent events.
    return StreamingResponse(
        stream_response(request),
        media_type="text/event-stream"
    )
structlog for consistent logs

from langsmith.evaluation import evaluate
# Run the evaluation suite against a LangSmith dataset.
eval_config = RunEvalConfig(
    # Built-in LLM-as-judge evaluators: answer quality, groundedness in
    # retrieved context, and chain-of-thought QA.
    evaluators=["qa", "context_qa", "cot_qa"],
    eval_llm=ChatAnthropic(model="claude-sonnet-4-5")
)
# NOTE(review): `RunEvalConfig` belongs to the legacy run_on_dataset API,
# while `langsmith.evaluation.evaluate` typically expects a list of evaluator
# callables — confirm this combination against the installed langsmith version.
results = await evaluate(
    agent_function,
    data=dataset_name,
    evaluators=eval_config
)
# Assemble the two-node graph: START -> node1 -(router)-> node2 | END.
builder = StateGraph(MessagesState)
builder.add_node("node1", node1_func)
builder.add_node("node2", node2_func)
builder.add_edge(START, "node1")
# `router` returns "a" or "b": "a" continues to node2, "b" ends the run.
builder.add_conditional_edges("node1", router, {"a": "node2", "b": END})
builder.add_edge("node2", END)
# The checkpointer persists per-thread state so conversations can resume.
agent = builder.compile(checkpointer=checkpointer)
async def process_request(message: str, session_id: str):
    """Send one user message through the agent on the session's thread.

    `session_id` maps to the checkpointer's `thread_id`, so repeated calls
    with the same id continue the same conversation.
    """
    final_state = await agent.ainvoke(
        {"messages": [HumanMessage(content=message)]},
        config={"configurable": {"thread_id": session_id}}
    )
    # The last message in the returned state is the agent's reply.
    return final_state["messages"][-1].content
from tenacity import retry, stop_after_attempt, wait_exponential
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
async def call_with_retry():
    """Invoke the LLM with up to 3 attempts and 4-10s exponential backoff.

    Logs each failure, then re-raises so tenacity can schedule the retry
    (and so callers see the final exception once attempts are exhausted).
    """
    try:
        return await llm.ainvoke(prompt)
    except Exception as e:
        # Log before re-raising; tenacity owns the retry/backoff policy.
        logger.error(f"LLM error: {e}")
        raise
ainvoke, astream, aget_relevant_documents
Build production-ready, scalable, and observable LangChain agents following these patterns.
Weekly Installs
140
Repository
GitHub Stars
27.4K
First Seen
Jan 28, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
gemini-cli135
opencode134
codex128
github-copilot127
cursor127
kimi-cli117
agent-browser 浏览器自动化工具 - Vercel Labs 命令行网页操作与测试
157,400 周安装