rag-implementation by wshobson/agents
npx skills add https://github.com/wshobson/agents --skill rag-implementation
Master Retrieval-Augmented Generation (RAG) to build LLM applications that provide accurate, grounded responses using external knowledge sources.
Purpose: Store and retrieve document embeddings efficiently.
Options: Pinecone (managed serverless), Weaviate (self-hosted or cloud), Chroma (lightweight local), and PGVector (Postgres extension); setup for each appears below.
Purpose: Convert text to numerical vectors for similarity search.
Models (2026):
| Model | Dimensions | Best For |
|---|---|---|
| voyage-3-large | 1024 | Claude apps (Anthropic recommended) |
| voyage-code-3 | 1024 | Code search |
| text-embedding-3-large | 3072 | OpenAI apps, high accuracy |
| text-embedding-3-small | 1536 | OpenAI apps, cost-effective |
| bge-large-en-v1.5 | 1024 | Open source, local deployment |
| multilingual-e5-large | 1024 | Multi-language support |
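A minimal sketch of the embedding interface, assuming the LangChain VoyageAIEmbeddings wrapper and a VOYAGE_API_KEY in the environment; the same embed_documents / embed_query calls apply to the other models through their respective wrappers.

from langchain_voyageai import VoyageAIEmbeddings

embeddings = VoyageAIEmbeddings(model="voyage-3-large")

# Batch-embed passages (one 1024-dim vector per text)
doc_vectors = embeddings.embed_documents(["First passage", "Second passage"])
# Embed a query for similarity search against those vectors
query_vector = embeddings.embed_query("What does the first passage say?")
print(len(doc_vectors), len(query_vector))  # 2 1024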
Approaches: hybrid search (BM25 + dense), multi-query expansion, contextual compression, parent document retrieval, and HyDE, each demonstrated in the code below.
Purpose: Improve retrieval quality by reordering results.
Methods: local cross-encoder models and the Cohere Rerank API, both shown below.
from langgraph.graph import StateGraph, START, END
from langchain_anthropic import ChatAnthropic
from langchain_voyageai import VoyageAIEmbeddings
from langchain_pinecone import PineconeVectorStore
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter
from typing import TypedDict
class RAGState(TypedDict):
question: str
context: list[Document]
answer: str
# Initialize components
llm = ChatAnthropic(model="claude-sonnet-4-6")
embeddings = VoyageAIEmbeddings(model="voyage-3-large")
vectorstore = PineconeVectorStore(index_name="docs", embedding=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
# RAG prompt
rag_prompt = ChatPromptTemplate.from_template(
"""Answer based on the context below. If you cannot answer, say so.
Context:
{context}
Question: {question}
Answer:"""
)
async def retrieve(state: RAGState) -> RAGState:
"""Retrieve relevant documents."""
docs = await retriever.ainvoke(state["question"])
return {"context": docs}
async def generate(state: RAGState) -> RAGState:
"""Generate answer from context."""
context_text = "\n\n".join(doc.page_content for doc in state["context"])
messages = rag_prompt.format_messages(
context=context_text,
question=state["question"]
)
response = await llm.ainvoke(messages)
return {"answer": response.content}
# Build RAG graph
builder = StateGraph(RAGState)
builder.add_node("retrieve", retrieve)
builder.add_node("generate", generate)
builder.add_edge(START, "retrieve")
builder.add_edge("retrieve", "generate")
builder.add_edge("generate", END)
rag_chain = builder.compile()
# Usage (run inside an async function or event loop)
result = await rag_chain.ainvoke({"question": "What are the main features?"})
print(result["answer"])
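The compiled graph can also stream per-node state updates, useful for surfacing retrieval progress before the answer arrives; a sketch using the explicit "updates" stream mode:

async for update in rag_chain.astream(
    {"question": "What are the main features?"}, stream_mode="updates"
):
    # One dict per completed node, keyed by node name:
    # {"retrieve": {"context": [...]}} then {"generate": {"answer": "..."}}
    print(list(update.keys()))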
from langchain_community.retrievers import BM25Retriever
from langchain.retrievers import EnsembleRetriever
# Sparse retriever (BM25 for keyword matching)
bm25_retriever = BM25Retriever.from_documents(documents)
bm25_retriever.k = 10
# Dense retriever (embeddings for semantic search)
dense_retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
# Combine with Reciprocal Rank Fusion weights
ensemble_retriever = EnsembleRetriever(
retrievers=[bm25_retriever, dense_retriever],
weights=[0.3, 0.7] # 30% keyword, 70% semantic
)
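Invocation is the same as any other retriever; a quick sketch with an illustrative query:

# Keyword-heavy queries lean on the BM25 leg, paraphrased ones on the dense leg
docs = await ensemble_retriever.ainvoke("error code E1042 timeout")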
from langchain.retrievers.multi_query import MultiQueryRetriever
# Generate multiple query perspectives for better recall
multi_query_retriever = MultiQueryRetriever.from_llm(
retriever=vectorstore.as_retriever(search_kwargs={"k": 5}),
llm=llm
)
# Single query → multiple variations → combined results
results = await multi_query_retriever.ainvoke("What is the main topic?")
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
# Compressor extracts only relevant portions
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor,
base_retriever=vectorstore.as_retriever(search_kwargs={"k": 10})
)
# Returns only relevant parts of documents
compressed_docs = await compression_retriever.ainvoke("specific query")
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
# Small chunks for precise retrieval, large chunks for context
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=50)
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
# Store for parent documents
docstore = InMemoryStore()
parent_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=docstore,
child_splitter=child_splitter,
parent_splitter=parent_splitter
)
# Add documents (splits children, stores parents)
await parent_retriever.aadd_documents(documents)
# Retrieval returns parent documents with full context
results = await parent_retriever.ainvoke("query")
from langchain_core.prompts import ChatPromptTemplate
class HyDEState(TypedDict):
question: str
hypothetical_doc: str
context: list[Document]
answer: str
hyde_prompt = ChatPromptTemplate.from_template(
"""Write a detailed passage that would answer this question:
Question: {question}
Passage:"""
)
async def generate_hypothetical(state: HyDEState) -> HyDEState:
"""Generate hypothetical document for better retrieval."""
messages = hyde_prompt.format_messages(question=state["question"])
response = await llm.ainvoke(messages)
return {"hypothetical_doc": response.content}
async def retrieve_with_hyde(state: HyDEState) -> HyDEState:
"""Retrieve using hypothetical document."""
# Use hypothetical doc for retrieval instead of original query
docs = await retriever.ainvoke(state["hypothetical_doc"])
return {"context": docs}
# Build HyDE RAG graph
builder = StateGraph(HyDEState)
builder.add_node("hypothetical", generate_hypothetical)
builder.add_node("retrieve", retrieve_with_hyde)
builder.add_node("generate", generate)
builder.add_edge(START, "hypothetical")
builder.add_edge("hypothetical", "retrieve")
builder.add_edge("retrieve", "generate")
builder.add_edge("generate", END)
hyde_rag = builder.compile()
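Invocation mirrors the basic graph; a sketch showing the intermediate hypothetical document alongside the final answer:

result = await hyde_rag.ainvoke({"question": "How does caching reduce latency?"})
print(result["hypothetical_doc"][:200])  # the generated passage used for retrieval
print(result["answer"])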
from langchain_text_splitters import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len,
separators=["\n\n", "\n", ". ", " ", ""] # Try in order
)
chunks = splitter.split_documents(documents)
from langchain_text_splitters import TokenTextSplitter
splitter = TokenTextSplitter(
chunk_size=512,
chunk_overlap=50,
encoding_name="cl100k_base" # OpenAI tiktoken encoding
)
from langchain_experimental.text_splitter import SemanticChunker
splitter = SemanticChunker(
embeddings=embeddings,
breakpoint_threshold_type="percentile",
breakpoint_threshold_amount=95
)
from langchain_text_splitters import MarkdownHeaderTextSplitter
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
strip_headers=False
)
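split_text takes a raw markdown string and returns Documents whose metadata records the header path; a small illustrative example:

md = "# Guide\n## Setup\nInstall the package.\n## Usage\nRun the CLI."
docs = splitter.split_text(md)
for d in docs:
    # metadata records the header path, e.g. {'Header 1': 'Guide', 'Header 2': 'Setup'}
    print(d.metadata, d.page_content[:40])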
import os
from pinecone import Pinecone, ServerlessSpec
from langchain_pinecone import PineconeVectorStore
# Initialize Pinecone client
pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
# Create index if needed
if "my-index" not in pc.list_indexes().names():
pc.create_index(
name="my-index",
dimension=1024, # voyage-3-large dimensions
metric="cosine",
spec=ServerlessSpec(cloud="aws", region="us-east-1")
)
# Create vector store
index = pc.Index("my-index")
vectorstore = PineconeVectorStore(index=index, embedding=embeddings)
import weaviate
from langchain_weaviate import WeaviateVectorStore
client = weaviate.connect_to_local() # or connect_to_weaviate_cloud()
vectorstore = WeaviateVectorStore(
client=client,
index_name="Documents",
text_key="content",
embedding=embeddings
)
from langchain_chroma import Chroma
vectorstore = Chroma(
collection_name="my_collection",
embedding_function=embeddings,
persist_directory="./chroma_db"
)
from langchain_postgres.vectorstores import PGVector
connection_string = "postgresql+psycopg://user:pass@localhost:5432/vectordb"
vectorstore = PGVector(
embeddings=embeddings,
collection_name="documents",
connection=connection_string,
)
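Whichever store you choose, indexing goes through the same LangChain interface; a minimal sketch, assuming chunks from the splitting step and an existing index or collection:

# Add pre-split documents; returns the assigned IDs
ids = await vectorstore.aadd_documents(chunks)

# Or construct and index in one step (Chroma shown; the others have equivalents)
# vectorstore = Chroma.from_documents(chunks, embedding=embeddings)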
from datetime import datetime
from langchain_core.documents import Document
# Add metadata during indexing
docs_with_metadata = []
for doc in documents:
doc.metadata.update({
"source": doc.metadata.get("source", "unknown"),
"category": determine_category(doc.page_content),
"date": datetime.now().isoformat()
})
docs_with_metadata.append(doc)
# Filter during retrieval
results = await vectorstore.asimilarity_search(
"query",
filter={"category": "technical"},
k=5
)
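Filter syntax is store-specific. A hedged Pinecone-style example combining conditions on the metadata fields above ($and, $eq, and $ne are Pinecone operators; Chroma and PGVector use their own dialects):

results = await vectorstore.asimilarity_search(
    "query",
    filter={"$and": [
        {"category": {"$eq": "technical"}},
        {"source": {"$ne": "unknown"}},
    ]},
    k=5,
)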
# Balance relevance with diversity
results = await vectorstore.amax_marginal_relevance_search(
"query",
k=5,
fetch_k=20, # Fetch 20, return top 5 diverse
lambda_mult=0.5 # 0=max diversity, 1=max relevance
)
from sentence_transformers import CrossEncoder
reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
async def retrieve_and_rerank(query: str, k: int = 5) -> list[Document]:
# Get initial results
candidates = await vectorstore.asimilarity_search(query, k=20)
# Rerank
pairs = [[query, doc.page_content] for doc in candidates]
scores = reranker.predict(pairs)
# Sort by score and take top k
ranked = sorted(zip(candidates, scores), key=lambda x: x[1], reverse=True)
return [doc for doc, score in ranked[:k]]
from langchain_cohere import CohereRerank
reranker = CohereRerank(model="rerank-english-v3.0", top_n=5)
# Wrap retriever with reranking
reranked_retriever = ContextualCompressionRetriever(
base_compressor=reranker,
base_retriever=vectorstore.as_retriever(search_kwargs={"k": 20})
)
rag_prompt = ChatPromptTemplate.from_template(
"""Answer the question based on the context below. Include citations using [1], [2], etc.
If you cannot answer based on the context, say "I don't have enough information."
Context:
{context}
Question: {question}
Instructions:
1. Use only information from the context
2. Cite sources with [1], [2] format
3. If uncertain, express uncertainty
Answer (with citations):"""
)
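For the [1], [2] citations to be verifiable, the context itself needs numbered sources; one way to format it (a sketch, with format_numbered_context as an illustrative helper):

def format_numbered_context(docs: list[Document]) -> str:
    """Prefix each chunk with [n] so the model's citations can be checked."""
    return "\n\n".join(
        f"[{i}] {doc.page_content}" for i, doc in enumerate(docs, start=1)
    )

docs = await retriever.ainvoke("What are the main features?")
messages = rag_prompt.format_messages(
    context=format_numbered_context(docs),
    question="What are the main features?",
)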
from pydantic import BaseModel, Field
class RAGResponse(BaseModel):
answer: str = Field(description="The answer based on context")
confidence: float = Field(description="Confidence score 0-1")
sources: list[str] = Field(description="Source document IDs used")
reasoning: str = Field(description="Brief reasoning for the answer")
# Use with structured output
structured_llm = llm.with_structured_output(RAGResponse)
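A sketch of wiring this into the generate step, reusing the earlier prompt; with_structured_output returns the parsed RAGResponse instead of a raw message:

async def generate_structured(state: RAGState) -> RAGState:
    """Drop-in replacement for the earlier generate node."""
    context_text = "\n\n".join(doc.page_content for doc in state["context"])
    messages = rag_prompt.format_messages(
        context=context_text, question=state["question"]
    )
    response: RAGResponse = await structured_llm.ainvoke(messages)
    return {"answer": response.answer}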
from typing import TypedDict
class RAGEvalMetrics(TypedDict):
retrieval_precision: float # Relevant docs / retrieved docs
retrieval_recall: float # Retrieved relevant / total relevant
answer_relevance: float # Answer addresses question
faithfulness: float # Answer grounded in context
context_relevance: float # Context relevant to question
async def evaluate_rag_system(
rag_chain,
test_cases: list[dict]
) -> RAGEvalMetrics:
"""Evaluate RAG system on test cases."""
metrics = {k: [] for k in RAGEvalMetrics.__annotations__}
for test in test_cases:
result = await rag_chain.ainvoke({"question": test["question"]})
        # Retrieval metrics (assumes each doc was indexed with an 'id' in its metadata)
retrieved_ids = {doc.metadata["id"] for doc in result["context"]}
relevant_ids = set(test["relevant_doc_ids"])
        precision = len(retrieved_ids & relevant_ids) / len(retrieved_ids) if retrieved_ids else 0.0
        recall = len(retrieved_ids & relevant_ids) / len(relevant_ids) if relevant_ids else 0.0
metrics["retrieval_precision"].append(precision)
metrics["retrieval_recall"].append(recall)
# Use LLM-as-judge for quality metrics
quality = await evaluate_answer_quality(
question=test["question"],
answer=result["answer"],
context=result["context"],
expected=test.get("expected_answer")
)
metrics["answer_relevance"].append(quality["relevance"])
metrics["faithfulness"].append(quality["faithfulness"])
metrics["context_relevance"].append(quality["context_relevance"])
return {k: sum(v) / len(v) for k, v in metrics.items()}
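evaluate_answer_quality is referenced above but not defined; a minimal LLM-as-judge sketch under that name (the 0-to-1 rubric, field names, and prompt wording are assumptions):

from pydantic import BaseModel, Field

class QualityScores(BaseModel):
    relevance: float = Field(description="Answer addresses the question, 0-1")
    faithfulness: float = Field(description="Answer grounded in the context, 0-1")
    context_relevance: float = Field(description="Context relevant to the question, 0-1")

judge = llm.with_structured_output(QualityScores)

async def evaluate_answer_quality(question, answer, context, expected=None) -> dict:
    """Score one RAG output with an LLM judge (rubric and names are illustrative)."""
    context_text = "\n\n".join(doc.page_content for doc in context)
    prompt = (
        "Score this RAG output from 0 to 1 on each dimension.\n"
        f"Question: {question}\nContext:\n{context_text}\nAnswer: {answer}\n"
        + (f"Reference answer: {expected}\n" if expected else "")
    )
    scores = await judge.ainvoke(prompt)
    return scores.model_dump()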