pinecone by davila7/claude-code-templates
npx skills add https://github.com/davila7/claude-code-templates --skill pinecone
面向生产级 AI 应用的向量数据库。
适用场景:
核心指标:
替代方案:
pip install pinecone-client
from pinecone import Pinecone, ServerlessSpec
# 初始化
pc = Pinecone(api_key="your-api-key")
# 创建索引
pc.create_index(
name="my-index",
dimension=1536, # 必须与嵌入维度匹配
metric="cosine", # 或 "euclidean", "dotproduct"
spec=ServerlessSpec(cloud="aws", region="us-east-1")
)
# 连接到索引
index = pc.Index("my-index")
# 插入/更新向量
index.upsert(vectors=[
{"id": "vec1", "values": [0.1, 0.2, ...], "metadata": {"category": "A"}},
{"id": "vec2", "values": [0.3, 0.4, ...], "metadata": {"category": "B"}}
])
# 查询
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
include_metadata=True
)
print(results["matches"])
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# 无服务器(推荐)
pc.create_index(
name="my-index",
dimension=1536,
metric="cosine",
spec=ServerlessSpec(
cloud="aws", # 或 "gcp", "azure"
region="us-east-1"
)
)
# Pod 模式(用于一致性能)
from pinecone import PodSpec
pc.create_index(
name="my-index",
dimension=1536,
metric="cosine",
spec=PodSpec(
environment="us-east1-gcp",
pod_type="p1.x1"
)
)
# 单次插入/更新
index.upsert(vectors=[
{
"id": "doc1",
"values": [0.1, 0.2, ...], # 1536 维度
"metadata": {
"text": "文档内容",
"category": "教程",
"timestamp": "2025-01-01"
}
}
])
# 批量插入/更新(推荐)
vectors = [
{"id": f"vec{i}", "values": embedding, "metadata": metadata}
for i, (embedding, metadata) in enumerate(zip(embeddings, metadatas))
]
index.upsert(vectors=vectors, batch_size=100)
# 基础查询
results = index.query(
vector=[0.1, 0.2, ...],
top_k=10,
include_metadata=True,
include_values=False
)
# 带元数据过滤
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
filter={"category": {"$eq": "教程"}}
)
# 命名空间查询
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
namespace="production"
)
# 访问结果
for match in results["matches"]:
print(f"ID: {match['id']}")
print(f"分数: {match['score']}")
print(f"元数据: {match['metadata']}")
# 精确匹配
filter = {"category": "教程"}
# 比较操作
filter = {"price": {"$gte": 100}} # $gt, $gte, $lt, $lte, $ne
# 逻辑运算符
filter = {
"$and": [
{"category": "教程"},
{"difficulty": {"$lte": 3}}
]
} # 还有:$or
# In 操作符
filter = {"tags": {"$in": ["python", "ml"]}}
# 按命名空间分区数据
index.upsert(
vectors=[{"id": "vec1", "values": [...]}],
namespace="user-123"
)
# 查询特定命名空间
results = index.query(
vector=[...],
namespace="user-123",
top_k=5
)
# 列出命名空间
stats = index.describe_index_stats()
print(stats['namespaces'])
# 插入带稀疏向量
index.upsert(vectors=[
{
"id": "doc1",
"values": [0.1, 0.2, ...], # 稠密向量
"sparse_values": {
"indices": [10, 45, 123], # 标记 ID
"values": [0.5, 0.3, 0.8] # TF-IDF 分数
},
"metadata": {"text": "..."}
}
])
# 混合查询
results = index.query(
vector=[0.1, 0.2, ...],
sparse_vector={
"indices": [10, 45],
"values": [0.5, 0.3]
},
top_k=5,
alpha=0.5 # 0=稀疏, 1=稠密, 0.5=混合
)
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAIEmbeddings
# 创建向量存储
vectorstore = PineconeVectorStore.from_documents(
documents=docs,
embedding=OpenAIEmbeddings(),
index_name="my-index"
)
# 查询
results = vectorstore.similarity_search("查询", k=5)
# 带元数据过滤
results = vectorstore.similarity_search(
"查询",
k=5,
filter={"category": "教程"}
)
# 作为检索器
retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
from llama_index.vector_stores.pinecone import PineconeVectorStore
# 连接到 Pinecone
pc = Pinecone(api_key="your-key")
pinecone_index = pc.Index("my-index")
# 创建向量存储
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# 在 LlamaIndex 中使用
from llama_index.core import StorageContext, VectorStoreIndex
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
# 列出索引
indexes = pc.list_indexes()
# 描述索引
index_info = pc.describe_index("my-index")
print(index_info)
# 获取索引统计信息
stats = index.describe_index_stats()
print(f"总向量数: {stats['total_vector_count']}")
print(f"命名空间: {stats['namespaces']}")
# 删除索引
pc.delete_index("my-index")
# 按 ID 删除
index.delete(ids=["vec1", "vec2"])
# 按过滤器删除
index.delete(filter={"category": "old"})
# 删除命名空间中的所有向量
index.delete(delete_all=True, namespace="test")
# 删除整个索引
index.delete(delete_all=True)
| 操作 | 延迟 | 说明 |
|---|---|---|
| 插入/更新 | ~50-100ms | 每批次 |
| 查询 (p50) | ~50ms | 取决于索引大小 |
| 查询 (p95) | ~100ms | SLA 目标 |
| 元数据过滤 | ~+10-20ms | 额外开销 |
无服务器:
免费层:
每周安装量
236
代码仓库
GitHub 星标数
22.6K
首次出现
2026 年 1 月 21 日
安全审计
安装于
opencode: 194
claude-code: 190
gemini-cli: 188
cursor: 180
codex: 174
github-copilot: 164
The vector database for production AI applications.
Use when:
Metrics:
Use alternatives instead:
pip install pinecone-client
from pinecone import Pinecone, ServerlessSpec
# Initialize
pc = Pinecone(api_key="your-api-key")
# Create index
pc.create_index(
name="my-index",
dimension=1536, # Must match embedding dimension
metric="cosine", # or "euclidean", "dotproduct"
spec=ServerlessSpec(cloud="aws", region="us-east-1")
)
# Connect to index
index = pc.Index("my-index")
# Upsert vectors
index.upsert(vectors=[
{"id": "vec1", "values": [0.1, 0.2, ...], "metadata": {"category": "A"}},
{"id": "vec2", "values": [0.3, 0.4, ...], "metadata": {"category": "B"}}
])
# Query
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
include_metadata=True
)
print(results["matches"])
# Serverless (recommended)
pc.create_index(
name="my-index",
dimension=1536,
metric="cosine",
spec=ServerlessSpec(
cloud="aws", # or "gcp", "azure"
region="us-east-1"
)
)
# Pod-based (for consistent performance)
from pinecone import PodSpec
pc.create_index(
name="my-index",
dimension=1536,
metric="cosine",
spec=PodSpec(
environment="us-east1-gcp",
pod_type="p1.x1"
)
)
# Single upsert
index.upsert(vectors=[
{
"id": "doc1",
"values": [0.1, 0.2, ...], # 1536 dimensions
"metadata": {
"text": "Document content",
"category": "tutorial",
"timestamp": "2025-01-01"
}
}
])
# Batch upsert (recommended)
vectors = [
{"id": f"vec{i}", "values": embedding, "metadata": metadata}
for i, (embedding, metadata) in enumerate(zip(embeddings, metadatas))
]
index.upsert(vectors=vectors, batch_size=100)
# Basic query
results = index.query(
vector=[0.1, 0.2, ...],
top_k=10,
include_metadata=True,
include_values=False
)
# With metadata filtering
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
filter={"category": {"$eq": "tutorial"}}
)
# Namespace query
results = index.query(
vector=[0.1, 0.2, ...],
top_k=5,
namespace="production"
)
# Access results
for match in results["matches"]:
print(f"ID: {match['id']}")
print(f"Score: {match['score']}")
print(f"Metadata: {match['metadata']}")
# Exact match
filter = {"category": "tutorial"}
# Comparison
filter = {"price": {"$gte": 100}} # $gt, $gte, $lt, $lte, $ne
# Logical operators
filter = {
"$and": [
{"category": "tutorial"},
{"difficulty": {"$lte": 3}}
]
} # Also: $or
# In operator
filter = {"tags": {"$in": ["python", "ml"]}}
# Partition data by namespace
index.upsert(
vectors=[{"id": "vec1", "values": [...]}],
namespace="user-123"
)
# Query specific namespace
results = index.query(
vector=[...],
namespace="user-123",
top_k=5
)
# List namespaces
stats = index.describe_index_stats()
print(stats['namespaces'])
# Upsert with sparse vectors
index.upsert(vectors=[
{
"id": "doc1",
"values": [0.1, 0.2, ...], # Dense vector
"sparse_values": {
"indices": [10, 45, 123], # Token IDs
"values": [0.5, 0.3, 0.8] # TF-IDF scores
},
"metadata": {"text": "..."}
}
])
# Hybrid query
results = index.query(
vector=[0.1, 0.2, ...],
sparse_vector={
"indices": [10, 45],
"values": [0.5, 0.3]
},
top_k=5,
alpha=0.5 # 0=sparse, 1=dense, 0.5=hybrid
)
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAIEmbeddings
# Create vector store
vectorstore = PineconeVectorStore.from_documents(
documents=docs,
embedding=OpenAIEmbeddings(),
index_name="my-index"
)
# Query
results = vectorstore.similarity_search("query", k=5)
# With metadata filter
results = vectorstore.similarity_search(
"query",
k=5,
filter={"category": "tutorial"}
)
# As retriever
retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
from llama_index.vector_stores.pinecone import PineconeVectorStore
# Connect to Pinecone
pc = Pinecone(api_key="your-key")
pinecone_index = pc.Index("my-index")
# Create vector store
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# Use in LlamaIndex
from llama_index.core import StorageContext, VectorStoreIndex
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
# List indices
indexes = pc.list_indexes()
# Describe index
index_info = pc.describe_index("my-index")
print(index_info)
# Get index stats
stats = index.describe_index_stats()
print(f"Total vectors: {stats['total_vector_count']}")
print(f"Namespaces: {stats['namespaces']}")
# Delete index
pc.delete_index("my-index")
# Delete by ID
index.delete(ids=["vec1", "vec2"])
# Delete by filter
index.delete(filter={"category": "old"})
# Delete all in namespace
index.delete(delete_all=True, namespace="test")
# Delete entire index
index.delete(delete_all=True)
| Operation | Latency | Notes |
|---|---|---|
| Upsert | ~50-100ms | Per batch |
| Query (p50) | ~50ms | Depends on index size |
| Query (p95) | ~100ms | SLA target |
| Metadata filter | ~+10-20ms | Additional overhead |
Serverless:
Free tier:
Weekly Installs
236
Repository
GitHub Stars
22.6K
First Seen
Jan 21, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Fail
Installed on
opencode: 194
claude-code: 190
gemini-cli: 188
cursor: 180
codex: 174
github-copilot: 164
Azure 配额管理指南:服务限制、容量验证与配额增加方法
79,700 周安装
竞争对手研究指南:SEO、内容、反向链接与定价分析工具
231 周安装
Azure 工作负载自动升级评估工具 - 支持 Functions、App Service 计划与 SKU 迁移
231 周安装
Kaizen持续改进方法论:软件开发中的渐进式优化与防错设计实践指南
231 周安装
软件UI/UX设计指南:以用户为中心的设计原则、WCAG可访问性与平台规范
231 周安装
Apify 网络爬虫和自动化平台 - 无需编码抓取亚马逊、谷歌、领英等网站数据
231 周安装
llama.cpp 中文指南:纯 C/C++ LLM 推理,CPU/非 NVIDIA 硬件优化部署
231 周安装