openviking-context-database by aradotso/trending-skills
npx skills add https://github.com/aradotso/trending-skills --skill openviking-context-database
Skill by ara.so — Daily 2026 Skills collection.
OpenViking is an open-source context database for AI Agents that replaces fragmented vector stores with a unified filesystem paradigm. It manages agent memory, resources, and skills in a tiered L0/L1/L2 structure, enabling hierarchical context delivery, observable retrieval trajectories, and self-evolving session memory.
pip install openviking --upgrade --force-reinstall
# Install via script
curl -fsSL https://raw.githubusercontent.com/volcengine/OpenViking/main/crates/ov_cli/install.sh | bash
# Or build from source (requires Rust toolchain)
cargo install --git https://github.com/volcengine/OpenViking ov_cli
Create ~/.openviking/ov.conf:
{
"storage": {
"workspace": "/home/user/openviking_workspace"
},
"log": {
"level": "INFO",
"output": "stdout"
},
"embedding": {
"dense": {
"api_base": "https://api.openai.com/v1",
"api_key": "$OPENAI_API_KEY",
"provider": "openai",
"dimension": 1536,
"model": "text-embedding-3-large"
},
"max_concurrent": 10
},
"vlm": {
"api_base": "https://api.openai.com/v1",
"api_key": "$OPENAI_API_KEY",
"provider": "openai",
"model": "gpt-4o",
"max_concurrent": 100
}
}
Note: OpenViking reads api_key values as strings; use environment variable injection at startup rather than hard-coding literal secrets.
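For example, a minimal sketch of startup-time injection, assuming the $OPENAI_API_KEY placeholder in ov.conf is resolved from the process environment when the client starts:
import os
from openviking import OpenViking

# The real key lives only in the environment (shell, CI, or secret manager);
# ov.conf keeps the "$OPENAI_API_KEY" placeholder shown above.
assert os.environ.get("OPENAI_API_KEY"), "export OPENAI_API_KEY before starting"
ov = OpenViking(config_path="~/.openviking/ov.conf")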
| Role | Provider Value | Example Model |
|---|---|---|
| VLM | openai | gpt-4o |
| VLM | volcengine | doubao-seed-2-0-pro-260215 |
| VLM | litellm | claude-3-5-sonnet-20240620, ollama/llama3.1 |
| Embedding | openai | text-embedding-3-large |
| Embedding | volcengine | doubao-embedding-vision-250615 |
| Embedding | jina | jina-embeddings-v3 |
Anthropic Claude via LiteLLM:
{
"vlm": {
"provider": "litellm",
"model": "claude-3-5-sonnet-20240620",
"api_key": "$ANTHROPIC_API_KEY"
}
}
Local Ollama via LiteLLM:
{
"vlm": {
"provider": "litellm",
"model": "ollama/llama3.1",
"api_base": "http://localhost:11434"
}
}
DeepSeek via LiteLLM:
{
"vlm": {
"provider": "litellm",
"model": "deepseek-chat",
"api_key": "$DEEPSEEK_API_KEY"
}
}
OpenViking organizes agent context like a filesystem:
workspace/
├── memories/ # Long-term agent memories (L0 always loaded)
│ ├── user_prefs/
│ └── task_history/
├── resources/ # External knowledge, documents (L1 on demand)
│ ├── codebase/
│ └── docs/
└── skills/ # Reusable agent capabilities (L2 retrieved)
├── coding/
└── analysis/
This tiered approach minimizes token consumption while maximizing context relevance.
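As a rough sketch of what the tiers buy you, the helper below assembles a prompt from all three levels using the read() and search() calls introduced in the next section; the paths, top_k values, and budget split are illustrative assumptions, not fixed API behavior:
def build_context(brain, query: str) -> str:
    # L0: always-loaded memories, read directly by path
    prefs = brain.read("memories/user_prefs.md").content
    # L1: external resources, fetched on demand for this query
    resources = brain.search(query, directory="resources/", top_k=3)
    # L2: skills, retrieved only when relevant to the task
    skills = brain.search(query, directory="skills/", top_k=2)
    parts = [prefs] + [r.content for r in list(resources) + list(skills)]
    return "\n\n".join(parts)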
import os
from openviking import OpenViking
# Initialize with config file
ov = OpenViking(config_path="~/.openviking/ov.conf")
# Or initialize programmatically
ov = OpenViking(
    workspace="/home/user/openviking_workspace",
    vlm_provider="openai",
    vlm_model="gpt-4o",
    vlm_api_key=os.environ["OPENAI_API_KEY"],
    embedding_provider="openai",
    embedding_model="text-embedding-3-large",
    embedding_api_key=os.environ["OPENAI_API_KEY"],
    embedding_dimension=1536,
)
# Create or open a namespace (like a filesystem root for one agent)
brain = ov.namespace("my_agent")
# Add a memory file
brain.write("memories/user_prefs.md", """
# User Preferences
- Language: Python
- Code style: PEP8
- Preferred framework: FastAPI
""")
# Add a resource document
brain.write("resources/api_docs/stripe.md", open("stripe_docs.md").read())
# Add a skill
brain.write("skills/coding/write_tests.md", """
# Skill: Write Unit Tests
When asked to write tests, use pytest with fixtures.
Always mock external API calls. Aim for 80%+ coverage.
""")
# Semantic search across the namespace
results = brain.search("how does the user prefer code to be formatted?")
for result in results:
    print(result.path, result.score, result.content[:200])
# Directory-scoped retrieval (recursive)
skill_results = brain.search(
query="write unit tests for a FastAPI endpoint",
directory="skills/",
top_k=3,
)
# Direct path read (L0 always available)
prefs = brain.read("memories/user_prefs.md")
print(prefs.content)
# Start a session — OpenViking tracks turns and auto-compresses
session = brain.session("task_build_api")
# Add conversation turns
session.add_turn(role="user", content="Build me a REST API for todo items")
session.add_turn(role="assistant", content="I'll create a FastAPI app with CRUD operations...")
# After many turns, trigger compression to extract long-term memory
summary = session.compress()
# Compressed insights are automatically written to memories/
# End session — persists extracted memories
session.close()
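To verify what compression extracted, you can search the memories/ tier afterwards; this is a sketch, and the file names OpenViking writes are an assumption:
# Inspect extracted memories after compression (illustrative query and paths)
for r in brain.search("todo REST API", directory="memories/", top_k=3):
    print(r.path, r.score)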
# Enable trajectory tracking to observe retrieval decisions
with brain.observe() as tracker:
    results = brain.search("authentication best practices")
    trajectory = tracker.trajectory()

for step in trajectory.steps:
    print(f"[{step.level}] {step.path} → score={step.score:.3f}")
# Output:
# [L0] memories/user_prefs.md → score=0.82
# [L1] resources/security/auth.md → score=0.91
# [L2] skills/coding/jwt_auth.md → score=0.88
import os
from openviking import OpenViking
ov = OpenViking(config_path="~/.openviking/ov.conf")
brain = ov.namespace("coding_agent")
def agent_respond(user_message: str, conversation_history: list) -> str:
    # Retrieve relevant context
    context_results = brain.search(user_message, top_k=5)
    context_text = "\n\n".join(r.content for r in context_results)

    # Build prompt with retrieved context
    system_prompt = f"""You are a coding assistant.
## Relevant Context
{context_text}
"""
    # ... call your LLM here with system_prompt + conversation_history
    response = call_llm(system_prompt, conversation_history, user_message)

    # Store interaction for future memory
    brain.session("current").add_turn("user", user_message)
    brain.session("current").add_turn("assistant", response)
    return response
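To make the example runnable end to end, here is a hypothetical call_llm stand-in plus an invocation; swap the stub for your real LLM client:
def call_llm(system_prompt: str, history: list, user_message: str) -> str:
    # Hypothetical stub; replace with an actual chat-completion call.
    return f"(model reply to: {user_message!r})"

history: list = []
print(agent_respond("Add pagination to the /todos endpoint", history))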
# Register skills from a directory structure
import pathlib
skills_dir = pathlib.Path("./agent_skills")
for skill_file in skills_dir.rglob("*.md"):
    relative = skill_file.relative_to(skills_dir)
    brain.write(f"skills/{relative}", skill_file.read_text())
# At runtime, retrieve only relevant skills
def get_relevant_skills(task: str) -> list[str]:
    results = brain.search(task, directory="skills/", top_k=3)
    return [r.content for r in results]
task = "Refactor this class to use dependency injection"
skills = get_relevant_skills(task)
# Returns only DI-related skills, not all registered skills
import pathlib
brain = ov.namespace("codebase_agent")
# Index a codebase
def index_codebase(repo_path: str):
    for f in pathlib.Path(repo_path).rglob("*.py"):
        content = f.read_text(errors="ignore")
        # Store with relative path as key
        rel = f.relative_to(repo_path)
        brain.write(f"resources/codebase/{rel}", content)
index_codebase("/home/user/myproject")
# Query with directory scoping
def find_relevant_code(query: str) -> list:
    return brain.search(
        query=query,
        directory="resources/codebase/",
        top_k=5,
    )
hits = find_relevant_code("database connection pooling")
for h in hits:
    print(h.path, "\n", h.content[:300])
# Agent 1 writes discoveries
agent1_brain = ov.namespace("researcher_agent")
agent1_brain.write("memories/findings/api_rate_limits.md", """
# API Rate Limits Discovered
- Stripe: 100 req/s in live mode
- SendGrid: 600 req/min
""")
# Agent 2 reads shared workspace findings
agent2_brain = ov.namespace("coder_agent")
# Cross-namespace read (if permitted)
shared = ov.namespace("shared_knowledge")
rate_limits = shared.read("memories/findings/api_rate_limits.md")
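The read above assumes the shared_knowledge namespace has already been populated; here is a sketch of the researcher publishing its finding there first, using the same write()/read() API:
# Researcher mirrors its finding into the shared namespace
shared.write(
    "memories/findings/api_rate_limits.md",
    agent1_brain.read("memories/findings/api_rate_limits.md").content,
)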
# Check version
ov --version
# List namespaces
ov namespace list
# Create a namespace
ov namespace create my_agent
# Write context file
ov write my_agent/memories/prefs.md --file ./prefs.md
# Read a file
ov read my_agent/memories/prefs.md
# Search context
ov search my_agent "how to handle authentication" --top-k 5
# Show retrieval trajectory for a query
ov search my_agent "database migrations" --trace
# Compress a session
ov session compress my_agent/task_build_api
# List files in namespace
ov ls my_agent/skills/
# Delete a context file
ov rm my_agent/resources/outdated_docs.md
# Export namespace to local directory
ov export my_agent ./exported_brain/
# Import from local directory
ov import ./exported_brain/ my_agent_restored
# Verify config location
ls -la ~/.openviking/ov.conf
# OpenViking also checks OV_CONFIG env var
export OV_CONFIG=/path/to/custom/ov.conf
If you switch embedding models, the stored vector dimensions will conflict:
# Check current dimension setting vs stored index
# Solution: re-index after model change
brain.reindex(force=True)
# Ensure workspace directory is writable
chmod -R 755 /home/user/openviking_workspace
# Check disk space (embedding indexes can be large)
df -h /home/user/openviking_workspace
# Use explicit prefix for ambiguous models
{
"vlm": {
"provider": "litellm",
"model": "openrouter/anthropic/claude-3-5-sonnet", # full prefix required
"api_key": "$OPENROUTER_API_KEY",
"api_base": "https://openrouter.ai/api/v1"
}
}
Enable tiered loading to reduce L1/L2 fetches:
# Scope searches tightly to avoid over-fetching
results = brain.search(
    query=user_message,
    directory="skills/relevant_domain/",  # narrow scope
    top_k=2,  # fewer results
    min_score=0.75,  # quality threshold
)
# Increase concurrency in config
{
"embedding": {
"max_concurrent": 20 # increase from default 10
},
"vlm": {
"max_concurrent": 50
}
}
# Or batch-write with async
import asyncio
async def index_async(files):
    tasks = [brain.awrite(f"resources/{p}", c) for p, c in files]
    await asyncio.gather(*tasks)
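A usage sketch for the async path, assuming awrite() is the async counterpart of write() as the snippet above implies:
# Kick off a small concurrent indexing run
docs = [("docs/a.md", "# Doc A"), ("docs/b.md", "# Doc B")]
asyncio.run(index_async(docs))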
| Variable | Purpose |
|---|---|
| OV_CONFIG | Path to ov.conf override |
| OPENAI_API_KEY | OpenAI API key for VLM/embedding |
| ANTHROPIC_API_KEY | Anthropic Claude via LiteLLM |
| DEEPSEEK_API_KEY | DeepSeek via LiteLLM |
| GEMINI_API_KEY | Google Gemini via LiteLLM |
| OV_LOG_LEVEL | Override log level (DEBUG, INFO, WARN) |
| OV_WORKSPACE | Override workspace path |
Weekly Installs: 258
Repository: https://github.com/aradotso/trending-skills
GitHub Stars: 10
First Seen: 6 days ago
Security Audits: Gen Agent Trust Hub (Fail), Socket (Pass), Snyk (Fail)
Installed on: github-copilot (257), codex (257), amp (257), cline (257), kimi-cli (257), gemini-cli (257)