phoenix-cli by arize-ai/phoenix
npx skills add https://github.com/arize-ai/phoenix --skill phoenix-cli
px <command> # 如果已全局安装
npx @arizeai/phoenix-cli <command> # 无需安装
export PHOENIX_HOST=http://localhost:6006
export PHOENIX_PROJECT=my-project
export PHOENIX_API_KEY=your-api-key # 如果启用了身份验证
当管道传输到 jq 时,请始终使用 --format raw --no-progress。
px traces --limit 20 --format raw --no-progress | jq .
px traces --last-n-minutes 60 --limit 20 --format raw --no-progress | jq '.[] | select(.status == "ERROR")'
px traces --format raw --no-progress | jq 'sort_by(-.duration) | .[0:5]'
px trace <trace-id> --format raw | jq .
px trace <trace-id> --format raw | jq '.spans[] | select(.status_code != "OK")'
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
Trace
traceId, status ("OK"|"ERROR"), duration (ms), startTime, endTime
rootSpan — 顶级跨度(parent_id: null)
spans[]
name, span_kind ("LLM"|"CHAIN"|"TOOL"|"RETRIEVER"|"EMBEDDING"|"AGENT")
status_code ("OK"|"ERROR"), parent_id, context.span_id
attributes
input.value, output.value — 原始输入/输出
llm.model_name, llm.provider
llm.token_count.prompt/completion/total
llm.token_count.prompt_details.cache_read
llm.token_count.completion_details.reasoning
llm.input_messages.{N}.message.role/content
llm.output_messages.{N}.message.role/content
llm.invocation_parameters — JSON 字符串(temperature 等)
exception.message — 如果跨度出错则设置
px sessions --limit 10 --format raw --no-progress | jq .
px sessions --order asc --format raw --no-progress | jq '.[].session_id'
px session <session-id> --format raw | jq .
px session <session-id> --include-annotations --format raw | jq '.annotations'
SessionData
id, session_id, project_id
start_time, end_time
traces[]
id, trace_id, start_time, end_time
SessionAnnotation (使用 --include-annotations)
id, name, annotator_kind ("LLM"|"CODE"|"HUMAN"), session_id
result { label, score, explanation }
metadata, identifier, source, created_at, updated_at
px datasets --format raw --no-progress | jq '.[].name'
px dataset <name> --format raw | jq '.examples[] | {input, output: .expected_output}'
px experiments --dataset <name> --format raw --no-progress | jq '.[] | {id, name, failed_run_count}'
px experiment <id> --format raw --no-progress | jq '.[] | select(.error != null) | {input, error}'
px prompts --format raw --no-progress | jq '.[].name'
px prompt <name> --format text --no-progress # 纯文本,适合管道传输给 AI
用于上述命令未涵盖的即席查询。输出格式为 {"data": {...}}。
px api graphql '{ projectCount datasetCount promptCount evaluatorCount }'
px api graphql '{ projects { edges { node { name traceCount tokenCountTotal } } } }' | jq '.data.projects.edges[].node'
px api graphql '{ datasets { edges { node { name exampleCount experimentCount } } } }' | jq '.data.datasets.edges[].node'
px api graphql '{ evaluators { edges { node { name kind } } } }' | jq '.data.evaluators.edges[].node'
# 内省任何类型
px api graphql '{ __type(name: "Project") { fields { name type { name } } } }' | jq '.data.__type.fields[]'
关键根字段:projects, datasets, prompts, evaluators, projectCount, datasetCount, promptCount, evaluatorCount, viewer。
每周安装量
115
代码仓库
GitHub 星标数
8.8K
首次出现
2026年1月24日
安全审计
安装于
gemini-cli 98
claude-code 97
opencode 93
github-copilot 93
codex 91
cursor 90
px <command> # if installed globally
npx @arizeai/phoenix-cli <command> # no install required
export PHOENIX_HOST=http://localhost:6006
export PHOENIX_PROJECT=my-project
export PHOENIX_API_KEY=your-api-key # if auth is enabled
Always use --format raw --no-progress when piping to jq.
px traces --limit 20 --format raw --no-progress | jq .
px traces --last-n-minutes 60 --limit 20 --format raw --no-progress | jq '.[] | select(.status == "ERROR")'
px traces --format raw --no-progress | jq 'sort_by(-.duration) | .[0:5]'
px trace <trace-id> --format raw | jq .
px trace <trace-id> --format raw | jq '.spans[] | select(.status_code != "OK")'
Trace
traceId, status ("OK"|"ERROR"), duration (ms), startTime, endTime
rootSpan — top-level span (parent_id: null)
spans[]
name, span_kind ("LLM"|"CHAIN"|"TOOL"|"RETRIEVER"|"EMBEDDING"|"AGENT")
status_code ("OK"|"ERROR"), parent_id, context.span_id
attributes
input.value, output.value — raw input/output
llm.model_name, llm.provider
llm.token_count.prompt/completion/total
llm.token_count.prompt_details.cache_read
llm.token_count.completion_details.reasoning
llm.input_messages.{N}.message.role/content
llm.output_messages.{N}.message.role/content
llm.invocation_parameters — JSON string (temperature, etc.)
exception.message — set if span errored
px sessions --limit 10 --format raw --no-progress | jq .
px sessions --order asc --format raw --no-progress | jq '.[].session_id'
px session <session-id> --format raw | jq .
px session <session-id> --include-annotations --format raw | jq '.annotations'
SessionData
id, session_id, project_id
start_time, end_time
traces[]
id, trace_id, start_time, end_time
SessionAnnotation (with --include-annotations)
id, name, annotator_kind ("LLM"|"CODE"|"HUMAN"), session_id
result { label, score, explanation }
metadata, identifier, source, created_at, updated_at
px datasets --format raw --no-progress | jq '.[].name'
px dataset <name> --format raw | jq '.examples[] | {input, output: .expected_output}'
px experiments --dataset <name> --format raw --no-progress | jq '.[] | {id, name, failed_run_count}'
px experiment <id> --format raw --no-progress | jq '.[] | select(.error != null) | {input, error}'
px prompts --format raw --no-progress | jq '.[].name'
px prompt <name> --format text --no-progress # plain text, ideal for piping to AI
For ad-hoc queries not covered by the commands above. Output is {"data": {...}}.
px api graphql '{ projectCount datasetCount promptCount evaluatorCount }'
px api graphql '{ projects { edges { node { name traceCount tokenCountTotal } } } }' | jq '.data.projects.edges[].node'
px api graphql '{ datasets { edges { node { name exampleCount experimentCount } } } }' | jq '.data.datasets.edges[].node'
px api graphql '{ evaluators { edges { node { name kind } } } }' | jq '.data.evaluators.edges[].node'
# Introspect any type
px api graphql '{ __type(name: "Project") { fields { name type { name } } } }' | jq '.data.__type.fields[]'
Key root fields: projects, datasets, prompts, evaluators, projectCount, datasetCount, promptCount, evaluatorCount, viewer.
Weekly Installs
115
Repository
GitHub Stars
8.8K
First Seen
Jan 24, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
gemini-cli 98
claude-code 97
opencode 93
github-copilot 93
codex 91
cursor 90
超能力技能使用指南:AI助手技能调用优先级与工作流程详解
46,500 周安装
Docker安全指南:全面容器安全最佳实践、漏洞扫描与合规性要求
177 周安装
iOS开发专家技能:精通Swift 6、SwiftUI与原生应用开发,涵盖架构、性能与App Store合规
177 周安装
describe技能:AI驱动结构化测试用例生成,提升代码质量与评审效率
2 周安装
专业 README 生成器 | 支持 Rust/TypeScript/Python 项目,自动应用最佳实践
2 周安装
Django 6 升级指南:从 Django 5 迁移的完整步骤与重大变更解析
1 周安装
GitLab DAG与并行处理指南:needs与parallel优化CI/CD流水线速度
2 周安装