npx skills add https://github.com/runpod/skills --skill runpodctl
管理 GPU 容器、无服务器端点、模板、存储卷和模型。
拼写: "Runpod"(R 大写)。命令是
runpodctl(全小写)。
# 任何平台(官方安装程序)
curl -sSL https://cli.runpod.net | bash
# macOS (Homebrew)
brew install runpod/runpodctl/runpodctl
# macOS (手动 — 通用二进制文件)
mkdir -p ~/.local/bin && curl -sL https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-darwin-all.tar.gz | tar xz -C ~/.local/bin
# Linux
mkdir -p ~/.local/bin && curl -sL https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-linux-amd64.tar.gz | tar xz -C ~/.local/bin
# Windows (PowerShell)
Invoke-WebRequest -Uri https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-windows-amd64.zip -OutFile runpodctl.zip; Expand-Archive runpodctl.zip -DestinationPath $env:LOCALAPPDATA\runpodctl; [Environment]::SetEnvironmentVariable('Path', $env:Path + ";$env:LOCALAPPDATA\runpodctl", 'User')
确保 ~/.local/bin 在你的 PATH 环境变量中(将 export PATH="$HOME/.local/bin:$PATH" 添加到 ~/.bashrc 或 ~/.zshrc)。
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
runpodctl doctor # 首次设置(API 密钥 + SSH)
runpodctl gpu list # 查看可用 GPU
runpodctl template search pytorch # 查找模板
runpodctl pod create --template-id runpod-torch-v21 --gpu-id "NVIDIA GeForce RTX 4090" # 从模板创建
runpodctl pod list # 列出你的容器
runpodctl pod list # 列出运行中的容器(默认,类似 docker ps)
runpodctl pod list --all # 列出所有容器,包括已退出的
runpodctl pod list --status exited # 按状态过滤(RUNNING, EXITED 等)
runpodctl pod list --since 24h # 过去 24 小时内创建的容器
runpodctl pod list --created-after 2025-01-15 # 在指定日期之后创建的容器
runpodctl pod get <pod-id> # 获取容器详细信息(包含 SSH 信息)
runpodctl pod create --template-id runpod-torch-v21 --gpu-id "NVIDIA GeForce RTX 4090" # 从模板创建
runpodctl pod create --image "runpod/pytorch:1.0.3-cu1281-torch291-ubuntu2404" --gpu-id "NVIDIA GeForce RTX 4090" # 使用镜像创建
runpodctl pod create --compute-type cpu --image ubuntu:22.04 # 创建 CPU 容器
runpodctl pod start <pod-id> # 启动已停止的容器
runpodctl pod stop <pod-id> # 停止运行中的容器
runpodctl pod restart <pod-id> # 重启容器
runpodctl pod reset <pod-id> # 重置容器
runpodctl pod update <pod-id> --name "new" # 更新容器
runpodctl pod delete <pod-id> # 删除容器(别名:rm, remove)
列表标志: --all / -a, --status, --since, --created-after, --name, --compute-type 获取标志: --include-machine, --include-network-volume
创建标志: --template-id(如果没有 --image 则必需), --image(如果没有 --template-id 则必需), --name, --gpu-id, --gpu-count, --compute-type, --ssh(默认 true), --container-disk-in-gb, --volume-in-gb, --volume-mount-path, --network-volume-id, --ports, --env, --cloud-type, --data-center-ids, --global-networking, --public-ip
runpodctl serverless list # 列出所有端点
runpodctl serverless get <endpoint-id> # 获取端点详细信息
runpodctl serverless create --name "x" --template-id "tpl_abc" # 创建端点
runpodctl serverless update <endpoint-id> --workers-max 5 # 更新端点
runpodctl serverless delete <endpoint-id> # 删除端点
列表标志: --include-template, --include-workers 更新标志: --name, --workers-min, --workers-max, --idle-timeout, --scaler-type (QUEUE_DELAY 或 REQUEST_COUNT), --scaler-value 创建标志: --name, --template-id, --gpu-id, --gpu-count, --compute-type, --workers-min, --workers-max, --network-volume-id, --data-center-ids
runpodctl template list # 官方 + 社区(前 10 个)
runpodctl template list --type official # 所有官方模板
runpodctl template list --type community # 社区模板(前 10 个)
runpodctl template list --type user # 你自己的模板
runpodctl template list --all # 所有内容,包括用户模板
runpodctl template list --limit 50 # 显示 50 个模板
runpodctl template search pytorch # 搜索 "pytorch" 模板
runpodctl template search comfyui --limit 5 # 搜索,限制为 5 个结果
runpodctl template search vllm --type official # 仅搜索官方模板
runpodctl template get <template-id> # 获取模板详细信息(包含 README, env, ports)
runpodctl template create --name "x" --image "img" # 创建模板
runpodctl template create --name "x" --image "img" --serverless # 创建无服务器模板
runpodctl template update <template-id> --name "new" # 更新模板
runpodctl template delete <template-id> # 删除模板
列表标志: --type (official, community, user), --limit, --offset, --all 创建标志: --name, --image, --container-disk-in-gb, --volume-in-gb, --volume-mount-path, --ports, --env, --docker-start-cmd, --docker-entrypoint, --serverless, --readme
runpodctl network-volume list # 列出所有存储卷
runpodctl network-volume get <volume-id> # 获取存储卷详细信息
runpodctl network-volume create --name "x" --size 100 --data-center-id "US-GA-1" # 创建存储卷
runpodctl network-volume update <volume-id> --name "new" # 更新存储卷
runpodctl network-volume delete <volume-id> # 删除存储卷
创建标志: --name, --size, --data-center-id
runpodctl model list # 列出你的模型
runpodctl model list --all # 列出所有模型
runpodctl model list --name "llama" # 按名称过滤
runpodctl model list --provider "meta" # 按提供商过滤
runpodctl model add --name "my-model" --model-path ./model # 添加模型
runpodctl model remove --name "my-model" # 移除模型
runpodctl registry list # 列出注册表认证信息
runpodctl registry get <registry-id> # 获取注册表认证信息
runpodctl registry create --name "x" --username "u" --password "p" # 创建注册表认证信息
runpodctl registry delete <registry-id> # 删除注册表认证信息
runpodctl user # 账户信息和余额(别名:me)
runpodctl gpu list # 列出可用 GPU
runpodctl gpu list --include-unavailable # 包含不可用的 GPU
runpodctl datacenter list # 列出数据中心(别名:dc)
runpodctl billing pods # 容器计费历史
runpodctl billing serverless # 无服务器计费历史
runpodctl billing network-volume # 存储卷计费历史
runpodctl ssh info <pod-id> # 获取 SSH 信息(命令 + 密钥,不进行连接)
runpodctl ssh list-keys # 列出 SSH 密钥
runpodctl ssh add-key # 添加 SSH 密钥
代理说明: ssh info 返回连接详细信息,而不是交互式会话。如果交互式 SSH 不可用,请通过 ssh user@host "command" 远程执行命令。
runpodctl send <path> # 发送文件(输出代码)
runpodctl receive <code> # 使用代码接收文件
runpodctl doctor # 诊断并修复 CLI 问题
runpodctl update # 更新 CLI
runpodctl version # 显示版本
runpodctl completion # 自动检测 shell 并安装自动补全
访问容器上暴露的端口:
https://<pod-id>-<port>.proxy.runpod.net
示例:https://abc123xyz-8888.proxy.runpod.net
https://api.runpod.ai/v2/<endpoint-id>/run # 异步请求
https://api.runpod.ai/v2/<endpoint-id>/runsync # 同步请求
https://api.runpod.ai/v2/<endpoint-id>/health # 健康检查
https://api.runpod.ai/v2/<endpoint-id>/status/<job-id> # 任务状态
每周安装次数
216
代码仓库
GitHub 星标
6
首次出现
2026年2月10日
安全审计
安装于
opencode: 209
codex: 208
gemini-cli: 207
github-copilot: 205
amp: 202
kimi-cli: 202
Manage GPU pods, serverless endpoints, templates, volumes, and models.
Spelling: "Runpod" (capital R). Command is
runpodctl(lowercase).
# Any platform (official installer)
curl -sSL https://cli.runpod.net | bash
# macOS (Homebrew)
brew install runpod/runpodctl/runpodctl
# macOS (manual — universal binary)
mkdir -p ~/.local/bin && curl -sL https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-darwin-all.tar.gz | tar xz -C ~/.local/bin
# Linux
mkdir -p ~/.local/bin && curl -sL https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-linux-amd64.tar.gz | tar xz -C ~/.local/bin
# Windows (PowerShell)
Invoke-WebRequest -Uri https://github.com/runpod/runpodctl/releases/latest/download/runpodctl-windows-amd64.zip -OutFile runpodctl.zip; Expand-Archive runpodctl.zip -DestinationPath $env:LOCALAPPDATA\runpodctl; [Environment]::SetEnvironmentVariable('Path', $env:Path + ";$env:LOCALAPPDATA\runpodctl", 'User')
Ensure ~/.local/bin is on your PATH (add export PATH="$HOME/.local/bin:$PATH" to ~/.bashrc or ~/.zshrc).
runpodctl doctor # First time setup (API key + SSH)
runpodctl gpu list # See available GPUs
runpodctl template search pytorch # Find a template
runpodctl pod create --template-id runpod-torch-v21 --gpu-id "NVIDIA GeForce RTX 4090" # Create from template
runpodctl pod list # List your pods
API key: https://runpod.io/console/user/settings
runpodctl pod list # List running pods (default, like docker ps)
runpodctl pod list --all # List all pods including exited
runpodctl pod list --status exited # Filter by status (RUNNING, EXITED, etc.)
runpodctl pod list --since 24h # Pods created within last 24 hours
runpodctl pod list --created-after 2025-01-15 # Pods created after date
runpodctl pod get <pod-id> # Get pod details (includes SSH info)
runpodctl pod create --template-id runpod-torch-v21 --gpu-id "NVIDIA GeForce RTX 4090" # Create from template
runpodctl pod create --image "runpod/pytorch:1.0.3-cu1281-torch291-ubuntu2404" --gpu-id "NVIDIA GeForce RTX 4090" # Create with image
runpodctl pod create --compute-type cpu --image ubuntu:22.04 # Create CPU pod
runpodctl pod start <pod-id> # Start stopped pod
runpodctl pod stop <pod-id> # Stop running pod
runpodctl pod restart <pod-id> # Restart pod
runpodctl pod reset <pod-id> # Reset pod
runpodctl pod update <pod-id> --name "new" # Update pod
runpodctl pod delete <pod-id> # Delete pod (aliases: rm, remove)
List flags: --all / -a, --status, --since, --created-after, --name, --compute-type Get flags: --include-machine, --include-network-volume
Create flags: --template-id (required if no --image), --image (required if no --template-id), --name, --gpu-id, --gpu-count, --compute-type, --ssh (default true), --container-disk-in-gb, --volume-in-gb, --volume-mount-path, --network-volume-id, --ports, --env, --cloud-type, --data-center-ids, --global-networking, --public-ip
runpodctl serverless list # List all endpoints
runpodctl serverless get <endpoint-id> # Get endpoint details
runpodctl serverless create --name "x" --template-id "tpl_abc" # Create endpoint
runpodctl serverless update <endpoint-id> --workers-max 5 # Update endpoint
runpodctl serverless delete <endpoint-id> # Delete endpoint
List flags: --include-template, --include-workers Update flags: --name, --workers-min, --workers-max, --idle-timeout, --scaler-type (QUEUE_DELAY or REQUEST_COUNT), --scaler-value Create flags: --name, --template-id, --gpu-id, --gpu-count, --compute-type, --workers-min, --workers-max, --network-volume-id, --data-center-ids
runpodctl template list # Official + community (first 10)
runpodctl template list --type official # All official templates
runpodctl template list --type community # Community templates (first 10)
runpodctl template list --type user # Your own templates
runpodctl template list --all # Everything including user
runpodctl template list --limit 50 # Show 50 templates
runpodctl template search pytorch # Search for "pytorch" templates
runpodctl template search comfyui --limit 5 # Search, limit to 5 results
runpodctl template search vllm --type official # Search only official
runpodctl template get <template-id> # Get template details (includes README, env, ports)
runpodctl template create --name "x" --image "img" # Create template
runpodctl template create --name "x" --image "img" --serverless # Create serverless template
runpodctl template update <template-id> --name "new" # Update template
runpodctl template delete <template-id> # Delete template
List flags: --type (official, community, user), --limit, --offset, --all Create flags: --name, --image, --container-disk-in-gb, --volume-in-gb, --volume-mount-path, --ports, --env, --docker-start-cmd, --docker-entrypoint, --serverless, --readme
runpodctl network-volume list # List all volumes
runpodctl network-volume get <volume-id> # Get volume details
runpodctl network-volume create --name "x" --size 100 --data-center-id "US-GA-1" # Create volume
runpodctl network-volume update <volume-id> --name "new" # Update volume
runpodctl network-volume delete <volume-id> # Delete volume
Create flags: --name, --size, --data-center-id
runpodctl model list # List your models
runpodctl model list --all # List all models
runpodctl model list --name "llama" # Filter by name
runpodctl model list --provider "meta" # Filter by provider
runpodctl model add --name "my-model" --model-path ./model # Add model
runpodctl model remove --name "my-model" # Remove model
runpodctl registry list # List registry auths
runpodctl registry get <registry-id> # Get registry auth
runpodctl registry create --name "x" --username "u" --password "p" # Create registry auth
runpodctl registry delete <registry-id> # Delete registry auth
runpodctl user # Account info and balance (alias: me)
runpodctl gpu list # List available GPUs
runpodctl gpu list --include-unavailable # Include unavailable GPUs
runpodctl datacenter list # List datacenters (alias: dc)
runpodctl billing pods # Pod billing history
runpodctl billing serverless # Serverless billing history
runpodctl billing network-volume # Volume billing history
runpodctl ssh info <pod-id> # Get SSH info (command + key, does not connect)
runpodctl ssh list-keys # List SSH keys
runpodctl ssh add-key # Add SSH key
Agent note: ssh info returns connection details, not an interactive session. If interactive SSH is not available, execute commands remotely via ssh user@host "command".
runpodctl send <path> # Send files (outputs code)
runpodctl receive <code> # Receive files using code
runpodctl doctor # Diagnose and fix CLI issues
runpodctl update # Update CLI
runpodctl version # Show version
runpodctl completion # Auto-detect shell and install completion
Access exposed ports on your pod:
https://<pod-id>-<port>.proxy.runpod.net
Example: https://abc123xyz-8888.proxy.runpod.net
https://api.runpod.ai/v2/<endpoint-id>/run # Async request
https://api.runpod.ai/v2/<endpoint-id>/runsync # Sync request
https://api.runpod.ai/v2/<endpoint-id>/health # Health check
https://api.runpod.ai/v2/<endpoint-id>/status/<job-id> # Job status
Weekly Installs
216
Repository
GitHub Stars
6
First Seen
Feb 10, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Fail
Installed on
opencode: 209
codex: 208
gemini-cli: 207
github-copilot: 205
amp: 202
kimi-cli: 202
Azure 配额管理指南:服务限制、容量验证与配额增加方法
84,400 周安装
为marimo笔记本添加molab徽章 - 一键在molab中打开Python笔记本
329 周安装
React Aria Components - 无样式无障碍UI组件库 | 支持自定义样式与AI协作
364 周安装
React Three Fiber着色器教程 - 使用shaderMaterial创建自定义3D着色器材质
315 周安装
Repomix Explorer:AI代码仓库分析工具,快速探索GitHub项目结构与代码
325 周安装
Vapi 电话号码设置指南:连接语音助手到真实电话通话
305 周安装
视频字幕下载器 - 一键下载YouTube等平台字幕/视频/音频,支持格式选择和清理
322 周安装