corridorkey-green-screen by aradotso/trending-skills
npx skills add https://github.com/aradotso/trending-skills --skill corridorkey-green-screen
技能由 ara.so 提供 — Daily 2026 Skills 合集。
CorridorKey 是一个解决绿幕素材中颜色分离问题的神经网络。对于每个像素——包括来自运动模糊、头发或失焦边缘的半透明像素——它能预测真实的直出(非预乘)前景颜色和干净的线性 Alpha 通道。它读写 16 位和 32 位的 EXR 文件,以便集成到 VFX 流程中。
每帧需要两个输入:
模型根据提示填充精细细节;它是在模糊/腐蚀的遮罩上训练的。
# 双击或在终端中运行:
Install_CorridorKey_Windows.bat
# 可选的重型模块:
Install_GVM_Windows.bat
Install_VideoMaMa_Windows.bat
# 安装 uv
curl -LsSf https://astral.sh/uv/install.sh | sh
# 安装依赖项 — 选择一项:
uv sync # CPU / Apple MPS (通用)
uv sync --extra cuda # NVIDIA GPU (Linux/Windows)
uv sync --extra mlx # Apple Silicon MLX
# 下载所需模型 (~300MB)
mkdir -p CorridorKeyModule/checkpoints
# 将下载的 CorridorKey_v1.0.pth 放置为:
# CorridorKeyModule/checkpoints/CorridorKey.pth
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# GVM (自动,~80GB VRAM,适用于人物)
uv run hf download geyongtao/gvm --local-dir gvm_core/weights
# VideoMaMa (需要遮罩提示,社区调整后 <24GB VRAM)
uv run hf download SammyLim/VideoMaMa \
--local-dir VideoMaMaInferenceModule/checkpoints/VideoMaMa
uv run hf download stabilityai/stable-video-diffusion-img2vid-xt \
--local-dir VideoMaMaInferenceModule/checkpoints/stable-video-diffusion-img2vid-xt \
--include "feature_extractor/*" "image_encoder/*" "vae/*" "model_index.json"
# 对准备好的片段运行推理
uv run python main.py run_inference --device cuda
uv run python main.py run_inference --device cpu
uv run python main.py run_inference --device mps # Apple Silicon
# 列出可用的片段/镜头
uv run python main.py list
# 交互式设置向导
uv run python main.py wizard
uv run python main.py wizard --win_path /path/to/ClipsForInference
# 构建
docker build -t corridorkey:latest .
# 运行推理
docker run --rm -it --gpus all \
-e OPENCV_IO_ENABLE_OPENEXR=1 \
-v "$(pwd)/ClipsForInference:/app/ClipsForInference" \
-v "$(pwd)/Output:/app/Output" \
-v "$(pwd)/CorridorKeyModule/checkpoints:/app/CorridorKeyModule/checkpoints" \
corridorkey:latest run_inference --device cuda
# Docker Compose
docker compose build
docker compose --profile gpu run --rm corridorkey run_inference --device cuda
docker compose --profile gpu run --rm corridorkey list
# 在多 GPU 系统上固定到特定 GPU
NVIDIA_VISIBLE_DEVICES=0 docker compose --profile gpu run --rm corridorkey run_inference --device cuda
CorridorKey/
├── ClipsForInference/ # 输入镜头放在这里
│ └── my_shot/
│ ├── frames/ # 绿幕 RGB 帧 (PNG/EXR)
│ ├── alpha_hints/ # 粗略的 Alpha 遮罩 (灰度)
│ └── VideoMamaMaskHint/ # 可选:VideoMaMa 的手绘提示
├── Output/ # 处理结果
│ └── my_shot/
│ ├── foreground/ # 直出 RGBA EXR 帧
│ └── alpha/ # 线性 Alpha 通道帧
├── CorridorKeyModule/
│ └── checkpoints/
│ └── CorridorKey.pth # 必需的模型权重
├── gvm_core/weights/ # 可选的 GVM 权重
└── VideoMaMaInferenceModule/
└── checkpoints/ # 可选的 VideoMaMa 权重
import torch
from pathlib import Path
from CorridorKeyModule.model import CorridorKeyModel  # adjust to the actual module path
from CorridorKeyModule.inference import run_inference

# Load the model onto the best available device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CorridorKeyModel()
# map_location lets a CUDA-saved checkpoint load on CPU-only machines;
# weights_only=True restricts unpickling to tensors/containers, avoiding
# arbitrary-code execution from an untrusted checkpoint file.
state_dict = torch.load(
    "CorridorKeyModule/checkpoints/CorridorKey.pth",
    map_location=device,
    weights_only=True,
)
model.load_state_dict(state_dict)
model.to(device)
model.eval()

# Run inference on a prepared shot folder.
run_inference(
    shot_dir=Path("ClipsForInference/my_shot"),
    output_dir=Path("Output/my_shot"),
    device=device,
)
import os

# OPENCV_IO_ENABLE_OPENEXR must be set BEFORE cv2 is imported,
# otherwise OpenCV's EXR codec stays disabled (see troubleshooting below).
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import numpy as np

# Read a 32-bit linear EXR frame.
frame = cv2.imread("frame_0001.exr", cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
# frame is float32, linear light, BGR channel order.

# Convert BGR -> RGB for processing.
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

# Write an output EXR (straight RGBA).
# Assumes `foreground` is a float32 HxWx4 array (RGBA, linear, straight alpha)
# produced earlier in the pipeline.
foreground_bgra = cv2.cvtColor(foreground, cv2.COLOR_RGBA2BGRA)
cv2.imwrite("output_0001.exr", foreground_bgra.astype(np.float32))
import cv2
import numpy as np

def generate_chroma_key_hint(image_bgr: np.ndarray, erode_px: int = 5) -> np.ndarray:
    """
    Build a quick, coarse green-screen hint mask for CorridorKey input.

    Returns a grayscale mask where 0 = background and 255 = foreground.
    """
    # Key the green in HSV space; tune the bounds for your specific screen.
    hsv_img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    bounds_lo = np.array([35, 50, 50])
    bounds_hi = np.array([85, 255, 255])
    screen_mask = cv2.inRange(hsv_img, bounds_lo, bounds_hi)
    subject_mask = cv2.bitwise_not(screen_mask)
    # Pull the mask inward, away from edges (CorridorKey reconstructs edge detail).
    erode_kernel = np.ones((erode_px, erode_px), np.uint8)
    shrunk = cv2.erode(subject_mask, erode_kernel, iterations=2)
    # Optional: slight Gaussian blur to soften the hint.
    return cv2.GaussianBlur(shrunk, (15, 15), 5)

# Usage
frame = cv2.imread("greenscreen_frame.png")
hint = generate_chroma_key_hint(frame, erode_px=8)
cv2.imwrite("alpha_hint.png", hint)
import os

# Must be set before cv2 is imported, or OpenCV cannot read/write EXR files.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import shutil
from pathlib import Path

import cv2
import numpy as np

def prepare_shot_folder(
    raw_frames_dir: Path,
    output_shot_dir: Path,
    hint_generator_fn=None,
):
    """
    Prepare a CorridorKey shot folder from raw green-screen frames.

    Copies each PNG/EXR frame into <shot>/frames/ and writes a matching
    grayscale alpha hint into <shot>/alpha_hints/.

    Args:
        raw_frames_dir: Directory containing raw *.png / *.exr frames.
        output_shot_dir: Shot folder to create (e.g. ClipsForInference/shot_01).
        hint_generator_fn: Optional callable(frame) -> mask; defaults to
            generate_chroma_key_hint.
    """
    frames_out = output_shot_dir / "frames"
    hints_out = output_shot_dir / "alpha_hints"
    frames_out.mkdir(parents=True, exist_ok=True)
    hints_out.mkdir(parents=True, exist_ok=True)

    frame_paths = sorted(raw_frames_dir.glob("*.png")) + \
                  sorted(raw_frames_dir.glob("*.exr"))
    prepared = 0
    for frame_path in frame_paths:
        frame = cv2.imread(str(frame_path), cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
        if frame is None:
            # Unreadable/corrupt file — skip it instead of crashing mid-batch.
            print(f"跳过无法读取的帧: {frame_path}")
            continue
        # Byte-for-byte copy preserves the original encoding; re-encoding via
        # cv2.imwrite could alter EXR compression/precision.
        shutil.copy2(frame_path, frames_out / frame_path.name)
        # Generate the alpha hint for this frame.
        hint = hint_generator_fn(frame) if hint_generator_fn else generate_chroma_key_hint(frame)
        cv2.imwrite(str(hints_out / (frame_path.stem + ".png")), hint)
        prepared += 1
    print(f"在 {output_shot_dir} 中准备了 {prepared} 帧")

prepare_shot_folder(
    raw_frames_dir=Path("raw_footage/shot_01"),
    output_shot_dir=Path("ClipsForInference/shot_01"),
)
# GVM (automatic — no extra input needed; heavy module, ~80GB VRAM)
from clip_manager import generate_alpha_hints_gvm
generate_alpha_hints_gvm(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# VideoMaMa (place a rough mask in VideoMamaMaskHint/ first)
from clip_manager import generate_alpha_hints_videomama
generate_alpha_hints_videomama(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# BiRefNet (lightweight option — no large VRAM needed)
from clip_manager import generate_alpha_hints_birefnet
generate_alpha_hints_birefnet(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# GOOD: eroded, slightly blurred hint — pulled away from the edges.
# The model fills in edge detail from the hint.
kernel = np.ones((10, 10), np.uint8)
good_hint = cv2.erode(raw_mask, kernel, iterations=3)
good_hint = cv2.GaussianBlur(good_hint, (21, 21), 7)
# BAD: expanded / dilated hint — the model is worse at subtracting.
# Do not push the mask OUTWARD past the true subject boundary.
bad_hint = cv2.dilate(raw_mask, kernel, iterations=3) # avoid this
# ACCEPTABLE: a binary rough chroma key as-is.
# Even a hard binary mask works — as long as it is not expanded.
acceptable_hint = raw_chroma_key_mask # no dilation
CorridorKey 输出直出(非预乘)的 RGBA EXR,采用线性光:
# 在 Nuke 中:作为 EXR 读取,将色彩空间设置为 "linear"
# Alpha 已经是干净的 — 无需 Unpremult 节点
# 直接连接到 Merge (over) 节点与你的背景板
# Verify the output is straight alpha (not premultiplied):
import os

# Must be set before cv2 is imported, or OpenCV cannot decode EXR files.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import numpy as np

result = cv2.imread("Output/shot_01/foreground/frame_0001.exr",
                    cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
# result[..., 3]  = alpha channel (linear 0.0–1.0)
# result[..., :3] = straight color (not multiplied by alpha)

# Inspect the center pixel (pick a known semi-transparent region for a real check).
h, w = result.shape[:2]
sample_alpha = result[h//2, w//2, 3]
sample_color = result[h//2, w//2, :3]
print(f"Alpha: {sample_alpha:.3f}, Color: {sample_color}")
# Color values should stay full-strength even where alpha < 1.0 (straight alpha).
# 检查 CUDA 版本要求:驱动程序必须支持 CUDA 12.8+
nvidia-smi # 显示支持的最大 CUDA 版本
# 使用显式的 CUDA extra 重新安装
uv sync --extra cuda
# 验证 PyTorch 能识别 GPU
uv run python -c "import torch; print(torch.cuda.is_available(), torch.version.cuda)"
# 必须在导入 cv2 之前设置环境变量
export OPENCV_IO_ENABLE_OPENEXR=1
uv run python your_script.py
# 或者在 Python 中 (必须在 import cv2 之前)
import os
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2
# 使用 CPU 回退
uv run python main.py run_inference --device cpu
# 或者减少批次大小 / 使用分块推理 (如果支持)
# 引擎动态缩放到 2048x2048 块 — 对于 4K,
# 确保至少有 6-8GB VRAM
# Apple Silicon:使用 MPS
uv run python main.py run_inference --device mps
# 验证确切的文件名和位置:
ls CorridorKeyModule/checkpoints/
# 必须命名为:CorridorKey.pth
# 不能是:CorridorKey_v1.0.pth
mv CorridorKeyModule/checkpoints/CorridorKey_v1.0.pth \
CorridorKeyModule/checkpoints/CorridorKey.pth
# 测试 NVIDIA 容器工具包
docker run --rm --gpus all nvidia/cuda:12.6.3-runtime-ubuntu22.04 nvidia-smi
# 如果失败,安装/重新配置 nvidia-container-toolkit:
# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html
# 然后重启 Docker 守护进程
sudo systemctl restart docker
每周安装次数
295
仓库
GitHub 星标
10
首次出现
7 天前
安全审计
安装于
gemini-cli — 294
github-copilot — 294
codex — 294
amp — 294
cline — 294
kimi-cli — 294
Skill by ara.so — Daily 2026 Skills collection.
CorridorKey is a neural network that solves the color unmixing problem in green screen footage. For every pixel — including semi-transparent ones from motion blur, hair, or out-of-focus edges — it predicts the true straight (un-premultiplied) foreground color and a clean linear alpha channel. It reads/writes 16-bit and 32-bit EXR files for VFX pipeline integration.
Two inputs required per frame:
The model fills in fine detail from the hint; it's trained on blurry/eroded masks.
# Double-click or run from terminal:
Install_CorridorKey_Windows.bat
# Optional heavy modules:
Install_GVM_Windows.bat
Install_VideoMaMa_Windows.bat
# Install uv
curl -LsSf https://astral.sh/uv/install.sh | sh
# Install dependencies — pick one:
uv sync # CPU / Apple MPS (universal)
uv sync --extra cuda # NVIDIA GPU (Linux/Windows)
uv sync --extra mlx # Apple Silicon MLX
# Download required model (~300MB)
mkdir -p CorridorKeyModule/checkpoints
# Place downloaded CorridorKey_v1.0.pth as:
# CorridorKeyModule/checkpoints/CorridorKey.pth
Model download: https://huggingface.co/nikopueringer/CorridorKey_v1.0/resolve/main/CorridorKey_v1.0.pth
# GVM (automatic, ~80GB VRAM, good for people)
uv run hf download geyongtao/gvm --local-dir gvm_core/weights
# VideoMaMa (requires mask hint, <24GB VRAM with community tweaks)
uv run hf download SammyLim/VideoMaMa \
--local-dir VideoMaMaInferenceModule/checkpoints/VideoMaMa
uv run hf download stabilityai/stable-video-diffusion-img2vid-xt \
--local-dir VideoMaMaInferenceModule/checkpoints/stable-video-diffusion-img2vid-xt \
--include "feature_extractor/*" "image_encoder/*" "vae/*" "model_index.json"
# Run inference on prepared clips
uv run python main.py run_inference --device cuda
uv run python main.py run_inference --device cpu
uv run python main.py run_inference --device mps # Apple Silicon
# List available clips/shots
uv run python main.py list
# Interactive setup wizard
uv run python main.py wizard
uv run python main.py wizard --win_path /path/to/ClipsForInference
# Build
docker build -t corridorkey:latest .
# Run inference
docker run --rm -it --gpus all \
-e OPENCV_IO_ENABLE_OPENEXR=1 \
-v "$(pwd)/ClipsForInference:/app/ClipsForInference" \
-v "$(pwd)/Output:/app/Output" \
-v "$(pwd)/CorridorKeyModule/checkpoints:/app/CorridorKeyModule/checkpoints" \
corridorkey:latest run_inference --device cuda
# Docker Compose
docker compose build
docker compose --profile gpu run --rm corridorkey run_inference --device cuda
docker compose --profile gpu run --rm corridorkey list
# Pin to specific GPU on multi-GPU systems
NVIDIA_VISIBLE_DEVICES=0 docker compose --profile gpu run --rm corridorkey run_inference --device cuda
CorridorKey/
├── ClipsForInference/ # Input shots go here
│ └── my_shot/
│ ├── frames/ # Green screen RGB frames (PNG/EXR)
│ ├── alpha_hints/ # Coarse alpha masks (grayscale)
│ └── VideoMamaMaskHint/ # Optional: hand-drawn hints for VideoMaMa
├── Output/ # Processed results
│ └── my_shot/
│ ├── foreground/ # Straight RGBA EXR frames
│ └── alpha/ # Linear alpha channel frames
├── CorridorKeyModule/
│ └── checkpoints/
│ └── CorridorKey.pth # Required model weights
├── gvm_core/weights/ # Optional GVM weights
└── VideoMaMaInferenceModule/
└── checkpoints/ # Optional VideoMaMa weights
import torch
from pathlib import Path
from CorridorKeyModule.model import CorridorKeyModel  # adjust to actual module path
from CorridorKeyModule.inference import run_inference

# Load the model onto the best available device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CorridorKeyModel()
# map_location lets a CUDA-saved checkpoint load on CPU-only machines;
# weights_only=True restricts unpickling to tensors/containers, avoiding
# arbitrary-code execution from an untrusted checkpoint file.
state_dict = torch.load(
    "CorridorKeyModule/checkpoints/CorridorKey.pth",
    map_location=device,
    weights_only=True,
)
model.load_state_dict(state_dict)
model.to(device)
model.eval()

# Run inference on a shot folder.
run_inference(
    shot_dir=Path("ClipsForInference/my_shot"),
    output_dir=Path("Output/my_shot"),
    device=device,
)
import os

# OPENCV_IO_ENABLE_OPENEXR must be set BEFORE cv2 is imported,
# otherwise OpenCV's EXR codec stays disabled (see troubleshooting below).
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import numpy as np

# Read a 32-bit linear EXR frame.
frame = cv2.imread("frame_0001.exr", cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
# frame is float32, linear light, BGR channel order.

# Convert BGR -> RGB for processing.
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

# Write an output EXR (straight RGBA).
# Assumes `foreground` is a float32 HxWx4 array (RGBA, linear, straight alpha)
# produced earlier in the pipeline.
foreground_bgra = cv2.cvtColor(foreground, cv2.COLOR_RGBA2BGRA)
cv2.imwrite("output_0001.exr", foreground_bgra.astype(np.float32))
import cv2
import numpy as np

def generate_chroma_key_hint(image_bgr: np.ndarray, erode_px: int = 5) -> np.ndarray:
    """
    Build a quick, coarse green-screen hint mask for CorridorKey input.

    Returns a grayscale mask where 0 = background and 255 = foreground.
    """
    # Key the green in HSV space; tune the bounds for your specific screen.
    hsv_img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    bounds_lo = np.array([35, 50, 50])
    bounds_hi = np.array([85, 255, 255])
    screen_mask = cv2.inRange(hsv_img, bounds_lo, bounds_hi)
    subject_mask = cv2.bitwise_not(screen_mask)
    # Pull the mask inward, away from edges (CorridorKey reconstructs edge detail).
    erode_kernel = np.ones((erode_px, erode_px), np.uint8)
    shrunk = cv2.erode(subject_mask, erode_kernel, iterations=2)
    # Optional: slight Gaussian blur to soften the hint.
    return cv2.GaussianBlur(shrunk, (15, 15), 5)

# Usage
frame = cv2.imread("greenscreen_frame.png")
hint = generate_chroma_key_hint(frame, erode_px=8)
cv2.imwrite("alpha_hint.png", hint)
import os

# Must be set before cv2 is imported, or OpenCV cannot read/write EXR files.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import shutil
from pathlib import Path

import cv2
import numpy as np

def prepare_shot_folder(
    raw_frames_dir: Path,
    output_shot_dir: Path,
    hint_generator_fn=None,
):
    """
    Prepare a CorridorKey shot folder from raw green-screen frames.

    Copies each PNG/EXR frame into <shot>/frames/ and writes a matching
    grayscale alpha hint into <shot>/alpha_hints/.

    Args:
        raw_frames_dir: Directory containing raw *.png / *.exr frames.
        output_shot_dir: Shot folder to create (e.g. ClipsForInference/shot_01).
        hint_generator_fn: Optional callable(frame) -> mask; defaults to
            generate_chroma_key_hint.
    """
    frames_out = output_shot_dir / "frames"
    hints_out = output_shot_dir / "alpha_hints"
    frames_out.mkdir(parents=True, exist_ok=True)
    hints_out.mkdir(parents=True, exist_ok=True)

    frame_paths = sorted(raw_frames_dir.glob("*.png")) + \
                  sorted(raw_frames_dir.glob("*.exr"))
    prepared = 0
    for frame_path in frame_paths:
        frame = cv2.imread(str(frame_path), cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
        if frame is None:
            # Unreadable/corrupt file — skip it instead of crashing mid-batch.
            print(f"Skipping unreadable frame: {frame_path}")
            continue
        # Byte-for-byte copy preserves the original encoding; re-encoding via
        # cv2.imwrite could alter EXR compression/precision.
        shutil.copy2(frame_path, frames_out / frame_path.name)
        # Generate the alpha hint for this frame.
        hint = hint_generator_fn(frame) if hint_generator_fn else generate_chroma_key_hint(frame)
        cv2.imwrite(str(hints_out / (frame_path.stem + ".png")), hint)
        prepared += 1
    print(f"Prepared {prepared} frames in {output_shot_dir}")

prepare_shot_folder(
    raw_frames_dir=Path("raw_footage/shot_01"),
    output_shot_dir=Path("ClipsForInference/shot_01"),
)
# GVM (automatic — no extra input needed; heavy module, ~80GB VRAM)
from clip_manager import generate_alpha_hints_gvm
generate_alpha_hints_gvm(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# VideoMaMa (place a rough mask in VideoMamaMaskHint/ first)
from clip_manager import generate_alpha_hints_videomama
generate_alpha_hints_videomama(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# BiRefNet (lightweight option — no large VRAM needed)
from clip_manager import generate_alpha_hints_birefnet
generate_alpha_hints_birefnet(
shot_dir="ClipsForInference/my_shot",
device="cuda"
)
# GOOD: eroded, slightly blurred hint — pulled away from the edges.
# The model fills in edge detail from the hint.
kernel = np.ones((10, 10), np.uint8)
good_hint = cv2.erode(raw_mask, kernel, iterations=3)
good_hint = cv2.GaussianBlur(good_hint, (21, 21), 7)
# BAD: expanded / dilated hint — the model is worse at subtracting.
# Don't push the mask OUTWARD past the true subject boundary.
bad_hint = cv2.dilate(raw_mask, kernel, iterations=3) # avoid this
# ACCEPTABLE: a binary rough chroma key as-is.
# Even a hard binary mask works — as long as it is not expanded.
acceptable_hint = raw_chroma_key_mask # no dilation
CorridorKey outputs straight (un-premultiplied) RGBA EXRs in linear light:
# In Nuke: read as EXR, set colorspace to "linear"
# The alpha is already clean — no need for Unpremult node
# Connect straight to a Merge (over) node with your background plate
# Verify the output is straight alpha (not premultiplied):
import os

# Must be set before cv2 is imported, or OpenCV cannot decode EXR files.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import numpy as np

result = cv2.imread("Output/shot_01/foreground/frame_0001.exr",
                    cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR)
# result[..., 3]  = alpha channel (linear 0.0–1.0)
# result[..., :3] = straight color (not multiplied by alpha)

# Inspect the center pixel (pick a known semi-transparent region for a real check).
h, w = result.shape[:2]
sample_alpha = result[h//2, w//2, 3]
sample_color = result[h//2, w//2, :3]
print(f"Alpha: {sample_alpha:.3f}, Color: {sample_color}")
# Color values should stay full-strength even where alpha < 1.0 (straight alpha).
# Check CUDA version requirement: driver must support CUDA 12.8+
nvidia-smi # shows max supported CUDA version
# Reinstall with explicit CUDA extra
uv sync --extra cuda
# Verify PyTorch sees GPU
uv run python -c "import torch; print(torch.cuda.is_available(), torch.version.cuda)"
# Must set environment variable before importing cv2
export OPENCV_IO_ENABLE_OPENEXR=1
uv run python your_script.py
# Or in Python (must be BEFORE import cv2)
import os
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2
# Use CPU fallback
uv run python main.py run_inference --device cpu
# Or reduce batch size / use tiled inference if supported
# The engine dynamically scales to 2048x2048 tiles — for 4K,
# ensure at least 6-8GB VRAM
# Apple Silicon: use MPS
uv run python main.py run_inference --device mps
# Verify exact filename and location:
ls CorridorKeyModule/checkpoints/
# Must be named exactly: CorridorKey.pth
# Not: CorridorKey_v1.0.pth
mv CorridorKeyModule/checkpoints/CorridorKey_v1.0.pth \
CorridorKeyModule/checkpoints/CorridorKey.pth
# Test NVIDIA container toolkit
docker run --rm --gpus all nvidia/cuda:12.6.3-runtime-ubuntu22.04 nvidia-smi
# If it fails, install/reconfigure nvidia-container-toolkit:
# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html
# Then restart Docker daemon
sudo systemctl restart docker
Weekly Installs
295
Repository
GitHub Stars
10
First Seen
7 days ago
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Warn
Installed on
gemini-cli — 294
github-copilot — 294
codex — 294
amp — 294
cline — 294
kimi-cli — 294
AI Elements:基于shadcn/ui的AI原生应用组件库,快速构建对话界面
56,200 周安装