mlflow by davila7/claude-code-templates
npx skills add https://github.com/davila7/claude-code-templates --skill mlflow
当你需要时,请使用 MLflow:
用户:20,000+ 组织 | GitHub 星标:23k+ | 许可证:Apache 2.0
# 安装 MLflow
pip install mlflow
# 安装额外组件
pip install mlflow[extras] # 包含 SQLAlchemy、boto3 等
# 启动 MLflow UI
mlflow ui
# 在 http://localhost:5000 访问
import mlflow
# 启动一个运行
with mlflow.start_run():
# 记录参数
mlflow.log_param("learning_rate", 0.001)
mlflow.log_param("batch_size", 32)
# 你的训练代码
model = train_model()
# 记录指标
mlflow.log_metric("train_loss", 0.15)
mlflow.log_metric("val_accuracy", 0.92)
# 记录模型
mlflow.sklearn.log_model(model, "model")
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
import mlflow
from sklearn.ensemble import RandomForestClassifier
# 启用自动日志记录
mlflow.autolog()
# 训练(自动记录)
model = RandomForestClassifier(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
# 指标、参数和模型自动记录!
实验:相关运行的逻辑容器 运行:机器学习代码的单次执行(参数、指标、工件)
import mlflow
# 创建/设置实验
mlflow.set_experiment("my-experiment")
# 启动一个运行
with mlflow.start_run(run_name="baseline-model"):
# 记录参数
mlflow.log_param("model", "ResNet50")
mlflow.log_param("epochs", 10)
# 训练
model = train()
# 记录指标
mlflow.log_metric("accuracy", 0.95)
# 记录模型
mlflow.pytorch.log_model(model, "model")
# 运行 ID 自动生成
print(f"Run ID: {mlflow.active_run().info.run_id}")
with mlflow.start_run():
# 单个参数
mlflow.log_param("learning_rate", 0.001)
# 多个参数
mlflow.log_params({
"batch_size": 32,
"epochs": 50,
"optimizer": "Adam",
"dropout": 0.2
})
# 嵌套参数(作为字典)
config = {
"model": {
"architecture": "ResNet50",
"pretrained": True
},
"training": {
"lr": 0.001,
"weight_decay": 1e-4
}
}
# 作为 JSON 字符串或单独参数记录
for key, value in config.items():
mlflow.log_param(key, str(value))
with mlflow.start_run():
# 训练循环
for epoch in range(NUM_EPOCHS):
train_loss = train_epoch()
val_loss = validate()
# 在每个步骤记录指标
mlflow.log_metric("train_loss", train_loss, step=epoch)
mlflow.log_metric("val_loss", val_loss, step=epoch)
# 记录多个指标
mlflow.log_metrics({
"train_accuracy": train_acc,
"val_accuracy": val_acc
}, step=epoch)
# 记录最终指标(无步骤)
mlflow.log_metric("final_accuracy", final_acc)
with mlflow.start_run():
# 记录文件
model.save('model.pkl')
mlflow.log_artifact('model.pkl')
# 记录目录
os.makedirs('plots', exist_ok=True)
plt.savefig('plots/loss_curve.png')
mlflow.log_artifacts('plots')
# 记录文本
with open('config.txt', 'w') as f:
f.write(str(config))
mlflow.log_artifact('config.txt')
# 将字典记录为 JSON
mlflow.log_dict({'config': config}, 'config.json')
# PyTorch
import mlflow.pytorch
with mlflow.start_run():
model = train_pytorch_model()
mlflow.pytorch.log_model(model, "model")
# Scikit-learn
import mlflow.sklearn
with mlflow.start_run():
model = train_sklearn_model()
mlflow.sklearn.log_model(model, "model")
# Keras/TensorFlow
import mlflow.keras
with mlflow.start_run():
model = train_keras_model()
mlflow.keras.log_model(model, "model")
# HuggingFace Transformers
import mlflow.transformers
with mlflow.start_run():
mlflow.transformers.log_model(
transformers_model={
"model": model,
"tokenizer": tokenizer
},
artifact_path="model"
)
为流行框架自动记录指标、参数和模型。
import mlflow
# 为所有支持的框架启用
mlflow.autolog()
# 或为特定框架启用
mlflow.sklearn.autolog()
mlflow.pytorch.autolog()
mlflow.keras.autolog()
mlflow.xgboost.autolog()
import mlflow
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# 启用自动日志记录
mlflow.sklearn.autolog()
# 拆分数据
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# 训练(自动记录参数、指标、模型)
with mlflow.start_run():
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
model.fit(X_train, y_train)
# 指标如准确率、f1_score 自动记录
# 模型自动记录
# 训练时长自动记录
import mlflow
import pytorch_lightning as pl
# 启用自动日志记录
mlflow.pytorch.autolog()
# 训练
with mlflow.start_run():
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, datamodule=dm)
# 超参数已记录
# 训练指标已记录
# 最佳模型检查点已记录
通过版本控制和阶段转换管理模型生命周期。
import mlflow
# 记录并注册模型
with mlflow.start_run():
model = train_model()
# 记录模型
mlflow.sklearn.log_model(
model,
"model",
registered_model_name="my-classifier" # 立即注册
)
# 或稍后注册
run_id = "abc123"
model_uri = f"runs:/{run_id}/model"
mlflow.register_model(model_uri, "my-classifier")
在阶段之间转换模型:无 → 暂存 → 生产 → 已归档
from mlflow.tracking import MlflowClient
client = MlflowClient()
# 提升到暂存阶段
client.transition_model_version_stage(
name="my-classifier",
version=3,
stage="Staging"
)
# 提升到生产阶段
client.transition_model_version_stage(
name="my-classifier",
version=3,
stage="Production",
archive_existing_versions=True # 归档旧的生产版本
)
# 归档模型
client.transition_model_version_stage(
name="my-classifier",
version=2,
stage="Archived"
)
import mlflow.pyfunc
# 加载最新的生产模型
model = mlflow.pyfunc.load_model("models:/my-classifier/Production")
# 加载特定版本
model = mlflow.pyfunc.load_model("models:/my-classifier/3")
# 从暂存阶段加载
model = mlflow.pyfunc.load_model("models:/my-classifier/Staging")
# 使用模型
predictions = model.predict(X_test)
client = MlflowClient()
# 列出所有版本
versions = client.search_model_versions("name='my-classifier'")
for v in versions:
print(f"Version {v.version}: {v.current_stage}")
# 按阶段获取最新版本
latest_prod = client.get_latest_versions("my-classifier", stages=["Production"])
latest_staging = client.get_latest_versions("my-classifier", stages=["Staging"])
# 获取模型版本详情
version_info = client.get_model_version(name="my-classifier", version="3")
print(f"Run ID: {version_info.run_id}")
print(f"Stage: {version_info.current_stage}")
print(f"Tags: {version_info.tags}")
client = MlflowClient()
# 添加描述
client.update_model_version(
name="my-classifier",
version="3",
description="ResNet50 classifier trained on 1M images with 95% accuracy"
)
# 添加标签
client.set_model_version_tag(
name="my-classifier",
version="3",
key="validation_status",
value="approved"
)
client.set_model_version_tag(
name="my-classifier",
version="3",
key="deployed_date",
value="2025-01-15"
)
以编程方式查找运行。
from mlflow.tracking import MlflowClient
client = MlflowClient()
# 在实验中搜索所有运行
experiment_id = client.get_experiment_by_name("my-experiment").experiment_id
runs = client.search_runs(
experiment_ids=[experiment_id],
filter_string="metrics.accuracy > 0.9",
order_by=["metrics.accuracy DESC"],
max_results=10
)
for run in runs:
print(f"Run ID: {run.info.run_id}")
print(f"Accuracy: {run.data.metrics['accuracy']}")
print(f"Params: {run.data.params}")
# 使用复杂过滤器搜索
runs = client.search_runs(
experiment_ids=[experiment_id],
filter_string="""
metrics.accuracy > 0.9 AND
params.model = 'ResNet50' AND
tags.dataset = 'ImageNet'
""",
order_by=["metrics.f1_score DESC"]
)
import mlflow
import torch
import torch.nn as nn
# 启用自动日志记录
mlflow.pytorch.autolog()
with mlflow.start_run():
# 记录配置
config = {
"lr": 0.001,
"epochs": 10,
"batch_size": 32
}
mlflow.log_params(config)
# 训练
model = create_model()
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
for epoch in range(config["epochs"]):
train_loss = train_epoch(model, optimizer, train_loader)
val_loss, val_acc = validate(model, val_loader)
# 记录指标
mlflow.log_metrics({
"train_loss": train_loss,
"val_loss": val_loss,
"val_accuracy": val_acc
}, step=epoch)
# 记录模型
mlflow.pytorch.log_model(model, "model")
import mlflow
from transformers import Trainer, TrainingArguments
# 启用自动日志记录
mlflow.transformers.autolog()
training_args = TrainingArguments(
output_dir="./results",
num_train_epochs=3,
per_device_train_batch_size=16,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True
)
# 启动 MLflow 运行
with mlflow.start_run():
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset
)
# 训练(自动记录)
trainer.train()
# 将最终模型记录到注册表
mlflow.transformers.log_model(
transformers_model={
"model": trainer.model,
"tokenizer": tokenizer
},
artifact_path="model",
registered_model_name="hf-classifier"
)
import mlflow
import xgboost as xgb
# 启用自动日志记录
mlflow.xgboost.autolog()
with mlflow.start_run():
dtrain = xgb.DMatrix(X_train, label=y_train)
dval = xgb.DMatrix(X_val, label=y_val)
params = {
'max_depth': 6,
'learning_rate': 0.1,
'objective': 'binary:logistic',
'eval_metric': ['logloss', 'auc']
}
# 训练(自动记录)
model = xgb.train(
params,
dtrain,
num_boost_round=100,
evals=[(dtrain, 'train'), (dval, 'val')],
early_stopping_rounds=10
)
# 模型和指标自动记录
# ✅ 良好:为不同任务设置独立的实验
mlflow.set_experiment("sentiment-analysis")
mlflow.set_experiment("image-classification")
mlflow.set_experiment("recommendation-system")
# ❌ 不佳:所有内容放在一个实验中
mlflow.set_experiment("all-models")
# ✅ 良好:描述性名称
with mlflow.start_run(run_name="resnet50-imagenet-lr0.001-bs32"):
train()
# ❌ 不佳:无名称(自动生成的 UUID)
with mlflow.start_run():
train()
with mlflow.start_run():
# 记录超参数
mlflow.log_params({
"learning_rate": 0.001,
"batch_size": 32,
"epochs": 50
})
# 记录系统信息
mlflow.set_tags({
"dataset": "ImageNet",
"framework": "PyTorch 2.0",
"gpu": "A100",
"git_commit": get_git_commit()
})
# 记录数据信息
mlflow.log_param("train_samples", len(train_dataset))
mlflow.log_param("val_samples", len(val_dataset))
# 链接运行以理解谱系
with mlflow.start_run(run_name="preprocessing"):
data = preprocess()
mlflow.log_artifact("data.csv")
preprocessing_run_id = mlflow.active_run().info.run_id
with mlflow.start_run(run_name="training"):
# 引用父运行
mlflow.set_tag("preprocessing_run_id", preprocessing_run_id)
model = train(data)
# ✅ 良好:使用注册表进行生产部署
model_uri = "models:/my-classifier/Production"
model = mlflow.pyfunc.load_model(model_uri)
# ❌ 不佳:硬编码运行 ID
model_uri = "runs:/abc123/model"
model = mlflow.pyfunc.load_model(model_uri)
# 服务已注册的模型
mlflow models serve -m "models:/my-classifier/Production" -p 5001
# 从运行服务
mlflow models serve -m "runs:/<RUN_ID>/model" -p 5001
# 测试端点
curl http://127.0.0.1:5001/invocations -H 'Content-Type: application/json' -d '{
"inputs": [[1.0, 2.0, 3.0, 4.0]]
}'
# 部署到 AWS SageMaker
mlflow sagemaker deploy -m "models:/my-classifier/Production" --region-name us-west-2
# 部署到 Azure ML
mlflow azureml deploy -m "models:/my-classifier/Production"
# 启动带有后端存储的跟踪服务器
mlflow server \
--backend-store-uri postgresql://user:password@localhost/mlflow \
--default-artifact-root s3://my-bucket/mlflow \
--host 0.0.0.0 \
--port 5000
import mlflow
# 设置跟踪 URI
mlflow.set_tracking_uri("http://localhost:5000")
# 或使用环境变量
# export MLFLOW_TRACKING_URI=http://localhost:5000
references/tracking.md - 全面的跟踪指南
references/model-registry.md - 模型生命周期管理
references/deployment.md - 生产部署模式
每周安装
237
仓库
GitHub 星标
23.5K
首次出现
Jan 21, 2026
安全审计
安装于
opencode: 199
gemini-cli: 187
claude-code: 185
codex: 180
cursor: 168
github-copilot: 167
Use MLflow when you need to:
Users : 20,000+ organizations | GitHub Stars : 23k+ | License : Apache 2.0
# Install MLflow
pip install mlflow
# Install with extras
pip install mlflow[extras] # Includes SQLAlchemy, boto3, etc.
# Start MLflow UI
mlflow ui
# Access at http://localhost:5000
import mlflow
# Start a run
with mlflow.start_run():
# Log parameters
mlflow.log_param("learning_rate", 0.001)
mlflow.log_param("batch_size", 32)
# Your training code
model = train_model()
# Log metrics
mlflow.log_metric("train_loss", 0.15)
mlflow.log_metric("val_accuracy", 0.92)
# Log model
mlflow.sklearn.log_model(model, "model")
import mlflow
from sklearn.ensemble import RandomForestClassifier
# Enable autologging
mlflow.autolog()
# Train (automatically logged)
model = RandomForestClassifier(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
# Metrics, parameters, and model logged automatically!
Experiment : Logical container for related runs Run : Single execution of ML code (parameters, metrics, artifacts)
import mlflow
# Create/set experiment
mlflow.set_experiment("my-experiment")
# Start a run
with mlflow.start_run(run_name="baseline-model"):
# Log params
mlflow.log_param("model", "ResNet50")
mlflow.log_param("epochs", 10)
# Train
model = train()
# Log metrics
mlflow.log_metric("accuracy", 0.95)
# Log model
mlflow.pytorch.log_model(model, "model")
# Run ID is automatically generated
print(f"Run ID: {mlflow.active_run().info.run_id}")
with mlflow.start_run():
# Single parameter
mlflow.log_param("learning_rate", 0.001)
# Multiple parameters
mlflow.log_params({
"batch_size": 32,
"epochs": 50,
"optimizer": "Adam",
"dropout": 0.2
})
# Nested parameters (as dict)
config = {
"model": {
"architecture": "ResNet50",
"pretrained": True
},
"training": {
"lr": 0.001,
"weight_decay": 1e-4
}
}
# Log as JSON string or individual params
for key, value in config.items():
mlflow.log_param(key, str(value))
with mlflow.start_run():
# Training loop
for epoch in range(NUM_EPOCHS):
train_loss = train_epoch()
val_loss = validate()
# Log metrics at each step
mlflow.log_metric("train_loss", train_loss, step=epoch)
mlflow.log_metric("val_loss", val_loss, step=epoch)
# Log multiple metrics
mlflow.log_metrics({
"train_accuracy": train_acc,
"val_accuracy": val_acc
}, step=epoch)
# Log final metrics (no step)
mlflow.log_metric("final_accuracy", final_acc)
with mlflow.start_run():
# Log file
model.save('model.pkl')
mlflow.log_artifact('model.pkl')
# Log directory
os.makedirs('plots', exist_ok=True)
plt.savefig('plots/loss_curve.png')
mlflow.log_artifacts('plots')
# Log text
with open('config.txt', 'w') as f:
f.write(str(config))
mlflow.log_artifact('config.txt')
# Log dict as JSON
mlflow.log_dict({'config': config}, 'config.json')
# PyTorch
import mlflow.pytorch
with mlflow.start_run():
model = train_pytorch_model()
mlflow.pytorch.log_model(model, "model")
# Scikit-learn
import mlflow.sklearn
with mlflow.start_run():
model = train_sklearn_model()
mlflow.sklearn.log_model(model, "model")
# Keras/TensorFlow
import mlflow.keras
with mlflow.start_run():
model = train_keras_model()
mlflow.keras.log_model(model, "model")
# HuggingFace Transformers
import mlflow.transformers
with mlflow.start_run():
mlflow.transformers.log_model(
transformers_model={
"model": model,
"tokenizer": tokenizer
},
artifact_path="model"
)
Automatically log metrics, parameters, and models for popular frameworks.
import mlflow
# Enable for all supported frameworks
mlflow.autolog()
# Or enable for specific framework
mlflow.sklearn.autolog()
mlflow.pytorch.autolog()
mlflow.keras.autolog()
mlflow.xgboost.autolog()
import mlflow
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Enable autologging
mlflow.sklearn.autolog()
# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Train (automatically logs params, metrics, model)
with mlflow.start_run():
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
model.fit(X_train, y_train)
# Metrics like accuracy, f1_score logged automatically
# Model logged automatically
# Training duration logged
import mlflow
import pytorch_lightning as pl
# Enable autologging
mlflow.pytorch.autolog()
# Train
with mlflow.start_run():
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, datamodule=dm)
# Hyperparameters logged
# Training metrics logged
# Best model checkpoint logged
Manage model lifecycle with versioning and stage transitions.
import mlflow
# Log and register model
with mlflow.start_run():
model = train_model()
# Log model
mlflow.sklearn.log_model(
model,
"model",
registered_model_name="my-classifier" # Register immediately
)
# Or register later
run_id = "abc123"
model_uri = f"runs:/{run_id}/model"
mlflow.register_model(model_uri, "my-classifier")
Transition models between stages: None → Staging → Production → Archived
from mlflow.tracking import MlflowClient
client = MlflowClient()
# Promote to staging
client.transition_model_version_stage(
name="my-classifier",
version=3,
stage="Staging"
)
# Promote to production
client.transition_model_version_stage(
name="my-classifier",
version=3,
stage="Production",
archive_existing_versions=True # Archive old production versions
)
# Archive model
client.transition_model_version_stage(
name="my-classifier",
version=2,
stage="Archived"
)
import mlflow.pyfunc
# Load latest production model
model = mlflow.pyfunc.load_model("models:/my-classifier/Production")
# Load specific version
model = mlflow.pyfunc.load_model("models:/my-classifier/3")
# Load from staging
model = mlflow.pyfunc.load_model("models:/my-classifier/Staging")
# Use model
predictions = model.predict(X_test)
client = MlflowClient()
# List all versions
versions = client.search_model_versions("name='my-classifier'")
for v in versions:
print(f"Version {v.version}: {v.current_stage}")
# Get latest version by stage
latest_prod = client.get_latest_versions("my-classifier", stages=["Production"])
latest_staging = client.get_latest_versions("my-classifier", stages=["Staging"])
# Get model version details
version_info = client.get_model_version(name="my-classifier", version="3")
print(f"Run ID: {version_info.run_id}")
print(f"Stage: {version_info.current_stage}")
print(f"Tags: {version_info.tags}")
client = MlflowClient()
# Add description
client.update_model_version(
name="my-classifier",
version="3",
description="ResNet50 classifier trained on 1M images with 95% accuracy"
)
# Add tags
client.set_model_version_tag(
name="my-classifier",
version="3",
key="validation_status",
value="approved"
)
client.set_model_version_tag(
name="my-classifier",
version="3",
key="deployed_date",
value="2025-01-15"
)
Find runs programmatically.
from mlflow.tracking import MlflowClient
client = MlflowClient()
# Search all runs in experiment
experiment_id = client.get_experiment_by_name("my-experiment").experiment_id
runs = client.search_runs(
experiment_ids=[experiment_id],
filter_string="metrics.accuracy > 0.9",
order_by=["metrics.accuracy DESC"],
max_results=10
)
for run in runs:
print(f"Run ID: {run.info.run_id}")
print(f"Accuracy: {run.data.metrics['accuracy']}")
print(f"Params: {run.data.params}")
# Search with complex filters
runs = client.search_runs(
experiment_ids=[experiment_id],
filter_string="""
metrics.accuracy > 0.9 AND
params.model = 'ResNet50' AND
tags.dataset = 'ImageNet'
""",
order_by=["metrics.f1_score DESC"]
)
import mlflow
import torch
import torch.nn as nn
# Enable autologging
mlflow.pytorch.autolog()
with mlflow.start_run():
# Log config
config = {
"lr": 0.001,
"epochs": 10,
"batch_size": 32
}
mlflow.log_params(config)
# Train
model = create_model()
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
for epoch in range(config["epochs"]):
train_loss = train_epoch(model, optimizer, train_loader)
val_loss, val_acc = validate(model, val_loader)
# Log metrics
mlflow.log_metrics({
"train_loss": train_loss,
"val_loss": val_loss,
"val_accuracy": val_acc
}, step=epoch)
# Log model
mlflow.pytorch.log_model(model, "model")
import mlflow
from transformers import Trainer, TrainingArguments
# Enable autologging
mlflow.transformers.autolog()
training_args = TrainingArguments(
output_dir="./results",
num_train_epochs=3,
per_device_train_batch_size=16,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True
)
# Start MLflow run
with mlflow.start_run():
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset
)
# Train (automatically logged)
trainer.train()
# Log final model to registry
mlflow.transformers.log_model(
transformers_model={
"model": trainer.model,
"tokenizer": tokenizer
},
artifact_path="model",
registered_model_name="hf-classifier"
)
import mlflow
import xgboost as xgb
# Enable autologging
mlflow.xgboost.autolog()
with mlflow.start_run():
dtrain = xgb.DMatrix(X_train, label=y_train)
dval = xgb.DMatrix(X_val, label=y_val)
params = {
'max_depth': 6,
'learning_rate': 0.1,
'objective': 'binary:logistic',
'eval_metric': ['logloss', 'auc']
}
# Train (automatically logged)
model = xgb.train(
params,
dtrain,
num_boost_round=100,
evals=[(dtrain, 'train'), (dval, 'val')],
early_stopping_rounds=10
)
# Model and metrics logged automatically
# ✅ Good: Separate experiments for different tasks
mlflow.set_experiment("sentiment-analysis")
mlflow.set_experiment("image-classification")
mlflow.set_experiment("recommendation-system")
# ❌ Bad: Everything in one experiment
mlflow.set_experiment("all-models")
# ✅ Good: Descriptive names
with mlflow.start_run(run_name="resnet50-imagenet-lr0.001-bs32"):
train()
# ❌ Bad: No name (auto-generated UUID)
with mlflow.start_run():
train()
with mlflow.start_run():
# Log hyperparameters
mlflow.log_params({
"learning_rate": 0.001,
"batch_size": 32,
"epochs": 50
})
# Log system info
mlflow.set_tags({
"dataset": "ImageNet",
"framework": "PyTorch 2.0",
"gpu": "A100",
"git_commit": get_git_commit()
})
# Log data info
mlflow.log_param("train_samples", len(train_dataset))
mlflow.log_param("val_samples", len(val_dataset))
# Link runs to understand lineage
with mlflow.start_run(run_name="preprocessing"):
data = preprocess()
mlflow.log_artifact("data.csv")
preprocessing_run_id = mlflow.active_run().info.run_id
with mlflow.start_run(run_name="training"):
# Reference parent run
mlflow.set_tag("preprocessing_run_id", preprocessing_run_id)
model = train(data)
# ✅ Good: Use registry for production
model_uri = "models:/my-classifier/Production"
model = mlflow.pyfunc.load_model(model_uri)
# ❌ Bad: Hard-code run IDs
model_uri = "runs:/abc123/model"
model = mlflow.pyfunc.load_model(model_uri)
# Serve registered model
mlflow models serve -m "models:/my-classifier/Production" -p 5001
# Serve from run
mlflow models serve -m "runs:/<RUN_ID>/model" -p 5001
# Test endpoint
curl http://127.0.0.1:5001/invocations -H 'Content-Type: application/json' -d '{
"inputs": [[1.0, 2.0, 3.0, 4.0]]
}'
# Deploy to AWS SageMaker
mlflow sagemaker deploy -m "models:/my-classifier/Production" --region-name us-west-2
# Deploy to Azure ML
mlflow azureml deploy -m "models:/my-classifier/Production"
# Start tracking server with backend store
mlflow server \
--backend-store-uri postgresql://user:password@localhost/mlflow \
--default-artifact-root s3://my-bucket/mlflow \
--host 0.0.0.0 \
--port 5000
import mlflow
# Set tracking URI
mlflow.set_tracking_uri("http://localhost:5000")
# Or use environment variable
# export MLFLOW_TRACKING_URI=http://localhost:5000
references/tracking.md - Comprehensive tracking guide
references/model-registry.md - Model lifecycle management
references/deployment.md - Production deployment patterns
Weekly Installs
237
Repository
GitHub Stars
23.5K
First Seen
Jan 21, 2026
Security Audits
Gen Agent Trust Hub: Pass | Socket: Pass | Snyk: Fail
Installed on
opencode: 199
gemini-cli: 187
claude-code: 185
codex: 180
cursor: 168
github-copilot: 167
React 组合模式指南:Vercel 组件架构最佳实践,提升代码可维护性
106,200 周安装
Hugging Face Jobs:云端运行AI工作负载,无需本地GPU,支持数据处理、批量推理和模型训练
232 周安装
MCP服务器构建器:快速创建生产就绪的MCP服务器与ChatGPT小组件
314 周安装
Mapbox样式质量检查与优化工具 - 验证、可访问性、性能优化指南
384 周安装
PDF 生成器 - Deno 自动化 PDF 创建、填充、合并与处理工具
355 周安装
Azure镜像构建器教程:使用Packer创建Azure托管镜像和计算库镜像
346 周安装
Qdrant向量数据库Java集成指南:Spring Boot与LangChain4j语义搜索实战
339 周安装