peft-fine-tuning by davila7/claude-code-templates
npx skills add https://github.com/davila7/claude-code-templates --skill peft-fine-tuning
使用 LoRA、QLoRA 和 25 种以上的适配器方法,通过训练 <1% 的参数来微调大语言模型。
在以下情况使用 PEFT/LoRA:
在以下情况使用 QLoRA(PEFT + 量化):
在以下情况使用完全微调:
# Basic installation
pip install peft
# With quantization support (recommended)
pip install peft bitsandbytes
# Full stack
pip install peft transformers accelerate bitsandbytes datasets
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from peft import get_peft_model, LoraConfig, TaskType
from datasets import load_dataset

# Load the base model.
model_name = "meta-llama/Llama-3.1-8B"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # Llama models ship with no pad token

# LoRA configuration.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                 # rank (8-64; higher = more capacity)
    lora_alpha=32,        # scaling factor (typically 2*r)
    lora_dropout=0.05,    # dropout for regularization
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # attention projections
    bias="none",          # do not train biases
)

# Apply LoRA.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# Output: trainable params: 13,631,488 || all params: 8,043,307,008 || trainable%: 0.17%

# Prepare the dataset.
dataset = load_dataset("databricks/databricks-dolly-15k", split="train")

def tokenize(example):
    """Format one Dolly record as an instruction/response prompt and tokenize it."""
    text = f"### Instruction:\n{example['instruction']}\n\n### Response:\n{example['response']}"
    return tokenizer(text, truncation=True, max_length=512, padding="max_length")

tokenized = dataset.map(tokenize, remove_columns=dataset.column_names)

# Training.
training_args = TrainingArguments(
    output_dir="./lora-llama",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=10,
    save_strategy="epoch",
)

# BUG FIX: the original lambda collator called torch.stack on Python lists
# (the tokenizer returns lists of ints, not tensors) and `torch` was never
# imported; it also used raw input_ids as labels, so the loss was computed on
# pad tokens too. DataCollatorForLanguageModeling(mlm=False) builds the
# tensors and sets labels = input_ids with pad positions masked to -100.
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized,
    data_collator=data_collator,
)
trainer.train()

# Save the adapter only (a few MB vs ~16 GB for the full weights).
model.save_pretrained("./lora-llama-adapter")
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training
# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4", # NormalFloat4 (best suited to LLM weight distributions)
bnb_4bit_compute_dtype="bfloat16", # compute in bf16
bnb_4bit_use_double_quant=True # nested (double) quantization
)
# Load the quantized model
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.1-70B",
quantization_config=bnb_config,
device_map="auto"
)
# Prepare for k-bit training (enables gradient checkpointing)
model = prepare_model_for_kbit_training(model)
# LoRA config for QLoRA
lora_config = LoraConfig(
r=64, # higher rank for a 70B model
lora_alpha=128,
lora_dropout=0.1,
target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)
# NOTE(review): the page claims this fits on a single 24 GB GPU, but 4-bit
# weights for a 70B model are ~35 GB on their own — expect ~48 GB+ or
# multi-GPU via device_map="auto". Verify before relying on this claim.
| 秩 | 可训练参数 | 内存 | 质量 | 使用场景 |
|---|---|---|---|---|
| 4 | ~3M | 最小 | 较低 | 简单任务,原型设计 |
| 8 | ~7M | 低 | 良好 | 推荐起点 |
| 16 | ~14M | 中等 | 更好 | 通用微调 |
| 32 | ~27M | 较高 | 高 | 复杂任务 |
| 64 | ~54M | 高 | 最高 | 领域适应,70B 模型 |
# Rule of thumb: alpha = 2 * rank
LoraConfig(r=16, lora_alpha=32) # standard
LoraConfig(r=16, lora_alpha=16) # conservative (lower effective learning rate)
LoraConfig(r=16, lora_alpha=64) # aggressive (higher effective learning rate)
# Per-architecture target modules (alternatives — pick the one matching your model):
# Llama / Mistral / Qwen
target_modules = ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
# GPT-2 / GPT-Neo
target_modules = ["c_attn", "c_proj", "c_fc"]
# Falcon
target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"]
# BLOOM
target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"]
# Auto-detect all linear layers
target_modules = "all-linear" # PEFT 0.6.0+
from peft import PeftModel, AutoPeftModelForCausalLM
from transformers import AutoModelForCausalLM
# Option 1: load the base model, then wrap it with the adapter via PeftModel
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B")
model = PeftModel.from_pretrained(base_model, "./lora-llama-adapter")
# Option 2: load adapter + base model in one call (recommended)
model = AutoPeftModelForCausalLM.from_pretrained(
"./lora-llama-adapter",
device_map="auto"
)
# Merge adapter weights into the base model for deployment (no adapter overhead)
merged_model = model.merge_and_unload()
# Save the merged model
# NOTE: `tokenizer` is assumed to have been created earlier in the page's training example.
merged_model.save_pretrained("./llama-merged")
tokenizer.save_pretrained("./llama-merged")
# Push to the Hugging Face Hub
merged_model.push_to_hub("username/llama-finetuned")
from peft import PeftModel
# Load the base model together with the first adapter
model = AutoPeftModelForCausalLM.from_pretrained("./adapter-task1")
# Load additional adapters under distinct names
model.load_adapter("./adapter-task2", adapter_name="task2")
model.load_adapter("./adapter-task3", adapter_name="task3")
# Switch between adapters at runtime
# NOTE: `inputs` (tokenized prompt tensors) is assumed defined elsewhere.
model.set_adapter("task1") # use the task1 adapter
output1 = model.generate(**inputs)
model.set_adapter("task2") # switch to task2
output2 = model.generate(**inputs)
# Temporarily disable all adapters (falls back to the base model)
with model.disable_adapter():
base_output = model.generate(**inputs)
| 方法 | 可训练 % | 内存 | 速度 | 最适合 |
|---|---|---|---|---|
| LoRA | 0.1-1% | 低 | 快 | 通用微调 |
| QLoRA | 0.1-1% | 非常低 | 中等 | 内存受限 |
| AdaLoRA | 0.1-1% | 低 | 中等 | 自动秩选择 |
| IA3 | 0.01% | 最小 | 最快 | 少样本适应 |
| Prefix Tuning | 0.1% | 低 | 中等 | 生成控制 |
| Prompt Tuning | 0.001% | 最小 | 快 | 简单任务适应 |
| P-Tuning v2 | 0.1% | 低 | 中等 | NLU 任务 |
from peft import IA3Config
ia3_config = IA3Config(
target_modules=["q_proj", "v_proj", "k_proj", "down_proj"],
feedforward_modules=["down_proj"]
)
model = get_peft_model(model, ia3_config)
# Trains only 0.01% of parameters!
from peft import PrefixTuningConfig
prefix_config = PrefixTuningConfig(
task_type="CAUSAL_LM",
num_virtual_tokens=20, # number of prepended virtual tokens
prefix_projection=True # use an MLP projection for the prefix
)
model = get_peft_model(model, prefix_config)
from trl import SFTTrainer, SFTConfig
from peft import LoraConfig
lora_config = LoraConfig(r=16, lora_alpha=32, target_modules="all-linear")
trainer = SFTTrainer(
model=model,
args=SFTConfig(output_dir="./output", max_seq_length=512),
train_dataset=dataset,
peft_config=lora_config, # pass the LoRA config directly; SFTTrainer applies it for you
)
trainer.train()
# axolotl config.yaml
adapter: lora
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
- k_proj
- o_proj
lora_target_linear: true # target all linear layers (makes the explicit list above redundant — TODO confirm axolotl precedence)
from vllm import LLM
from vllm.lora.request import LoRARequest
# Load the base model with LoRA serving support enabled
llm = LLM(model="meta-llama/Llama-3.1-8B", enable_lora=True)
# Serve with the adapter; LoRARequest args: adapter name, unique int id, adapter path
outputs = llm.generate(
prompts,
lora_request=LoRARequest("adapter1", 1, "./lora-adapter")
)
| 方法 | GPU 内存 | 可训练参数 |
|---|---|---|
| 完全微调 | 60+ GB | 8B (100%) |
| LoRA r=16 | 18 GB | 14M (0.17%) |
| QLoRA r=16 | 6 GB | 14M (0.17%) |
| IA3 | 16 GB | 800K (0.01%) |
| 方法 | 令牌/秒 | 相对于完全微调 |
|---|---|---|
| 完全微调 | 2,500 | 1x |
| LoRA | 3,200 | 1.3x |
| QLoRA | 2,100 | 0.84x |
| 模型 | 完全微调 | LoRA | QLoRA |
|---|---|---|---|
| Llama 2-7B | 45.3 | 44.8 | 44.1 |
| Llama 2-13B | 54.8 | 54.2 | 53.5 |
# Solution 1: enable gradient checkpointing
model.gradient_checkpointing_enable()
# Solution 2: reduce batch size + increase accumulation steps
TrainingArguments(
per_device_train_batch_size=1,
gradient_accumulation_steps=16
)
# Solution 3: use QLoRA (4-bit base weights)
from transformers import BitsAndBytesConfig
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
# Verify the adapter is active
print(model.active_adapters) # should print the adapter name
# Check trainable parameter count
model.print_trainable_parameters()
# Make sure the model is in training mode
model.train()
# Increase the rank
LoraConfig(r=32, lora_alpha=64)
# Target more modules
target_modules = "all-linear"
# Use more training data and epochs
TrainingArguments(num_train_epochs=5)
# Lower the learning rate
TrainingArguments(learning_rate=1e-4)
每周安装量
204
仓库
GitHub Stars
23.4K
首次出现
2026年1月21日
安全审计
安装在
opencode169
claude-code167
gemini-cli156
cursor152
codex148
github-copilot138
Fine-tune LLMs by training <1% of parameters using LoRA, QLoRA, and 25+ adapter methods.
Use PEFT/LoRA when:
Use QLoRA (PEFT + quantization) when:
Use full fine-tuning instead when:
# Basic installation
pip install peft
# With quantization support (recommended)
pip install peft bitsandbytes
# Full stack
pip install peft transformers accelerate bitsandbytes datasets
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from peft import get_peft_model, LoraConfig, TaskType
from datasets import load_dataset

# Load base model
model_name = "meta-llama/Llama-3.1-8B"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # Llama models ship with no pad token

# LoRA configuration
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                 # Rank (8-64, higher = more capacity)
    lora_alpha=32,        # Scaling factor (typically 2*r)
    lora_dropout=0.05,    # Dropout for regularization
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # Attention layers
    bias="none",          # Don't train biases
)

# Apply LoRA
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# Output: trainable params: 13,631,488 || all params: 8,043,307,008 || trainable%: 0.17%

# Prepare dataset
dataset = load_dataset("databricks/databricks-dolly-15k", split="train")

def tokenize(example):
    """Format one Dolly record as an instruction/response prompt and tokenize it."""
    text = f"### Instruction:\n{example['instruction']}\n\n### Response:\n{example['response']}"
    return tokenizer(text, truncation=True, max_length=512, padding="max_length")

tokenized = dataset.map(tokenize, remove_columns=dataset.column_names)

# Training
training_args = TrainingArguments(
    output_dir="./lora-llama",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=10,
    save_strategy="epoch",
)

# BUG FIX: the original lambda collator called torch.stack on Python lists
# (the tokenizer returns lists of ints, not tensors) and `torch` was never
# imported; it also used raw input_ids as labels, so loss was computed on pad
# tokens. DataCollatorForLanguageModeling(mlm=False) builds the tensors and
# sets labels = input_ids with pad positions masked to -100.
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized,
    data_collator=data_collator,
)
trainer.train()

# Save adapter only (a few MB vs ~16 GB for the full weights)
model.save_pretrained("./lora-llama-adapter")
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training
# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4", # NormalFloat4 (best for LLMs)
bnb_4bit_compute_dtype="bfloat16", # Compute in bf16
bnb_4bit_use_double_quant=True # Nested quantization
)
# Load quantized model
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.1-70B",
quantization_config=bnb_config,
device_map="auto"
)
# Prepare for training (enables gradient checkpointing)
model = prepare_model_for_kbit_training(model)
# LoRA config for QLoRA
lora_config = LoraConfig(
r=64, # Higher rank for 70B
lora_alpha=128,
lora_dropout=0.1,
target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)
# NOTE(review): original claimed this fits on a single 24 GB GPU, but 4-bit
# weights for a 70B model are ~35 GB alone — expect ~48 GB+ or multi-GPU.
| Rank | Trainable Params | Memory | Quality | Use Case |
|---|---|---|---|---|
| 4 | ~3M | Minimal | Lower | Simple tasks, prototyping |
| 8 | ~7M | Low | Good | Recommended starting point |
| 16 | ~14M | Medium | Better | General fine-tuning |
| 32 | ~27M | Higher | High | Complex tasks |
| 64 | ~54M | High | Highest | Domain adaptation, 70B models |
# Rule of thumb: alpha = 2 * rank
LoraConfig(r=16, lora_alpha=32) # Standard
LoraConfig(r=16, lora_alpha=16) # Conservative (lower learning rate effect)
LoraConfig(r=16, lora_alpha=64) # Aggressive (higher learning rate effect)
# Per-architecture target modules (alternatives — pick the one matching your model):
# Llama / Mistral / Qwen
target_modules = ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
# GPT-2 / GPT-Neo
target_modules = ["c_attn", "c_proj", "c_fc"]
# Falcon
target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"]
# BLOOM
target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"]
# Auto-detect all linear layers
target_modules = "all-linear" # PEFT 0.6.0+
from peft import PeftModel, AutoPeftModelForCausalLM
from transformers import AutoModelForCausalLM
# Option 1: Load with PeftModel (base first, then wrap with the adapter)
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B")
model = PeftModel.from_pretrained(base_model, "./lora-llama-adapter")
# Option 2: Load directly (recommended; resolves the base model from the adapter config)
model = AutoPeftModelForCausalLM.from_pretrained(
"./lora-llama-adapter",
device_map="auto"
)
# Merge for deployment (no adapter overhead)
merged_model = model.merge_and_unload()
# Save merged model
# NOTE: `tokenizer` is assumed to have been created earlier in the page's training example.
merged_model.save_pretrained("./llama-merged")
tokenizer.save_pretrained("./llama-merged")
# Push to Hub
merged_model.push_to_hub("username/llama-finetuned")
from peft import PeftModel
# Load base with first adapter
model = AutoPeftModelForCausalLM.from_pretrained("./adapter-task1")
# Load additional adapters under distinct names
model.load_adapter("./adapter-task2", adapter_name="task2")
model.load_adapter("./adapter-task3", adapter_name="task3")
# Switch between adapters at runtime
# NOTE: `inputs` (tokenized prompt tensors) is assumed defined elsewhere.
model.set_adapter("task1") # Use task1 adapter
output1 = model.generate(**inputs)
model.set_adapter("task2") # Switch to task2
output2 = model.generate(**inputs)
# Disable adapters (use base model)
with model.disable_adapter():
base_output = model.generate(**inputs)
| Method | Trainable % | Memory | Speed | Best For |
|---|---|---|---|---|
| LoRA | 0.1-1% | Low | Fast | General fine-tuning |
| QLoRA | 0.1-1% | Very Low | Medium | Memory-constrained |
| AdaLoRA | 0.1-1% | Low | Medium | Automatic rank selection |
| IA3 | 0.01% | Minimal | Fastest | Few-shot adaptation |
| Prefix Tuning | 0.1% | Low | Medium | Generation control |
| Prompt Tuning | 0.001% | Minimal | Fast | Simple task adaptation |
from peft import IA3Config
ia3_config = IA3Config(
target_modules=["q_proj", "v_proj", "k_proj", "down_proj"],
feedforward_modules=["down_proj"] # must be a subset of target_modules
)
model = get_peft_model(model, ia3_config)
# Trains only 0.01% of parameters!
from peft import PrefixTuningConfig
prefix_config = PrefixTuningConfig(
task_type="CAUSAL_LM",
num_virtual_tokens=20, # Prepended tokens
prefix_projection=True # Use MLP projection
)
model = get_peft_model(model, prefix_config)
from trl import SFTTrainer, SFTConfig
from peft import LoraConfig
lora_config = LoraConfig(r=16, lora_alpha=32, target_modules="all-linear")
trainer = SFTTrainer(
model=model,
args=SFTConfig(output_dir="./output", max_seq_length=512),
train_dataset=dataset,
peft_config=lora_config, # Pass LoRA config directly; SFTTrainer applies it for you
)
trainer.train()
# axolotl config.yaml
adapter: lora
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
- k_proj
- o_proj
lora_target_linear: true # Target all linear layers (makes the explicit list above redundant — TODO confirm axolotl precedence)
from vllm import LLM
from vllm.lora.request import LoRARequest
# Load base model with LoRA support
llm = LLM(model="meta-llama/Llama-3.1-8B", enable_lora=True)
# Serve with adapter; LoRARequest args: adapter name, unique int id, adapter path
outputs = llm.generate(
prompts,
lora_request=LoRARequest("adapter1", 1, "./lora-adapter")
)
| Method | GPU Memory | Trainable Params |
|---|---|---|
| Full fine-tuning | 60+ GB | 8B (100%) |
| LoRA r=16 | 18 GB | 14M (0.17%) |
| QLoRA r=16 | 6 GB | 14M (0.17%) |
| IA3 | 16 GB | 800K (0.01%) |
| Method | Tokens/sec | vs Full FT |
|---|---|---|
| Full FT | 2,500 | 1x |
| LoRA | 3,200 | 1.3x |
| QLoRA | 2,100 | 0.84x |
| Model | Full FT | LoRA | QLoRA |
|---|---|---|---|
| Llama 2-7B | 45.3 | 44.8 | 44.1 |
| Llama 2-13B | 54.8 | 54.2 | 53.5 |
# Solution 1: Enable gradient checkpointing
model.gradient_checkpointing_enable()
# Solution 2: Reduce batch size + increase accumulation
TrainingArguments(
per_device_train_batch_size=1,
gradient_accumulation_steps=16
)
# Solution 3: Use QLoRA (4-bit base weights)
from transformers import BitsAndBytesConfig
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
# Verify adapter is active
print(model.active_adapters) # Should show adapter name
# Check trainable parameters
model.print_trainable_parameters()
# Ensure model in training mode
model.train()
# Increase rank
LoraConfig(r=32, lora_alpha=64)
# Target more modules
target_modules = "all-linear"
# Use more training data and epochs
TrainingArguments(num_train_epochs=5)
# Lower learning rate
TrainingArguments(learning_rate=1e-4)
Weekly Installs
204
Repository
GitHub Stars
23.4K
First Seen
Jan 21, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Warn
Installed on
opencode169
claude-code167
gemini-cli156
cursor152
codex148
github-copilot138
AI 代码实施计划编写技能 | 自动化开发任务分解与 TDD 流程规划工具
49,000 周安装
Databricks Python SDK 开发指南:SDK、Connect、CLI 与 REST API 完整教程
89 周安装
Excel自动化编程技能:使用ExcelJS、SheetJS、pandas、openpyxl创建、读取、修改XLSX文件
89 周安装
Reddit Ads API 自动化指南:编程创建、管理、优化广告活动
89 周安装
记忆任务管理:使用基础记忆架构管理工作进度与上下文恢复
89 周安装
React/Next.js高级质量保证工具:自动化测试、覆盖率分析与E2E测试脚手架
89 周安装
创建设计系统规则 - 为Figma到代码工作流定制AI生成规则,确保代码一致性
89 周安装
| P-Tuning v2 | 0.1% | Low | Medium | NLU tasks |