guidance by davila7/claude-code-templates
npx skills add https://github.com/davila7/claude-code-templates --skill guidance

在以下情况下使用 Guidance:
GitHub Stars : 18,000+ | 来自 : Microsoft Research
# 基础安装
pip install guidance
# 安装特定后端
pip install guidance[transformers] # Hugging Face 模型
pip install guidance[llama_cpp] # llama.cpp 模型
from guidance import models, gen
# 加载模型(支持 OpenAI、Transformers、llama.cpp)
lm = models.OpenAI("gpt-4")
# 带约束生成
result = lm + "The capital of France is " + gen("capital", max_tokens=5)
print(result["capital"]) # "Paris"
from guidance import models, gen, system, user, assistant
# 配置 Claude
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# 使用上下文管理器处理聊天格式
with system():
lm += "You are a helpful assistant."
with user():
lm += "What is the capital of France?"
with assistant():
lm += gen(max_tokens=20)
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
Guidance 使用 Pythonic 的上下文管理器来处理聊天式交互。
from guidance import system, user, assistant, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# 系统消息
with system():
lm += "You are a JSON generation expert."
# 用户消息
with user():
lm += "Generate a person object with name and age."
# 助手回复
with assistant():
lm += gen("response", max_tokens=100)
print(lm["response"])
优点:
Guidance 使用正则表达式或语法确保输出匹配指定的模式。
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# 约束为有效的电子邮件格式
lm += "Email: " + gen("email", regex=r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
# 约束为日期格式 (YYYY-MM-DD)
lm += "Date: " + gen("date", regex=r"\d{4}-\d{2}-\d{2}")
# 约束为电话号码
lm += "Phone: " + gen("phone", regex=r"\d{3}-\d{3}-\d{4}")
print(lm["email"]) # 保证是有效的电子邮件
print(lm["date"]) # 保证是 YYYY-MM-DD 格式
工作原理:
from guidance import models, gen, select
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# 约束为特定选项
lm += "Sentiment: " + select(["positive", "negative", "neutral"], name="sentiment")
# 多项选择
lm += "Best answer: " + select(
["A) Paris", "B) London", "C) Berlin", "D) Madrid"],
name="answer"
)
print(lm["sentiment"]) # 其中之一:positive, negative, neutral
print(lm["answer"]) # 其中之一:"A) Paris", "B) London", "C) Berlin", "D) Madrid"(select 返回完整的选项字符串)
Guidance 自动"修复"提示和生成之间的词元边界。
问题: 词元化会产生不自然的边界。
# 没有词元修复
prompt = "The capital of France is "
# 最后一个词元:" is "
# 第一个生成的词元可能是 " Par"(带前导空格)
# 结果:"The capital of France is Paris"(双空格!)
解决方案: Guidance 回退一个词元并重新生成。
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# 默认启用词元修复
lm += "The capital of France is " + gen("capital", max_tokens=5)
# 结果:"The capital of France is Paris"(正确的间距)
优点:
使用上下文无关语法定义复杂结构。
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# JSON 语法(简化版)
json_grammar = """
{
"name": <gen name regex="[A-Za-z ]+" max_tokens=20>,
"age": <gen age regex="[0-9]+" max_tokens=3>,
"email": <gen email regex="[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}" max_tokens=50>
}
"""
# 生成有效的 JSON
lm += gen("person", grammar=json_grammar)
print(lm["person"]) # 保证是有效的 JSON 结构
使用场景:
使用 @guidance 装饰器创建可重用的生成模式。
from guidance import guidance, gen, models
@guidance
def generate_person(lm):
"""生成一个包含姓名和年龄的人。"""
lm += "Name: " + gen("name", max_tokens=20, stop="\n")
lm += "\nAge: " + gen("age", regex=r"[0-9]+", max_tokens=3)
return lm
# 使用该函数
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = generate_person(lm)
print(lm["name"])
print(lm["age"])
有状态函数:
@guidance(stateless=False)
def react_agent(lm, question, tools, max_rounds=5):
"""使用工具的 ReAct 智能体。"""
lm += f"Question: {question}\n\n"
for i in range(max_rounds):
# 思考
lm += f"Thought {i+1}: " + gen("thought", stop="\n")
# 行动
lm += "\nAction: " + select(list(tools.keys()), name="action")
# 执行工具
tool_result = tools[lm["action"]]()
lm += f"\nObservation: {tool_result}\n\n"
# 检查是否完成
lm += "Done? " + select(["Yes", "No"], name="done")
if lm["done"] == "Yes":
break
# 最终答案
lm += "\nFinal Answer: " + gen("answer", max_tokens=100)
return lm
from guidance import models
lm = models.Anthropic(
model="claude-sonnet-4-5-20250929",
api_key="your-api-key" # 或设置 ANTHROPIC_API_KEY 环境变量
)
lm = models.OpenAI(
model="gpt-4o-mini",
api_key="your-api-key" # 或设置 OPENAI_API_KEY 环境变量
)
from guidance.models import Transformers
lm = Transformers(
"microsoft/Phi-4-mini-instruct",
device="cuda" # 或 "cpu"
)
from guidance.models import LlamaCpp
lm = LlamaCpp(
model_path="/path/to/model.gguf",
n_ctx=4096,
n_gpu_layers=35
)
from guidance import models, gen, system, user, assistant
lm = models.Anthropic("claude-sonnet-4-5-20250929")
with system():
lm += "You generate valid JSON."
with user():
lm += "Generate a user profile with name, age, and email."
with assistant():
lm += """{
"name": """ + gen("name", regex=r'"[A-Za-z ]+"', max_tokens=30) + """,
"age": """ + gen("age", regex=r"[0-9]+", max_tokens=3) + """,
"email": """ + gen("email", regex=r'"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"', max_tokens=50) + """
}"""
print(lm) # 保证是有效的 JSON
from guidance import models, gen, select
lm = models.Anthropic("claude-sonnet-4-5-20250929")
text = "This product is amazing! I love it."
lm += f"Text: {text}\n"
lm += "Sentiment: " + select(["positive", "negative", "neutral"], name="sentiment")
lm += "\nConfidence: " + gen("confidence", regex=r"[0-9]+", max_tokens=3) + "%"
print(f"Sentiment: {lm['sentiment']}")
print(f"Confidence: {lm['confidence']}%")
from guidance import models, gen, guidance
@guidance
def chain_of_thought(lm, question):
"""生成带有逐步推理的答案。"""
lm += f"Question: {question}\n\n"
# 生成多个推理步骤
for i in range(3):
lm += f"Step {i+1}: " + gen(f"step_{i+1}", stop="\n", max_tokens=100) + "\n"
# 最终答案
lm += "\nTherefore, the answer is: " + gen("answer", max_tokens=50)
return lm
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = chain_of_thought(lm, "What is 15% of 200?")
print(lm["answer"])
from guidance import models, gen, select, guidance
@guidance(stateless=False)
def react_agent(lm, question):
"""使用工具的 ReAct 智能体。"""
tools = {
"calculator": lambda expr: eval(expr),
"search": lambda query: f"Search results for: {query}",
}
lm += f"Question: {question}\n\n"
for round in range(5):
# 思考
lm += f"Thought: " + gen("thought", stop="\n") + "\n"
# 行动选择
lm += "Action: " + select(["calculator", "search", "answer"], name="action")
if lm["action"] == "answer":
lm += "\nFinal Answer: " + gen("answer", max_tokens=100)
break
# 行动输入
lm += "\nAction Input: " + gen("action_input", stop="\n") + "\n"
# 执行工具
if lm["action"] in tools:
result = tools[lm["action"]](lm["action_input"])
lm += f"Observation: {result}\n\n"
return lm
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = react_agent(lm, "What is 25 * 4 + 10?")
print(lm["answer"])
from guidance import models, gen, guidance
@guidance
def extract_entities(lm, text):
"""从文本中提取结构化实体。"""
lm += f"Text: {text}\n\n"
# 提取人物
lm += "Person: " + gen("person", stop="\n", max_tokens=30) + "\n"
# 提取组织
lm += "Organization: " + gen("organization", stop="\n", max_tokens=30) + "\n"
# 提取日期
lm += "Date: " + gen("date", regex=r"\d{4}-\d{2}-\d{2}", max_tokens=10) + "\n"
# 提取地点
lm += "Location: " + gen("location", stop="\n", max_tokens=30) + "\n"
return lm
text = "Tim Cook announced at Apple Park on 2024-09-15 in Cupertino."
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = extract_entities(lm, text)
print(f"Person: {lm['person']}")
print(f"Organization: {lm['organization']}")
print(f"Date: {lm['date']}")
print(f"Location: {lm['location']}")
# ✅ 好:正则表达式确保有效格式
lm += "Email: " + gen("email", regex=r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
# ❌ 差:自由生成可能产生无效的电子邮件
lm += "Email: " + gen("email", max_tokens=50)
# ✅ 好:保证是有效的类别
lm += "Status: " + select(["pending", "approved", "rejected"], name="status")
# ❌ 差:可能生成拼写错误或无效值
lm += "Status: " + gen("status", max_tokens=20)
# 词元修复默认启用
# 无需特殊操作 - 只需自然连接
lm += "The capital is " + gen("capital") # 自动修复
# ✅ 好:在换行处停止,用于单行输出
lm += "Name: " + gen("name", stop="\n")
# ❌ 差:可能生成多行
lm += "Name: " + gen("name", max_tokens=50)
# ✅ 好:可重用模式
@guidance
def generate_person(lm):
lm += "Name: " + gen("name", stop="\n")
lm += "\nAge: " + gen("age", regex=r"[0-9]+")
return lm
# 多次使用
lm = generate_person(lm)
lm += "\n\n"
lm = generate_person(lm)
# ✅ 好:合理的约束
lm += gen("name", regex=r"[A-Za-z ]+", max_tokens=30)
# ❌ 太严格:可能失败或非常慢
lm += gen("name", regex=r"^(John|Jane)$", max_tokens=10)
| 功能 | Guidance | Instructor | Outlines | LMQL |
|---|---|---|---|---|
| 正则表达式约束 | ✅ 是 | ❌ 否 | ✅ 是 | ✅ 是 |
| 语法支持 | ✅ CFG | ❌ 否 | ✅ CFG | ✅ CFG |
| Pydantic 验证 | ❌ 否 | ✅ 是 | ✅ 是 | ❌ 否 |
| 词元修复 | ✅ 是 | ❌ 否 | ✅ 是 | ❌ 否 |
| 本地模型 | ✅ 是 | ⚠️ 有限 | ✅ 是 | ✅ 是 |
| API 模型 | ✅ 是 | ✅ 是 | ⚠️ 有限 | ✅ 是 |
| Pythonic 语法 | ✅ 是 | ✅ 是 | ✅ 是 | ❌ SQL-like |
| 学习曲线 | 低 | 低 | 中 | 高 |
何时选择 Guidance:
何时选择其他方案:
延迟降低:
内存使用:
词元效率:
references/constraints.md - 全面的正则表达式和语法模式
references/backends.md - 后端特定配置
references/examples.md - 生产就绪示例

每周安装次数
163
仓库
GitHub Stars
22.6K
首次出现
Jan 21, 2026
安全审计
安装于
opencode: 131
claude-code: 129
gemini-cli: 121
cursor: 112
codex: 108
antigravity: 103
Use Guidance when you need to:
GitHub Stars : 18,000+ | From : Microsoft Research
# Base installation
pip install guidance
# With specific backends
pip install guidance[transformers] # Hugging Face models
pip install guidance[llama_cpp] # llama.cpp models
from guidance import models, gen
# Load model (supports OpenAI, Transformers, llama.cpp)
lm = models.OpenAI("gpt-4")
# Generate with constraints
result = lm + "The capital of France is " + gen("capital", max_tokens=5)
print(result["capital"]) # "Paris"
from guidance import models, gen, system, user, assistant
# Configure Claude
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# Use context managers for chat format
with system():
lm += "You are a helpful assistant."
with user():
lm += "What is the capital of France?"
with assistant():
lm += gen(max_tokens=20)
Guidance uses Pythonic context managers for chat-style interactions.
from guidance import system, user, assistant, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# System message
with system():
lm += "You are a JSON generation expert."
# User message
with user():
lm += "Generate a person object with name and age."
# Assistant response
with assistant():
lm += gen("response", max_tokens=100)
print(lm["response"])
Benefits:
Guidance ensures outputs match specified patterns using regex or grammars.
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# Constrain to valid email format
lm += "Email: " + gen("email", regex=r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
# Constrain to date format (YYYY-MM-DD)
lm += "Date: " + gen("date", regex=r"\d{4}-\d{2}-\d{2}")
# Constrain to phone number
lm += "Phone: " + gen("phone", regex=r"\d{3}-\d{3}-\d{4}")
print(lm["email"]) # Guaranteed valid email
print(lm["date"]) # Guaranteed YYYY-MM-DD format
How it works:
from guidance import models, gen, select
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# Constrain to specific choices
lm += "Sentiment: " + select(["positive", "negative", "neutral"], name="sentiment")
# Multiple-choice selection
lm += "Best answer: " + select(
["A) Paris", "B) London", "C) Berlin", "D) Madrid"],
name="answer"
)
print(lm["sentiment"]) # One of: positive, negative, neutral
print(lm["answer"]) # One of the full option strings: "A) Paris", "B) London", "C) Berlin", "D) Madrid" (select returns the complete chosen option)
Guidance automatically "heals" token boundaries between prompt and generation.
Problem: Tokenization creates unnatural boundaries.
# Without token healing
prompt = "The capital of France is "
# Last token: " is "
# First generated token might be " Par" (with leading space)
# Result: "The capital of France is Paris" (double space!)
Solution: Guidance backs up one token and regenerates.
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# Token healing enabled by default
lm += "The capital of France is " + gen("capital", max_tokens=5)
# Result: "The capital of France is Paris" (correct spacing)
Benefits:
Define complex structures using context-free grammars.
from guidance import models, gen
lm = models.Anthropic("claude-sonnet-4-5-20250929")
# JSON grammar (simplified)
json_grammar = """
{
"name": <gen name regex="[A-Za-z ]+" max_tokens=20>,
"age": <gen age regex="[0-9]+" max_tokens=3>,
"email": <gen email regex="[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}" max_tokens=50>
}
"""
# Generate valid JSON
lm += gen("person", grammar=json_grammar)
print(lm["person"]) # Guaranteed valid JSON structure
Use cases:
Create reusable generation patterns with the @guidance decorator.
from guidance import guidance, gen, models
@guidance
def generate_person(lm):
"""Generate a person with name and age."""
lm += "Name: " + gen("name", max_tokens=20, stop="\n")
lm += "\nAge: " + gen("age", regex=r"[0-9]+", max_tokens=3)
return lm
# Use the function
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = generate_person(lm)
print(lm["name"])
print(lm["age"])
Stateful Functions:
@guidance(stateless=False)
def react_agent(lm, question, tools, max_rounds=5):
"""ReAct agent with tool use."""
lm += f"Question: {question}\n\n"
for i in range(max_rounds):
# Thought
lm += f"Thought {i+1}: " + gen("thought", stop="\n")
# Action
lm += "\nAction: " + select(list(tools.keys()), name="action")
# Execute tool
tool_result = tools[lm["action"]]()
lm += f"\nObservation: {tool_result}\n\n"
# Check if done
lm += "Done? " + select(["Yes", "No"], name="done")
if lm["done"] == "Yes":
break
# Final answer
lm += "\nFinal Answer: " + gen("answer", max_tokens=100)
return lm
from guidance import models
lm = models.Anthropic(
model="claude-sonnet-4-5-20250929",
api_key="your-api-key" # Or set ANTHROPIC_API_KEY env var
)
lm = models.OpenAI(
model="gpt-4o-mini",
api_key="your-api-key" # Or set OPENAI_API_KEY env var
)
from guidance.models import Transformers
lm = Transformers(
"microsoft/Phi-4-mini-instruct",
device="cuda" # Or "cpu"
)
from guidance.models import LlamaCpp
lm = LlamaCpp(
model_path="/path/to/model.gguf",
n_ctx=4096,
n_gpu_layers=35
)
from guidance import models, gen, system, user, assistant
lm = models.Anthropic("claude-sonnet-4-5-20250929")
with system():
lm += "You generate valid JSON."
with user():
lm += "Generate a user profile with name, age, and email."
with assistant():
lm += """{
"name": """ + gen("name", regex=r'"[A-Za-z ]+"', max_tokens=30) + """,
"age": """ + gen("age", regex=r"[0-9]+", max_tokens=3) + """,
"email": """ + gen("email", regex=r'"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"', max_tokens=50) + """
}"""
print(lm) # Valid JSON guaranteed
from guidance import models, gen, select
lm = models.Anthropic("claude-sonnet-4-5-20250929")
text = "This product is amazing! I love it."
lm += f"Text: {text}\n"
lm += "Sentiment: " + select(["positive", "negative", "neutral"], name="sentiment")
lm += "\nConfidence: " + gen("confidence", regex=r"[0-9]+", max_tokens=3) + "%"
print(f"Sentiment: {lm['sentiment']}")
print(f"Confidence: {lm['confidence']}%")
from guidance import models, gen, guidance
@guidance
def chain_of_thought(lm, question):
"""Generate answer with step-by-step reasoning."""
lm += f"Question: {question}\n\n"
# Generate multiple reasoning steps
for i in range(3):
lm += f"Step {i+1}: " + gen(f"step_{i+1}", stop="\n", max_tokens=100) + "\n"
# Final answer
lm += "\nTherefore, the answer is: " + gen("answer", max_tokens=50)
return lm
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = chain_of_thought(lm, "What is 15% of 200?")
print(lm["answer"])
from guidance import models, gen, select, guidance
@guidance(stateless=False)
def react_agent(lm, question):
"""ReAct agent with tool use."""
tools = {
"calculator": lambda expr: eval(expr),
"search": lambda query: f"Search results for: {query}",
}
lm += f"Question: {question}\n\n"
for round in range(5):
# Thought
lm += f"Thought: " + gen("thought", stop="\n") + "\n"
# Action selection
lm += "Action: " + select(["calculator", "search", "answer"], name="action")
if lm["action"] == "answer":
lm += "\nFinal Answer: " + gen("answer", max_tokens=100)
break
# Action input
lm += "\nAction Input: " + gen("action_input", stop="\n") + "\n"
# Execute tool
if lm["action"] in tools:
result = tools[lm["action"]](lm["action_input"])
lm += f"Observation: {result}\n\n"
return lm
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = react_agent(lm, "What is 25 * 4 + 10?")
print(lm["answer"])
from guidance import models, gen, guidance
@guidance
def extract_entities(lm, text):
"""Extract structured entities from text."""
lm += f"Text: {text}\n\n"
# Extract person
lm += "Person: " + gen("person", stop="\n", max_tokens=30) + "\n"
# Extract organization
lm += "Organization: " + gen("organization", stop="\n", max_tokens=30) + "\n"
# Extract date
lm += "Date: " + gen("date", regex=r"\d{4}-\d{2}-\d{2}", max_tokens=10) + "\n"
# Extract location
lm += "Location: " + gen("location", stop="\n", max_tokens=30) + "\n"
return lm
text = "Tim Cook announced at Apple Park on 2024-09-15 in Cupertino."
lm = models.Anthropic("claude-sonnet-4-5-20250929")
lm = extract_entities(lm, text)
print(f"Person: {lm['person']}")
print(f"Organization: {lm['organization']}")
print(f"Date: {lm['date']}")
print(f"Location: {lm['location']}")
# ✅ Good: Regex ensures valid format
lm += "Email: " + gen("email", regex=r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
# ❌ Bad: Free generation may produce invalid emails
lm += "Email: " + gen("email", max_tokens=50)
# ✅ Good: Guaranteed valid category
lm += "Status: " + select(["pending", "approved", "rejected"], name="status")
# ❌ Bad: May generate typos or invalid values
lm += "Status: " + gen("status", max_tokens=20)
# Token healing is enabled by default
# No special action needed - just concatenate naturally
lm += "The capital is " + gen("capital") # Automatic healing
# ✅ Good: Stop at newline for single-line outputs
lm += "Name: " + gen("name", stop="\n")
# ❌ Bad: May generate multiple lines
lm += "Name: " + gen("name", max_tokens=50)
# ✅ Good: Reusable pattern
@guidance
def generate_person(lm):
lm += "Name: " + gen("name", stop="\n")
lm += "\nAge: " + gen("age", regex=r"[0-9]+")
return lm
# Use multiple times
lm = generate_person(lm)
lm += "\n\n"
lm = generate_person(lm)
# ✅ Good: Reasonable constraints
lm += gen("name", regex=r"[A-Za-z ]+", max_tokens=30)
# ❌ Too strict: May fail or be very slow
lm += gen("name", regex=r"^(John|Jane)$", max_tokens=10)
| Feature | Guidance | Instructor | Outlines | LMQL |
|---|---|---|---|---|
| Regex Constraints | ✅ Yes | ❌ No | ✅ Yes | ✅ Yes |
| Grammar Support | ✅ CFG | ❌ No | ✅ CFG | ✅ CFG |
| Pydantic Validation | ❌ No | ✅ Yes | ✅ Yes | ❌ No |
| Token Healing | ✅ Yes | ❌ No | ✅ Yes | ❌ No |
| Local Models | ✅ Yes | ⚠️ Limited | ✅ Yes | ✅ Yes |
| API Models | ✅ Yes | ✅ Yes | ⚠️ Limited | ✅ Yes |
When to choose Guidance:
When to choose alternatives:
Latency Reduction:
Memory Usage:
Token Efficiency:
references/constraints.md - Comprehensive regex and grammar patterns
references/backends.md - Backend-specific configuration
references/examples.md - Production-ready examples

Weekly Installs
163
Repository
GitHub Stars
22.6K
First Seen
Jan 21, 2026
Security Audits
Gen Agent Trust Hub: Pass | Socket: Pass | Snyk: Pass
Installed on
opencode: 131
claude-code: 129
gemini-cli: 121
cursor: 112
codex: 108
antigravity: 103
超能力技能使用指南:AI助手技能调用优先级与工作流程详解
46,500 周安装
Docker安全指南:全面容器安全最佳实践、漏洞扫描与合规性要求
177 周安装
iOS开发专家技能:精通Swift 6、SwiftUI与原生应用开发,涵盖架构、性能与App Store合规
177 周安装
describe技能:AI驱动结构化测试用例生成,提升代码质量与评审效率
2 周安装
专业 README 生成器 | 支持 Rust/TypeScript/Python 项目,自动应用最佳实践
2 周安装
Django 6 升级指南:从 Django 5 迁移的完整步骤与重大变更解析
1 周安装
GitLab DAG与并行处理指南:needs与parallel优化CI/CD流水线速度
2 周安装
| ✅ Yes |
| Pythonic Syntax | ✅ Yes | ✅ Yes | ✅ Yes | ❌ SQL-like |
| Learning Curve | Low | Low | Medium | High |