crewai-multi-agent by davila7/claude-code-templates
npx skills add https://github.com/davila7/claude-code-templates --skill crewai-multi-agent
构建能够协作解决复杂任务的自主 AI 智能体团队。
在以下情况下使用 CrewAI:
主要特性:
请改用以下替代方案:
# 核心框架
pip install crewai
# 包含 50+ 内置工具
pip install 'crewai[tools]'
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# 创建新的 crew 项目
crewai create crew my_project
cd my_project
# 安装依赖
crewai install
# 运行 crew
crewai run
from crewai import Agent, Task, Crew, Process

# Quickstart: two agents collaborate sequentially — a researcher gathers
# trends, then a writer turns the findings into a blog post.

# 1. Define agents
researcher = Agent(
    role="高级研究分析师",
    goal="发现 AI 领域的尖端发展",
    backstory="您是一位眼光敏锐、善于发现新兴趋势的专家分析师。",
    verbose=True  # print the agent's reasoning steps
)
writer = Agent(
    role="技术作家",
    goal="创建关于技术主题的清晰、引人入胜的内容",
    backstory="您擅长向普通受众解释复杂概念。",
    verbose=True
)

# 2. Define tasks — the {topic} placeholder is filled from kickoff(inputs=...)
research_task = Task(
    description="研究 {topic} 的最新发展。找出 5 个关键趋势。",
    expected_output="一份包含 5 个关键趋势要点的详细报告。",
    agent=researcher
)
write_task = Task(
    description="根据研究发现撰写一篇博客文章。",
    expected_output="一篇 500 字的 Markdown 格式博客文章。",
    agent=writer,
    context=[research_task]  # feeds the research task's output into this task
)

# 3. Create and run the crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential,  # tasks run in order
    verbose=True
)

# 4. Execute
result = crew.kickoff(inputs={"topic": "AI 智能体"})
print(result.raw)
from crewai import Agent

# Reference: commonly used Agent constructor parameters.
agent = Agent(
    role="数据科学家",                # job title/role
    goal="分析数据以发现洞察",        # what the agent aims to achieve
    backstory="统计学博士...",        # background context for the persona
    llm="gpt-4o",                    # LLM to use
    tools=[],                        # tools available to the agent
    memory=True,                     # enable memory
    verbose=True,                    # show reasoning
    allow_delegation=True,           # can delegate to other agents
    max_iter=15,                     # max reasoning iterations
    max_rpm=10                       # rate limit (requests per minute)
)

from crewai import Task

# Reference: commonly used Task constructor parameters.
# NOTE(review): analyst and previous_task are assumed to be defined elsewhere.
task = Task(
    description="分析 2024 年第四季度的销售数据。{context}",
    expected_output="包含关键指标和趋势的总结报告。",
    agent=analyst,             # assigned agent
    context=[previous_task],   # input from other tasks
    output_file="report.md",   # save output to file
    async_execution=False,     # run synchronously
    human_input=False          # no human approval required
)

from crewai import Crew, Process

# Reference: commonly used Crew constructor parameters.
crew = Crew(
    agents=[researcher, writer, editor],  # team members
    tasks=[research, write, edit],        # tasks to complete
    process=Process.sequential,           # or Process.hierarchical
    verbose=True,
    memory=True,      # enable crew memory
    cache=True,       # cache tool results
    max_rpm=10,       # rate limit
    share_crew=False  # opt-in telemetry
)

# Execute with inputs
result = crew.kickoff(inputs={"topic": "AI 趋势"})

# Access results
print(result.raw)           # final output
print(result.tasks_output)  # all task outputs
print(result.token_usage)   # token consumption
任务按顺序执行,每个智能体在下一个任务开始前完成其任务:
# Sequential process: each task completes before the next one starts.
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential  # task 1 → task 2 → task 3
)
自动创建一个管理者智能体来委派和协调任务:
# Hierarchical process: a manager agent delegates and coordinates tasks.
crew = Crew(
    agents=[researcher, writer, analyst],
    tasks=[research_task, write_task, analyze_task],
    process=Process.hierarchical,  # manager delegates tasks
    manager_llm="gpt-4o"           # LLM used by the auto-created manager
)
pip install 'crewai[tools]'
from crewai_tools import (
    SerperDevTool,           # web search
    ScrapeWebsiteTool,       # web scraping
    FileReadTool,            # read files
    PDFSearchTool,           # search PDFs
    WebsiteSearchTool,       # search websites
    CodeDocsSearchTool,      # search code docs
    YoutubeVideoSearchTool,  # search YouTube
)

# Assign tools to an agent via the tools parameter.
researcher = Agent(
    role="研究员",
    goal="查找准确信息",
    backstory="擅长在线查找数据。",
    tools=[SerperDevTool(), ScrapeWebsiteTool()]
)
from crewai.tools import BaseTool
from pydantic import Field
import ast
import operator

# Whitelisted arithmetic operators for safe expression evaluation.
_ALLOWED_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
    ast.UAdd: operator.pos,
}


def _eval_node(node):
    """Evaluate an AST node consisting only of numeric literals and
    arithmetic operators; raise ValueError for anything else."""
    if isinstance(node, ast.Expression):
        return _eval_node(node.body)
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_eval_node(node.left), _eval_node(node.right))
    if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_eval_node(node.operand))
    raise ValueError("unsupported expression")


class CalculatorTool(BaseTool):
    """Calculator tool that safely evaluates arithmetic expressions.

    SECURITY FIX: the original implementation passed the expression to
    eval(), allowing a model-generated string to execute arbitrary
    Python. The expression is now parsed with ast and only numeric
    literals and basic arithmetic operators are evaluated.
    """
    name: str = "计算器"
    description: str = "执行数学计算。输入:表达式"

    def _run(self, expression: str) -> str:
        # Tool input comes from the LLM and must be treated as untrusted.
        try:
            result = _eval_node(ast.parse(expression, mode="eval"))
            return f"结果: {result}"
        except Exception as e:
            return f"错误: {str(e)}"


# Use the custom tool
agent = Agent(
    role="分析师",
    goal="执行计算",
    tools=[CalculatorTool()]
)
my_project/
├── src/my_project/
│ ├── config/
│ │ ├── agents.yaml # 智能体定义
│ │ └── tasks.yaml # 任务定义
│ ├── crew.py # Crew 组装
│ └── main.py # 入口点
└── pyproject.toml
# config/agents.yaml — agent definitions ({topic} interpolated from kickoff inputs)
researcher:
  role: "{topic} 高级数据研究员"
  goal: "揭示 {topic} 领域的尖端发展"
  backstory: >
    您是一位经验丰富的研究员,擅长发掘 {topic} 领域的最新发展。
    以能够找到相关信息并清晰呈现而闻名。
reporting_analyst:
  role: "报告分析师"
  goal: "根据研究数据创建详细报告"
  backstory: >
    您是一位细致的分析师,通过结构良好的报告将原始数据转化为可操作的洞察。

# config/tasks.yaml — task definitions ({year} also comes from kickoff inputs)
research_task:
  description: >
    对 {topic} 进行彻底研究。
    为 {year} 年找到最相关的信息。
  expected_output: >
    一份包含 10 个要点的列表,列出关于 {topic} 的最相关信息。
  agent: researcher
reporting_task:
  description: >
    审查研究并创建一份全面的报告。
    重点关注关键发现和建议。
  expected_output: >
    一份 Markdown 格式的详细报告,包含执行摘要、发现和建议。
  agent: reporting_analyst
  output_file: report.md
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool


@CrewBase
class MyProjectCrew:
    """My Project crew.

    The @agent/@task/@crew decorators collect the returned objects into
    self.agents / self.tasks; configs are read from the YAML files under
    config/ via self.agents_config / self.tasks_config.
    """

    @agent
    def researcher(self) -> Agent:
        # Built from agents.yaml['researcher'] and given web search.
        return Agent(
            config=self.agents_config['researcher'],
            tools=[SerperDevTool()],
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(config=self.tasks_config['research_task'])

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            output_file='report.md'  # final report is written to disk
        )

    @crew
    def crew(self) -> Crew:
        # Assemble the crew from everything the decorators collected.
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True
        )
from my_project.crew import MyProjectCrew


def run():
    """Entry point: kick off the crew with the template inputs."""
    # Values here fill the {topic}/{year} placeholders in the YAML configs.
    inputs = {
        'topic': 'AI 智能体',
        'year': 2025
    }
    MyProjectCrew().crew().kickoff(inputs=inputs)


if __name__ == "__main__":
    run()
对于具有条件逻辑的复杂工作流,请使用 Flows:
from crewai.flow.flow import Flow, listen, start, router
from pydantic import BaseModel


class MyState(BaseModel):
    # Typed state shared across flow steps.
    confidence: float = 0.0


class MyFlow(Flow[MyState]):
    @start()
    def gather_data(self):
        # First step of the flow; its return value is handed to listeners.
        return {"data": "collected"}

    @listen(gather_data)
    def analyze(self, data):
        self.state.confidence = 0.85
        # NOTE(review): analysis_crew is assumed to be defined elsewhere.
        return analysis_crew.kickoff(inputs=data)

    @router(analyze)
    def decide(self):
        # Returns a label; the step listening on that label runs next.
        return "high" if self.state.confidence > 0.8 else "low"

    @listen("high")
    def generate_report(self):
        # Only runs when decide() routed to "high".
        return report_crew.kickoff()


# Run the flow
flow = MyFlow()
result = flow.kickoff()
完整文档请参阅 Flows 指南。
# Enable all memory types for the crew.
crew = Crew(
    agents=[researcher],
    tasks=[research_task],
    memory=True,   # enable memory
    embedder={     # custom embedding configuration
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"}
    }
)
记忆类型: 短期(ChromaDB)、长期(SQLite)、实体(ChromaDB)
from crewai import LLM

# Each line demonstrates one provider; only the last assignment is kept.
llm = LLM(model="gpt-4o")  # OpenAI (default)
llm = LLM(model="claude-sonnet-4-5-20250929")  # Anthropic
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")  # local
llm = LLM(model="azure/gpt-4o", base_url="https://...")  # Azure
agent = Agent(role="分析师", goal="分析数据", llm=llm)
| 特性 | CrewAI | LangChain | LangGraph |
|---|---|---|---|
| 最适合 | 多智能体团队 | 通用 LLM 应用 | 有状态工作流 |
| 学习曲线 | 低 | 中等 | 较高 |
| 智能体范式 | 基于角色 | 基于工具 | 基于图 |
| 记忆 | 内置 | 插件式 | 自定义 |
智能体陷入循环:
# Fix for an agent stuck in a reasoning loop: cap iterations and request rate.
agent = Agent(
    role="...",
    max_iter=10,  # limit reasoning iterations
    max_rpm=5     # rate limit
)
任务未使用上下文:
# Fix for a task that ignores earlier output: pass context explicitly.
task2 = Task(
    description="...",
    context=[task1],  # explicitly pass upstream task output
    agent=writer
)
记忆错误:
# Fix for memory errors: point CrewAI's storage at a writable directory.
import os
os.environ["CREWAI_STORAGE_DIR"] = "./my_storage"
每周安装数
492
仓库
GitHub 星标数
23.4K
首次出现
2026年1月21日
安全审计
安装于
opencode: 436
gemini-cli: 419
codex: 406
github-copilot: 380
cursor: 377
kimi-cli: 321
Build teams of autonomous AI agents that collaborate to solve complex tasks.
Use CrewAI when:
Key features:
Use alternatives instead:
# Core framework
pip install crewai
# With 50+ built-in tools
pip install 'crewai[tools]'
# Create new crew project
crewai create crew my_project
cd my_project
# Install dependencies
crewai install
# Run the crew
crewai run
from crewai import Agent, Task, Crew, Process

# Quickstart: two agents collaborate sequentially — a researcher gathers
# trends, then a writer turns the findings into a blog post.

# 1. Define agents
researcher = Agent(
    role="Senior Research Analyst",
    goal="Discover cutting-edge developments in AI",
    backstory="You are an expert analyst with a keen eye for emerging trends.",
    verbose=True  # print the agent's reasoning steps
)
writer = Agent(
    role="Technical Writer",
    goal="Create clear, engaging content about technical topics",
    backstory="You excel at explaining complex concepts to general audiences.",
    verbose=True
)

# 2. Define tasks — the {topic} placeholder is filled from kickoff(inputs=...)
research_task = Task(
    description="Research the latest developments in {topic}. Find 5 key trends.",
    expected_output="A detailed report with 5 bullet points on key trends.",
    agent=researcher
)
write_task = Task(
    description="Write a blog post based on the research findings.",
    expected_output="A 500-word blog post in markdown format.",
    agent=writer,
    context=[research_task]  # uses the research task's output
)

# 3. Create and run crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential,  # tasks run in order
    verbose=True
)

# 4. Execute
result = crew.kickoff(inputs={"topic": "AI Agents"})
print(result.raw)
from crewai import Agent

# Reference: commonly used Agent constructor parameters.
agent = Agent(
    role="Data Scientist",                 # job title/role
    goal="Analyze data to find insights",  # what they aim to achieve
    backstory="PhD in statistics...",      # background context
    llm="gpt-4o",                          # LLM to use
    tools=[],                              # tools available
    memory=True,                           # enable memory
    verbose=True,                          # show reasoning
    allow_delegation=True,                 # can delegate to others
    max_iter=15,                           # max reasoning iterations
    max_rpm=10                             # rate limit
)

from crewai import Task

# Reference: commonly used Task constructor parameters.
# NOTE(review): analyst and previous_task are assumed to be defined elsewhere.
task = Task(
    description="Analyze the sales data for Q4 2024. {context}",
    expected_output="A summary report with key metrics and trends.",
    agent=analyst,             # assigned agent
    context=[previous_task],   # input from other tasks
    output_file="report.md",   # save to file
    async_execution=False,     # run synchronously
    human_input=False          # no human approval needed
)

from crewai import Crew, Process

# Reference: commonly used Crew constructor parameters.
crew = Crew(
    agents=[researcher, writer, editor],  # team members
    tasks=[research, write, edit],        # tasks to complete
    process=Process.sequential,           # or Process.hierarchical
    verbose=True,
    memory=True,      # enable crew memory
    cache=True,       # cache tool results
    max_rpm=10,       # rate limit
    share_crew=False  # opt-in telemetry
)

# Execute with inputs
result = crew.kickoff(inputs={"topic": "AI trends"})

# Access results
print(result.raw)           # final output
print(result.tasks_output)  # all task outputs
print(result.token_usage)   # token consumption
Tasks execute in order, each agent completing their task before the next:
# Sequential process: each task completes before the next one starts.
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential  # Task 1 → Task 2 → Task 3
)
Auto-creates a manager agent that delegates and coordinates:
# Hierarchical process: a manager agent delegates and coordinates tasks.
crew = Crew(
    agents=[researcher, writer, analyst],
    tasks=[research_task, write_task, analyze_task],
    process=Process.hierarchical,  # manager delegates tasks
    manager_llm="gpt-4o"           # LLM used by the auto-created manager
)
pip install 'crewai[tools]'
from crewai_tools import (
    SerperDevTool,           # web search
    ScrapeWebsiteTool,       # web scraping
    FileReadTool,            # read files
    PDFSearchTool,           # search PDFs
    WebsiteSearchTool,       # search websites
    CodeDocsSearchTool,      # search code docs
    YoutubeVideoSearchTool,  # search YouTube
)

# Assign tools to an agent via the tools parameter.
researcher = Agent(
    role="Researcher",
    goal="Find accurate information",
    backstory="Expert at finding data online.",
    tools=[SerperDevTool(), ScrapeWebsiteTool()]
)
from crewai.tools import BaseTool
from pydantic import Field
import ast
import operator

# Whitelisted arithmetic operators for safe expression evaluation.
_ALLOWED_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
    ast.UAdd: operator.pos,
}


def _eval_node(node):
    """Evaluate an AST node consisting only of numeric literals and
    arithmetic operators; raise ValueError for anything else."""
    if isinstance(node, ast.Expression):
        return _eval_node(node.body)
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_eval_node(node.left), _eval_node(node.right))
    if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_eval_node(node.operand))
    raise ValueError("unsupported expression")


class CalculatorTool(BaseTool):
    """Calculator tool that safely evaluates arithmetic expressions.

    SECURITY FIX: the original implementation passed the expression to
    eval(), allowing a model-generated string to execute arbitrary
    Python. The expression is now parsed with ast and only numeric
    literals and basic arithmetic operators are evaluated.
    """
    name: str = "Calculator"
    description: str = "Performs mathematical calculations. Input: expression"

    def _run(self, expression: str) -> str:
        # Tool input comes from the LLM and must be treated as untrusted.
        try:
            result = _eval_node(ast.parse(expression, mode="eval"))
            return f"Result: {result}"
        except Exception as e:
            return f"Error: {str(e)}"


# Use custom tool
agent = Agent(
    role="Analyst",
    goal="Perform calculations",
    tools=[CalculatorTool()]
)
my_project/
├── src/my_project/
│ ├── config/
│ │ ├── agents.yaml # Agent definitions
│ │ └── tasks.yaml # Task definitions
│ ├── crew.py # Crew assembly
│ └── main.py # Entry point
└── pyproject.toml
# config/agents.yaml — agent definitions ({topic} interpolated from kickoff inputs)
researcher:
  role: "{topic} Senior Data Researcher"
  goal: "Uncover cutting-edge developments in {topic}"
  backstory: >
    You're a seasoned researcher with a knack for uncovering
    the latest developments in {topic}. Known for your ability
    to find relevant information and present it clearly.
reporting_analyst:
  role: "Reporting Analyst"
  goal: "Create detailed reports based on research data"
  backstory: >
    You're a meticulous analyst who transforms raw data into
    actionable insights through well-structured reports.

# config/tasks.yaml — task definitions ({year} also comes from kickoff inputs)
research_task:
  description: >
    Conduct thorough research about {topic}.
    Find the most relevant information for {year}.
  expected_output: >
    A list with 10 bullet points of the most relevant
    information about {topic}.
  agent: researcher
reporting_task:
  description: >
    Review the research and create a comprehensive report.
    Focus on key findings and recommendations.
  expected_output: >
    A detailed report in markdown format with executive
    summary, findings, and recommendations.
  agent: reporting_analyst
  output_file: report.md
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool


@CrewBase
class MyProjectCrew:
    """My Project crew.

    The @agent/@task/@crew decorators collect the returned objects into
    self.agents / self.tasks; configs are read from the YAML files under
    config/ via self.agents_config / self.tasks_config.
    """

    @agent
    def researcher(self) -> Agent:
        # Built from agents.yaml['researcher'] and given web search.
        return Agent(
            config=self.agents_config['researcher'],
            tools=[SerperDevTool()],
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(config=self.tasks_config['research_task'])

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            output_file='report.md'  # final report is written to disk
        )

    @crew
    def crew(self) -> Crew:
        # Assemble the crew from everything the decorators collected.
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True
        )
from my_project.crew import MyProjectCrew


def run():
    """Entry point: kick off the crew with the template inputs."""
    # Values here fill the {topic}/{year} placeholders in the YAML configs.
    inputs = {
        'topic': 'AI Agents',
        'year': 2025
    }
    MyProjectCrew().crew().kickoff(inputs=inputs)


if __name__ == "__main__":
    run()
For complex workflows with conditional logic, use Flows:
from crewai.flow.flow import Flow, listen, start, router
from pydantic import BaseModel


class MyState(BaseModel):
    # Typed state shared across flow steps.
    confidence: float = 0.0


class MyFlow(Flow[MyState]):
    @start()
    def gather_data(self):
        # First step of the flow; its return value is handed to listeners.
        return {"data": "collected"}

    @listen(gather_data)
    def analyze(self, data):
        self.state.confidence = 0.85
        # NOTE(review): analysis_crew is assumed to be defined elsewhere.
        return analysis_crew.kickoff(inputs=data)

    @router(analyze)
    def decide(self):
        # Returns a label; the step listening on that label runs next.
        return "high" if self.state.confidence > 0.8 else "low"

    @listen("high")
    def generate_report(self):
        # Only runs when decide() routed to "high".
        return report_crew.kickoff()


# Run flow
flow = MyFlow()
result = flow.kickoff()
See Flows Guide for complete documentation.
# Enable all memory types for the crew.
crew = Crew(
    agents=[researcher],
    tasks=[research_task],
    memory=True,   # enable memory
    embedder={     # custom embedding configuration
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"}
    }
)
Memory types: Short-term (ChromaDB), Long-term (SQLite), Entity (ChromaDB)
from crewai import LLM

# Each line demonstrates one provider; only the last assignment is kept.
llm = LLM(model="gpt-4o")  # OpenAI (default)
llm = LLM(model="claude-sonnet-4-5-20250929")  # Anthropic
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")  # local
llm = LLM(model="azure/gpt-4o", base_url="https://...")  # Azure
agent = Agent(role="Analyst", goal="Analyze data", llm=llm)
| Feature | CrewAI | LangChain | LangGraph |
|---|---|---|---|
| Best for | Multi-agent teams | General LLM apps | Stateful workflows |
| Learning curve | Low | Medium | Higher |
| Agent paradigm | Role-based | Tool-based | Graph-based |
| Memory | Built-in | Plugin-based | Custom |
Agent stuck in loop:
# Fix for an agent stuck in a reasoning loop: cap iterations and request rate.
agent = Agent(
    role="...",
    max_iter=10,  # limit iterations
    max_rpm=5     # rate limit
)
Task not using context:
# Fix for a task that ignores earlier output: pass context explicitly.
task2 = Task(
    description="...",
    context=[task1],  # explicitly pass upstream task output
    agent=writer
)
Memory errors:
# Use environment variable for storage
# Fix for memory errors: point CrewAI's storage at a writable directory.
import os
os.environ["CREWAI_STORAGE_DIR"] = "./my_storage"
Weekly Installs
492
Repository
GitHub Stars
23.4K
First Seen
Jan 21, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Warn
Installed on
opencode: 436
gemini-cli: 419
codex: 406
github-copilot: 380
cursor: 377
kimi-cli: 321
AI 代码实施计划编写技能 | 自动化开发任务分解与 TDD 流程规划工具
42,300 周安装
PDF转Markdown工具:自动检测原生/扫描文档,支持OCR转换
375 周安装
RAG系统分块策略指南:5种方法优化文档检索与AI生成性能
375 周安装
Vite Flare Starter:开箱即用的全栈Cloudflare应用模板,集成React 19、Hono、D1
375 周安装
VectorBT + OpenAlgo Python回测环境一键配置指南 | 量化交易开发
375 周安装
Turso数据库测试指南:SQL兼容性、Rust集成与模糊测试方法详解
375 周安装
LLM硬件模型匹配器:自动检测系统配置,推荐最佳LLM模型,支持GPU/CPU/量化
375 周安装