npx skills add https://github.com/akillness/oh-my-gods --skill genkit
# npm (推荐用于 JavaScript/TypeScript)
npm install -g genkit-cli
# macOS/Linux 二进制文件
curl -sL cli.genkit.dev | bash
mkdir my-genkit-app && cd my-genkit-app
npm init -y
npm pkg set type=module
npm install -D typescript tsx
npx tsc --init
mkdir src && touch src/index.ts
# 核心 + Google AI (Gemini) — 免费层,无需信用卡
npm install genkit @genkit-ai/google-genai
# 或者:Vertex AI (需要 GCP 项目)
npm install genkit @genkit-ai/vertexai
# 或者:OpenAI
npm install genkit genkitx-openai
# 或者:Anthropic (Claude)
npm install genkit genkitx-anthropic
# 或者:Ollama (本地模型)
npm install genkit genkitx-ollama
Manage prompts as versioned .prompt files with Dotprompt.
# npm (recommended for JavaScript/TypeScript)
npm install -g genkit-cli
# macOS/Linux binary
curl -sL cli.genkit.dev | bash
mkdir my-genkit-app && cd my-genkit-app
npm init -y
npm pkg set type=module
npm install -D typescript tsx
npx tsc --init
mkdir src && touch src/index.ts
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# Google AI (Gemini)
export GEMINI_API_KEY=your_key_here
# OpenAI
export OPENAI_API_KEY=your_key_here
# Anthropic
export ANTHROPIC_API_KEY=your_key_here
import { googleAI } from '@genkit-ai/google-genai';
import { genkit } from 'genkit';
// Initialize Genkit once; the plugin list and the optional default model
// apply to every subsequent generate()/flow call made through `ai`.
const ai = genkit({
plugins: [googleAI()],
model: googleAI.model('gemini-2.5-flash'), // default model when a call omits `model`
});
流是核心原语:类型安全、可观测、可部署的 AI 函数。
// Example: a type-safe summarization flow (Genkit + Google AI).
import { genkit, z } from 'genkit';
import { googleAI } from '@genkit-ai/google-genai';
const ai = genkit({ plugins: [googleAI()] });
// Define input/output schemas with Zod
const SummaryInputSchema = z.object({
text: z.string().describe('要总结的文本'),
maxWords: z.number().optional().default(100),
});
const SummaryOutputSchema = z.object({
summary: z.string(),
keyPoints: z.array(z.string()),
});
export const summarizeFlow = ai.defineFlow(
{
name: 'summarizeFlow',
inputSchema: SummaryInputSchema,
outputSchema: SummaryOutputSchema,
},
async ({ text, maxWords }) => {
const { output } = await ai.generate({
model: googleAI.model('gemini-2.5-flash'),
prompt: `用最多 ${maxWords} 个词总结以下文本并提取要点:\n\n${text}`,
output: { schema: SummaryOutputSchema },
});
// Structured output can be null when the model response fails schema validation.
if (!output) throw new Error('未生成输出');
return output;
}
);
// Call the flow directly
const result = await summarizeFlow({
text: '长文章内容在这里...',
maxWords: 50,
});
console.log(result.summary);
// 简单文本生成
const { text } = await ai.generate({
model: googleAI.model('gemini-2.5-flash'),
prompt: '用一句话解释量子计算。',
});
// 结构化输出
const { output } = await ai.generate({
prompt: '列出 3 种编程语言及其用例',
output: {
schema: z.object({
languages: z.array(z.object({
name: z.string(),
useCase: z.string(),
})),
}),
},
});
// 使用系统提示词
const { text: response } = await ai.generate({
system: '你是一名资深的 TypeScript 工程师。请简明扼要。',
prompt: 'TypeScript 中 interface 和 type 有什么区别?',
});
// 多模态(图像 + 文本)
const { text: description } = await ai.generate({
prompt: [
{ text: '这张图片里有什么?' },
{ media: { url: 'https://example.com/image.jpg', contentType: 'image/jpeg' } },
],
});
// Streaming flow: emits partial chunks to the caller while generating.
export const streamingFlow = ai.defineFlow(
{
name: 'streamingFlow',
inputSchema: z.object({ topic: z.string() }),
streamSchema: z.string(), // type of each streamed chunk
outputSchema: z.object({ full: z.string() }),
},
async ({ topic }, { sendChunk }) => {
const { stream, response } = ai.generateStream({
prompt: `写一篇关于 ${topic} 的详细文章。`,
});
for await (const chunk of stream) {
sendChunk(chunk.text); // forward each token to the client
}
// Await the final aggregated response after the stream is drained.
const { text } = await response;
return { full: text };
}
);
// Client-side consumption
const stream = streamingFlow.stream({ topic: 'AI 伦理' });
for await (const chunk of stream.stream) {
process.stdout.write(chunk);
}
const finalOutput = await stream.output;
import { z } from 'genkit';
// Define tools (typed functions the model may choose to call)
const getWeatherTool = ai.defineTool(
{
name: 'getWeather',
description: '获取城市的当前天气',
inputSchema: z.object({ city: z.string() }),
outputSchema: z.object({ temp: z.number(), condition: z.string() }),
},
async ({ city }) => {
// Call a real weather API here (stubbed value below)
return { temp: 22, condition: 'sunny' };
}
);
const searchWebTool = ai.defineTool(
{
name: 'searchWeb',
description: '在网络上搜索信息',
inputSchema: z.object({ query: z.string() }),
outputSchema: z.string(),
},
async ({ query }) => {
// Call a real search API here (stubbed value below)
return `搜索查询结果:${query}`;
}
);
// Agent flow that lets the model use the tools
export const agentFlow = ai.defineFlow(
{
name: 'agentFlow',
inputSchema: z.object({ question: z.string() }),
outputSchema: z.string(),
},
async ({ question }) => {
const { text } = await ai.generate({
prompt: question,
tools: [getWeatherTool, searchWebTool],
returnToolRequests: false, // auto-execute tool calls
});
return text;
}
);
将提示词作为版本化的 .prompt 文件进行管理:
# src/prompts/summarize.prompt
---
model: googleai/gemini-2.5-flash
input:
schema:
text: string
style?: string
output:
schema:
summary: string
sentiment: string
---
用 {{style, default: "professional"}} 的语气总结以下文本:
{{text}}
返回包含 summary 和 sentiment (positive/negative/neutral) 的 JSON。
// Load and run the dotprompt by file name (src/prompts/summarize.prompt)
const summarizePrompt = ai.prompt('summarize');
const { output } = await summarizePrompt({
text: '文章内容在这里...',
style: 'casual',
});
// Configure the local dev vector store (in-memory; for local testing only).
// FIX: devLocalVectorstoreIndexer was used below but never imported.
// NOTE(review): depending on the Genkit version the actual export may be
// devLocalIndexerRef — confirm against the installed package.
import { devLocalVectorstore, devLocalVectorstoreIndexer } from '@genkit-ai/dev-local-vectorstore';
import { textEmbedding004 } from '@genkit-ai/google-genai';

const ai = genkit({
  plugins: [
    googleAI(),
    devLocalVectorstore([{
      indexName: 'documents',
      embedder: textEmbedding004,
    }]),
  ],
});

// Index documents into the 'documents' index
await ai.index({
  indexer: devLocalVectorstoreIndexer('documents'),
  docs: [
    { content: [{ text: '文档 1 内容...' }], metadata: { source: 'doc1' } },
    { content: [{ text: '文档 2 内容...' }], metadata: { source: 'doc2' } },
  ],
});
// RAG flow
// NOTE(review): devLocalVectorstoreRetriever is not imported anywhere in
// this snippet — it must come from '@genkit-ai/dev-local-vectorstore'
// (possibly as devLocalRetrieverRef, depending on the Genkit version).
export const ragFlow = ai.defineFlow(
{
name: 'ragFlow',
inputSchema: z.object({ question: z.string() }),
outputSchema: z.string(),
},
async ({ question }) => {
// Retrieve relevant documents
const docs = await ai.retrieve({
retriever: devLocalVectorstoreRetriever('documents'),
query: question,
options: { k: 3 },
});
// Generate an answer grounded in the retrieved documents
const { text } = await ai.generate({
system: '仅使用提供的上下文回答问题。',
prompt: question,
docs,
});
return text;
}
);
// Chat flow with session-backed conversation memory.
// NOTE(review): loadSession/createSession signatures vary across Genkit
// releases — confirm against the installed version.
export const chatFlow = ai.defineFlow(
{
name: 'chatFlow',
inputSchema: z.object({ message: z.string(), sessionId: z.string() }),
outputSchema: z.string(),
},
async ({ message, sessionId }) => {
// Reuse an existing session when available; otherwise create one.
const session = ai.loadSession(sessionId) ?? ai.createSession({ sessionId });
const chat = session.chat({
system: '你是一个乐于助人的助手。',
});
const { text } = await chat.send(message);
return text;
}
);
// Specialist agents
const researchAgent = ai.defineFlow(
{ name: 'researchAgent', inputSchema: z.string(), outputSchema: z.string() },
async (query) => {
const { text } = await ai.generate({
system: '你是一名研究专家。收集事实并引用来源。',
prompt: query,
tools: [searchWebTool],
});
return text;
}
);
const writerAgent = ai.defineFlow(
{ name: 'writerAgent', inputSchema: z.string(), outputSchema: z.string() },
async (brief) => {
const { text } = await ai.generate({
system: '你是一名专业作家。撰写清晰、引人入胜的内容。',
prompt: brief,
});
return text;
}
);
// Orchestrator delegates to the specialists (research first, then writing)
export const contentPipelineFlow = ai.defineFlow(
{
name: 'contentPipelineFlow',
inputSchema: z.object({ topic: z.string() }),
outputSchema: z.string(),
},
async ({ topic }) => {
const research = await researchAgent(`研究:${topic}`);
const article = await writerAgent(`基于以下内容撰写文章:${research}`);
return article;
}
);
# 启动开发者 UI 并连接到你的应用
genkit start -- npx tsx --watch src/index.ts
genkit start -o -- npx tsx src/index.ts # 自动打开浏览器
# 从 CLI 运行特定流
genkit flow:run summarizeFlow '{"text": "Hello world", "maxWords": 10}'
# 运行并流式输出
genkit flow:run streamingFlow '{"topic": "AI"}' -s
# 评估一个流
genkit eval:flow ragFlow --input eval-inputs.json
# 查看所有命令
genkit --help
# 禁用分析遥测
genkit config set analyticsOptOut true
开发者 UI 运行在 http://localhost:4000 并提供:
流运行器:使用自定义 JSON 输入执行流
追踪检查器:可视化每个步骤(生成、嵌入、检索、工具调用)
提示词游乐场:交互式测试提示词
模型测试器:比较不同模型的输出
评估器:针对流运行评估数据集
"scripts": { "genkit:dev": "genkit start -- npx tsx --watch src/index.ts" }
npm run genkit:dev
import { onCallGenkit } from 'firebase-functions/https';
import { defineSecret } from 'firebase-functions/params';
const apiKey = defineSecret('GOOGLE_AI_API_KEY');
export const summarize = onCallGenkit(
{ secrets: [apiKey] },
summarizeFlow
);
firebase deploy --only functions
import express from 'express';
import { expressHandler } from 'genkit/express';
const app = express();
app.use(express.json());
app.post('/summarize', expressHandler(summarizeFlow));
app.post('/chat', expressHandler(chatFlow));
app.listen(3000, () => console.log('Server running on port 3000'));
# 构建并部署
gcloud run deploy genkit-app \
--source . \
--region us-central1 \
--set-env-vars GEMINI_API_KEY=$GEMINI_API_KEY
| 插件 | 包 | 模型 |
|---|---|---|
| Google AI | @genkit-ai/google-genai | Gemini 2.5 Flash/Pro |
| Vertex AI | @genkit-ai/vertexai | Gemini, Imagen, Claude |
| OpenAI | genkitx-openai | GPT-4o, o1, 等 |
| Anthropic | genkitx-anthropic | Claude 3.5/3 |
| AWS Bedrock | genkitx-aws-bedrock | Claude, Titan, 等 |
| Ollama | genkitx-ollama | 本地模型 |
| DeepSeek | genkitx-deepseek | DeepSeek-R1 |
| xAI (Grok) | genkitx-xai | Grok 模型 |
| 插件 | 包 |
|---|---|
| Dev Local (测试用) | @genkit-ai/dev-local-vectorstore |
| Pinecone | genkitx-pinecone |
| pgvector | genkitx-pgvector |
| Chroma | genkitx-chroma |
| Cloud Firestore | @genkit-ai/firebase |
| LanceDB | genkitx-lancedb |
- 使用 ai.run() 来追踪自定义步骤 — 将非 Genkit 代码包装在 ai.run() 中以获得追踪可见性
- 使用带 streamSchema 和 sendChunk 的 defineFlow 以获得更好的用户体验
- .prompt 文件支持版本控制、审查和重用
- 处理 generate() 的 null 输出 — 抛出有意义的错误
- 单独运行流(不经开发服务器)时设置 GENKIT_ENV=dev
- 部署到 Firebase 时使用 onCallGenkit(而非原始的 Cloud Functions)
- 需要追踪/可观测性时不要在流之外调用 generate()
- 不要不带命令运行 genkit start — 总是传递 -- <your-run-command>
- 优先使用 async/await
import { googleAI } from '@genkit-ai/google-genai';
import { genkit, z } from 'genkit';
// Minimal "hello world" flow using the default model.
const ai = genkit({ plugins: [googleAI()] });
export const helloFlow = ai.defineFlow(
{
name: 'helloFlow',
inputSchema: z.object({ name: z.string() }),
outputSchema: z.string(),
},
async ({ name }) => {
const { text } = await ai.generate(`用一种有创意的方式向 ${name} 问好。`);
return text;
}
);
// Run it
const greeting = await helloFlow({ name: 'World' });
console.log(greeting);
import { googleAI, textEmbedding004 } from '@genkit-ai/google-genai';
// FIX: devLocalVectorstoreIndexer / devLocalVectorstoreRetriever were used
// below but never imported.
// NOTE(review): depending on the Genkit version the actual exports may be
// devLocalIndexerRef / devLocalRetrieverRef — confirm against the installed package.
import {
  devLocalVectorstore,
  devLocalVectorstoreIndexer,
  devLocalVectorstoreRetriever,
} from '@genkit-ai/dev-local-vectorstore';
import { genkit, z } from 'genkit';

const ai = genkit({
  plugins: [
    googleAI(),
    devLocalVectorstore([{ indexName: 'kb', embedder: textEmbedding004 }]),
  ],
});

// Index knowledge-base documents into the 'kb' index
const indexKnowledgeBase = ai.defineFlow(
  { name: 'indexKB', inputSchema: z.array(z.string()) },
  async (texts) => {
    await ai.index({
      indexer: devLocalVectorstoreIndexer('kb'),
      docs: texts.map(text => ({ content: [{ text }] })),
    });
  }
);

// Answer questions with RAG
export const answerFlow = ai.defineFlow(
  {
    name: 'answerFlow',
    inputSchema: z.object({ question: z.string() }),
    outputSchema: z.object({ answer: z.string(), sources: z.number() }),
  },
  async ({ question }) => {
    // Retrieve the 5 most similar documents for the question.
    const docs = await ai.retrieve({
      retriever: devLocalVectorstoreRetriever('kb'),
      query: question,
      options: { k: 5 },
    });
    const { text } = await ai.generate({
      system: '仅从提供的上下文中回答。如果不确定,请说明。',
      prompt: question,
      docs,
    });
    return { answer: text, sources: docs.length };
  }
);
// Compare two providers on the same prompt.
import { googleAI } from '@genkit-ai/google-genai';
import { openAI } from 'genkitx-openai';
import { genkit, z } from 'genkit';
const ai = genkit({ plugins: [googleAI(), openAI()] });
export const compareModelsFlow = ai.defineFlow(
{
name: 'compareModelsFlow',
inputSchema: z.object({ prompt: z.string() }),
outputSchema: z.object({ gemini: z.string(), gpt4o: z.string() }),
},
async ({ prompt }) => {
// The two calls are independent, so issue them concurrently.
const [geminiResult, gptResult] = await Promise.all([
ai.generate({ model: googleAI.model('gemini-2.5-flash'), prompt }),
ai.generate({ model: 'openai/gpt-4o', prompt }),
]);
return {
gemini: geminiResult.text,
gpt4o: gptResult.text,
};
}
);
每周安装数
1
仓库
首次出现
1 天前
安全审计
安装于
mcpjam1
claude-code1
replit1
junie1
windsurf1
zencoder1
# Core + Google AI (Gemini) — free tier, no credit card required
npm install genkit @genkit-ai/google-genai
# Or: Vertex AI (requires GCP project)
npm install genkit @genkit-ai/vertexai
# Or: OpenAI
npm install genkit genkitx-openai
# Or: Anthropic (Claude)
npm install genkit genkitx-anthropic
# Or: Ollama (local models)
npm install genkit genkitx-ollama
# Google AI (Gemini)
export GEMINI_API_KEY=your_key_here
# OpenAI
export OPENAI_API_KEY=your_key_here
# Anthropic
export ANTHROPIC_API_KEY=your_key_here
import { googleAI } from '@genkit-ai/google-genai';
import { genkit } from 'genkit';
// Initialize Genkit once; the plugin list and the optional default model
// apply to every subsequent generate()/flow call made through `ai`.
const ai = genkit({
plugins: [googleAI()],
model: googleAI.model('gemini-2.5-flash'), // default model
});
Flows are the core primitive: type-safe, observable, deployable AI functions.
import { genkit, z } from 'genkit';
import { googleAI } from '@genkit-ai/google-genai';

const ai = genkit({ plugins: [googleAI()] });

// Zod schemas give the flow a typed, validated contract.
const summaryInput = z.object({
  text: z.string().describe('Text to summarize'),
  maxWords: z.number().optional().default(100),
});
const summaryOutput = z.object({
  summary: z.string(),
  keyPoints: z.array(z.string()),
});

/**
 * Type-safe flow: condenses `text` to at most `maxWords` words and
 * extracts key points as structured output.
 */
export const summarizeFlow = ai.defineFlow(
  {
    name: 'summarizeFlow',
    inputSchema: summaryInput,
    outputSchema: summaryOutput,
  },
  async ({ text, maxWords }) => {
    const generated = await ai.generate({
      model: googleAI.model('gemini-2.5-flash'),
      prompt: `Summarize the following text in at most ${maxWords} words and extract key points:\n\n${text}`,
      output: { schema: summaryOutput },
    });
    // Structured output is null when the response fails schema validation.
    if (!generated.output) throw new Error('No output generated');
    return generated.output;
  }
);

// Invoke the flow directly.
const result = await summarizeFlow({
  text: 'Long article content here...',
  maxWords: 50,
});
console.log(result.summary);
// Simple text generation
const { text } = await ai.generate({
model: googleAI.model('gemini-2.5-flash'),
prompt: 'Explain quantum computing in one sentence.',
});
// Structured output (typed by the Zod schema; `output` may be null if the
// response fails schema validation)
const { output } = await ai.generate({
prompt: 'List 3 programming languages with their use cases',
output: {
schema: z.object({
languages: z.array(z.object({
name: z.string(),
useCase: z.string(),
})),
}),
},
});
// With system prompt
const { text: response } = await ai.generate({
system: 'You are a senior TypeScript engineer. Be concise.',
prompt: 'What is the difference between interface and type in TypeScript?',
});
// Multimodal (image + text)
const { text: description } = await ai.generate({
prompt: [
{ text: 'What is in this image?' },
{ media: { url: 'https://example.com/image.jpg', contentType: 'image/jpeg' } },
],
});
/**
 * Streaming flow: forwards partial chunks to the caller while the model
 * generates, then returns the full aggregated text.
 */
export const streamingFlow = ai.defineFlow(
  {
    name: 'streamingFlow',
    inputSchema: z.object({ topic: z.string() }),
    streamSchema: z.string(), // type of each streamed chunk
    outputSchema: z.object({ full: z.string() }),
  },
  async ({ topic }, { sendChunk }) => {
    const generation = ai.generateStream({
      prompt: `Write a detailed essay about ${topic}.`,
    });
    // Forward every partial token as it arrives.
    for await (const part of generation.stream) {
      sendChunk(part.text);
    }
    // Await the final aggregated response once the stream is drained.
    const complete = await generation.response;
    return { full: complete.text };
  }
);

// Client-side consumption
const stream = streamingFlow.stream({ topic: 'AI ethics' });
for await (const chunk of stream.stream) {
  process.stdout.write(chunk);
}
const finalOutput = await stream.output;
import { z } from 'genkit';
// Define tools (typed functions the model may choose to call)
const getWeatherTool = ai.defineTool(
{
name: 'getWeather',
description: 'Get current weather for a city',
inputSchema: z.object({ city: z.string() }),
outputSchema: z.object({ temp: z.number(), condition: z.string() }),
},
async ({ city }) => {
// Call real weather API (stubbed value below)
return { temp: 22, condition: 'sunny' };
}
);
const searchWebTool = ai.defineTool(
{
name: 'searchWeb',
description: 'Search the web for information',
inputSchema: z.object({ query: z.string() }),
outputSchema: z.string(),
},
async ({ query }) => {
// Call search API (stubbed value below)
return `Search results for: ${query}`;
}
);
// Agent flow that lets the model use the tools
export const agentFlow = ai.defineFlow(
{
name: 'agentFlow',
inputSchema: z.object({ question: z.string() }),
outputSchema: z.string(),
},
async ({ question }) => {
const { text } = await ai.generate({
prompt: question,
tools: [getWeatherTool, searchWebTool],
returnToolRequests: false, // auto-execute tools
});
return text;
}
);
Manage prompts as versioned .prompt files:
# src/prompts/summarize.prompt
---
model: googleai/gemini-2.5-flash
input:
schema:
text: string
style?: string
output:
schema:
summary: string
sentiment: string
---
Summarize the following text in a {{style, default: "professional"}} tone:
{{text}}
Return JSON with summary and sentiment (positive/negative/neutral).
// Load and run the dotprompt by file name (src/prompts/summarize.prompt)
const summarizePrompt = ai.prompt('summarize');
const { output } = await summarizePrompt({
text: 'Article content here...',
style: 'casual',
});
// Configure the local dev vector store (in-memory; for local testing only).
// FIX: devLocalVectorstoreIndexer was used below but never imported.
// NOTE(review): depending on the Genkit version the actual export may be
// devLocalIndexerRef — confirm against the installed package.
import { devLocalVectorstore, devLocalVectorstoreIndexer } from '@genkit-ai/dev-local-vectorstore';
import { textEmbedding004 } from '@genkit-ai/google-genai';

const ai = genkit({
  plugins: [
    googleAI(),
    devLocalVectorstore([{
      indexName: 'documents',
      embedder: textEmbedding004,
    }]),
  ],
});

// Index documents into the 'documents' index
await ai.index({
  indexer: devLocalVectorstoreIndexer('documents'),
  docs: [
    { content: [{ text: 'Document 1 content...' }], metadata: { source: 'doc1' } },
    { content: [{ text: 'Document 2 content...' }], metadata: { source: 'doc2' } },
  ],
});
// RAG flow
// NOTE(review): devLocalVectorstoreRetriever is not imported anywhere in
// this snippet — it must come from '@genkit-ai/dev-local-vectorstore'
// (possibly as devLocalRetrieverRef, depending on the Genkit version).
export const ragFlow = ai.defineFlow(
{
name: 'ragFlow',
inputSchema: z.object({ question: z.string() }),
outputSchema: z.string(),
},
async ({ question }) => {
// Retrieve relevant documents
const docs = await ai.retrieve({
retriever: devLocalVectorstoreRetriever('documents'),
query: question,
options: { k: 3 },
});
// Generate answer grounded in retrieved docs
const { text } = await ai.generate({
system: 'Answer questions using only the provided context.',
prompt: question,
docs,
});
return text;
}
);
/**
 * Chat flow with session-backed conversation memory, keyed by sessionId.
 */
export const chatFlow = ai.defineFlow(
  {
    name: 'chatFlow',
    inputSchema: z.object({ message: z.string(), sessionId: z.string() }),
    outputSchema: z.string(),
  },
  async ({ message, sessionId }) => {
    // Reuse an existing session when one exists; otherwise start a new one.
    const existing = ai.loadSession(sessionId);
    const session = existing ?? ai.createSession({ sessionId });
    const chat = session.chat({ system: 'You are a helpful assistant.' });
    const reply = await chat.send(message);
    return reply.text;
  }
);
// Specialist agents
const researchAgent = ai.defineFlow(
{ name: 'researchAgent', inputSchema: z.string(), outputSchema: z.string() },
async (query) => {
const { text } = await ai.generate({
system: 'You are a research expert. Gather facts and cite sources.',
prompt: query,
tools: [searchWebTool],
});
return text;
}
);
const writerAgent = ai.defineFlow(
{ name: 'writerAgent', inputSchema: z.string(), outputSchema: z.string() },
async (brief) => {
const { text } = await ai.generate({
system: 'You are a professional writer. Write clear, engaging content.',
prompt: brief,
});
return text;
}
);
// Orchestrator delegates to specialists (research first, then writing)
export const contentPipelineFlow = ai.defineFlow(
{
name: 'contentPipelineFlow',
inputSchema: z.object({ topic: z.string() }),
outputSchema: z.string(),
},
async ({ topic }) => {
const research = await researchAgent(`Research: ${topic}`);
const article = await writerAgent(`Write an article based on: ${research}`);
return article;
}
);
# Start Developer UI + connect to your app
genkit start -- npx tsx --watch src/index.ts
genkit start -o -- npx tsx src/index.ts # auto-open browser
# Run a specific flow from CLI
genkit flow:run summarizeFlow '{"text": "Hello world", "maxWords": 10}'
# Run with streaming output
genkit flow:run streamingFlow '{"topic": "AI"}' -s
# Evaluate a flow
genkit eval:flow ragFlow --input eval-inputs.json
# View all commands
genkit --help
# Disable analytics telemetry
genkit config set analyticsOptOut true
The Developer UI runs at http://localhost:4000 and provides:
Flow runner : Execute flows with custom JSON inputs
Trace inspector : Visualize each step (generate, embed, retrieve, tool calls)
Prompt playground : Test prompts interactively
Model tester : Compare outputs across different models
Evaluator : Run evaluation datasets against flows
"scripts": { "genkit:dev": "genkit start -- npx tsx --watch src/index.ts" }
npm run genkit:dev
import { onCallGenkit } from 'firebase-functions/https';
import { defineSecret } from 'firebase-functions/params';
const apiKey = defineSecret('GOOGLE_AI_API_KEY');
export const summarize = onCallGenkit(
{ secrets: [apiKey] },
summarizeFlow
);
firebase deploy --only functions
import express from 'express';
import { expressHandler } from 'genkit/express';
// Expose flows as plain HTTP POST endpoints via Express.
const app = express();
app.use(express.json());
app.post('/summarize', expressHandler(summarizeFlow));
app.post('/chat', expressHandler(chatFlow));
app.listen(3000, () => console.log('Server running on port 3000'));
# Build and deploy
gcloud run deploy genkit-app \
--source . \
--region us-central1 \
--set-env-vars GEMINI_API_KEY=$GEMINI_API_KEY
| Plugin | Package | Models |
|---|---|---|
| Google AI | @genkit-ai/google-genai | Gemini 2.5 Flash/Pro |
| Vertex AI | @genkit-ai/vertexai | Gemini, Imagen, Claude |
| OpenAI | genkitx-openai | GPT-4o, o1, etc. |
| Anthropic | genkitx-anthropic | Claude 3.5/3 |
| AWS Bedrock | genkitx-aws-bedrock | Claude, Titan, etc. |
| Ollama | genkitx-ollama | Local models |
| DeepSeek | genkitx-deepseek | DeepSeek-R1 |
| xAI (Grok) | genkitx-xai | Grok models |
| Plugin | Package |
|---|---|
| Dev Local (testing) | @genkit-ai/dev-local-vectorstore |
| Pinecone | genkitx-pinecone |
| pgvector | genkitx-pgvector |
| Chroma | genkitx-chroma |
| Cloud Firestore | @genkit-ai/firebase |
| LanceDB | genkitx-lancedb |
- Use ai.run() to trace custom steps — wrap non-Genkit code in ai.run() for trace visibility
- Use defineFlow with streamSchema + sendChunk for better UX
- .prompt files enable versioning, review, and reuse
- Handle null output from generate() — throw meaningful errors
- Set GENKIT_ENV=dev when running flows separately from the dev server
- Use onCallGenkit (not raw Cloud Functions) when deploying to Firebase
- Don't call generate() outside a flow if you need tracing/observability
- Don't run genkit start without a command — always pass -- <your-run-command>
- Prefer async/await
import { googleAI } from '@genkit-ai/google-genai';
import { genkit, z } from 'genkit';

const ai = genkit({ plugins: [googleAI()] });

/** Minimal flow: greets the given name using the configured default model. */
export const helloFlow = ai.defineFlow(
  {
    name: 'helloFlow',
    inputSchema: z.object({ name: z.string() }),
    outputSchema: z.string(),
  },
  async ({ name }) => {
    const response = await ai.generate(`Say hello to ${name} in a creative way.`);
    return response.text;
  }
);

// Run it
const greeting = await helloFlow({ name: 'World' });
console.log(greeting);
import { googleAI, textEmbedding004 } from '@genkit-ai/google-genai';
// FIX: devLocalVectorstoreIndexer / devLocalVectorstoreRetriever were used
// below but never imported.
// NOTE(review): depending on the Genkit version the actual exports may be
// devLocalIndexerRef / devLocalRetrieverRef — confirm against the installed package.
import {
  devLocalVectorstore,
  devLocalVectorstoreIndexer,
  devLocalVectorstoreRetriever,
} from '@genkit-ai/dev-local-vectorstore';
import { genkit, z } from 'genkit';

const ai = genkit({
  plugins: [
    googleAI(),
    devLocalVectorstore([{ indexName: 'kb', embedder: textEmbedding004 }]),
  ],
});

// Index knowledge base documents into the 'kb' index
const indexKnowledgeBase = ai.defineFlow(
  { name: 'indexKB', inputSchema: z.array(z.string()) },
  async (texts) => {
    await ai.index({
      indexer: devLocalVectorstoreIndexer('kb'),
      docs: texts.map(text => ({ content: [{ text }] })),
    });
  }
);

// Answer questions using RAG
export const answerFlow = ai.defineFlow(
  {
    name: 'answerFlow',
    inputSchema: z.object({ question: z.string() }),
    outputSchema: z.object({ answer: z.string(), sources: z.number() }),
  },
  async ({ question }) => {
    // Retrieve the 5 most similar documents for the question.
    const docs = await ai.retrieve({
      retriever: devLocalVectorstoreRetriever('kb'),
      query: question,
      options: { k: 5 },
    });
    const { text } = await ai.generate({
      system: 'Answer only from the provided context. If unsure, say so.',
      prompt: question,
      docs,
    });
    return { answer: text, sources: docs.length };
  }
);
import { googleAI } from '@genkit-ai/google-genai';
import { openAI } from 'genkitx-openai';
import { genkit, z } from 'genkit';

const ai = genkit({ plugins: [googleAI(), openAI()] });

/** Runs the same prompt against Gemini and GPT-4o and returns both answers. */
export const compareModelsFlow = ai.defineFlow(
  {
    name: 'compareModelsFlow',
    inputSchema: z.object({ prompt: z.string() }),
    outputSchema: z.object({ gemini: z.string(), gpt4o: z.string() }),
  },
  async ({ prompt }) => {
    // The two calls are independent — issue them concurrently.
    const results = await Promise.all([
      ai.generate({ model: googleAI.model('gemini-2.5-flash'), prompt }),
      ai.generate({ model: 'openai/gpt-4o', prompt }),
    ]);
    return {
      gemini: results[0].text,
      gpt4o: results[1].text,
    };
  }
);
Weekly Installs
1
Repository
First Seen
1 day ago
Security Audits
Installed on
mcpjam1
claude-code1
replit1
junie1
windsurf1
zencoder1
Azure Data Explorer (Kusto) 查询技能:KQL数据分析、日志遥测与时间序列处理
119,800 周安装