Effect AI by andrueandersoncs/claude-skill-effect-ts
npx skills add https://github.com/andrueandersoncs/claude-skill-effect-ts --skill 'Effect AI'

Effect AI (@effect/ai) 提供了与 AI/LLM 服务的类型安全集成:
npm install @effect/ai @effect/ai-openai
# 或
npm install @effect/ai @effect/ai-anthropic
// --- Provider setup -------------------------------------------------------
// Layers that supply the AiChat service from a concrete provider.
import { AiChat } from "@effect/ai"
import { OpenAiChat } from "@effect/ai-openai"
// FIX: `Config` is used below (Config.redacted) but was never imported.
import { Effect, Layer, Config } from "effect"

// OpenAI-backed layer. Config.redacted reads the key as a redacted value
// so it is not printed in logs or error output.
const OpenAiLive = OpenAiChat.layer({
  apiKey: Config.redacted("OPENAI_API_KEY"),
  model: "gpt-4"
})

import { AnthropicChat } from "@effect/ai-anthropic"

// Anthropic-backed alternative with the same layer shape.
const AnthropicLive = AnthropicChat.layer({
  apiKey: Config.redacted("ANTHROPIC_API_KEY"),
  model: "claude-3-opus-20240229"
})
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
// Basic text generation: acquire the AiChat service from the environment
// and ask for a single completion.
const program = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
prompt: "用一句话解释函数式编程。"
})
return response.text
})
// NOTE(review): `yield*` is only legal inside a generator; this snippet
// presumably runs inside an enclosing Effect.gen — confirm against the
// upstream docs before copying it verbatim.
const result = yield* program.pipe(
Effect.provide(OpenAiLive)
)
// Multi-message (chat-style) variant: pass a `messages` array with
// explicit roles instead of a single `prompt`.
const chat = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
messages: [
{ role: "system", content: "你是一个乐于助人的助手。" },
{ role: "user", content: "什么是 Effect-TS?" }
]
})
return response.text
})
定义 AI 可以调用的工具:
import { AiTool } from "@effect/ai"
import { Schema } from "effect"
// Tool input schema: the model's arguments are validated against this
// before the handler runs. `unit` is optional and defaults in the handler.
const WeatherInput = Schema.Struct({
city: Schema.String,
unit: Schema.optional(Schema.Literal("celsius", "fahrenheit"))
})
// A tool the model may invoke; the handler returns an Effect.
// This example returns fixed demo data rather than calling a real API.
const getWeather = AiTool.make({
name: "get_weather",
description: "获取城市的当前天气",
input: WeatherInput,
handler: (input) =>
Effect.succeed({
city: input.city,
temperature: 22,
unit: input.unit ?? "celsius",
conditions: "sunny"
})
})
// Pass tools to generateText so the model can call them while answering.
const programWithTools = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
prompt: "东京的天气怎么样?",
tools: [getWeather]
})
return response.text
})
// NOTE(review): `performSearch` is assumed to be a user-provided function
// returning an Effect — it is not defined in this document.
const searchTool = AiTool.make({
name: "search",
description: "搜索网络",
input: Schema.Struct({ query: Schema.String }),
handler: ({ query }) => performSearch(query)
})
// NOTE(review): `evaluate` is likewise assumed to be user-provided.
const calculatorTool = AiTool.make({
name: "calculator",
description: "执行计算",
input: Schema.Struct({
expression: Schema.String
}),
handler: ({ expression }) => evaluate(expression)
})
// Fragment: assumes an enclosing Effect.gen where `ai` is in scope.
const response = yield* ai.generateText({
prompt: "搜索 Effect-TS 并计算 2+2",
tools: [searchTool, calculatorTool]
})
获取类型化、经过验证的响应:
// Output schema for structured generation: the model's reply is parsed
// and validated into this shape (score constrained to 1..5).
const ProductReview = Schema.Struct({
sentiment: Schema.Literal("positive", "negative", "neutral"),
score: Schema.Number.pipe(Schema.between(1, 5)),
summary: Schema.String,
keywords: Schema.Array(Schema.String)
})
// generateObject returns a value already validated against `schema`,
// instead of raw text.
const analyzeReview = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const review = yield* ai.generateObject({
prompt: "分析这个产品评论:'很棒的产品,强烈推荐!'",
schema: ProductReview
})
return review
})
用于复杂的多步骤 AI 工作流:
import { AiPlan } from "@effect/ai"
// A multi-step plan: each step is either a tool invocation or a handler
// Effect that can read results from previous steps via `context`.
const researchPlan = AiPlan.make({
name: "research",
description: "研究一个主题并总结发现",
steps: [
{
name: "search",
description: "搜索相关信息",
tool: searchTool
},
{
name: "analyze",
description: "分析搜索结果",
handler: (context) =>
Effect.gen(function* () {
const ai = yield* AiChat.AiChat
return yield* ai.generateText({
prompt: `分析这些结果:${context.previousResults}`
})
})
},
{
name: "summarize",
description: "创建最终摘要",
handler: (context) =>
Effect.gen(function* () {
const ai = yield* AiChat.AiChat
// NOTE(review): `ResearchSummary` (a Schema) is not defined in this
// document — it must be supplied by the reader.
return yield* ai.generateObject({
prompt: `总结:${context.analysis}`,
schema: ResearchSummary
})
})
}
]
})
// Fragment: `yield*` assumes an enclosing Effect.gen context.
const result = yield* AiPlan.execute(researchPlan, {
topic: "Effect-TS 的优势"
})
import { Stream } from "effect"
// Streaming variant: streamText yields a Stream of text chunks which are
// written to stdout as they arrive.
const streamProgram = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const stream = yield* ai.streamText({
prompt: "写一个关于机器人的短篇故事。"
})
yield* Stream.runForEach(stream, (chunk) =>
Effect.sync(() => process.stdout.write(chunk))
)
})
// Advanced configuration examples. NOTE(review): these redeclare
// OpenAiLive/AnthropicLive from the earlier snippet — they are standalone
// alternatives, not meant to coexist in one module.
const OpenAiLive = OpenAiChat.layer({
apiKey: Config.redacted("OPENAI_API_KEY"),
model: "gpt-4-turbo",
temperature: 0.7,
maxTokens: 1000,
organizationId: Config.string("OPENAI_ORG_ID").pipe(Config.option)
})
const AnthropicLive = AnthropicChat.layer({
apiKey: Config.redacted("ANTHROPIC_API_KEY"),
model: "claude-3-opus-20240229",
maxTokens: 4096
})
import { AiError } from "@effect/ai"
// Typed error handling: catch specific tagged errors first, then the
// general AiError as a catch-all fallback.
const safeChat = program.pipe(
// Rate limits: wait the server-suggested delay, then retry once.
Effect.catchTag("AiRateLimitError", (error) =>
Effect.gen(function* () {
yield* Effect.sleep(error.retryAfter)
return yield* program
})
),
// NOTE(review): `ConfigurationError` is not defined in this document —
// assumed to be an application-level error class.
Effect.catchTag("AiAuthenticationError", () =>
Effect.fail(new ConfigurationError())
),
// Any other AI error: log it and degrade to a canned reply.
Effect.catchTag("AiError", (error) =>
Effect.gen(function* () {
yield* Effect.logError("AI 错误", error)
return "抱歉,我无法处理该请求。"
})
)
)
// Test double: a Layer that satisfies AiChat with canned responses so
// programs can be tested without network calls.
// NOTE(review): `mockData` is not defined here; supply your own fixture.
const MockAiLive = Layer.succeed(
AiChat.AiChat,
{
generateText: () =>
Effect.succeed({ text: "模拟响应" }),
generateObject: (options) =>
Effect.succeed(mockData),
streamText: () =>
Effect.succeed(Stream.make("模拟", " ", "流"))
}
)
const testProgram = program.pipe(
Effect.provide(MockAiLive)
)
有关全面的 Effect AI 文档,请查阅 ${CLAUDE_PLUGIN_ROOT}/references/llms-full.txt。
搜索以下部分:
每周安装量
0
代码仓库
GitHub 星标数
5
首次出现时间
1970年1月1日
安全审计
Effect AI (@effect/ai) provides type-safe integration with AI/LLM services:
npm install @effect/ai @effect/ai-openai
# or
npm install @effect/ai @effect/ai-anthropic
// --- Provider setup -------------------------------------------------------
// Layers that supply the AiChat service from a concrete provider.
import { AiChat } from "@effect/ai"
import { OpenAiChat } from "@effect/ai-openai"
// FIX: `Config` is used below (Config.redacted) but was never imported.
import { Effect, Layer, Config } from "effect"

// OpenAI-backed layer. Config.redacted reads the key as a redacted value
// so it is not printed in logs or error output.
const OpenAiLive = OpenAiChat.layer({
  apiKey: Config.redacted("OPENAI_API_KEY"),
  model: "gpt-4"
})

import { AnthropicChat } from "@effect/ai-anthropic"

// Anthropic-backed alternative with the same layer shape.
const AnthropicLive = AnthropicChat.layer({
  apiKey: Config.redacted("ANTHROPIC_API_KEY"),
  model: "claude-3-opus-20240229"
})
// Basic text generation: acquire the AiChat service from the environment
// and ask for a single completion.
const program = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
prompt: "Explain functional programming in one sentence."
})
return response.text
})
// NOTE(review): `yield*` is only legal inside a generator; this snippet
// presumably runs inside an enclosing Effect.gen — confirm against the
// upstream docs before copying it verbatim.
const result = yield* program.pipe(
Effect.provide(OpenAiLive)
)
// Multi-message (chat-style) variant: pass a `messages` array with
// explicit roles instead of a single `prompt`.
const chat = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "What is Effect-TS?" }
]
})
return response.text
})
Define tools that AI can call:
import { AiTool } from "@effect/ai"
import { Schema } from "effect"
// Tool input schema: the model's arguments are validated against this
// before the handler runs. `unit` is optional and defaults in the handler.
const WeatherInput = Schema.Struct({
city: Schema.String,
unit: Schema.optional(Schema.Literal("celsius", "fahrenheit"))
})
// A tool the model may invoke; the handler returns an Effect.
// This example returns fixed demo data rather than calling a real API.
const getWeather = AiTool.make({
name: "get_weather",
description: "Get current weather for a city",
input: WeatherInput,
handler: (input) =>
Effect.succeed({
city: input.city,
temperature: 22,
unit: input.unit ?? "celsius",
conditions: "sunny"
})
})
// Pass tools to generateText so the model can call them while answering.
const programWithTools = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const response = yield* ai.generateText({
prompt: "What's the weather in Tokyo?",
tools: [getWeather]
})
return response.text
})
// NOTE(review): `performSearch` is assumed to be a user-provided function
// returning an Effect — it is not defined in this document.
const searchTool = AiTool.make({
name: "search",
description: "Search the web",
input: Schema.Struct({ query: Schema.String }),
handler: ({ query }) => performSearch(query)
})
// NOTE(review): `evaluate` is likewise assumed to be user-provided.
const calculatorTool = AiTool.make({
name: "calculator",
description: "Perform calculations",
input: Schema.Struct({
expression: Schema.String
}),
handler: ({ expression }) => evaluate(expression)
})
// Fragment: assumes an enclosing Effect.gen where `ai` is in scope.
const response = yield* ai.generateText({
prompt: "Search for Effect-TS and calculate 2+2",
tools: [searchTool, calculatorTool]
})
Get typed, validated responses:
// Output schema for structured generation: the model's reply is parsed
// and validated into this shape (score constrained to 1..5).
const ProductReview = Schema.Struct({
sentiment: Schema.Literal("positive", "negative", "neutral"),
score: Schema.Number.pipe(Schema.between(1, 5)),
summary: Schema.String,
keywords: Schema.Array(Schema.String)
})
// generateObject returns a value already validated against `schema`,
// instead of raw text.
const analyzeReview = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const review = yield* ai.generateObject({
prompt: "Analyze this product review: 'Great product, highly recommend!'",
schema: ProductReview
})
return review
})
For complex multi-step AI workflows:
import { AiPlan } from "@effect/ai"
// A multi-step plan: each step is either a tool invocation or a handler
// Effect that can read results from previous steps via `context`.
const researchPlan = AiPlan.make({
name: "research",
description: "Research a topic and summarize findings",
steps: [
{
name: "search",
description: "Search for relevant information",
tool: searchTool
},
{
name: "analyze",
description: "Analyze search results",
handler: (context) =>
Effect.gen(function* () {
const ai = yield* AiChat.AiChat
return yield* ai.generateText({
prompt: `Analyze these results: ${context.previousResults}`
})
})
},
{
name: "summarize",
description: "Create final summary",
handler: (context) =>
Effect.gen(function* () {
const ai = yield* AiChat.AiChat
// NOTE(review): `ResearchSummary` (a Schema) is not defined in this
// document — it must be supplied by the reader.
return yield* ai.generateObject({
prompt: `Summarize: ${context.analysis}`,
schema: ResearchSummary
})
})
}
]
})
// Fragment: `yield*` assumes an enclosing Effect.gen context.
const result = yield* AiPlan.execute(researchPlan, {
topic: "Effect-TS benefits"
})
import { Stream } from "effect"
// Streaming variant: streamText yields a Stream of text chunks which are
// written to stdout as they arrive.
const streamProgram = Effect.gen(function* () {
const ai = yield* AiChat.AiChat
const stream = yield* ai.streamText({
prompt: "Write a short story about a robot."
})
yield* Stream.runForEach(stream, (chunk) =>
Effect.sync(() => process.stdout.write(chunk))
)
})
// Advanced configuration examples. NOTE(review): these redeclare
// OpenAiLive/AnthropicLive from the earlier snippet — they are standalone
// alternatives, not meant to coexist in one module.
const OpenAiLive = OpenAiChat.layer({
apiKey: Config.redacted("OPENAI_API_KEY"),
model: "gpt-4-turbo",
temperature: 0.7,
maxTokens: 1000,
organizationId: Config.string("OPENAI_ORG_ID").pipe(Config.option)
})
const AnthropicLive = AnthropicChat.layer({
apiKey: Config.redacted("ANTHROPIC_API_KEY"),
model: "claude-3-opus-20240229",
maxTokens: 4096
})
import { AiError } from "@effect/ai"
// Typed error handling: catch specific tagged errors first, then the
// general AiError as a catch-all fallback.
const safeChat = program.pipe(
// Rate limits: wait the server-suggested delay, then retry once.
Effect.catchTag("AiRateLimitError", (error) =>
Effect.gen(function* () {
yield* Effect.sleep(error.retryAfter)
return yield* program
})
),
// NOTE(review): `ConfigurationError` is not defined in this document —
// assumed to be an application-level error class.
Effect.catchTag("AiAuthenticationError", () =>
Effect.fail(new ConfigurationError())
),
// Any other AI error: log it and degrade to a canned reply.
Effect.catchTag("AiError", (error) =>
Effect.gen(function* () {
yield* Effect.logError("AI error", error)
return "Sorry, I couldn't process that request."
})
)
)
// Test double: a Layer that satisfies AiChat with canned responses so
// programs can be tested without network calls.
// NOTE(review): `mockData` is not defined here; supply your own fixture.
const MockAiLive = Layer.succeed(
AiChat.AiChat,
{
generateText: () =>
Effect.succeed({ text: "Mock response" }),
generateObject: (options) =>
Effect.succeed(mockData),
streamText: () =>
Effect.succeed(Stream.make("Mock", " ", "stream"))
}
)
const testProgram = program.pipe(
Effect.provide(MockAiLive)
)
For comprehensive Effect AI documentation, consult ${CLAUDE_PLUGIN_ROOT}/references/llms-full.txt.
Search for these sections:
Weekly Installs
0
Repository
GitHub Stars
5
First Seen
Jan 1, 1970
Security Audits
超能力技能使用指南:AI助手技能调用优先级与工作流程详解
45,100 周安装