AI SDK UI by constellos/claude-code-plugins
npx skills add https://github.com/constellos/claude-code-plugins --skill 'AI SDK UI'

使用 Vercel AI SDK 实现 AI 驱动的用户界面。此技能涵盖流式 UI 模式、对话界面、补全功能、带视觉反馈的工具调用以及使用 React 服务器组件的生成式 UI。
使用场景:
AI SDK 提供了用于客户端 AI 交互的 React 钩子:
useChat - 带有消息历史的对话界面:
'use client';
import { useChat } from 'ai/react';
// Conversational chat UI backed by the useChat hook: renders the streamed
// message history and a submit form wired to the /api/chat endpoint.
export function ChatInterface() {
// useChat manages message state, the controlled input, and streaming over POST /api/chat.
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
api: '/api/chat',
});
return (
<div className="flex flex-col h-full">
{/* Scrollable message list: user messages right-aligned, assistant left-aligned. */}
<div className="flex-1 overflow-y-auto p-4 space-y-4">
{messages.map((message) => (
<div
key={message.id}
className={message.role === 'user' ? 'text-right' : 'text-left'}
>
<div className={`inline-block p-3 rounded-lg ${
message.role === 'user'
? 'bg-blue-500 text-white'
: 'bg-gray-100 text-gray-900'
}`}>
{message.content}
</div>
</div>
))}
</div>
{/* Composer: input and button are disabled while a response is streaming. */}
<form onSubmit={handleSubmit} className="p-4 border-t">
<div className="flex gap-2">
<input
value={input}
onChange={handleInputChange}
placeholder="Type a message..."
className="flex-1 p-2 border rounded"
disabled={isLoading}
/>
<button
type="submit"
disabled={isLoading}
className="px-4 py-2 bg-blue-500 text-white rounded disabled:opacity-50"
>
{isLoading ? 'Sending...' : 'Send'}
</button>
</div>
</form>
</div>
);
}
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
useCompletion - 不带消息历史的单次补全:
'use client';
import { useCompletion } from 'ai/react';
// Single-shot completion UI (no message history): submits a prompt to
// /api/completion and renders the streamed completion text below the form.
export function CompletionInput() {
// useCompletion exposes the streamed completion text and form bindings.
const { completion, input, handleInputChange, handleSubmit, isLoading } = useCompletion({
api: '/api/completion',
});
return (
<div className="space-y-4">
<form onSubmit={handleSubmit}>
<textarea
value={input}
onChange={handleInputChange}
placeholder="Enter prompt..."
className="w-full p-3 border rounded"
rows={4}
/>
<button
type="submit"
disabled={isLoading}
className="mt-2 px-4 py-2 bg-green-500 text-white rounded"
>
{isLoading ? 'Generating...' : 'Generate'}
</button>
</form>
{/* Result panel only appears once some completion text has streamed in. */}
{completion && (
<div className="p-4 bg-gray-50 rounded">
<h3 className="font-semibold mb-2">Result:</h3>
<p className="whitespace-pre-wrap">{completion}</p>
</div>
)}
</div>
);
}
创建流式响应的 API 路由:
聊天 API 路由 (app/api/chat/route.ts):
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// POST /api/chat — streams a model reply for the posted conversation history.
export async function POST(req: Request) {
  const body = await req.json();
  // streamText returns immediately; tokens are produced lazily as the
  // response is consumed.
  const reply = streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages: body.messages,
  });
  // Data-stream protocol response, consumable by useChat on the client.
  return reply.toDataStreamResponse();
}
补全 API 路由 (app/api/completion/route.ts):
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// POST /api/completion — streams a one-shot completion for the posted prompt.
export async function POST(req: Request) {
  const { prompt } = await req.json();
  // No system message or history: a bare prompt-in, text-out completion.
  const stream = streamText({ model: openai('gpt-4o'), prompt });
  return stream.toDataStreamResponse();
}
逐令牌流式显示:
'use client';
import { useChat } from 'ai/react';
// Minimal chat UI demonstrating token-by-token streaming: message.content
// grows incrementally as tokens arrive, re-rendering on each update.
export function StreamingChat() {
const { messages, input, handleInputChange, handleSubmit } = useChat();
return (
<div>
{messages.map((message) => (
<div key={message.id}>
<strong>{message.role}:</strong>
{/* Content streams in token-by-token */}
<span className="animate-pulse">{message.content}</span>
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
<button type="submit">Send</button>
</form>
</div>
);
}
加载状态和指示器:
'use client';
import { useChat } from 'ai/react';
// Chat UI demonstrating the hook's isLoading and error states: a spinner
// while a reply is in flight and an inline error banner on failure.
export function ChatWithLoadingStates() {
const { messages, input, handleInputChange, handleSubmit, isLoading, error } = useChat();
return (
<div>
{messages.map((message) => (
<div key={message.id}>{message.content}</div>
))}
{/* Spinner shown while a response is streaming. */}
{isLoading && (
<div className="flex items-center gap-2 text-gray-500">
<div className="animate-spin h-4 w-4 border-2 border-gray-300 border-t-blue-500 rounded-full" />
<span>AI is thinking...</span>
</div>
)}
{/* Inline error banner when the request fails. */}
{error && (
<div className="text-red-500 p-2 bg-red-50 rounded">
Error: {error.message}
</div>
)}
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={handleInputChange}
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
Send
</button>
</form>
</div>
);
}
实现 Claude 可以调用并带有视觉反馈的工具:
服务端工具定义:
import { openai } from '@ai-sdk/openai';
import { streamText, tool } from 'ai';
import { z } from 'zod';
// Chat endpoint with server-executed tools: the model may call getWeather or
// searchProducts, and calls/results are streamed to the client alongside text.
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: openai('gpt-4o'),
messages,
tools: {
// Stubbed weather lookup for a city name.
getWeather: tool({
description: 'Get current weather for a location',
parameters: z.object({
location: z.string().describe('City name'),
}),
execute: async ({ location }) => {
// Fetch weather data
return { temperature: 72, condition: 'sunny', location };
},
}),
// Product search stub; currently always returns an empty result set.
searchProducts: tool({
description: 'Search for products',
parameters: z.object({
query: z.string(),
maxResults: z.number().optional().default(5),
}),
// NOTE(review): maxResults is destructured but never applied to the
// stubbed results — apply it (e.g. results.slice(0, maxResults)) once
// real search is implemented.
execute: async ({ query, maxResults }) => {
// Search products
return { results: [], query, count: 0 };
},
}),
},
});
return result.toDataStreamResponse();
}
客户端工具结果渲染:
'use client';
import { useChat } from 'ai/react';
// Card rendering a getWeather tool result.
function WeatherCard({ data }: { data: { temperature: number; condition: string; location: string } }) {
return (
<div className="p-4 bg-blue-50 rounded-lg">
<h3 className="font-semibold">{data.location}</h3>
<p className="text-2xl">{data.temperature}°F</p>
<p className="text-gray-600">{data.condition}</p>
</div>
);
}
// Chat UI that visualizes tool invocations: a pulsing placeholder while a
// call is in flight ('call' state) and a WeatherCard once getWeather
// reaches the 'result' state.
export function ChatWithTools() {
const { messages, input, handleInputChange, handleSubmit } = useChat({
maxSteps: 5, // Allow multi-step tool use
});
return (
<div>
{messages.map((message) => (
<div key={message.id}>
{/* Assistant messages may carry tool invocations alongside text content. */}
{message.role === 'assistant' && message.toolInvocations?.map((tool) => (
<div key={tool.toolCallId}>
{tool.toolName === 'getWeather' && tool.state === 'result' && (
<WeatherCard data={tool.result} />
)}
{tool.state === 'call' && (
<div className="animate-pulse p-2 bg-gray-100 rounded">
Calling {tool.toolName}...
</div>
)}
</div>
))}
{message.content && <p>{message.content}</p>}
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
<button type="submit">Send</button>
</form>
</div>
);
}
使用 streamUI 在服务端流式传输 React 组件:
使用 streamUI 的服务端操作:
'use server';
import { openai } from '@ai-sdk/openai';
import { streamUI } from 'ai/rsc';
import { z } from 'zod';
// Server action: lets the model answer the prompt by invoking one of the UI
// tools below. Each tool is an async generator — it yields an interim
// loading placeholder and then returns the final component to stream back.
export async function generateUI(prompt: string) {
const result = await streamUI({
model: openai('gpt-4o'),
prompt,
tools: {
showWeather: {
description: 'Show weather widget',
parameters: z.object({
location: z.string(),
temperature: z.number(),
}),
generate: async function* ({ location, temperature }) {
// Interim loading state shown while "fetching" data.
yield <div className="animate-pulse">Loading weather...</div>;
// Simulate API call
await new Promise(resolve => setTimeout(resolve, 1000));
return (
<div className="p-4 bg-gradient-to-r from-blue-400 to-blue-600 text-white rounded-lg">
<h3 className="text-xl font-bold">{location}</h3>
<p className="text-3xl">{temperature}°F</p>
</div>
);
},
},
showStockChart: {
description: 'Show stock price chart',
parameters: z.object({
symbol: z.string(),
price: z.number(),
}),
generate: async function* ({ symbol, price }) {
yield <div>Loading {symbol} data...</div>;
return (
<div className="p-4 border rounded-lg">
<h3 className="font-bold">{symbol}</h3>
<p className="text-2xl text-green-600">${price}</p>
</div>
);
},
},
},
});
// result.value is a renderable React node that streams to the client.
return result.value;
}
消费 streamUI 的客户端组件:
'use client';
import { useState } from 'react';
import { generateUI } from './actions';

// Demo client for generative UI: sends a free-form prompt to the generateUI
// server action and renders the React tree it returns.
export function GenerativeUIDemo() {
  // Server-generated UI returned by the action (null until first submit).
  const [ui, setUI] = useState<React.ReactNode>(null);
  const [prompt, setPrompt] = useState('');
  const [isLoading, setIsLoading] = useState(false);

  async function handleSubmit(e: React.FormEvent) {
    e.preventDefault();
    setIsLoading(true);
    try {
      const result = await generateUI(prompt);
      setUI(result);
    } finally {
      // Fix: reset the loading flag even when the server action rejects;
      // previously a thrown error left the submit button disabled forever.
      setIsLoading(false);
    }
  }

  return (
    <div className="space-y-4">
      <form onSubmit={handleSubmit} className="flex gap-2">
        <input
          value={prompt}
          onChange={(e) => setPrompt(e.target.value)}
          placeholder="Ask about weather, stocks..."
          className="flex-1 p-2 border rounded"
        />
        <button
          type="submit"
          disabled={isLoading}
          className="px-4 py-2 bg-purple-500 text-white rounded"
        >
          Generate
        </button>
      </form>
      {/* Placeholder text until the first generated tree arrives. */}
      <div className="min-h-[200px] p-4 border rounded">
        {ui || <p className="text-gray-400">Generated UI will appear here</p>}
      </div>
    </div>
  );
}
性能:
maxSteps 限制工具调用深度
错误处理:
error 状态
可访问性:
安全性:
关于详细模式和高级技术:
references/advanced-patterns.md - 多模态 AI、对话记忆、自定义提供程序
每周安装量
0
仓库
GitHub 星标数
5
首次出现
1970年1月1日
安全审计
Implement AI-powered user interfaces with the Vercel AI SDK. This skill covers streaming UI patterns, conversational interfaces, completion features, tool calling with visual feedback, and generative UI using React Server Components.
When to use:
The AI SDK provides React hooks for client-side AI interactions:
useChat - Conversational interfaces with message history:
'use client';
import { useChat } from 'ai/react';
// Conversational chat UI backed by the useChat hook: renders the streamed
// message history and a submit form wired to the /api/chat endpoint.
export function ChatInterface() {
// useChat manages message state, the controlled input, and streaming over POST /api/chat.
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
api: '/api/chat',
});
return (
<div className="flex flex-col h-full">
{/* Scrollable message list: user messages right-aligned, assistant left-aligned. */}
<div className="flex-1 overflow-y-auto p-4 space-y-4">
{messages.map((message) => (
<div
key={message.id}
className={message.role === 'user' ? 'text-right' : 'text-left'}
>
<div className={`inline-block p-3 rounded-lg ${
message.role === 'user'
? 'bg-blue-500 text-white'
: 'bg-gray-100 text-gray-900'
}`}>
{message.content}
</div>
</div>
))}
</div>
{/* Composer: input and button are disabled while a response is streaming. */}
<form onSubmit={handleSubmit} className="p-4 border-t">
<div className="flex gap-2">
<input
value={input}
onChange={handleInputChange}
placeholder="Type a message..."
className="flex-1 p-2 border rounded"
disabled={isLoading}
/>
<button
type="submit"
disabled={isLoading}
className="px-4 py-2 bg-blue-500 text-white rounded disabled:opacity-50"
>
{isLoading ? 'Sending...' : 'Send'}
</button>
</div>
</form>
</div>
);
}
useCompletion - Single completions without message history:
'use client';
import { useCompletion } from 'ai/react';
// Single-shot completion UI (no message history): submits a prompt to
// /api/completion and renders the streamed completion text below the form.
export function CompletionInput() {
// useCompletion exposes the streamed completion text and form bindings.
const { completion, input, handleInputChange, handleSubmit, isLoading } = useCompletion({
api: '/api/completion',
});
return (
<div className="space-y-4">
<form onSubmit={handleSubmit}>
<textarea
value={input}
onChange={handleInputChange}
placeholder="Enter prompt..."
className="w-full p-3 border rounded"
rows={4}
/>
<button
type="submit"
disabled={isLoading}
className="mt-2 px-4 py-2 bg-green-500 text-white rounded"
>
{isLoading ? 'Generating...' : 'Generate'}
</button>
</form>
{/* Result panel only appears once some completion text has streamed in. */}
{completion && (
<div className="p-4 bg-gray-50 rounded">
<h3 className="font-semibold mb-2">Result:</h3>
<p className="whitespace-pre-wrap">{completion}</p>
</div>
)}
</div>
);
}
Create API routes that stream responses:
Chat API Route (app/api/chat/route.ts):
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// POST /api/chat — streams a model reply for the posted conversation history.
export async function POST(req: Request) {
  const body = await req.json();
  // streamText returns immediately; tokens are produced lazily as the
  // response is consumed.
  const reply = streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages: body.messages,
  });
  // Data-stream protocol response, consumable by useChat on the client.
  return reply.toDataStreamResponse();
}
Completion API Route (app/api/completion/route.ts):
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// POST /api/completion — streams a one-shot completion for the posted prompt.
export async function POST(req: Request) {
  const { prompt } = await req.json();
  // No system message or history: a bare prompt-in, text-out completion.
  const stream = streamText({ model: openai('gpt-4o'), prompt });
  return stream.toDataStreamResponse();
}
Token-by-token streaming display:
'use client';
import { useChat } from 'ai/react';
// Minimal chat UI demonstrating token-by-token streaming: message.content
// grows incrementally as tokens arrive, re-rendering on each update.
export function StreamingChat() {
const { messages, input, handleInputChange, handleSubmit } = useChat();
return (
<div>
{messages.map((message) => (
<div key={message.id}>
<strong>{message.role}:</strong>
{/* Content streams in token-by-token */}
<span className="animate-pulse">{message.content}</span>
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
<button type="submit">Send</button>
</form>
</div>
);
}
Loading states and indicators:
'use client';
import { useChat } from 'ai/react';
// Chat UI demonstrating the hook's isLoading and error states: a spinner
// while a reply is in flight and an inline error banner on failure.
export function ChatWithLoadingStates() {
const { messages, input, handleInputChange, handleSubmit, isLoading, error } = useChat();
return (
<div>
{messages.map((message) => (
<div key={message.id}>{message.content}</div>
))}
{/* Spinner shown while a response is streaming. */}
{isLoading && (
<div className="flex items-center gap-2 text-gray-500">
<div className="animate-spin h-4 w-4 border-2 border-gray-300 border-t-blue-500 rounded-full" />
<span>AI is thinking...</span>
</div>
)}
{/* Inline error banner when the request fails. */}
{error && (
<div className="text-red-500 p-2 bg-red-50 rounded">
Error: {error.message}
</div>
)}
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={handleInputChange}
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
Send
</button>
</form>
</div>
);
}
Implement tools that Claude can call with visual feedback:
Server-side tool definition:
import { openai } from '@ai-sdk/openai';
import { streamText, tool } from 'ai';
import { z } from 'zod';
// Chat endpoint with server-executed tools: the model may call getWeather or
// searchProducts, and calls/results are streamed to the client alongside text.
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: openai('gpt-4o'),
messages,
tools: {
// Stubbed weather lookup for a city name.
getWeather: tool({
description: 'Get current weather for a location',
parameters: z.object({
location: z.string().describe('City name'),
}),
execute: async ({ location }) => {
// Fetch weather data
return { temperature: 72, condition: 'sunny', location };
},
}),
// Product search stub; currently always returns an empty result set.
searchProducts: tool({
description: 'Search for products',
parameters: z.object({
query: z.string(),
maxResults: z.number().optional().default(5),
}),
// NOTE(review): maxResults is destructured but never applied to the
// stubbed results — apply it (e.g. results.slice(0, maxResults)) once
// real search is implemented.
execute: async ({ query, maxResults }) => {
// Search products
return { results: [], query, count: 0 };
},
}),
},
});
return result.toDataStreamResponse();
}
Client-side tool result rendering:
'use client';
import { useChat } from 'ai/react';
// Card rendering a getWeather tool result.
function WeatherCard({ data }: { data: { temperature: number; condition: string; location: string } }) {
return (
<div className="p-4 bg-blue-50 rounded-lg">
<h3 className="font-semibold">{data.location}</h3>
<p className="text-2xl">{data.temperature}°F</p>
<p className="text-gray-600">{data.condition}</p>
</div>
);
}
// Chat UI that visualizes tool invocations: a pulsing placeholder while a
// call is in flight ('call' state) and a WeatherCard once getWeather
// reaches the 'result' state.
export function ChatWithTools() {
const { messages, input, handleInputChange, handleSubmit } = useChat({
maxSteps: 5, // Allow multi-step tool use
});
return (
<div>
{messages.map((message) => (
<div key={message.id}>
{/* Assistant messages may carry tool invocations alongside text content. */}
{message.role === 'assistant' && message.toolInvocations?.map((tool) => (
<div key={tool.toolCallId}>
{tool.toolName === 'getWeather' && tool.state === 'result' && (
<WeatherCard data={tool.result} />
)}
{tool.state === 'call' && (
<div className="animate-pulse p-2 bg-gray-100 rounded">
Calling {tool.toolName}...
</div>
)}
</div>
))}
{message.content && <p>{message.content}</p>}
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
<button type="submit">Send</button>
</form>
</div>
);
}
Use streamUI for server-side streaming of React components:
Server Action with streamUI:
'use server';
import { openai } from '@ai-sdk/openai';
import { streamUI } from 'ai/rsc';
import { z } from 'zod';
// Server action: lets the model answer the prompt by invoking one of the UI
// tools below. Each tool is an async generator — it yields an interim
// loading placeholder and then returns the final component to stream back.
export async function generateUI(prompt: string) {
const result = await streamUI({
model: openai('gpt-4o'),
prompt,
tools: {
showWeather: {
description: 'Show weather widget',
parameters: z.object({
location: z.string(),
temperature: z.number(),
}),
generate: async function* ({ location, temperature }) {
// Interim loading state shown while "fetching" data.
yield <div className="animate-pulse">Loading weather...</div>;
// Simulate API call
await new Promise(resolve => setTimeout(resolve, 1000));
return (
<div className="p-4 bg-gradient-to-r from-blue-400 to-blue-600 text-white rounded-lg">
<h3 className="text-xl font-bold">{location}</h3>
<p className="text-3xl">{temperature}°F</p>
</div>
);
},
},
showStockChart: {
description: 'Show stock price chart',
parameters: z.object({
symbol: z.string(),
price: z.number(),
}),
generate: async function* ({ symbol, price }) {
yield <div>Loading {symbol} data...</div>;
return (
<div className="p-4 border rounded-lg">
<h3 className="font-bold">{symbol}</h3>
<p className="text-2xl text-green-600">${price}</p>
</div>
);
},
},
},
});
// result.value is a renderable React node that streams to the client.
return result.value;
}
Client component consuming streamUI:
'use client';
import { useState } from 'react';
import { generateUI } from './actions';

// Demo client for generative UI: sends a free-form prompt to the generateUI
// server action and renders the React tree it returns.
export function GenerativeUIDemo() {
  // Server-generated UI returned by the action (null until first submit).
  const [ui, setUI] = useState<React.ReactNode>(null);
  const [prompt, setPrompt] = useState('');
  const [isLoading, setIsLoading] = useState(false);

  async function handleSubmit(e: React.FormEvent) {
    e.preventDefault();
    setIsLoading(true);
    try {
      const result = await generateUI(prompt);
      setUI(result);
    } finally {
      // Fix: reset the loading flag even when the server action rejects;
      // previously a thrown error left the submit button disabled forever.
      setIsLoading(false);
    }
  }

  return (
    <div className="space-y-4">
      <form onSubmit={handleSubmit} className="flex gap-2">
        <input
          value={prompt}
          onChange={(e) => setPrompt(e.target.value)}
          placeholder="Ask about weather, stocks..."
          className="flex-1 p-2 border rounded"
        />
        <button
          type="submit"
          disabled={isLoading}
          className="px-4 py-2 bg-purple-500 text-white rounded"
        >
          Generate
        </button>
      </form>
      {/* Placeholder text until the first generated tree arrives. */}
      <div className="min-h-[200px] p-4 border rounded">
        {ui || <p className="text-gray-400">Generated UI will appear here</p>}
      </div>
    </div>
  );
}
Performance:
maxSteps to limit tool calling depth
Error Handling:
error state from hooks
Accessibility:
Security:
For detailed patterns and advanced techniques:
references/advanced-patterns.md - Multi-modal AI, conversation memory, custom providers
Weekly Installs
0
Repository
GitHub Stars
5
First Seen
Jan 1, 1970
Security Audits
React 组合模式指南:Vercel 组件架构最佳实践,提升代码可维护性
109,600 周安装