llm-application-dev by skillcreatorai/ai-agent-skills
npx skills add https://github.com/skillcreatorai/ai-agent-skills --skill llm-application-dev

const systemPrompt = `You are a helpful assistant that answers questions about our product.
RULES:
- Only answer questions about our product
- If you don't know, say "I don't know"
- Keep responses concise (under 100 words)
- Never make up information
CONTEXT:
{context}`;
const userPrompt = `Question: {question}`;
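The `{context}` and `{question}` placeholders are left for the caller to substitute. A minimal sketch of that substitution, using a hypothetical `fillTemplate` helper (not part of the original snippet; `retrievedContext` and `userQuestion` are assumed variables):

// Hypothetical helper: replaces {name} placeholders with supplied values.
function fillTemplate(template: string, values: Record<string, string>): string {
  return template.replace(/\{(\w+)\}/g, (match, key) => values[key] ?? match);
}

const messages = [
  { role: 'system' as const, content: fillTemplate(systemPrompt, { context: retrievedContext }) },
  { role: 'user' as const, content: fillTemplate(userPrompt, { question: userQuestion }) },
];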
const prompt = `Classify the sentiment of customer feedback.
Examples:
Input: "Love this product!"
Output: positive
Input: "Worst purchase ever"
Output: negative
Input: "It works fine"
Output: neutral
Input: "${customerFeedback}"
Output:`;
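One way to run the classifier, assuming the string-based `chat(prompt)` helper defined in the Anthropic example below; trimming and lowercasing guards against stray whitespace or casing in the model's reply:

const sentiment = (await chat(prompt)).trim().toLowerCase();
// Expected: 'positive', 'negative', or 'neutral'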
const prompt = `Solve this step by step:
Question: ${question}
Let's think through this:
1. First, identify the key information
2. Then, determine the approach
3. Finally, calculate the answer
Step-by-step solution:`;
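Reasoning chains are easier to compare across runs when sampling is turned down; a sketch using the OpenAI client introduced below, with `temperature: 0` as the only change:

const solution = await openai.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: prompt }],
  temperature: 0, // near-deterministic output makes step-by-step answers reproducible
});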
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Minimal message shape, structurally compatible with the SDK's param type.
type Message = { role: 'system' | 'user' | 'assistant'; content: string };

async function chat(messages: Message[]): Promise<string> {
  const response = await openai.chat.completions.create({
    model: 'gpt-4',
    messages,
    temperature: 0.7,
    max_tokens: 500,
  });
  return response.choices[0].message.content ?? '';
}
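Calling the helper with the system prompt from the first example (the question text is illustrative):

const answer = await chat([
  { role: 'system', content: systemPrompt },
  { role: 'user', content: 'Question: How do I reset my password?' },
]);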
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function chat(prompt: string): Promise<string> {
  const response = await anthropic.messages.create({
    model: 'claude-3-opus-20240229',
    max_tokens: 1024,
    messages: [{ role: 'user', content: prompt }],
  });
  // Anthropic returns an array of content blocks; extract the text block.
  return response.content[0].type === 'text'
    ? response.content[0].text
    : '';
}
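Unlike OpenAI's API, the Anthropic Messages API takes the system prompt as a top-level `system` parameter rather than a message role:

const response = await anthropic.messages.create({
  model: 'claude-3-opus-20240229',
  max_tokens: 1024,
  system: systemPrompt, // top-level field, not a 'system' message
  messages: [{ role: 'user', content: prompt }],
});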
async function* streamChat(prompt: string) {
  const stream = await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: prompt }],
    stream: true,
  });
  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content;
    if (content) yield content;
  }
}
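Consuming the generator, printing tokens as they arrive (the prompt string is illustrative):

for await (const token of streamChat('Explain RAG in one paragraph.')) {
  process.stdout.write(token);
}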
async function ragQuery(question: string): Promise<string> {
  // 1. Embed the question
  const questionEmbedding = await embedText(question);
  // 2. Search vector database
  const relevantDocs = await vectorDb.search(questionEmbedding, { limit: 5 });
  // 3. Build context
  const context = relevantDocs.map(d => d.content).join('\n\n');
  // 4. Generate answer
  const prompt = `Answer based on this context:\n${context}\n\nQuestion: ${question}`;
  return await chat(prompt);
}
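`embedText` and `vectorDb` are left abstract above. A minimal `embedText` sketch against the OpenAI embeddings endpoint (the model name is one reasonable choice, not prescribed by this skill):

async function embedText(text: string): Promise<number[]> {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: text,
  });
  return response.data[0].embedding;
}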
interface ChunkOptions {
  chunkSize?: number; // characters per chunk
  overlap?: number;   // characters shared between consecutive chunks
}

function chunkDocument(text: string, options: ChunkOptions = {}): string[] {
  const { chunkSize = 1000, overlap = 200 } = options;
  const chunks: string[] = [];
  let start = 0;
  while (start < text.length) {
    const end = Math.min(start + chunkSize, text.length);
    chunks.push(text.slice(start, end));
    if (end === text.length) break; // avoid a redundant trailing chunk
    start += chunkSize - overlap;
  }
  return chunks;
}
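Wiring chunking into ingestion with the `storeEmbeddings` helper below (`longDocument` and the metadata shape are illustrative):

const chunks = chunkDocument(longDocument, { chunkSize: 1000, overlap: 200 });
await storeEmbeddings(chunks.map(content => ({ content, metadata: { source: 'user-guide.md' } })));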
// Using Supabase with pgvector
import { createClient } from '@supabase/supabase-js';

// Env var names are illustrative.
const supabase = createClient(process.env.SUPABASE_URL!, process.env.SUPABASE_KEY!);

type Document = { content: string; metadata: Record<string, unknown> };

async function storeEmbeddings(docs: Document[]) {
  for (const doc of docs) {
    const embedding = await embedText(doc.content);
    await supabase.from('documents').insert({
      content: doc.content,
      metadata: doc.metadata,
      embedding, // vector column
    });
  }
}
async function searchSimilar(query: string, limit = 5) {
  const embedding = await embedText(query);
  // match_documents is a Postgres function defined alongside the pgvector index
  const { data } = await supabase.rpc('match_documents', {
    query_embedding: embedding,
    match_count: limit,
  });
  return data;
}
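Retrieval feeding generation end to end, mirroring `ragQuery` above (the question string is illustrative; `d` has the shape inserted by `storeEmbeddings`):

const docs = await searchSimilar('How do I cancel my subscription?');
const context = docs.map((d: { content: string }) => d.content).join('\n\n');
const answer = await chat(`Answer based on this context:\n${context}\n\nQuestion: How do I cancel my subscription?`);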
const sleep = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

async function safeLLMCall<T>(
  fn: () => Promise<T>,
  options: { retries?: number; fallback?: T } = {}
): Promise<T> {
  const { retries = 3, fallback } = options;
  for (let i = 0; i < retries; i++) {
    try {
      return await fn();
    } catch (error: any) {
      if (error?.status === 429) {
        // Rate limit - exponential backoff: 1s, 2s, 4s, ...
        await sleep(Math.pow(2, i) * 1000);
        continue;
      }
      if (i === retries - 1) {
        if (fallback !== undefined) return fallback;
        throw error;
      }
    }
  }
  // Rate-limit retries exhausted; fall back if one was provided.
  if (fallback !== undefined) return fallback;
  throw new Error('Max retries exceeded');
}
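Wrapping the chat helper above (the fallback string is illustrative):

const reply = await safeLLMCall(
  () => chat('Summarize our refund policy.'),
  { fallback: 'Sorry, something went wrong. Please try again.' }
);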