openai-api-development by mindrally/skills
npx skills add https://github.com/mindrally/skills --skill openai-api-development

您是一位精通 OpenAI API 开发的专家,涵盖 GPT 模型、Assistants API、函数调用、嵌入以及构建生产就绪的 AI 应用程序。
import os
from openai import OpenAI
# Always read the API key from the environment — never hard-code it in source.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
将 API 密钥存放在 .env 文件中,切勿提交到版本控制系统;本地开发可使用 python-dotenv 加载。

from openai import OpenAI
client = OpenAI()
# Basic chat completion: the system message sets behavior, the user message is the query.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"}
    ],
    temperature=0.7,  # moderate sampling randomness
    max_tokens=1000   # cap on generated tokens
)
# The generated text is on the first choice's message.
message = response.choices[0].message.content
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# Streaming: tokens arrive incrementally as chunks instead of one final response.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)
for chunk in stream:
    # The terminating chunk carries a None delta, so guard before printing.
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
模型选择:gpt-4o 适合复杂推理与多模态任务;gpt-4o-mini 响应更快、更具性价比;o1 模型适合高级推理任务;gpt-3.5-turbo 适合追求速度的简单任务。

tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g., San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "Temperature unit"
}
},
"required": ["location"]
}
}
}
]
# "auto" lets the model decide whether to call a tool based on the schema above.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)
import json
def process_tool_calls(response, messages):
    """Resolve tool calls requested by the model, then fetch a final reply.

    When the model's reply contains tool calls, each one is executed through
    the module-level execute_function helper, the JSON-encoded results are
    appended to *messages* as "tool" entries, and a follow-up completion is
    requested so the model can produce its final answer.  If there are no
    tool calls, *response* is returned unchanged.
    """
    requested = response.choices[0].message.tool_calls
    if not requested:
        # Nothing to execute — hand back the original response.
        return response
    # Keep the assistant turn that carries the tool-call requests.
    messages.append(response.choices[0].message)
    for tc in requested:
        payload = json.loads(tc.function.arguments)
        # Dispatch via the project-level executor (defined elsewhere in the file).
        outcome = execute_function(tc.function.name, payload)
        messages.append({
            "role": "tool",
            "tool_call_id": tc.id,
            "content": json.dumps(outcome),
        })
    # With the tool outputs in place, request the model's final answer.
    return client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=tools
    )
# Create a reusable assistant equipped with code-interpreter and file-search tools.
assistant = client.beta.assistants.create(
    name="Data Analyst",
    instructions="You are a data analyst. Analyze data and provide insights.",
    tools=[
        {"type": "code_interpreter"},
        {"type": "file_search"}
    ],
    model="gpt-4o"
)
# Create a thread (the conversation container).
thread = client.beta.threads.create()
# Add a user message to the thread.
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Analyze this data..."
)
# Run the assistant and block until the run reaches a terminal state.
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id
)
# Fetch the messages only if the run finished successfully.
if run.status == "completed":
    messages = client.beta.threads.messages.list(thread_id=thread.id)
# Embeddings: convert text into a float vector for similarity search / retrieval.
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="Your text to embed",
    encoding_format="float"
)
# The vector for the (single) input is on the first data element.
embedding = response.data[0].embedding
嵌入模型选择:text-embedding-3-small 更具性价比;text-embedding-3-large 精度最高。

response = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://example.com/image.jpg",
"detail": "high"
}
}
]
}
]
)
from openai import RateLimitError, APIError
import time
def call_with_retry(func, max_retries=3, base_delay=1):
    """Invoke *func*, retrying transient OpenAI failures with backoff.

    Args:
        func: Zero-argument callable that performs the API request.
        max_retries: Maximum number of attempts before giving up.
        base_delay: Base delay in seconds; rate-limit retries back off
            exponentially (base_delay * 2 ** attempt).

    Returns:
        Whatever *func* returns on the first successful attempt.

    Raises:
        RateLimitError / APIError: re-raised when the final attempt fails,
            preserving the original error instead of masking it.
        RuntimeError: only reachable when max_retries < 1.
    """
    for attempt in range(max_retries):
        try:
            return func()
        except RateLimitError:
            # Fix: on the last attempt, re-raise immediately instead of
            # sleeping pointlessly and then raising a generic Exception
            # that hides the real cause.
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))
        except APIError:
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay)
    raise RuntimeError("Max retries exceeded")
常见错误处理:
- RateLimitError:实现指数退避
- APIError:检查 API 状态,使用退避策略重试
- AuthenticationError:验证 API 密钥
- InvalidRequestError:验证输入参数与 max_tokens 限制

每周安装量
100
代码仓库
GitHub 星标数
43
首次出现
2026年1月25日
安全审计
安装于
gemini-cli: 84
opencode: 84
cursor: 81
codex: 80
github-copilot: 76
claude-code: 74
You are an expert in OpenAI API development, including GPT models, Assistants API, function calling, embeddings, and building production-ready AI applications.
import os
from openai import OpenAI
# Always read the API key from the environment — never hard-code it in source.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
Store API keys in .env files and never commit them; use python-dotenv for local development.

from openai import OpenAI
client = OpenAI()
# Basic chat completion: the system message sets behavior, the user message is the query.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"}
    ],
    temperature=0.7,  # moderate sampling randomness
    max_tokens=1000   # cap on generated tokens
)
# The generated text is on the first choice's message.
message = response.choices[0].message.content
# Streaming: tokens arrive incrementally as chunks instead of one final response.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)
for chunk in stream:
    # The terminating chunk carries a None delta, so guard before printing.
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
Model selection: gpt-4o for complex reasoning and multimodal tasks; gpt-4o-mini for faster, cost-effective responses; o1 models for advanced reasoning tasks; gpt-3.5-turbo for simple tasks requiring speed.

tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g., San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "Temperature unit"
}
},
"required": ["location"]
}
}
}
]
# "auto" lets the model decide whether to call a tool based on the schema above.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)
import json
def process_tool_calls(response, messages):
    """Execute any tool calls the model requested and return its final reply.

    Each requested call is run through the module-level execute_function
    helper; results are appended to *messages* as JSON-encoded "tool"
    entries, after which a follow-up completion is requested.  A response
    with no tool calls is returned untouched.
    """
    pending = response.choices[0].message.tool_calls
    if not pending:
        # No tool work to do — return the model's reply as-is.
        return response
    # Record the assistant turn containing the tool-call requests.
    messages.append(response.choices[0].message)
    for call in pending:
        args = json.loads(call.function.arguments)
        # Dispatch via the project-level executor (defined elsewhere in the file).
        result = execute_function(call.function.name, args)
        messages.append({
            "role": "tool",
            "tool_call_id": call.id,
            "content": json.dumps(result),
        })
    # Ask the model for its final answer now that tool results are present.
    return client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=tools
    )
# Create a reusable assistant equipped with code-interpreter and file-search tools.
assistant = client.beta.assistants.create(
    name="Data Analyst",
    instructions="You are a data analyst. Analyze data and provide insights.",
    tools=[
        {"type": "code_interpreter"},
        {"type": "file_search"}
    ],
    model="gpt-4o"
)
# Create a thread (the conversation container).
thread = client.beta.threads.create()
# Add a user message to the thread.
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Analyze this data..."
)
# Run the assistant and block until the run reaches a terminal state.
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id
)
# Fetch the messages only if the run finished successfully.
if run.status == "completed":
    messages = client.beta.threads.messages.list(thread_id=thread.id)
# Embeddings: convert text into a float vector for similarity search / retrieval.
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="Your text to embed",
    encoding_format="float"
)
# The vector for the (single) input is on the first data element.
embedding = response.data[0].embedding
Embedding models: text-embedding-3-small for cost-effective solutions; text-embedding-3-large for maximum accuracy.

response = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://example.com/image.jpg",
"detail": "high"
}
}
]
}
]
)
from openai import RateLimitError, APIError
import time
def call_with_retry(func, max_retries=3, base_delay=1):
    """Invoke *func*, retrying transient OpenAI failures with backoff.

    Args:
        func: Zero-argument callable that performs the API request.
        max_retries: Maximum number of attempts before giving up.
        base_delay: Base delay in seconds; rate-limit retries back off
            exponentially (base_delay * 2 ** attempt).

    Returns:
        Whatever *func* returns on the first successful attempt.

    Raises:
        RateLimitError / APIError: re-raised when the final attempt fails,
            preserving the original error instead of masking it.
        RuntimeError: only reachable when max_retries < 1.
    """
    for attempt in range(max_retries):
        try:
            return func()
        except RateLimitError:
            # Fix: on the last attempt, re-raise immediately instead of
            # sleeping pointlessly and then raising a generic Exception
            # that hides the real cause.
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))
        except APIError:
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay)
    raise RuntimeError("Max retries exceeded")
Common error handling:
- RateLimitError: Implement exponential backoff
- APIError: Check API status, retry with backoff
- AuthenticationError: Verify API key
- InvalidRequestError: Validate input parameters and max_tokens limits

Weekly Installs
100
Repository
GitHub Stars
43
First Seen
Jan 25, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
gemini-cli: 84
opencode: 84
cursor: 81
codex: 80
github-copilot: 76
claude-code: 74
超能力技能使用指南:AI助手技能调用优先级与工作流程详解
52,100 周安装