cloudflare-d1 by jezweb/claude-skills
npx skills add https://github.com/jezweb/claude-skills --skill cloudflare-d1状态 : 生产就绪 ✅ 最后更新 : 2026-01-20 依赖项 : cloudflare-worker-base (用于 Worker 设置) 最新版本 : wrangler@4.59.2, @cloudflare/workers-types@4.20260109.0
近期更新 (2025) :
# 创建一个新的 D1 数据库
npx wrangler d1 create my-database
# 输出包含 database_id - 请保存此信息!
# ✅ 成功创建数据库 'my-database'
#
# [[d1_databases]]
# binding = "DB"
# database_name = "my-database"
# database_id = "<UUID>"
添加到你的 wrangler.jsonc 文件中:
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
{
"name": "my-worker",
"main": "src/index.ts",
"compatibility_date": "2025-10-11",
"d1_databases": [
{
"binding": "DB", // 在你的 Worker 中可通过 env.DB 访问
"database_name": "my-database", // 来自 wrangler d1 create 的名称
"database_id": "<UUID>", // 来自 wrangler d1 create 的 ID
"preview_database_id": "local-db" // 用于本地开发
}
]
}
关键点:
- binding 是你在代码中访问数据库的方式 (env.DB)
- database_id 是生产数据库的 UUID
- preview_database_id 用于本地开发(可以是任意字符串)
- 切勿将 database_id 值提交到公共仓库 - 使用环境变量或密钥
# 创建迁移文件
npx wrangler d1 migrations create my-database create_users_table
# 这将创建:migrations/0001_create_users_table.sql
编辑迁移文件:
-- migrations/0001_create_users_table.sql
DROP TABLE IF EXISTS users;
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER
);
-- 为常用查询创建索引
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- 优化数据库
PRAGMA optimize;
# 首先在本地应用(用于测试)
npx wrangler d1 migrations apply my-database --local
# 准备好后应用到生产环境
npx wrangler d1 migrations apply my-database --remote
// src/index.ts
import { Hono } from 'hono';
// Worker bindings declared in wrangler.jsonc; DB is the D1 database binding.
type Bindings = {
DB: D1Database;
};
const app = new Hono<{ Bindings: Bindings }>();
// GET /api/users/:email — look up a single user by email path parameter.
app.get('/api/users/:email', async (c) => {
const email = c.req.param('email');
try {
// Always use prepared statements with bind() for user input (prevents SQL injection)
const result = await c.env.DB.prepare(
'SELECT * FROM users WHERE email = ?'
)
.bind(email)
.first();
// .first() resolves to null when no row matches.
if (!result) {
return c.json({ error: '用户未找到' }, 404);
}
return c.json(result);
} catch (error: any) {
// Log the underlying D1 message, but return a generic 500 to the client.
console.error('D1 错误:', error.message);
return c.json({ error: '数据库错误' }, 500);
}
});
export default app;
# 1. 创建迁移
npx wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
# 2. 列出未应用的迁移
npx wrangler d1 migrations list <DATABASE_NAME> --local
npx wrangler d1 migrations list <DATABASE_NAME> --remote
# 3. 应用迁移
npx wrangler d1 migrations apply <DATABASE_NAME> --local # 本地测试
npx wrangler d1 migrations apply <DATABASE_NAME> --remote # 部署到生产环境
迁移会自动进行版本控制:
migrations/
├── 0000_initial_schema.sql
├── 0001_add_users_table.sql
├── 0002_add_posts_table.sql
└── 0003_add_indexes.sql
规则:
已应用的迁移会在 d1_migrations 表中跟踪)
{
"d1_databases": [
{
"binding": "DB",
"database_name": "my-database",
"database_id": "<UUID>",
"migrations_dir": "db/migrations", // 自定义目录(默认:migrations/)
"migrations_table": "schema_migrations" // 自定义跟踪表(默认:d1_migrations)
}
]
}
-- 使用 IF NOT EXISTS 使迁移具有幂等性
CREATE TABLE IF NOT EXISTS users (...);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- 模式更改后运行 PRAGMA optimize
PRAGMA optimize;
-- 在触发器中使用大写 BEGIN/END(小写在远程会失败)
CREATE TRIGGER update_timestamp
AFTER UPDATE ON users
FOR EACH ROW
BEGIN
UPDATE users SET updated_at = unixepoch() WHERE user_id = NEW.user_id;
END;
-- 对数据迁移使用事务
BEGIN TRANSACTION;
UPDATE users SET updated_at = unixepoch() WHERE updated_at IS NULL;
COMMIT;
-- 不要在迁移文件开头包含 BEGIN TRANSACTION(D1 会自动处理)
BEGIN TRANSACTION; -- ❌ 删除此行
-- 不要在触发器中使用小写 begin/end(本地有效,远程会失败)
CREATE TRIGGER my_trigger
AFTER INSERT ON table
begin -- ❌ 使用 BEGIN(大写)
UPDATE ...;
end; -- ❌ 使用 END(大写)
-- 不要使用 MySQL/PostgreSQL 语法
ALTER TABLE users MODIFY COLUMN email VARCHAR(255); -- ❌ 不是 SQLite 语法
-- 不要在没有 IF NOT EXISTS 的情况下创建表
CREATE TABLE users (...); -- ❌ 如果表已存在则会失败
-- 在模式更改期间临时禁用外键检查
PRAGMA defer_foreign_keys = true;
-- 进行可能违反外键的模式更改
ALTER TABLE posts DROP COLUMN author_id;
ALTER TABLE posts ADD COLUMN user_id INTEGER REFERENCES users(user_id);
-- 迁移结束时外键会自动重新启用
类型定义:
interface Env { DB: D1Database; }
type Bindings = { DB: D1Database; };
const app = new Hono<{ Bindings: Bindings }>();
prepare() - 主要方法(始终对用户输入使用):
const user = await env.DB.prepare('SELECT * FROM users WHERE email = ?')
.bind(email).first();
原因:防止 SQL 注入,可重用,性能更好,类型安全
查询结果方法:
- .all() → { results, meta } - 获取所有行
- .first() → 行对象或 null - 获取第一行
- .first('column') → 值 - 获取单个列值(例如 COUNT)
- .run() → { success, meta } - 执行 INSERT/UPDATE/DELETE(无结果)
batch() - 性能关键:
const results = await env.DB.batch([
env.DB.prepare('SELECT * FROM users WHERE user_id = ?').bind(1),
env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(1)
]);
exec() - 避免在生产环境中使用:
await env.DB.exec('SELECT * FROM users;'); // 仅用于迁移/维护
// 创建
const { meta } = await env.DB.prepare(
'INSERT INTO users (email, username, created_at) VALUES (?, ?, ?)'
).bind(email, username, Date.now()).run();
const newUserId = meta.last_row_id;
// 读取(单条)
const user = await env.DB.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();
// 读取(多条)
const { results } = await env.DB.prepare('SELECT * FROM users LIMIT ?')
.bind(10).all();
// 更新
const { meta } = await env.DB.prepare('UPDATE users SET username = ? WHERE user_id = ?')
.bind(newUsername, userId).run();
const rowsAffected = meta.rows_written;
// 删除
await env.DB.prepare('DELETE FROM users WHERE user_id = ?').bind(userId).run();
// 计数
const count = await env.DB.prepare('SELECT COUNT(*) as total FROM users').first('total');
// 存在性检查
const exists = await env.DB.prepare('SELECT 1 FROM users WHERE email = ? LIMIT 1')
.bind(email).first();
const page = parseInt(c.req.query('page') || '1');
const limit = 20;
const offset = (page - 1) * limit;
const [countResult, usersResult] = await c.env.DB.batch([
c.env.DB.prepare('SELECT COUNT(*) as total FROM users'),
c.env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?')
.bind(limit, offset)
]);
return c.json({
users: usersResult.results,
pagination: { page, limit, total: countResult.results[0].total }
});
// D1 不支持多语句事务,但 batch() 提供了顺序执行
await env.DB.batch([
env.DB.prepare('UPDATE users SET credits = credits - ? WHERE user_id = ?').bind(amount, fromUserId),
env.DB.prepare('UPDATE users SET credits = credits + ? WHERE user_id = ?').bind(amount, toUserId),
env.DB.prepare('INSERT INTO transactions (from_user, to_user, amount) VALUES (?, ?, ?)').bind(fromUserId, toUserId, amount)
]);
// 如果任何语句失败,批处理会停止(类似事务行为)
常见错误类型:
D1_ERROR - 一般 D1 错误(通常是暂时的)D1_EXEC_ERROR - SQL 语法错误或限制D1_TYPE_ERROR - 类型不匹配(使用 undefined 而不是 null)D1_COLUMN_NOTFOUND - 列不存在常见错误及修复方法:
| 错误 | 原因 | 解决方案 |
|---|---|---|
| 语句过长 | 包含 1000+ 行的大型 INSERT | 使用 batch() 分成 100-250 行的批次 |
| 网络连接丢失 | 暂时性故障或大型导入 | 实现重试逻辑(见下文)或分成更小的块 |
| 排队请求过多 | 循环中的单个查询 | 使用 batch() 代替循环 |
| D1_TYPE_ERROR | 在 bind 中使用 undefined | 对可选值使用 null:`.bind(email, bio ?? null)` |
| 事务冲突 | 迁移中的 BEGIN TRANSACTION | 移除 BEGIN/COMMIT(D1 会自动处理) |
| 外键违规 | 模式更改破坏约束 | 使用 PRAGMA defer_foreign_keys = true |
| D1_EXEC_ERROR: 输入不完整 | D1Database.exec() 中的多行 SQL | 使用预处理语句或外部 .sql 文件 (Issue #9133) |
关键点:D1 查询会因"网络连接丢失"、"存储操作超时"或"隔离区超出内存限制"等错误而暂时失败。Cloudflare 文档指出**"每隔几小时出现少量错误是正常的"**,并建议实现重试逻辑。(D1 常见问题解答)
常见暂时性错误:
D1_ERROR: 网络连接丢失D1 DB 存储操作超时导致对象被重置启动 D1 DB 存储时发生内部错误导致对象被重置D1 DB 的隔离区超出内存限制并被重置重试模式(推荐):
/**
 * Run a D1 query with retries on transient failures.
 *
 * @param fn         Zero-arg closure that performs the query.
 * @param maxRetries Total attempts before giving up (default 3).
 * @param baseDelay  Initial backoff delay in ms; doubles each retry.
 * @returns The resolved value of fn().
 * @throws The original error when it is non-transient or retries are exhausted.
 */
async function queryWithRetry<T>(
fn: () => Promise<T>,
maxRetries = 3,
baseDelay = 100
): Promise<T> {
for (let i = 0; i < maxRetries; i++) {
try {
return await fn();
} catch (error: any) {
// D1 raises transient errors with ENGLISH message text, so match the
// English substrings — translated strings would never match and the
// retry logic would silently do nothing.
const isTransient = error.message?.includes('Network connection lost') ||
error.message?.includes('exceeded timeout') ||
error.message?.includes('exceeded its memory limit');
if (!isTransient || i === maxRetries - 1) throw error;
// 指数退避 (exponential backoff): baseDelay, 2x, 4x, ...
await new Promise(r => setTimeout(r, baseDelay * Math.pow(2, i)));
}
}
throw new Error('超出最大重试次数');
}
// 用法
const user = await queryWithRetry(() =>
env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).first()
);
自动重试(2025年9月):D1 会自动重试只读查询(SELECT、EXPLAIN、WITH),在可重试错误上最多重试 2 次。检查响应中的 meta.total_attempts 获取重试次数。写入查询仍应实现自定义重试逻辑。
索引最佳实践:
CREATE INDEX idx_users_email ON users(email)CREATE INDEX idx_posts_user_id ON posts(user_id)CREATE INDEX idx_posts_created_at ON posts(created_at DESC)CREATE INDEX idx_posts_user_published ON posts(user_id, published)CREATE INDEX idx_users_active ON users(email) WHERE deleted = 0EXPLAIN QUERY PLAN SELECT ...PRAGMA optimize(2025年2月):
CREATE INDEX idx_users_email ON users(email);
PRAGMA optimize; -- 模式更改后运行
查询优化:
SELECT *)WHERE LOWER(email) → 改为存储小写形式本地与远程(2025年11月 - 远程绑定正式发布):
# 本地数据库(自动创建)
npx wrangler d1 migrations apply my-database --local
npx wrangler d1 execute my-database --local --command "SELECT * FROM users"
# 远程数据库
npx wrangler d1 execute my-database --remote --command "SELECT * FROM users"
# 远程绑定 (wrangler@4.37.0+) - 将本地 Worker 连接到已部署的 D1
# 添加到 wrangler.jsonc:{ "binding": "DB", "remote": true }
已知问题:当使用远程 D1 绑定 ({ "remote": true }) 时,连接在闲置 1 小时后会超时。(GitHub Issue #10801)
错误:D1_ERROR: 无法将正文解析为 JSON,得到:错误代码:1031
解决方法:
// 通过定期查询保持连接活跃(可选)
setInterval(async () => {
try {
await env.DB.prepare('SELECT 1').first();
} catch (e) {
console.log('连接保活失败:', e);
}
}, 30 * 60 * 1000); // 每30分钟一次
或者,如果查询在闲置 1 小时后失败,只需重启你的开发服务器。
当在单个 wrangler dev 进程中运行多个带有服务绑定的 Worker 时,辅助 Worker 无法访问其 D1 绑定,因为两个 Worker 共享相同的持久化路径。(GitHub Issue #11121)
解决方案:使用 --persist-to 标志将所有 Worker 指向相同的持久化存储:
# 将 worker2 的迁移应用到 worker1 的持久化路径
cd worker2
npx wrangler d1 migrations apply DB --local --persist-to=../worker1/.wrangler/state
# 现在两个 Worker 都可以访问 D1
cd ../worker1
npx wrangler dev # 两个 Worker 共享相同的 D1 数据
本地数据库位置:.wrangler/state/v3/d1/miniflare-D1DatabaseObject/<database_id>.sqlite
种子本地数据库:
npx wrangler d1 execute my-database --local --file=seed.sql
D1 有严格的每个数据库 10 GB 限制,但 Cloudflare 支持每个 Worker 最多 50,000 个数据库。使用分片来扩展超过 10 GB。(DEV.to 文章)
基于哈希的分片示例(10 个数据库 = 100 GB 容量):
// 哈希用户 ID 到分片编号
/**
 * Deterministically map a user ID onto one of 10 shard numbers.
 * Uses the classic `h*31 + c` string hash with 32-bit wrap (via `<< 5`),
 * so the result is stable for arbitrarily long IDs.
 */
function getShardId(userId: string): number {
let hash = 0;
for (const ch of userId) {
// (hash << 5) - hash === hash * 31, with int32 wrapping on the shift.
hash = ((hash << 5) - hash) + ch.charCodeAt(0);
}
return Math.abs(hash) % 10; // 10 个分片
}
// wrangler.jsonc - 定义 10 个数据库分片
{
"d1_databases": [
{ "binding": "DB_SHARD_0", "database_id": "..." },
{ "binding": "DB_SHARD_1", "database_id": "..." },
{ "binding": "DB_SHARD_2", "database_id": "..." },
// ... 最多到 DB_SHARD_9
]
}
// 获取用户的正确分片
/**
 * Resolve the D1 binding for the shard that owns this user.
 *
 * Env only declares fixed keys (DB_SHARD_0..DB_SHARD_9), so a dynamic
 * template-string index is an implicit-any error under strict TS; go through
 * a string-keyed view and fail fast if the binding is absent, instead of
 * returning undefined and crashing later on .prepare().
 */
function getUserDb(env: Env, userId: string): D1Database {
const shardId = getShardId(userId);
const db = (env as unknown as Record<string, D1Database | undefined>)[`DB_SHARD_${shardId}`];
if (!db) {
// Misconfigured wrangler.jsonc — surface the exact missing binding name.
throw new Error(`Missing D1 binding DB_SHARD_${shardId}`);
}
return db;
}
// 从正确的分片查询用户数据
const db = getUserDb(env, userId);
const user = await db.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();
替代方案:基于租户的分片(每个客户/租户一个数据库)
D1 有 2 MB 的行大小限制。对于大型内容(HTML、JSON、图像),使用 R2 进行存储,D1 用于元数据。(DEV.to 文章)
错误:数据库行大小超过允许的最大大小
解决方案 - 混合存储模式:
// 1. 将大型内容存储在 R2 中
const contentKey = `pages/${crypto.randomUUID()}.html`;
await env.R2_BUCKET.put(contentKey, largeHtmlContent);
// 2. 将元数据存储在 D1 中
await env.DB.prepare(`
INSERT INTO pages (url, r2_key, size, created_at)
VALUES (?, ?, ?, ?)
`).bind(url, contentKey, largeHtmlContent.length, Date.now()).run();
// 3. 检索内容
const page = await env.DB.prepare('SELECT * FROM pages WHERE url = ?')
.bind(url).first();
if (page) {
const content = await env.R2_BUCKET.get(page.r2_key);
const html = await content.text();
}
如果你计划以后从 D1 (SQLite) 迁移到 Hyperdrive (PostgreSQL),请使用一致的小写命名。PostgreSQL 对表和列名区分大小写,而 SQLite 不区分。(Mats 的博客)
-- 为可移植性使用小写
CREATE TABLE users (user_id INTEGER, email TEXT);
CREATE INDEX idx_users_email ON users(email);
-- 不要使用:CREATE TABLE Users (UserId INTEGER, Email TEXT);
大小写敏感性:创建虚拟表时始终使用小写 "fts5"。大写可能导致"未授权"错误。(Cloudflare 社区)
-- 正确
CREATE VIRTUAL TABLE search_index USING fts5(
title,
content,
tokenize = 'porter unicode61'
);
-- 查询索引
SELECT * FROM search_index WHERE search_index MATCH '查询词';
导出限制:带有 FTS5 虚拟表的数据库无法使用 wrangler d1 export 导出。在导出前删除虚拟表,导入后重新创建。(GitHub Issue #9519)
大型导入的网络超时:包含 5000+ 条 INSERT 语句的文件可能因"网络连接丢失"错误而失败。(GitHub Issue #11958)
解决方案:
使用 Worker 的 batch() API 而不是 wrangler CLI。
Windows 特定问题:在 Windows 11 上,从 D1 导出的大型 SQL 文件可能无法重新导入,出现"HashIndex 检测到哈希表不一致"错误。(GitHub Issue #11708)
解决方法:在执行前删除 .wrangler 目录:
rm -rf .wrangler
npx wrangler d1 execute db-name --file=database.sql
.bind() 处理用户输入.batch() 处理多个查询(减少延迟)PRAGMA optimize 在模式更改后IF NOT EXISTS 在迁移中实现幂等性null 而不是 undefined 处理可选值meta.rows_written 在 UPDATE/DELETE 后.exec() 处理用户输入(SQL 注入风险)database_id 到公共仓库undefined 在绑定参数中(导致 D1_TYPE_ERROR)LIMIT 在可能的大型结果集上SELECT *(指定列)BEGIN TRANSACTION此技能预防了 14 个已记录的 D1 错误:
| 问题编号 | 错误/问题 | 描述 | 如何避免 | 来源 |
|---|---|---|---|---|
| #1 | 语句过长 | 大型 INSERT 语句超出 D1 限制 | 使用 batch() 分成 100-250 行的批次 | 现有 |
| #2 | 事务冲突 | 迁移文件中的 BEGIN TRANSACTION | 移除 BEGIN/COMMIT(D1 会自动处理) | 现有 |
| #3 | 外键违规 | 模式更改破坏外键约束 | 在迁移中使用 PRAGMA defer_foreign_keys = true | 现有 |
| #4 | 速率限制 / 队列过载 | 太多单个查询 | 使用 batch() 代替循环 | 现有 |
| #5 | 内存限制超出 | 查询加载太多数据到内存 | 添加 LIMIT,分页结果,分片查询 | 现有 |
| #6 | 类型不匹配错误 | 在 bind() 中使用 undefined 而不是 null | 始终对可选值使用 null | 现有 |
| #7 | 触发器中的小写 BEGIN | 带有小写 begin/end 的触发器在远程失败 | 使用大写 BEGIN/END 关键字 (Issue #10998) | TIER 1 |
| #8 | 远程绑定超时 | 连接在闲置 1 小时后超时 | 重启开发服务器或实现保活模式 (Issue #10801) | TIER 1 |
| #9 | 服务绑定 D1 访问 | 辅助 Worker 在多 Worker 开发中无法访问 D1 | 使用 --persist-to 标志共享持久化路径 (Issue #11121) | TIER 1 |
| #10 | 暂时性网络错误 | 随机的"网络连接丢失"故障 | 实现指数退避重试逻辑 (D1 FAQ) | TIER 1 |
| #11 | FTS5 破坏导出 | 带有 FTS5 虚拟表的数据库无法导出 | 导出前删除虚拟表,导入后重新创建 (Issue #9519) | TIER 1 |
| #12 | exec() 中的多行 SQL | D1Database.exec() 在多行 SQL 上失败 | 使用预处理语句或外部 .sql 文件 (Issue #9133) | TIER 1 |
| #13 | 10 GB 数据库限制 | 单个数据库限制为 10 GB | 实现跨多个数据库的分片 (社区) | TIER 2 |
| #14 | 2 MB 行大小限制 | 超过 2 MB 的行失败 | 使用混合 D1 + R2 存储模式 (社区) | TIER 2 |
# 数据库管理
wrangler d1 create <DATABASE_NAME>
wrangler d1 list
wrangler d1 delete <DATABASE_NAME>
wrangler d1 info <DATABASE_NAME>
# 迁移
wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
wrangler d1 migrations list <DATABASE_NAME> --local|--remote
wrangler d1 migrations apply <DATABASE_NAME> --local|--remote
# 执行查询
wrangler d1 execute <DATABASE_NAME> --local|--remote --command "SELECT * FROM users"
wrangler d1 execute <DATABASE_NAME> --local|--remote --file=./query.sql
# 时间旅行(查看历史数据)
wrangler d1 time-travel info <DATABASE_NAME> --timestamp "2025-10-20"
wrangler d1 time-travel restore <DATABASE_NAME> --timestamp "2025-10-20"
准备好使用 D1 构建了! 🚀
最后验证 : 2026-01-20 | 技能版本 : 3.0.0 | 变更 : 从社区研究中添加了 8 个新的已知问题(TIER 1-2 发现):触发器大小写敏感性、远程绑定超时、多 Worker 开发模式、暂时性错误处理、FTS5 限制、分片模式、混合 D1+R2 存储以及数据库可移植性考虑。
每周安装次数
408
仓库
GitHub 星标数
643
首次出现
2026年1月20日
安全审计
安装于
claude-code323
opencode280
gemini-cli277
codex243
antigravity231
cursor231
Status : Production Ready ✅ Last Updated : 2026-01-20 Dependencies : cloudflare-worker-base (for Worker setup) Latest Versions : wrangler@4.59.2, @cloudflare/workers-types@4.20260109.0
Recent Updates (2025) :
# Create a new D1 database
npx wrangler d1 create my-database
# Output includes database_id - save this!
# ✅ Successfully created DB 'my-database'
#
# [[d1_databases]]
# binding = "DB"
# database_name = "my-database"
# database_id = "<UUID>"
Add to your wrangler.jsonc:
{
"name": "my-worker",
"main": "src/index.ts",
"compatibility_date": "2025-10-11",
"d1_databases": [
{
"binding": "DB", // Available as env.DB in your Worker
"database_name": "my-database", // Name from wrangler d1 create
"database_id": "<UUID>", // ID from wrangler d1 create
"preview_database_id": "local-db" // For local development
}
]
}
CRITICAL:
- binding is how you access the database in code (env.DB)
- database_id is the production database UUID
- preview_database_id is for local dev (can be any string)
- NEVER commit database_id values to public repos - use environment variables or secrets
# Create migration file
npx wrangler d1 migrations create my-database create_users_table
# This creates: migrations/0001_create_users_table.sql
Edit the migration file:
-- migrations/0001_create_users_table.sql
DROP TABLE IF EXISTS users;
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER
);
-- Create index for common queries
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- Optimize database
PRAGMA optimize;
# Apply locally first (for testing)
npx wrangler d1 migrations apply my-database --local
# Apply to production when ready
npx wrangler d1 migrations apply my-database --remote
// src/index.ts
import { Hono } from 'hono';
// Worker bindings declared in wrangler.jsonc; DB is the D1 database binding.
type Bindings = {
DB: D1Database;
};
const app = new Hono<{ Bindings: Bindings }>();
// GET /api/users/:email — look up a single user by email path parameter.
app.get('/api/users/:email', async (c) => {
const email = c.req.param('email');
try {
// ALWAYS use prepared statements with bind() for user input (prevents SQL injection)
const result = await c.env.DB.prepare(
'SELECT * FROM users WHERE email = ?'
)
.bind(email)
.first();
// .first() resolves to null when no row matches.
if (!result) {
return c.json({ error: 'User not found' }, 404);
}
return c.json(result);
} catch (error: any) {
// Log the underlying D1 message, but return a generic 500 to the client.
console.error('D1 Error:', error.message);
return c.json({ error: 'Database error' }, 500);
}
});
export default app;
# 1. Create migration
npx wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
# 2. List unapplied migrations
npx wrangler d1 migrations list <DATABASE_NAME> --local
npx wrangler d1 migrations list <DATABASE_NAME> --remote
# 3. Apply migrations
npx wrangler d1 migrations apply <DATABASE_NAME> --local # Test locally
npx wrangler d1 migrations apply <DATABASE_NAME> --remote # Deploy to production
Migrations are automatically versioned:
migrations/
├── 0000_initial_schema.sql
├── 0001_add_users_table.sql
├── 0002_add_posts_table.sql
└── 0003_add_indexes.sql
Rules:
applied migrations are tracked in the d1_migrations table)
{
"d1_databases": [
{
"binding": "DB",
"database_name": "my-database",
"database_id": "<UUID>",
"migrations_dir": "db/migrations", // Custom directory (default: migrations/)
"migrations_table": "schema_migrations" // Custom tracking table (default: d1_migrations)
}
]
}
-- Use IF NOT EXISTS to make migrations idempotent
CREATE TABLE IF NOT EXISTS users (...);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- Run PRAGMA optimize after schema changes
PRAGMA optimize;
-- Use UPPERCASE BEGIN/END in triggers (lowercase fails remotely)
CREATE TRIGGER update_timestamp
AFTER UPDATE ON users
FOR EACH ROW
BEGIN
UPDATE users SET updated_at = unixepoch() WHERE user_id = NEW.user_id;
END;
-- Use transactions for data migrations
BEGIN TRANSACTION;
UPDATE users SET updated_at = unixepoch() WHERE updated_at IS NULL;
COMMIT;
-- DON'T include BEGIN TRANSACTION at start of migration file (D1 handles this)
BEGIN TRANSACTION; -- ❌ Remove this
-- DON'T use lowercase begin/end in triggers (works locally, FAILS remotely)
CREATE TRIGGER my_trigger
AFTER INSERT ON table
begin -- ❌ Use BEGIN (uppercase)
UPDATE ...;
end; -- ❌ Use END (uppercase)
-- DON'T use MySQL/PostgreSQL syntax
ALTER TABLE users MODIFY COLUMN email VARCHAR(255); -- ❌ Not SQLite
-- DON'T create tables without IF NOT EXISTS
CREATE TABLE users (...); -- ❌ Fails if table exists
-- Temporarily disable foreign key checks during schema changes
PRAGMA defer_foreign_keys = true;
-- Make schema changes that would violate foreign keys
ALTER TABLE posts DROP COLUMN author_id;
ALTER TABLE posts ADD COLUMN user_id INTEGER REFERENCES users(user_id);
-- Foreign keys re-enabled automatically at end of migration
Type Definitions:
interface Env { DB: D1Database; }
type Bindings = { DB: D1Database; };
const app = new Hono<{ Bindings: Bindings }>();
prepare() - PRIMARY METHOD (always use for user input):
const user = await env.DB.prepare('SELECT * FROM users WHERE email = ?')
.bind(email).first();
Why: Prevents SQL injection, reusable, better performance, type-safe
Query Result Methods:
- .all() → { results, meta } - Get all rows
- .first() → row object or null - Get first row
- .first('column') → value - Get single column value (e.g., COUNT)
- .run() → { success, meta } - Execute INSERT/UPDATE/DELETE (no results)
batch() - CRITICAL FOR PERFORMANCE:
const results = await env.DB.batch([
env.DB.prepare('SELECT * FROM users WHERE user_id = ?').bind(1),
env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(1)
]);
exec() - AVOID IN PRODUCTION:
await env.DB.exec('SELECT * FROM users;'); // Only for migrations/maintenance
// CREATE
const { meta } = await env.DB.prepare(
'INSERT INTO users (email, username, created_at) VALUES (?, ?, ?)'
).bind(email, username, Date.now()).run();
const newUserId = meta.last_row_id;
// READ (single)
const user = await env.DB.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();
// READ (multiple)
const { results } = await env.DB.prepare('SELECT * FROM users LIMIT ?')
.bind(10).all();
// UPDATE
const { meta } = await env.DB.prepare('UPDATE users SET username = ? WHERE user_id = ?')
.bind(newUsername, userId).run();
const rowsAffected = meta.rows_written;
// DELETE
await env.DB.prepare('DELETE FROM users WHERE user_id = ?').bind(userId).run();
// COUNT
const count = await env.DB.prepare('SELECT COUNT(*) as total FROM users').first('total');
// EXISTS check
const exists = await env.DB.prepare('SELECT 1 FROM users WHERE email = ? LIMIT 1')
.bind(email).first();
const page = parseInt(c.req.query('page') || '1');
const limit = 20;
const offset = (page - 1) * limit;
const [countResult, usersResult] = await c.env.DB.batch([
c.env.DB.prepare('SELECT COUNT(*) as total FROM users'),
c.env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?')
.bind(limit, offset)
]);
return c.json({
users: usersResult.results,
pagination: { page, limit, total: countResult.results[0].total }
});
// D1 doesn't support multi-statement transactions, but batch() provides sequential execution
await env.DB.batch([
env.DB.prepare('UPDATE users SET credits = credits - ? WHERE user_id = ?').bind(amount, fromUserId),
env.DB.prepare('UPDATE users SET credits = credits + ? WHERE user_id = ?').bind(amount, toUserId),
env.DB.prepare('INSERT INTO transactions (from_user, to_user, amount) VALUES (?, ?, ?)').bind(fromUserId, toUserId, amount)
]);
// If any statement fails, batch stops (transaction-like behavior)
Common Error Types:
D1_ERROR - General D1 error (often transient)D1_EXEC_ERROR - SQL syntax error or limitationsD1_TYPE_ERROR - Type mismatch (undefined instead of null)D1_COLUMN_NOTFOUND - Column doesn't existCommon Errors and Fixes:
| Error | Cause | Solution |
|---|---|---|
| Statement too long | Large INSERT with 1000+ rows | Break into batches of 100-250 using batch() |
| Network connection lost | Transient failure or large import | Implement retry logic (see below) or break into smaller chunks |
| Too many requests queued | Individual queries in loop | Use batch() instead of loop |
| D1_TYPE_ERROR | Using undefined in bind | Use null for optional values: `.bind(email, bio ?? null)` |
CRITICAL : D1 queries fail transiently with errors like "Network connection lost", "storage operation exceeded timeout", or "isolate exceeded its memory limit". Cloudflare documentation states "a handful of errors every several hours is not unexpected" and recommends implementing retry logic. (D1 FAQ)
Common Transient Errors:
D1_ERROR: Network connection lostD1 DB storage operation exceeded timeout which caused object to be resetInternal error while starting up D1 DB storage caused object to be resetD1 DB's isolate exceeded its memory limit and was resetRetry Pattern (Recommended):
/**
 * Run a D1 query with retries on transient failures.
 *
 * @param fn         Zero-arg closure that performs the query.
 * @param maxRetries Total attempts before giving up (default 3).
 * @param baseDelay  Initial backoff delay in ms; doubles each retry.
 * @returns The resolved value of fn().
 * @throws The original error when it is non-transient or retries are exhausted.
 */
async function queryWithRetry<T>(
fn: () => Promise<T>,
maxRetries = 3,
baseDelay = 100
): Promise<T> {
// Message fragments D1 uses for errors that are safe to retry.
const transientMarkers = [
'Network connection lost',
'exceeded timeout',
'exceeded its memory limit',
];
let attempt = 0;
while (attempt < maxRetries) {
try {
return await fn();
} catch (error: any) {
const message: string = error?.message ?? '';
const retryable = transientMarkers.some(marker => message.includes(marker));
const lastAttempt = attempt === maxRetries - 1;
if (!retryable || lastAttempt) throw error;
// Exponential backoff: baseDelay, 2x, 4x, ...
const delay = baseDelay * 2 ** attempt;
await new Promise(resolve => setTimeout(resolve, delay));
}
attempt++;
}
throw new Error('Max retries exceeded');
}
// Usage
const user = await queryWithRetry(() =>
env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).first()
);
Automatic Retries (Sept 2025): D1 automatically retries read-only queries (SELECT, EXPLAIN, WITH) up to 2 times on retryable errors. Check meta.total_attempts in response for retry count. Write queries should still implement custom retry logic.
Index Best Practices:
CREATE INDEX idx_users_email ON users(email)CREATE INDEX idx_posts_user_id ON posts(user_id)CREATE INDEX idx_posts_created_at ON posts(created_at DESC)CREATE INDEX idx_posts_user_published ON posts(user_id, published)CREATE INDEX idx_users_active ON users(email) WHERE deleted = 0EXPLAIN QUERY PLAN SELECT ...PRAGMA optimize (Feb 2025):
CREATE INDEX idx_users_email ON users(email);
PRAGMA optimize; -- Run after schema changes
Query Optimization:
SELECT *)WHERE LOWER(email) → store lowercase insteadLocal vs Remote (Nov 2025 - Remote Bindings GA):
# Local database (automatic creation)
npx wrangler d1 migrations apply my-database --local
npx wrangler d1 execute my-database --local --command "SELECT * FROM users"
# Remote database
npx wrangler d1 execute my-database --remote --command "SELECT * FROM users"
# Remote bindings (wrangler@4.37.0+) - connect local Worker to deployed D1
# Add to wrangler.jsonc: { "binding": "DB", "remote": true }
Known Issue : When using remote D1 bindings ({ "remote": true }), the connection times out after exactly 1 hour of inactivity. (GitHub Issue #10801)
Error : D1_ERROR: Failed to parse body as JSON, got: error code: 1031
Workaround :
// Keep connection alive with periodic query (optional)
setInterval(async () => {
try {
await env.DB.prepare('SELECT 1').first();
} catch (e) {
console.log('Connection keepalive failed:', e);
}
}, 30 * 60 * 1000); // Every 30 minutes
Or simply restart your dev server if queries fail after 1 hour of inactivity.
When running multiple Workers with service bindings in a single wrangler dev process, the auxiliary worker cannot access its D1 binding because both workers share the same persistence path. (GitHub Issue #11121)
Solution : Use --persist-to flag to point all workers to the same persistence store:
# Apply worker2 migrations to worker1's persistence path
cd worker2
npx wrangler d1 migrations apply DB --local --persist-to=../worker1/.wrangler/state
# Now both workers can access D1
cd ../worker1
npx wrangler dev # Both workers share the same D1 data
Local Database Location: .wrangler/state/v3/d1/miniflare-D1DatabaseObject/<database_id>.sqlite
Seed Local Database:
npx wrangler d1 execute my-database --local --file=seed.sql
D1 has a hard 10 GB per database limit, but Cloudflare supports up to 50,000 databases per Worker. Use sharding to scale beyond 10 GB. (DEV.to Article)
Hash-based sharding example (10 databases = 100 GB capacity):
// Hash user ID to shard number
/**
 * Deterministically map a user ID onto one of 10 shard numbers.
 * Uses the classic `h*31 + c` string hash with 32-bit wrap (via `<< 5`),
 * so the result is stable for arbitrarily long IDs.
 */
function getShardId(userId: string): number {
let hash = 0;
for (const ch of userId) {
// (hash << 5) - hash === hash * 31, with int32 wrapping on the shift.
hash = ((hash << 5) - hash) + ch.charCodeAt(0);
}
return Math.abs(hash) % 10; // 10 shards
}
// wrangler.jsonc - Define 10 database shards
{
"d1_databases": [
{ "binding": "DB_SHARD_0", "database_id": "..." },
{ "binding": "DB_SHARD_1", "database_id": "..." },
{ "binding": "DB_SHARD_2", "database_id": "..." },
// ... up to DB_SHARD_9
]
}
// Get correct shard for user
/**
 * Resolve the D1 binding for the shard that owns this user.
 *
 * Env only declares fixed keys (DB_SHARD_0..DB_SHARD_9), so a dynamic
 * template-string index is an implicit-any error under strict TS; go through
 * a string-keyed view and fail fast if the binding is absent, instead of
 * returning undefined and crashing later on .prepare().
 */
function getUserDb(env: Env, userId: string): D1Database {
const shardId = getShardId(userId);
const db = (env as unknown as Record<string, D1Database | undefined>)[`DB_SHARD_${shardId}`];
if (!db) {
// Misconfigured wrangler.jsonc — surface the exact missing binding name.
throw new Error(`Missing D1 binding DB_SHARD_${shardId}`);
}
return db;
}
// Query user's data from correct shard
const db = getUserDb(env, userId);
const user = await db.prepare('SELECT * FROM users WHERE user_id = ?')
.bind(userId).first();
Alternative : Tenant-based sharding (one database per customer/tenant)
D1 has a 2 MB row size limit. For large content (HTML, JSON, images), use R2 for storage and D1 for metadata. (DEV.to Article)
Error : database row size exceeded maximum allowed size
Solution - Hybrid storage pattern:
// 1. Store large content in R2
const contentKey = `pages/${crypto.randomUUID()}.html`;
await env.R2_BUCKET.put(contentKey, largeHtmlContent);
// 2. Store metadata in D1
await env.DB.prepare(`
INSERT INTO pages (url, r2_key, size, created_at)
VALUES (?, ?, ?, ?)
`).bind(url, contentKey, largeHtmlContent.length, Date.now()).run();
// 3. Retrieve content
const page = await env.DB.prepare('SELECT * FROM pages WHERE url = ?')
.bind(url).first();
if (page) {
const content = await env.R2_BUCKET.get(page.r2_key);
const html = await content.text();
}
If you plan to migrate from D1 (SQLite) to Hyperdrive (PostgreSQL) later, use consistent lowercase naming. PostgreSQL is case-sensitive for table and column names, while SQLite is not. (Mats' Blog)
-- Use lowercase for portability
CREATE TABLE users (user_id INTEGER, email TEXT);
CREATE INDEX idx_users_email ON users(email);
-- NOT: CREATE TABLE Users (UserId INTEGER, Email TEXT);
Case Sensitivity : Always use lowercase "fts5" when creating virtual tables. Uppercase may cause "not authorized" errors. (Cloudflare Community)
-- Correct
CREATE VIRTUAL TABLE search_index USING fts5(
title,
content,
tokenize = 'porter unicode61'
);
-- Query the index
SELECT * FROM search_index WHERE search_index MATCH 'query terms';
Export Limitation : Databases with FTS5 virtual tables cannot be exported using wrangler d1 export. Drop virtual tables before export, then recreate after import. (GitHub Issue #9519)
Network Timeout on Large Imports : Files with 5000+ INSERT statements may fail with "Network connection lost" error. (GitHub Issue #11958)
Solutions :
Use the batch() API from a Worker instead of the wrangler CLI.
Windows-Specific Issue : On Windows 11, large SQL files exported from D1 may fail to re-import with "HashIndex detected hash table inconsistency" errors. (GitHub Issue #11708)
Workaround : Delete .wrangler directory before executing:
rm -rf .wrangler
npx wrangler d1 execute db-name --file=database.sql
.bind() for user input.batch() for multiple queries (reduces latency)PRAGMA optimize after schema changesIF NOT EXISTS in migrations for idempotencynull instead of undefined for optional valuesmeta.rows_written after UPDATE/DELETE.exec() with user input (SQL injection risk)database_id in public reposundefined in bind parameters (causes D1_TYPE_ERROR)LIMIT on potentially large result setsSELECT * in production (specify columns)BEGIN TRANSACTION in migration filesThis skill prevents 14 documented D1 errors:
| Issue # | Error/Issue | Description | How to Avoid | Source |
|---|---|---|---|---|
| #1 | Statement too long | Large INSERT statements exceed D1 limits | Break into batches of 100-250 rows using batch() | Existing |
| #2 | Transaction conflicts | BEGIN TRANSACTION in migration files | Remove BEGIN/COMMIT (D1 handles automatically) | Existing |
| #3 | Foreign key violations | Schema changes break foreign key constraints | Use PRAGMA defer_foreign_keys = true in migrations |
# Database management
wrangler d1 create <DATABASE_NAME>
wrangler d1 list
wrangler d1 delete <DATABASE_NAME>
wrangler d1 info <DATABASE_NAME>
# Migrations
wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME>
wrangler d1 migrations list <DATABASE_NAME> --local|--remote
wrangler d1 migrations apply <DATABASE_NAME> --local|--remote
# Execute queries
wrangler d1 execute <DATABASE_NAME> --local|--remote --command "SELECT * FROM users"
wrangler d1 execute <DATABASE_NAME> --local|--remote --file=./query.sql
# Time Travel (view historical data)
wrangler d1 time-travel info <DATABASE_NAME> --timestamp "2025-10-20"
wrangler d1 time-travel restore <DATABASE_NAME> --timestamp "2025-10-20"
Ready to build with D1! 🚀
Last verified : 2026-01-20 | Skill version : 3.0.0 | Changes : Added 8 new known issues from community research (TIER 1-2 findings): trigger case sensitivity, remote binding timeouts, multi-worker dev patterns, transient error handling, FTS5 limitations, sharding patterns, hybrid D1+R2 storage, and database portability considerations.
Weekly Installs
408
Repository
GitHub Stars
643
First Seen
Jan 20, 2026
Security Audits
Gen Agent Trust HubPassSocketPassSnykWarn
Installed on
claude-code323
opencode280
gemini-cli277
codex243
antigravity231
cursor231
| BEGIN TRANSACTION in migration |
| Remove BEGIN/COMMIT (D1 handles automatically) |
| Foreign key violations | Schema changes break constraints | Use PRAGMA defer_foreign_keys = true |
| D1_EXEC_ERROR: incomplete input | Multi-line SQL in D1Database.exec() | Use prepared statements or external .sql files (Issue #9133) |
| Existing |
| #4 | Rate limiting / queue overload | Too many individual queries | Use batch() instead of loops | Existing |
| #5 | Memory limit exceeded | Query loads too much data into memory | Add LIMIT, paginate results, shard queries | Existing |
| #6 | Type mismatch errors | Using undefined instead of null in bind() | Always use null for optional values | Existing |
| #7 | Lowercase BEGIN in triggers | Triggers with lowercase begin/end fail remotely | Use uppercase BEGIN/END keywords (Issue #10998) | TIER 1 |
| #8 | Remote bindings timeout | Connection times out after 1 hour of inactivity | Restart dev server or implement keepalive pattern (Issue #10801) | TIER 1 |
| #9 | Service bindings D1 access | Auxiliary worker can't access D1 in multi-worker dev | Use --persist-to flag to share persistence path (Issue #11121) | TIER 1 |
| #10 | Transient network errors | Random "Network connection lost" failures | Implement exponential backoff retry logic (D1 FAQ) | TIER 1 |
| #11 | FTS5 breaks export | Databases with FTS5 virtual tables can't export | Drop virtual tables before export, recreate after import (Issue #9519) | TIER 1 |
| #12 | Multi-line SQL in exec() | D1Database.exec() fails on multi-line SQL | Use prepared statements or external .sql files (Issue #9133) | TIER 1 |
| #13 | 10 GB database limit | Single database limited to 10 GB | Implement sharding across multiple databases (Community) | TIER 2 |
| #14 | 2 MB row size limit | Rows exceeding 2 MB fail | Use hybrid D1 + R2 storage pattern (Community) | TIER 2 |