aws-aurora by alinaqi/claude-bootstrap
npx skills add https://github.com/alinaqi/claude-bootstrap --skill aws-aurora
加载方式:base.md + [typescript.md | python.md]
Amazon Aurora 是一款兼容 MySQL/PostgreSQL 的关系型数据库,具备无服务器扩展、高可用性和企业级功能。
资料来源: Aurora 文档 | Serverless v2 | RDS Proxy
对于无服务器架构使用 RDS Proxy,追求简单性使用 Data API,始终使用连接池。
Aurora 擅长处理符合 ACID 原则的工作负载。对于无服务器架构(Lambda),务必使用 RDS Proxy 或 Data API 来处理连接管理。切勿从 Lambda 函数建立原始连接。
| 选项 | 最佳适用场景 |
|---|---|
| Aurora Serverless v2 | 可变工作负载,自动扩展(0.5-128 ACU) |
| Aurora 预置型 | 可预测的工作负载,最大性能 |
| Aurora Global | 多区域,灾难恢复 |
| Data API | 无需 VPC 的无服务器架构,简单的 HTTP 访问 |
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
| RDS Proxy | 用于 Lambda 的连接池,高并发 |
Lambda → RDS Proxy → Aurora
(池)
Lambda → Data API (HTTP) → Aurora
应用服务器 → Aurora
(持久连接)
// CDK 示例
import * as rds from 'aws-cdk-lib/aws-rds';
const proxy = new rds.DatabaseProxy(this, 'Proxy', {
proxyTarget: rds.ProxyTarget.fromCluster(cluster),
secrets: [cluster.secret!],
vpc,
securityGroups: [proxySecurityGroup],
requireTLS: true,
idleClientTimeout: cdk.Duration.minutes(30),
maxConnectionsPercent: 90,
maxIdleConnectionsPercent: 10,
borrowTimeout: cdk.Duration.seconds(30)
});
// lib/db.ts
import { Pool } from 'pg';
import { Signer } from '@aws-sdk/rds-signer';
const signer = new Signer({
hostname: process.env.RDS_PROXY_ENDPOINT!,
port: 5432,
username: process.env.DB_USER!,
region: process.env.AWS_REGION!
});
// IAM 身份验证
/**
 * Build a pg Pool that authenticates to the RDS Proxy with a fresh IAM token.
 * The token is used as the connection password and TLS is enforced.
 */
async function getPool(): Promise<Pool> {
  // IAM auth tokens are short-lived; fetch a fresh one per pool creation.
  const authToken = await signer.getAuthToken();
  const config = {
    host: process.env.RDS_PROXY_ENDPOINT,
    port: 5432,
    database: process.env.DB_NAME,
    user: process.env.DB_USER,
    password: authToken,
    ssl: { rejectUnauthorized: true },
    max: 1, // one connection per Lambda container; the proxy does the real pooling
    idleTimeoutMillis: 120000,
    connectionTimeoutMillis: 10000
  };
  return new Pool(config);
}
// Lambda 中的使用
// Cached across warm invocations of the same Lambda container.
let pool: Pool | null = null;

/** Look up a single user row by the id carried on the event. */
export async function handler(event: any) {
  pool ??= await getPool();
  const { rows } = await pool.query('SELECT * FROM users WHERE id = $1', [event.userId]);
  return rows[0];
}
# Lambda 工作负载的关键设置
MaxConnectionsPercent: 90 # 使用大部分数据库连接
MaxIdleConnectionsPercent: 10 # 保留部分空闲连接以应对突发流量
ConnectionBorrowTimeout: 30s # 等待可用连接的时间
IdleClientTimeout: 30min # 关闭空闲的代理连接
# 监控这些 CloudWatch 指标:
# - DatabaseConnectionsCurrentlyBorrowed
# - DatabaseConnectionsCurrentlySessionPinned
# - QueryDatabaseResponseLatency
# 必须是 Aurora Serverless
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--enable-http-endpoint
npm install data-api-client
// lib/db.ts
import DataAPIClient from 'data-api-client';
const db = DataAPIClient({
secretArn: process.env.DB_SECRET_ARN!,
resourceArn: process.env.DB_CLUSTER_ARN!,
database: process.env.DB_NAME!,
region: process.env.AWS_REGION!
});
// 简单查询
const users = await db.query('SELECT * FROM users WHERE active = :active', {
active: true
});
// 插入并返回结果
const result = await db.query(
'INSERT INTO users (email, name) VALUES (:email, :name) RETURNING *',
{ email: 'user@test.com', name: 'Test User' }
);
// 事务
const transaction = await db.transaction();
try {
await transaction.query('UPDATE accounts SET balance = balance - :amount WHERE id = :from', {
amount: 100, from: 1
});
await transaction.query('UPDATE accounts SET balance = balance + :amount WHERE id = :to', {
amount: 100, to: 2
});
await transaction.commit();
} catch (error) {
await transaction.rollback();
throw error;
}
# requirements.txt
boto3>=1.34.0
# db.py
import boto3
import os
rds_data = boto3.client('rds-data')
CLUSTER_ARN = os.environ['DB_CLUSTER_ARN']
SECRET_ARN = os.environ['DB_SECRET_ARN']
DATABASE = os.environ['DB_NAME']
def execute_sql(sql: str, parameters: list = None, transaction_id: str = None):
    """Execute a SQL statement through the RDS Data API.

    Args:
        sql: SQL text using :name placeholders.
        parameters: Optional Data API parameter list, e.g.
            [{'name': 'id', 'value': {'longValue': 1}}].
        transaction_id: Optional id returned by begin_transaction; when
            given, the statement runs inside that transaction instead of
            auto-committing.

    Returns:
        The raw execute_statement response dict.
    """
    params = {
        'resourceArn': CLUSTER_ARN,
        'secretArn': SECRET_ARN,
        'database': DATABASE,
        'sql': sql
    }
    if parameters:
        params['parameters'] = parameters
    if transaction_id:
        params['transactionId'] = transaction_id
    return rds_data.execute_statement(**params)
def get_user(user_id: int):
    """Fetch the rows for the user with the given id via the Data API."""
    id_param = {'name': 'id', 'value': {'longValue': user_id}}
    response = execute_sql('SELECT * FROM users WHERE id = :id', [id_param])
    return response.get('records', [])
def create_user(email: str, name: str):
    """Insert a user and return the generated fields from the response."""
    insert_params = [
        {'name': 'email', 'value': {'stringValue': email}},
        {'name': 'name', 'value': {'stringValue': name}},
    ]
    response = execute_sql(
        'INSERT INTO users (email, name) VALUES (:email, :name) RETURNING *',
        insert_params,
    )
    return response.get('generatedFields')
# 事务
def transfer_funds(from_id: int, to_id: int, amount: float):
    """Atomically move `amount` between two accounts via the Data API.

    Begins a Data API transaction, applies both balance updates inside it,
    and commits; any failure triggers a rollback before re-raising.
    """
    transaction = rds_data.begin_transaction(
        resourceArn=CLUSTER_ARN,
        secretArn=SECRET_ARN,
        database=DATABASE
    )
    transaction_id = transaction['transactionId']

    def _update_balance(sql: str, account_id: int):
        # BUG FIX: each statement must carry transactionId — without it the
        # Data API auto-commits the statement outside the transaction, making
        # the later commit/rollback a no-op.
        rds_data.execute_statement(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            database=DATABASE,
            sql=sql,
            parameters=[
                {'name': 'amount', 'value': {'doubleValue': amount}},
                {'name': 'id', 'value': {'longValue': account_id}}
            ],
            transactionId=transaction_id
        )

    try:
        _update_balance(
            'UPDATE accounts SET balance = balance - :amount WHERE id = :id',
            from_id
        )
        _update_balance(
            'UPDATE accounts SET balance = balance + :amount WHERE id = :id',
            to_id
        )
        rds_data.commit_transaction(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            transactionId=transaction_id
        )
    except Exception:
        rds_data.rollback_transaction(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            transactionId=transaction_id
        )
        raise
npm install prisma @prisma/client
npx prisma init
// prisma/schema.prisma
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
model User {
id Int @id @default(autoincrement())
email String @unique
name String
posts Post[]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
model Post {
id Int @id @default(autoincrement())
title String
content String?
published Boolean @default(false)
author User @relation(fields: [authorId], references: [id])
authorId Int
createdAt DateTime @default(now())
}
# 使用 RDS Proxy 端点
DATABASE_URL="postgresql://user:password@proxy-endpoint.proxy-xxx.region.rds.amazonaws.com:5432/mydb?schema=public&connection_limit=1"
// handlers/users.ts
import { PrismaClient } from '@prisma/client';
// 在多次调用间复用客户端
// Lazily-created singleton, kept for the lifetime of the Lambda container.
let prisma: PrismaClient | null = null;

/** Return the shared PrismaClient, creating it on first use. */
function getPrisma(): PrismaClient {
  if (prisma === null) {
    const url = process.env.DATABASE_URL;
    prisma = new PrismaClient({ datasources: { db: { url } } });
  }
  return prisma;
}
/** Return the first 10 users (with their posts) as an API Gateway response. */
export async function handler(event: any) {
  const client = getPrisma();
  const users = await client.user.findMany({
    include: { posts: true },
    take: 10
  });
  const body = JSON.stringify(users);
  return { statusCode: 200, body };
}
// CDK
const cluster = new rds.DatabaseCluster(this, 'Cluster', {
engine: rds.DatabaseClusterEngine.auroraPostgres({
version: rds.AuroraPostgresEngineVersion.VER_15_4
}),
serverlessV2MinCapacity: 0.5, // 最小 ACU
serverlessV2MaxCapacity: 16, // 最大 ACU
writer: rds.ClusterInstance.serverlessV2('writer'),
readers: [
rds.ClusterInstance.serverlessV2('reader', { scaleWithWriter: true })
],
vpc,
vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }
});
| 工作负载 | 最小 ACU | 最大 ACU |
|---|---|---|
| 开发/测试 | 0.5 | 2 |
| 小型生产环境 | 2 | 8 |
| 中型生产环境 | 4 | 32 |
| 大型生产环境 | 8 | 128 |
// Data API 客户端 v2 会自动处理此情况
// 对于直接连接,实现重试逻辑:
import { Pool } from 'pg';
/**
 * Run a query, retrying on connection timeouts that occur while an
 * Aurora Serverless cluster resumes from pause.
 *
 * @param pool       pg connection pool to query through
 * @param sql        SQL text with $n placeholders
 * @param params     positional parameter values
 * @param maxRetries total attempts before giving up (default 3)
 * @returns the pg query result of the first successful attempt
 * @throws the last error once retries are exhausted, or immediately for
 *         non-retryable errors
 */
async function queryWithRetry(
  pool: Pool,
  sql: string,
  params: any[],
  maxRetries = 3
): Promise<any> {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await pool.query(sql, params);
    } catch (error: any) {
      // Aurora Serverless waking up surfaces as a timeout / link failure.
      const retryable =
        error.code === 'ETIMEDOUT' ||
        error.message?.includes('Communications link failure');
      if (!retryable || attempt === maxRetries) throw error;
      // Exponential backoff: 2s, 4s, 8s, ...
      await new Promise(resolve => setTimeout(resolve, Math.pow(2, attempt) * 1000));
    }
  }
  // FIX: previously the loop could fall through and resolve to undefined
  // when maxRetries < 1; fail loudly instead.
  throw new Error('queryWithRetry: maxRetries must be >= 1');
}
# 开发环境(创建迁移)
npx prisma migrate dev --name add_users_table
# 生产环境(应用迁移)
npx prisma migrate deploy
# 生成客户端
npx prisma generate
# .github/workflows/deploy.yml
- name: 运行迁移
run: |
# 通过堡垒机连接或使用迁移 Lambda
npx prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.DATABASE_URL }}
// lambdas/migrate.ts
import { execSync } from 'child_process';
/**
 * Lambda that applies pending Prisma migrations from inside the VPC.
 * Expects DATABASE_URL in the environment (cluster or proxy endpoint).
 */
export async function handler() {
  try {
    // FIX: the spread of process.env already carries DATABASE_URL; the
    // redundant explicit override has been removed.
    execSync('npx prisma migrate deploy', {
      env: { ...process.env },
      stdio: 'inherit'
    });
    return { statusCode: 200, body: '迁移已应用' };
  } catch (error) {
    console.error('迁移失败:', error);
    throw error;
  }
}
# docker-compose.yml
services:
app:
build: .
environment:
DATABASE_URL: postgresql://user:pass@pgbouncer:6432/mydb
pgbouncer:
image: edoburu/pgbouncer
environment:
DATABASE_URL: postgresql://user:pass@aurora-endpoint:5432/mydb
POOL_MODE: transaction
MAX_CLIENT_CONN: 1000
DEFAULT_POOL_SIZE: 20
// 适用于长时间运行的服务器(非 Lambda)
import { Pool } from 'pg';
const pool = new Pool({
host: process.env.DB_HOST,
port: 5432,
database: process.env.DB_NAME,
user: process.env.DB_USER,
password: process.env.DB_PASSWORD,
max: 20, // 最大连接数
idleTimeoutMillis: 30000, // 空闲 30 秒后关闭
connectionTimeoutMillis: 10000
});
// 对所有查询使用连接池
/** Run a query on a checked-out client, always releasing it afterwards. */
export async function query(sql: string, params?: any[]) {
  const conn = await pool.connect();
  try {
    const result = await conn.query(sql, params);
    return result;
  } finally {
    // Return the client to the pool even when the query throws.
    conn.release();
  }
}
# Aurora
- CPUUtilization
- DatabaseConnections
- FreeableMemory
- ServerlessDatabaseCapacity (ACUs)
- AuroraReplicaLag
# RDS Proxy
- DatabaseConnectionsCurrentlyBorrowed
- DatabaseConnectionsCurrentlySessionPinned
- QueryDatabaseResponseLatency
- ClientConnectionsReceived
# 通过控制台或 CLI 启用
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--enable-performance-insights \
--performance-insights-retention-period 7
import { Signer } from '@aws-sdk/rds-signer';
const signer = new Signer({
hostname: process.env.DB_HOST!,
port: 5432,
username: 'iam_user',
region: 'us-east-1'
});
const token = await signer.getAuthToken();
// 使用令牌作为密码(有效期为 15 分钟)
const pool = new Pool({
host: process.env.DB_HOST,
user: 'iam_user',
password: token,
ssl: true
});
import { SecretsManagerClient, GetSecretValueCommand } from '@aws-sdk/client-secrets-manager';
const client = new SecretsManagerClient({ region: 'us-east-1' });
/** Fetch and parse the JSON database credentials stored in Secrets Manager. */
async function getDbCredentials() {
  const command = new GetSecretValueCommand({ SecretId: process.env.DB_SECRET_ARN });
  const { SecretString } = await client.send(command);
  return JSON.parse(SecretString!);
}
# 集群操作
aws rds describe-db-clusters
aws rds create-db-cluster --engine aurora-postgresql --db-cluster-identifier my-cluster
aws rds delete-db-cluster --db-cluster-identifier my-cluster --skip-final-snapshot
# Serverless v2
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--serverless-v2-scaling-configuration MinCapacity=0.5,MaxCapacity=16
# Data API
aws rds-data execute-statement \
--resource-arn $CLUSTER_ARN \
--secret-arn $SECRET_ARN \
--database mydb \
--sql "SELECT * FROM users"
# Proxy
aws rds describe-db-proxies
aws rds create-db-proxy --db-proxy-name my-proxy --engine-family POSTGRESQL ...
# 快照
aws rds create-db-cluster-snapshot --db-cluster-identifier my-cluster --db-cluster-snapshot-identifier backup-1
aws rds restore-db-cluster-from-snapshot --db-cluster-identifier restored --snapshot-identifier backup-1
Lambda 使用 max: 1,服务器则使用连接池
每周安装次数
67
代码仓库
GitHub 星标数
529
首次出现
2026年1月20日
安全审计
已安装于
claude-code54
opencode50
gemini-cli48
codex47
cursor44
antigravity40
Load with: base.md + [typescript.md | python.md]
Amazon Aurora is a MySQL/PostgreSQL-compatible relational database with serverless scaling, high availability, and enterprise features.
Sources: Aurora Docs | Serverless v2 | RDS Proxy
Use RDS Proxy for serverless, Data API for simplicity, connection pooling always.
Aurora excels at ACID-compliant workloads. For serverless architectures (Lambda), always use RDS Proxy or Data API to handle connection management. Never open raw connections from Lambda functions.
| Option | Best For |
|---|---|
| Aurora Serverless v2 | Variable workloads, auto-scaling (0.5-128 ACUs) |
| Aurora Provisioned | Predictable workloads, maximum performance |
| Aurora Global | Multi-region, disaster recovery |
| Data API | Serverless without VPC, simple HTTP access |
| RDS Proxy | Connection pooling for Lambda, high concurrency |
Lambda → RDS Proxy → Aurora
(pool)
Lambda → Data API (HTTP) → Aurora
App Server → Aurora
(persistent connection)
// CDK example
import * as rds from 'aws-cdk-lib/aws-rds';
const proxy = new rds.DatabaseProxy(this, 'Proxy', {
proxyTarget: rds.ProxyTarget.fromCluster(cluster),
secrets: [cluster.secret!],
vpc,
securityGroups: [proxySecurityGroup],
requireTLS: true,
idleClientTimeout: cdk.Duration.minutes(30),
maxConnectionsPercent: 90,
maxIdleConnectionsPercent: 10,
borrowTimeout: cdk.Duration.seconds(30)
});
// lib/db.ts
import { Pool } from 'pg';
import { Signer } from '@aws-sdk/rds-signer';
const signer = new Signer({
hostname: process.env.RDS_PROXY_ENDPOINT!,
port: 5432,
username: process.env.DB_USER!,
region: process.env.AWS_REGION!
});
// IAM authentication
/**
 * Build a pg Pool that authenticates to the RDS Proxy with a fresh IAM token.
 * The token is used as the connection password and TLS is enforced.
 */
async function getPool(): Promise<Pool> {
  // IAM auth tokens are short-lived; fetch a fresh one per pool creation.
  const authToken = await signer.getAuthToken();
  const config = {
    host: process.env.RDS_PROXY_ENDPOINT,
    port: 5432,
    database: process.env.DB_NAME,
    user: process.env.DB_USER,
    password: authToken,
    ssl: { rejectUnauthorized: true },
    max: 1, // one connection per Lambda container; the proxy does the real pooling
    idleTimeoutMillis: 120000,
    connectionTimeoutMillis: 10000
  };
  return new Pool(config);
}
// Usage in Lambda
// Cached across warm invocations of the same Lambda container.
let pool: Pool | null = null;

/** Look up a single user row by the id carried on the event. */
export async function handler(event: any) {
  pool ??= await getPool();
  const { rows } = await pool.query('SELECT * FROM users WHERE id = $1', [event.userId]);
  return rows[0];
}
# Key settings for Lambda workloads
MaxConnectionsPercent: 90 # Use most of DB connections
MaxIdleConnectionsPercent: 10 # Keep some idle for bursts
ConnectionBorrowTimeout: 30s # Wait for available connection
IdleClientTimeout: 30min # Close idle proxy connections
# Monitor these CloudWatch metrics:
# - DatabaseConnectionsCurrentlyBorrowed
# - DatabaseConnectionsCurrentlySessionPinned
# - QueryDatabaseResponseLatency
# Must be Aurora Serverless
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--enable-http-endpoint
npm install data-api-client
// lib/db.ts
import DataAPIClient from 'data-api-client';
const db = DataAPIClient({
secretArn: process.env.DB_SECRET_ARN!,
resourceArn: process.env.DB_CLUSTER_ARN!,
database: process.env.DB_NAME!,
region: process.env.AWS_REGION!
});
// Simple query
const users = await db.query('SELECT * FROM users WHERE active = :active', {
active: true
});
// Insert with returning
const result = await db.query(
'INSERT INTO users (email, name) VALUES (:email, :name) RETURNING *',
{ email: 'user@test.com', name: 'Test User' }
);
// Transaction
const transaction = await db.transaction();
try {
await transaction.query('UPDATE accounts SET balance = balance - :amount WHERE id = :from', {
amount: 100, from: 1
});
await transaction.query('UPDATE accounts SET balance = balance + :amount WHERE id = :to', {
amount: 100, to: 2
});
await transaction.commit();
} catch (error) {
await transaction.rollback();
throw error;
}
# requirements.txt
boto3>=1.34.0
# db.py
import boto3
import os
rds_data = boto3.client('rds-data')
CLUSTER_ARN = os.environ['DB_CLUSTER_ARN']
SECRET_ARN = os.environ['DB_SECRET_ARN']
DATABASE = os.environ['DB_NAME']
def execute_sql(sql: str, parameters: list = None, transaction_id: str = None):
    """Execute a SQL statement through the RDS Data API.

    Args:
        sql: SQL text using :name placeholders.
        parameters: Optional Data API parameter list, e.g.
            [{'name': 'id', 'value': {'longValue': 1}}].
        transaction_id: Optional id returned by begin_transaction; when
            given, the statement runs inside that transaction instead of
            auto-committing.

    Returns:
        The raw execute_statement response dict.
    """
    params = {
        'resourceArn': CLUSTER_ARN,
        'secretArn': SECRET_ARN,
        'database': DATABASE,
        'sql': sql
    }
    if parameters:
        params['parameters'] = parameters
    if transaction_id:
        params['transactionId'] = transaction_id
    return rds_data.execute_statement(**params)
def get_user(user_id: int):
    """Fetch the rows for the user with the given id via the Data API."""
    id_param = {'name': 'id', 'value': {'longValue': user_id}}
    response = execute_sql('SELECT * FROM users WHERE id = :id', [id_param])
    return response.get('records', [])
def create_user(email: str, name: str):
    """Insert a user and return the generated fields from the response."""
    insert_params = [
        {'name': 'email', 'value': {'stringValue': email}},
        {'name': 'name', 'value': {'stringValue': name}},
    ]
    response = execute_sql(
        'INSERT INTO users (email, name) VALUES (:email, :name) RETURNING *',
        insert_params,
    )
    return response.get('generatedFields')
# Transaction
def transfer_funds(from_id: int, to_id: int, amount: float):
    """Atomically move `amount` between two accounts via the Data API.

    Begins a Data API transaction, applies both balance updates inside it,
    and commits; any failure triggers a rollback before re-raising.
    """
    transaction = rds_data.begin_transaction(
        resourceArn=CLUSTER_ARN,
        secretArn=SECRET_ARN,
        database=DATABASE
    )
    transaction_id = transaction['transactionId']

    def _update_balance(sql: str, account_id: int):
        # BUG FIX: each statement must carry transactionId — without it the
        # Data API auto-commits the statement outside the transaction, making
        # the later commit/rollback a no-op.
        rds_data.execute_statement(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            database=DATABASE,
            sql=sql,
            parameters=[
                {'name': 'amount', 'value': {'doubleValue': amount}},
                {'name': 'id', 'value': {'longValue': account_id}}
            ],
            transactionId=transaction_id
        )

    try:
        _update_balance(
            'UPDATE accounts SET balance = balance - :amount WHERE id = :id',
            from_id
        )
        _update_balance(
            'UPDATE accounts SET balance = balance + :amount WHERE id = :id',
            to_id
        )
        rds_data.commit_transaction(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            transactionId=transaction_id
        )
    except Exception:
        rds_data.rollback_transaction(
            resourceArn=CLUSTER_ARN,
            secretArn=SECRET_ARN,
            transactionId=transaction_id
        )
        raise
npm install prisma @prisma/client
npx prisma init
// prisma/schema.prisma
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
model User {
id Int @id @default(autoincrement())
email String @unique
name String
posts Post[]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
model Post {
id Int @id @default(autoincrement())
title String
content String?
published Boolean @default(false)
author User @relation(fields: [authorId], references: [id])
authorId Int
createdAt DateTime @default(now())
}
# Use RDS Proxy endpoint
DATABASE_URL="postgresql://user:password@proxy-endpoint.proxy-xxx.region.rds.amazonaws.com:5432/mydb?schema=public&connection_limit=1"
// handlers/users.ts
import { PrismaClient } from '@prisma/client';
// Reuse client across invocations
// Lazily-created singleton, kept for the lifetime of the Lambda container.
let prisma: PrismaClient | null = null;

/** Return the shared PrismaClient, creating it on first use. */
function getPrisma(): PrismaClient {
  if (prisma === null) {
    const url = process.env.DATABASE_URL;
    prisma = new PrismaClient({ datasources: { db: { url } } });
  }
  return prisma;
}
/** Return the first 10 users (with their posts) as an API Gateway response. */
export async function handler(event: any) {
  const client = getPrisma();
  const users = await client.user.findMany({
    include: { posts: true },
    take: 10
  });
  const body = JSON.stringify(users);
  return { statusCode: 200, body };
}
// CDK
const cluster = new rds.DatabaseCluster(this, 'Cluster', {
engine: rds.DatabaseClusterEngine.auroraPostgres({
version: rds.AuroraPostgresEngineVersion.VER_15_4
}),
serverlessV2MinCapacity: 0.5, // Minimum ACUs
serverlessV2MaxCapacity: 16, // Maximum ACUs
writer: rds.ClusterInstance.serverlessV2('writer'),
readers: [
rds.ClusterInstance.serverlessV2('reader', { scaleWithWriter: true })
],
vpc,
vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }
});
| Workload | Min ACUs | Max ACUs |
|---|---|---|
| Dev/Test | 0.5 | 2 |
| Small Production | 2 | 8 |
| Medium Production | 4 | 32 |
| Large Production | 8 | 128 |
// Data API Client v2 handles this automatically
// For direct connections, implement retry logic:
import { Pool } from 'pg';
/**
 * Run a query, retrying on connection timeouts that occur while an
 * Aurora Serverless cluster resumes from pause.
 *
 * @param pool       pg connection pool to query through
 * @param sql        SQL text with $n placeholders
 * @param params     positional parameter values
 * @param maxRetries total attempts before giving up (default 3)
 * @returns the pg query result of the first successful attempt
 * @throws the last error once retries are exhausted, or immediately for
 *         non-retryable errors
 */
async function queryWithRetry(
  pool: Pool,
  sql: string,
  params: any[],
  maxRetries = 3
): Promise<any> {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await pool.query(sql, params);
    } catch (error: any) {
      // Aurora Serverless waking up surfaces as a timeout / link failure.
      const retryable =
        error.code === 'ETIMEDOUT' ||
        error.message?.includes('Communications link failure');
      if (!retryable || attempt === maxRetries) throw error;
      // Exponential backoff: 2s, 4s, 8s, ...
      await new Promise(resolve => setTimeout(resolve, Math.pow(2, attempt) * 1000));
    }
  }
  // FIX: previously the loop could fall through and resolve to undefined
  // when maxRetries < 1; fail loudly instead.
  throw new Error('queryWithRetry: maxRetries must be >= 1');
}
# Development (creates migration)
npx prisma migrate dev --name add_users_table
# Production (apply migrations)
npx prisma migrate deploy
# Generate client
npx prisma generate
# .github/workflows/deploy.yml
- name: Run migrations
run: |
# Connect via bastion or use a migration Lambda
npx prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.DATABASE_URL }}
// lambdas/migrate.ts
import { execSync } from 'child_process';
/**
 * Lambda that applies pending Prisma migrations from inside the VPC.
 * Expects DATABASE_URL in the environment (cluster or proxy endpoint).
 */
export async function handler() {
  try {
    // FIX: the spread of process.env already carries DATABASE_URL; the
    // redundant explicit override has been removed.
    execSync('npx prisma migrate deploy', {
      env: { ...process.env },
      stdio: 'inherit'
    });
    return { statusCode: 200, body: 'Migrations applied' };
  } catch (error) {
    console.error('Migration failed:', error);
    throw error;
  }
}
# docker-compose.yml
services:
app:
build: .
environment:
DATABASE_URL: postgresql://user:pass@pgbouncer:6432/mydb
pgbouncer:
image: edoburu/pgbouncer
environment:
DATABASE_URL: postgresql://user:pass@aurora-endpoint:5432/mydb
POOL_MODE: transaction
MAX_CLIENT_CONN: 1000
DEFAULT_POOL_SIZE: 20
// For long-running servers (not Lambda)
import { Pool } from 'pg';
const pool = new Pool({
host: process.env.DB_HOST,
port: 5432,
database: process.env.DB_NAME,
user: process.env.DB_USER,
password: process.env.DB_PASSWORD,
max: 20, // Max connections
idleTimeoutMillis: 30000, // Close idle after 30s
connectionTimeoutMillis: 10000
});
// Use pool for all queries
/** Run a query on a checked-out client, always releasing it afterwards. */
export async function query(sql: string, params?: any[]) {
  const conn = await pool.connect();
  try {
    const result = await conn.query(sql, params);
    return result;
  } finally {
    // Return the client to the pool even when the query throws.
    conn.release();
  }
}
# Aurora
- CPUUtilization
- DatabaseConnections
- FreeableMemory
- ServerlessDatabaseCapacity (ACUs)
- AuroraReplicaLag
# RDS Proxy
- DatabaseConnectionsCurrentlyBorrowed
- DatabaseConnectionsCurrentlySessionPinned
- QueryDatabaseResponseLatency
- ClientConnectionsReceived
# Enable via console or CLI
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--enable-performance-insights \
--performance-insights-retention-period 7
import { Signer } from '@aws-sdk/rds-signer';
const signer = new Signer({
hostname: process.env.DB_HOST!,
port: 5432,
username: 'iam_user',
region: 'us-east-1'
});
const token = await signer.getAuthToken();
// Use token as password (valid for 15 minutes)
const pool = new Pool({
host: process.env.DB_HOST,
user: 'iam_user',
password: token,
ssl: true
});
import { SecretsManagerClient, GetSecretValueCommand } from '@aws-sdk/client-secrets-manager';
const client = new SecretsManagerClient({ region: 'us-east-1' });
/** Fetch and parse the JSON database credentials stored in Secrets Manager. */
async function getDbCredentials() {
  const command = new GetSecretValueCommand({ SecretId: process.env.DB_SECRET_ARN });
  const { SecretString } = await client.send(command);
  return JSON.parse(SecretString!);
}
# Cluster operations
aws rds describe-db-clusters
aws rds create-db-cluster --engine aurora-postgresql --db-cluster-identifier my-cluster
aws rds delete-db-cluster --db-cluster-identifier my-cluster --skip-final-snapshot
# Serverless v2
aws rds modify-db-cluster \
--db-cluster-identifier my-cluster \
--serverless-v2-scaling-configuration MinCapacity=0.5,MaxCapacity=16
# Data API
aws rds-data execute-statement \
--resource-arn $CLUSTER_ARN \
--secret-arn $SECRET_ARN \
--database mydb \
--sql "SELECT * FROM users"
# Proxy
aws rds describe-db-proxies
aws rds create-db-proxy --db-proxy-name my-proxy --engine-family POSTGRESQL ...
# Snapshots
aws rds create-db-cluster-snapshot --db-cluster-identifier my-cluster --db-cluster-snapshot-identifier backup-1
aws rds restore-db-cluster-from-snapshot --db-cluster-identifier restored --snapshot-identifier backup-1
max: 1 for Lambda, use pooling for servers
Weekly Installs
67
Repository
GitHub Stars
529
First Seen
Jan 20, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
claude-code54
opencode50
gemini-cli48
codex47
cursor44
antigravity40
Supabase Postgres 最佳实践指南 - 8大类别性能优化规则与SQL示例
76,000 周安装