redis-expert by personamanagmentlayer/pcl
npx skills add https://github.com/personamanagmentlayer/pcl --skill redis-expert
为 Redis 提供专家指导——这是一个用作缓存、消息代理和数据库的内存数据结构存储,具有微秒级延迟。
# 开发环境
docker run --name redis -p 6379:6379 -d redis:7-alpine
# 生产环境(带持久化)
docker run --name redis \
-p 6379:6379 \
-v redis-data:/data \
-d redis:7-alpine \
redis-server --appendonly yes --requirepass strongpassword
# 使用配置文件启动 Redis
docker run --name redis \
-p 6379:6379 \
-v ./redis.conf:/usr/local/etc/redis/redis.conf \
-d redis:7-alpine \
redis-server /usr/local/etc/redis/redis.conf
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
# 网络
bind 0.0.0.0
port 6379
protected-mode yes
# 安全
requirepass strongpassword
# 内存
maxmemory 2gb
maxmemory-policy allkeys-lru
# 持久化
save 900 1 # 如果 1 个键发生变化,900 秒后保存
save 300 10 # 如果 10 个键发生变化,300 秒后保存
save 60 10000 # 如果 10000 个键发生变化,60 秒后保存
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
# 复制
replica-read-only yes
repl-diskless-sync yes
# 性能
tcp-backlog 511
timeout 0
tcp-keepalive 300
import Redis from 'ioredis';
const redis = new Redis({
host: 'localhost',
port: 6379,
password: 'strongpassword',
db: 0,
retryStrategy: (times) => {
const delay = Math.min(times * 50, 2000);
return delay;
},
});
// 字符串
await redis.set('user:1000:name', 'Alice');
await redis.set('counter', 42);
await redis.get('user:1000:name'); // 'Alice'
// 过期时间 (TTL)
await redis.setex('session:abc123', 3600, JSON.stringify({ userId: 1000 }));
await redis.expire('user:1000:name', 300); // 5 分钟
await redis.ttl('user:1000:name'); // 返回剩余秒数
// 原子操作
await redis.incr('page:views'); // 1
await redis.incr('page:views'); // 2
await redis.incrby('score', 10); // 增加 10
await redis.decr('inventory:item123');
// 哈希(对象)
await redis.hset('user:1000', {
name: 'Alice',
email: 'alice@example.com',
age: 30,
});
await redis.hget('user:1000', 'name'); // 'Alice'
await redis.hgetall('user:1000'); // { name: 'Alice', email: '...', age: '30' }
await redis.hincrby('user:1000', 'loginCount', 1);
// 列表(队列,栈)
await redis.lpush('queue:jobs', 'job1', 'job2', 'job3'); // 从左侧推入
await redis.rpush('queue:jobs', 'job4'); // 从右侧推入
await redis.lpop('queue:jobs'); // 从左侧弹出 (FIFO)
await redis.rpop('queue:jobs'); // 从右侧弹出 (LIFO)
await redis.lrange('queue:jobs', 0, -1); // 获取所有项目
// 集合(唯一值)
await redis.sadd('tags:post:1', 'javascript', 'nodejs', 'redis');
await redis.smembers('tags:post:1'); // ['javascript', 'nodejs', 'redis']
await redis.sismember('tags:post:1', 'nodejs'); // 1 (true)
await redis.scard('tags:post:1'); // 3 (计数)
// 集合操作
await redis.sadd('tags:post:2', 'nodejs', 'typescript', 'docker');
await redis.sinter('tags:post:1', 'tags:post:2'); // ['nodejs'] (交集)
await redis.sunion('tags:post:1', 'tags:post:2'); // 所有唯一标签
await redis.sdiff('tags:post:1', 'tags:post:2'); // ['javascript', 'redis']
// 有序集合(排行榜)
await redis.zadd('leaderboard', 1000, 'player1', 1500, 'player2', 800, 'player3');
await redis.zrange('leaderboard', 0, -1, 'WITHSCORES'); // 升序
await redis.zrevrange('leaderboard', 0, 9); // 前 10 名 (降序)
await redis.zincrby('leaderboard', 50, 'player1'); // 增加分数
await redis.zrank('leaderboard', 'player1'); // 获取排名 (0 起始索引)
await redis.zscore('leaderboard', 'player1'); // 获取分数
// 缓存助手类
// JSON-serializing cache helper; stored values must be JSON-serializable.
class CacheService {
  constructor(private redis: Redis) {}

  /** Read and JSON-parse a cached value; null when the key is absent. */
  async get<T>(key: string): Promise<T | null> {
    const data = await this.redis.get(key);
    return data ? JSON.parse(data) : null;
  }

  /** Store a value under `key` with a TTL in seconds (default 1 hour). */
  async set(key: string, value: any, ttl: number = 3600): Promise<void> {
    await this.redis.setex(key, ttl, JSON.stringify(value));
  }

  /** Remove a key from the cache. */
  async delete(key: string): Promise<void> {
    await this.redis.del(key);
  }

  /**
   * Cache-aside read-through: return the cached value when present,
   * otherwise compute it via `factory`, cache it for `ttl` seconds,
   * and return the fresh value.
   */
  async getOrSet<T>(
    key: string,
    factory: () => Promise<T>,
    ttl: number = 3600
  ): Promise<T> {
    const cached = await this.get<T>(key);
    // Compare against null explicitly: a truthiness check would treat
    // legitimately cached falsy values (0, '', false) as cache misses
    // and re-run the factory on every call.
    if (cached !== null) return cached;
    const fresh = await factory();
    await this.set(key, fresh, ttl);
    return fresh;
  }
}
// 使用示例
const cache = new CacheService(redis);
const user = await cache.getOrSet(
'user:1000',
async () => await db.user.findById(1000),
3600
);
// Fixed-window rate limiter backed by one Redis counter per key.
class RateLimiter {
constructor(private redis: Redis) {}
/**
 * Count one request against `key` and report whether it is within `limit`.
 * @param key    counter key, e.g. `ratelimit:${ip}`
 * @param limit  maximum requests allowed per window
 * @param window window length in seconds
 * @returns whether the request is allowed and how many remain
 */
async checkRateLimit(
key: string,
limit: number,
window: number
): Promise<{ allowed: boolean; remaining: number }> {
const current = await this.redis.incr(key);
// The first INCR in a window creates the key, so attach the TTL here.
// NOTE(review): if the process dies between INCR and EXPIRE, the key
// never expires and the limit sticks forever — consider making the
// pair atomic with MULTI or a Lua script.
if (current === 1) {
await this.redis.expire(key, window);
}
return {
allowed: current <= limit,
remaining: Math.max(0, limit - current),
};
}
}
// 使用示例:每个 IP 每小时 100 次请求
const limiter = new RateLimiter(redis);
const result = await limiter.checkRateLimit(`ratelimit:${ip}`, 100, 3600);
if (!result.allowed) {
return res.status(429).json({ error: '请求过多' });
}
/**
 * Sliding-window rate limiter using a sorted set: each request is a member
 * scored by its timestamp (ms), so the window slides continuously instead
 * of resetting at fixed boundaries.
 *
 * NOTE(review): ZCARD check and ZADD are separate round trips, so two
 * concurrent callers can both pass the check — slightly over-admits under
 * load; wrap in a Lua script for strict enforcement.
 *
 * @param key    sorted-set key for this client/resource
 * @param limit  maximum requests allowed inside the window
 * @param window window length in seconds
 * @returns true when the request is admitted
 */
async function slidingWindowRateLimit(
redis: Redis,
key: string,
limit: number,
window: number
): Promise<boolean> {
const now = Date.now();
const windowStart = now - window * 1000;
// Drop entries that fell out of the window.
await redis.zremrangebyscore(key, 0, windowStart);
// Count requests remaining inside the window.
const count = await redis.zcard(key);
if (count < limit) {
// Record this request; random suffix keeps same-millisecond members unique.
await redis.zadd(key, now, `${now}-${Math.random()}`);
await redis.expire(key, window);
return true;
}
return false;
}
// Single-instance distributed lock (SET NX PX + Lua-guarded release).
// NOTE(review): this is not Redlock — safety holds only against a single
// Redis node; with async replication/failover two clients may briefly
// hold the lock at once.
class RedisLock {
constructor(private redis: Redis) {}
/**
 * Try to acquire `lock:<resource>`, retrying up to `retryCount` times.
 * @param ttl        lock auto-expiry in milliseconds (PX)
 * @param retryDelay delay between attempts in milliseconds
 * @returns the unique lock token on success, null when all retries fail
 */
async acquire(
resource: string,
ttl: number = 10000,
retryDelay: number = 50,
retryCount: number = 100
): Promise<string | null> {
const lockKey = `lock:${resource}`;
// Random token identifies this holder so release() cannot delete a lock
// that another client re-acquired after our TTL expired.
const lockValue = crypto.randomUUID();
for (let i = 0; i < retryCount; i++) {
// SET key value PX ttl NX: set only if the key is absent, with ms expiry.
const acquired = await this.redis.set(
lockKey,
lockValue,
'PX',
ttl,
'NX'
);
if (acquired === 'OK') {
return lockValue;
}
await new Promise((resolve) => setTimeout(resolve, retryDelay));
}
return null;
}
/**
 * Release the lock only if we still own it (stored token matches ours).
 * @returns true when the lock was deleted, false otherwise
 */
async release(resource: string, lockValue: string): Promise<boolean> {
const lockKey = `lock:${resource}`;
// Lua script makes the compare-and-delete atomic on the server.
const script = `
if redis.call("get", KEYS[1]) == ARGV[1] then
return redis.call("del", KEYS[1])
else
return 0
end
`;
const result = await this.redis.eval(script, 1, lockKey, lockValue);
return result === 1;
}
/**
 * Run `fn` while holding the lock; the lock is always released in
 * `finally`, even when `fn` throws.
 * @throws Error when the lock cannot be acquired
 */
async withLock<T>(
resource: string,
fn: () => Promise<T>,
ttl: number = 10000
): Promise<T> {
const lockValue = await this.acquire(resource, ttl);
if (!lockValue) {
throw new Error('获取锁失败');
}
try {
return await fn();
} finally {
await this.release(resource, lockValue);
}
}
}
// 使用示例
const lock = new RedisLock(redis);
await lock.withLock('resource:123', async () => {
// 临界区 - 只有一个进程可以执行此代码
const data = await fetchData();
await processData(data);
});
// 发布者
const publisher = new Redis();
await publisher.publish('notifications', JSON.stringify({
type: 'new_message',
userId: 1000,
message: 'Hello!',
}));
// 订阅者
const subscriber = new Redis();
subscriber.subscribe('notifications', (err, count) => {
console.log(`已订阅 ${count} 个频道`);
});
subscriber.on('message', (channel, message) => {
const data = JSON.parse(message);
console.log(`从 ${channel} 收到:`, data);
});
// 模式订阅
subscriber.psubscribe('user:*:notifications', (err, count) => {
console.log(`已订阅 ${count} 个模式`);
});
subscriber.on('pmessage', (pattern, channel, message) => {
console.log(`模式 ${pattern} 匹配到 ${channel}:`, message);
});
// 取消订阅
await subscriber.unsubscribe('notifications');
await subscriber.punsubscribe('user:*:notifications');
// 添加到流
await redis.xadd(
'events',
'*', // 自动生成 ID
'type', 'user_registered',
'userId', '1000',
'email', 'alice@example.com'
);
// 从流读取
const messages = await redis.xread('COUNT', 10, 'STREAMS', 'events', '0');
/*
[
['events', [
['1609459200000-0', ['type', 'user_registered', 'userId', '1000']],
['1609459201000-0', ['type', 'order_placed', 'orderId', '500']]
]]
]
*/
// 消费者组
await redis.xgroup('CREATE', 'events', 'worker-group', '0', 'MKSTREAM');
// 作为消费者读取
const messages = await redis.xreadgroup(
'GROUP', 'worker-group', 'consumer-1',
'COUNT', 10,
'STREAMS', 'events', '>'
);
// 确认消息
await redis.xack('events', 'worker-group', '1609459200000-0');
// 待处理消息
const pending = await redis.xpending('events', 'worker-group');
// Multi/Exec (事务)
const pipeline = redis.multi();
pipeline.set('key1', 'value1');
pipeline.set('key2', 'value2');
pipeline.incr('counter');
const results = await pipeline.exec();
// Watch (乐观锁)
await redis.watch('balance:1000');
const balance = parseInt(await redis.get('balance:1000') || '0');
if (balance >= amount) {
const multi = redis.multi();
multi.decrby('balance:1000', amount);
multi.incrby('balance:2000', amount);
await multi.exec(); // 仅当 balance:1000 未被修改时执行
} else {
await redis.unwatch();
}
// 管道化多个命令
const pipeline = redis.pipeline();
pipeline.set('key1', 'value1');
pipeline.set('key2', 'value2');
pipeline.get('key1');
pipeline.get('key2');
const results = await pipeline.exec();
// [[null, 'OK'], [null, 'OK'], [null, 'value1'], [null, 'value2']]
// 批量操作
/** Write every key/value pair of `items` in a single round trip via a pipeline. */
async function batchSet(items: Record<string, string>) {
  const batch = redis.pipeline();
  Object.entries(items).forEach(([k, v]) => batch.set(k, v));
  await batch.exec();
}
// 带最大值的原子递增
const script = `
local current = redis.call('GET', KEYS[1])
local max = tonumber(ARGV[1])
if current and tonumber(current) >= max then
return tonumber(current)
else
return redis.call('INCR', KEYS[1])
end
`;
const result = await redis.eval(script, 1, 'counter', 100);
// 加载脚本一次,多次执行
const sha = await redis.script('LOAD', script);
const result = await redis.evalsha(sha, 1, 'counter', 100);
# 创建 6 个节点 (3 个主节点,3 个副本节点)
for port in {7000..7005}; do
mkdir -p cluster/${port}
cat > cluster/${port}/redis.conf <<EOF
port ${port}
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
redis-server cluster/${port}/redis.conf &
done
# 创建集群
redis-cli --cluster create \
127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 \
--cluster-replicas 1
import Redis from 'ioredis';
const cluster = new Redis.Cluster([
{ host: '127.0.0.1', port: 7000 },
{ host: '127.0.0.1', port: 7001 },
{ host: '127.0.0.1', port: 7002 },
]);
// 操作透明地工作
await cluster.set('key', 'value');
await cluster.get('key');
allkeys-lru:移除最近最少使用的键
allkeys-lfu:移除最不经常使用的键
volatile-lru:移除设置了过期时间的 LRU 键
volatile-ttl:移除 TTL 最短的键
使用 INFO memory 查看内存使用情况。
// 良好:层次化,描述性
'user:1000:profile'
'session:abc123'
'cache:api:users:page:1'
'ratelimit:ip:192.168.1.1:2024-01-19'
// 使用一致的分隔符
const key = ['user', userId, 'profile'].join(':');
redis-cli --bigkeys
# 实时监控命令
redis-cli MONITOR
# 统计信息
redis-cli INFO
# 慢查询
redis-cli SLOWLOG GET 10
# 内存分析
redis-cli --bigkeys
# 延迟
redis-cli --latency
const redis = new Redis({
host: 'localhost',
port: 6379,
maxRetriesPerRequest: 3,
enableReadyCheck: true,
lazyConnect: true,
});
// ❌ 不好:阻塞整个服务器
const keys = await redis.keys('user:*');
// ✅ 良好:对于大数据集使用 SCAN
/** Lazily iterate every key matching `pattern` using cursor-based SCAN. */
async function* scanKeys(pattern: string) {
  let cursor = '0';
  while (true) {
    const [next, batch] = await redis.scan(
      cursor,
      'MATCH',
      pattern,
      'COUNT',
      100
    );
    for (const key of batch) {
      yield key;
    }
    cursor = next;
    // SCAN signals completion by returning cursor '0'.
    if (cursor === '0') break;
  }
}
for await (const key of scanKeys('user:*')) {
console.log(key);
}
// 使用哈希存储对象,而不是多个键
// ❌ 不好:3 个键
await redis.set('user:1000:name', 'Alice');
await redis.set('user:1000:email', 'alice@example.com');
await redis.set('user:1000:age', '30');
// ✅ 良好:1 个键
await redis.hset('user:1000', {
name: 'Alice',
email: 'alice@example.com',
age: '30',
});
❌ 将 Redis 用作主数据库:应将其用于缓存/会话
❌ 不为缓存键设置 TTL:导致内存膨胀
❌ 在生产环境中使用 KEYS:应使用 SCAN
❌ 在键中存储大值:保持值较小(<1MB)
❌ 没有监控:跟踪内存、延迟、命中率
❌ 同步阻塞操作:使用异步操作
❌ 不处理连接失败:实现重试逻辑
❌ 在单个键中存储大集合:拆分为多个键
import session from 'express-session';
import RedisStore from 'connect-redis';
app.use(
session({
store: new RedisStore({ client: redis }),
secret: 'secret',
resave: false,
saveUninitialized: false,
cookie: {
secure: true,
httpOnly: true,
maxAge: 1000 * 60 * 60 * 24, // 24 小时
},
})
);
import { Queue, Worker } from 'bullmq';
const queue = new Queue('emails', { connection: redis });
// 添加任务
await queue.add('send-email', {
to: 'user@example.com',
subject: 'Welcome',
body: 'Hello!',
});
// 处理任务
const worker = new Worker('emails', async (job) => {
await sendEmail(job.data);
}, { connection: redis });
每周安装数
78
代码仓库
GitHub 星标数
11
首次出现
2026 年 1 月 23 日
安全审计
安装于
opencode68
codex66
gemini-cli63
github-copilot60
cursor58
amp56
Expert guidance for Redis - the in-memory data structure store used as cache, message broker, and database with microsecond latency.
# Development
docker run --name redis -p 6379:6379 -d redis:7-alpine
# Production with persistence
docker run --name redis \
-p 6379:6379 \
-v redis-data:/data \
-d redis:7-alpine \
redis-server --appendonly yes --requirepass strongpassword
# Redis with config file
docker run --name redis \
-p 6379:6379 \
-v ./redis.conf:/usr/local/etc/redis/redis.conf \
-d redis:7-alpine \
redis-server /usr/local/etc/redis/redis.conf
# Network
bind 0.0.0.0
port 6379
protected-mode yes
# Security
requirepass strongpassword
# Memory
maxmemory 2gb
maxmemory-policy allkeys-lru
# Persistence
save 900 1 # Save after 900s if 1 key changed
save 300 10 # Save after 300s if 10 keys changed
save 60 10000 # Save after 60s if 10000 keys changed
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
# Replication
replica-read-only yes
repl-diskless-sync yes
# Performance
tcp-backlog 511
timeout 0
tcp-keepalive 300
import Redis from 'ioredis';
const redis = new Redis({
host: 'localhost',
port: 6379,
password: 'strongpassword',
db: 0,
retryStrategy: (times) => {
const delay = Math.min(times * 50, 2000);
return delay;
},
});
// Strings
await redis.set('user:1000:name', 'Alice');
await redis.set('counter', 42);
await redis.get('user:1000:name'); // 'Alice'
// Expiration (TTL)
await redis.setex('session:abc123', 3600, JSON.stringify({ userId: 1000 }));
await redis.expire('user:1000:name', 300); // 5 minutes
await redis.ttl('user:1000:name'); // Returns remaining seconds
// Atomic operations
await redis.incr('page:views'); // 1
await redis.incr('page:views'); // 2
await redis.incrby('score', 10); // Increment by 10
await redis.decr('inventory:item123');
// Hashes (objects)
await redis.hset('user:1000', {
name: 'Alice',
email: 'alice@example.com',
age: 30,
});
await redis.hget('user:1000', 'name'); // 'Alice'
await redis.hgetall('user:1000'); // { name: 'Alice', email: '...', age: '30' }
await redis.hincrby('user:1000', 'loginCount', 1);
// Lists (queues, stacks)
await redis.lpush('queue:jobs', 'job1', 'job2', 'job3'); // Push to left
await redis.rpush('queue:jobs', 'job4'); // Push to right
await redis.lpop('queue:jobs'); // Pop from left (FIFO)
await redis.rpop('queue:jobs'); // Pop from right (LIFO)
await redis.lrange('queue:jobs', 0, -1); // Get all items
// Sets (unique values)
await redis.sadd('tags:post:1', 'javascript', 'nodejs', 'redis');
await redis.smembers('tags:post:1'); // ['javascript', 'nodejs', 'redis']
await redis.sismember('tags:post:1', 'nodejs'); // 1 (true)
await redis.scard('tags:post:1'); // 3 (count)
// Set operations
await redis.sadd('tags:post:2', 'nodejs', 'typescript', 'docker');
await redis.sinter('tags:post:1', 'tags:post:2'); // ['nodejs'] (intersection)
await redis.sunion('tags:post:1', 'tags:post:2'); // All unique tags
await redis.sdiff('tags:post:1', 'tags:post:2'); // ['javascript', 'redis']
// Sorted Sets (leaderboards)
await redis.zadd('leaderboard', 1000, 'player1', 1500, 'player2', 800, 'player3');
await redis.zrange('leaderboard', 0, -1, 'WITHSCORES'); // Ascending
await redis.zrevrange('leaderboard', 0, 9); // Top 10 (descending)
await redis.zincrby('leaderboard', 50, 'player1'); // Add to score
await redis.zrank('leaderboard', 'player1'); // Get rank (0-indexed)
await redis.zscore('leaderboard', 'player1'); // Get score
// Cache helper
// JSON-serializing cache helper; stored values must be JSON-serializable.
class CacheService {
  constructor(private redis: Redis) {}

  /** Read and JSON-parse a cached value; null when the key is absent. */
  async get<T>(key: string): Promise<T | null> {
    const data = await this.redis.get(key);
    return data ? JSON.parse(data) : null;
  }

  /** Store a value under `key` with a TTL in seconds (default 1 hour). */
  async set(key: string, value: any, ttl: number = 3600): Promise<void> {
    await this.redis.setex(key, ttl, JSON.stringify(value));
  }

  /** Remove a key from the cache. */
  async delete(key: string): Promise<void> {
    await this.redis.del(key);
  }

  /**
   * Cache-aside read-through: return the cached value when present,
   * otherwise compute it via `factory`, cache it for `ttl` seconds,
   * and return the fresh value.
   */
  async getOrSet<T>(
    key: string,
    factory: () => Promise<T>,
    ttl: number = 3600
  ): Promise<T> {
    const cached = await this.get<T>(key);
    // Compare against null explicitly: a truthiness check would treat
    // legitimately cached falsy values (0, '', false) as cache misses
    // and re-run the factory on every call.
    if (cached !== null) return cached;
    const fresh = await factory();
    await this.set(key, fresh, ttl);
    return fresh;
  }
}
// Usage
const cache = new CacheService(redis);
const user = await cache.getOrSet(
'user:1000',
async () => await db.user.findById(1000),
3600
);
// Fixed-window rate limiter backed by one Redis counter per key.
class RateLimiter {
constructor(private redis: Redis) {}
/**
 * Count one request against `key` and report whether it is within `limit`.
 * @param key    counter key, e.g. `ratelimit:${ip}`
 * @param limit  maximum requests allowed per window
 * @param window window length in seconds
 * @returns whether the request is allowed and how many remain
 */
async checkRateLimit(
key: string,
limit: number,
window: number
): Promise<{ allowed: boolean; remaining: number }> {
const current = await this.redis.incr(key);
// The first INCR in a window creates the key, so attach the TTL here.
// NOTE(review): if the process dies between INCR and EXPIRE, the key
// never expires and the limit sticks forever — consider making the
// pair atomic with MULTI or a Lua script.
if (current === 1) {
await this.redis.expire(key, window);
}
return {
allowed: current <= limit,
remaining: Math.max(0, limit - current),
};
}
}
// Usage: 100 requests per hour per IP
const limiter = new RateLimiter(redis);
const result = await limiter.checkRateLimit(`ratelimit:${ip}`, 100, 3600);
if (!result.allowed) {
return res.status(429).json({ error: 'Too many requests' });
}
/**
 * Sliding-window rate limiter using a sorted set: each request is a member
 * scored by its timestamp (ms), so the window slides continuously instead
 * of resetting at fixed boundaries.
 *
 * NOTE(review): ZCARD check and ZADD are separate round trips, so two
 * concurrent callers can both pass the check — slightly over-admits under
 * load; wrap in a Lua script for strict enforcement.
 *
 * @param key    sorted-set key for this client/resource
 * @param limit  maximum requests allowed inside the window
 * @param window window length in seconds
 * @returns true when the request is admitted
 */
async function slidingWindowRateLimit(
redis: Redis,
key: string,
limit: number,
window: number
): Promise<boolean> {
const now = Date.now();
const windowStart = now - window * 1000;
// Remove old entries
await redis.zremrangebyscore(key, 0, windowStart);
// Count requests in window
const count = await redis.zcard(key);
if (count < limit) {
// Add current request; random suffix keeps same-millisecond members unique.
await redis.zadd(key, now, `${now}-${Math.random()}`);
await redis.expire(key, window);
return true;
}
return false;
}
// Single-instance distributed lock (SET NX PX + Lua-guarded release).
// NOTE(review): this is not Redlock — safety holds only against a single
// Redis node; with async replication/failover two clients may briefly
// hold the lock at once.
class RedisLock {
constructor(private redis: Redis) {}
/**
 * Try to acquire `lock:<resource>`, retrying up to `retryCount` times.
 * @param ttl        lock auto-expiry in milliseconds (PX)
 * @param retryDelay delay between attempts in milliseconds
 * @returns the unique lock token on success, null when all retries fail
 */
async acquire(
resource: string,
ttl: number = 10000,
retryDelay: number = 50,
retryCount: number = 100
): Promise<string | null> {
const lockKey = `lock:${resource}`;
// Random token identifies this holder so release() cannot delete a lock
// that another client re-acquired after our TTL expired.
const lockValue = crypto.randomUUID();
for (let i = 0; i < retryCount; i++) {
// SET key value PX ttl NX: set only if the key is absent, with ms expiry.
const acquired = await this.redis.set(
lockKey,
lockValue,
'PX',
ttl,
'NX'
);
if (acquired === 'OK') {
return lockValue;
}
await new Promise((resolve) => setTimeout(resolve, retryDelay));
}
return null;
}
/**
 * Release the lock only if we still own it (stored token matches ours).
 * @returns true when the lock was deleted, false otherwise
 */
async release(resource: string, lockValue: string): Promise<boolean> {
const lockKey = `lock:${resource}`;
// Use Lua script to ensure atomicity of the compare-and-delete.
const script = `
if redis.call("get", KEYS[1]) == ARGV[1] then
return redis.call("del", KEYS[1])
else
return 0
end
`;
const result = await this.redis.eval(script, 1, lockKey, lockValue);
return result === 1;
}
/**
 * Run `fn` while holding the lock; the lock is always released in
 * `finally`, even when `fn` throws.
 * @throws Error when the lock cannot be acquired
 */
async withLock<T>(
resource: string,
fn: () => Promise<T>,
ttl: number = 10000
): Promise<T> {
const lockValue = await this.acquire(resource, ttl);
if (!lockValue) {
throw new Error('Failed to acquire lock');
}
try {
return await fn();
} finally {
await this.release(resource, lockValue);
}
}
}
// Usage
const lock = new RedisLock(redis);
await lock.withLock('resource:123', async () => {
// Critical section - only one process can execute this
const data = await fetchData();
await processData(data);
});
// Publisher
const publisher = new Redis();
await publisher.publish('notifications', JSON.stringify({
type: 'new_message',
userId: 1000,
message: 'Hello!',
}));
// Subscriber
const subscriber = new Redis();
subscriber.subscribe('notifications', (err, count) => {
console.log(`Subscribed to ${count} channels`);
});
subscriber.on('message', (channel, message) => {
const data = JSON.parse(message);
console.log(`Received from ${channel}:`, data);
});
// Pattern subscription
subscriber.psubscribe('user:*:notifications', (err, count) => {
console.log(`Subscribed to ${count} patterns`);
});
subscriber.on('pmessage', (pattern, channel, message) => {
console.log(`Pattern ${pattern} matched ${channel}:`, message);
});
// Unsubscribe
await subscriber.unsubscribe('notifications');
await subscriber.punsubscribe('user:*:notifications');
// Add to stream
await redis.xadd(
'events',
'*', // Auto-generate ID
'type', 'user_registered',
'userId', '1000',
'email', 'alice@example.com'
);
// Read from stream
const messages = await redis.xread('COUNT', 10, 'STREAMS', 'events', '0');
/*
[
['events', [
['1609459200000-0', ['type', 'user_registered', 'userId', '1000']],
['1609459201000-0', ['type', 'order_placed', 'orderId', '500']]
]]
]
*/
// Consumer Groups
await redis.xgroup('CREATE', 'events', 'worker-group', '0', 'MKSTREAM');
// Read as consumer
const messages = await redis.xreadgroup(
'GROUP', 'worker-group', 'consumer-1',
'COUNT', 10,
'STREAMS', 'events', '>'
);
// Acknowledge message
await redis.xack('events', 'worker-group', '1609459200000-0');
// Pending messages
const pending = await redis.xpending('events', 'worker-group');
// Multi/Exec (transaction)
const pipeline = redis.multi();
pipeline.set('key1', 'value1');
pipeline.set('key2', 'value2');
pipeline.incr('counter');
const results = await pipeline.exec();
// Watch (optimistic locking)
await redis.watch('balance:1000');
const balance = parseInt(await redis.get('balance:1000') || '0');
if (balance >= amount) {
const multi = redis.multi();
multi.decrby('balance:1000', amount);
multi.incrby('balance:2000', amount);
await multi.exec(); // Executes only if balance:1000 wasn't modified
} else {
await redis.unwatch();
}
// Pipeline multiple commands
const pipeline = redis.pipeline();
pipeline.set('key1', 'value1');
pipeline.set('key2', 'value2');
pipeline.get('key1');
pipeline.get('key2');
const results = await pipeline.exec();
// [[null, 'OK'], [null, 'OK'], [null, 'value1'], [null, 'value2']]
// Batch operations
/** Write every key/value pair of `items` in a single round trip via a pipeline. */
async function batchSet(items: Record<string, string>) {
  const batch = redis.pipeline();
  Object.entries(items).forEach(([k, v]) => batch.set(k, v));
  await batch.exec();
}
// Atomic increment with max
const script = `
local current = redis.call('GET', KEYS[1])
local max = tonumber(ARGV[1])
if current and tonumber(current) >= max then
return tonumber(current)
else
return redis.call('INCR', KEYS[1])
end
`;
const result = await redis.eval(script, 1, 'counter', 100);
// Load script once, execute many times
const sha = await redis.script('LOAD', script);
const result = await redis.evalsha(sha, 1, 'counter', 100);
# Create 6 nodes (3 masters, 3 replicas)
for port in {7000..7005}; do
mkdir -p cluster/${port}
cat > cluster/${port}/redis.conf <<EOF
port ${port}
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
EOF
redis-server cluster/${port}/redis.conf &
done
# Create cluster
redis-cli --cluster create \
127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 \
--cluster-replicas 1
import Redis from 'ioredis';
const cluster = new Redis.Cluster([
{ host: '127.0.0.1', port: 7000 },
{ host: '127.0.0.1', port: 7001 },
{ host: '127.0.0.1', port: 7002 },
]);
// Operations work transparently
await cluster.set('key', 'value');
await cluster.get('key');
allkeys-lru: Remove least recently used keys
allkeys-lfu: Remove least frequently used keys
volatile-lru: Remove LRU keys with expire set
volatile-ttl: Remove keys with shortest TTL
Check memory usage with INFO memory.
// Good: hierarchical, descriptive
'user:1000:profile'
'session:abc123'
'cache:api:users:page:1'
'ratelimit:ip:192.168.1.1:2024-01-19'
// Use consistent separators
const key = ['user', userId, 'profile'].join(':');
redis-cli --bigkeys
# Monitor commands in real-time
redis-cli MONITOR
# Stats
redis-cli INFO
# Slow queries
redis-cli SLOWLOG GET 10
# Memory analysis
redis-cli --bigkeys
# Latency
redis-cli --latency
const redis = new Redis({
host: 'localhost',
port: 6379,
maxRetriesPerRequest: 3,
enableReadyCheck: true,
lazyConnect: true,
});
// ❌ Bad: Blocks entire server
const keys = await redis.keys('user:*');
// ✅ Good: Use SCAN for large datasets
/** Lazily iterate every key matching `pattern` using cursor-based SCAN. */
async function* scanKeys(pattern: string) {
  let cursor = '0';
  while (true) {
    const [next, batch] = await redis.scan(
      cursor,
      'MATCH',
      pattern,
      'COUNT',
      100
    );
    for (const key of batch) {
      yield key;
    }
    cursor = next;
    // SCAN signals completion by returning cursor '0'.
    if (cursor === '0') break;
  }
}
for await (const key of scanKeys('user:*')) {
console.log(key);
}
// Use hashes for objects instead of multiple keys
// ❌ Bad: 3 keys
await redis.set('user:1000:name', 'Alice');
await redis.set('user:1000:email', 'alice@example.com');
await redis.set('user:1000:age', '30');
// ✅ Good: 1 key
await redis.hset('user:1000', {
name: 'Alice',
email: 'alice@example.com',
age: '30',
});
❌ Using Redis as primary database: use it for caching/sessions
❌ Not setting TTL on cache keys: causes memory bloat
❌ Using KEYS in production: use SCAN instead
❌ Large values in keys: keep values small (<1MB)
❌ No monitoring: track memory, latency, hit rate
❌ Synchronous blocking operations: use async operations
❌ Not handling connection failures: implement retry logic
❌ Storing large collections in a single key: split into multiple keys
import session from 'express-session';
import RedisStore from 'connect-redis';
app.use(
session({
store: new RedisStore({ client: redis }),
secret: 'secret',
resave: false,
saveUninitialized: false,
cookie: {
secure: true,
httpOnly: true,
maxAge: 1000 * 60 * 60 * 24, // 24 hours
},
})
);
import { Queue, Worker } from 'bullmq';
const queue = new Queue('emails', { connection: redis });
// Add job
await queue.add('send-email', {
to: 'user@example.com',
subject: 'Welcome',
body: 'Hello!',
});
// Process jobs
const worker = new Worker('emails', async (job) => {
await sendEmail(job.data);
}, { connection: redis });
Weekly Installs
78
Repository
GitHub Stars
11
First Seen
Jan 23, 2026
Security Audits
Gen Agent Trust Hub: Pass
Socket: Pass
Snyk: Fail
Installed on
opencode68
codex66
gemini-cli63
github-copilot60
cursor58
amp56
Azure 升级评估与自动化工具 - 轻松迁移 Functions 计划、托管层级和 SKU
104,900 周安装
反逆向工程技术详解:合法授权下的恶意软件分析与二进制保护绕过指南
163 周安装
iOS应用本地化指南:Xcode字符串目录、SwiftUI/UIKit国际化与RTL支持
159 周安装
iOS Auto Layout 调试指南:快速解决约束冲突与布局错误
160 周安装
依赖审计完整指南:安全漏洞扫描、过时包检测与清理工作流
164 周安装
iOS Apple Intelligence 路由器使用指南 - Foundation Models 与 AI 方法分流
160 周安装
Agent Skills Creator 指南:创建遵循开放格式的AI技能,涵盖模式选择到验证
164 周安装