1.1 Redis vs Memcached Comparison
Feature comparison:
bash
#!/bin/bash
# cache-comparison.sh

echo "=== Redis vs Memcached Feature Comparison ==="
echo ""
echo "Redis strengths:"
echo "✅ Rich data structures (strings, lists, sets, hashes, sorted sets)"
echo "✅ Data persistence (RDB snapshots, AOF log)"
echo "✅ Master-replica replication and high availability"
echo "✅ Lua scripting support"
echo "✅ Publish/subscribe messaging"
echo "✅ Transaction support"
echo "✅ Native clustering"
echo ""
echo "Memcached strengths:"
echo "✅ Extremely simple, with stable performance"
echo "✅ Multi-threaded architecture that uses all CPU cores"
echo "✅ Highly efficient memory allocation"
echo "✅ Simple, efficient distribution algorithm"
echo "✅ Excellent performance for pure caching workloads"
echo ""
echo "Selection guidelines:"
echo "📌 Need complex data structures → Redis"
echo "📌 Pure key-value caching with maximum raw performance → Memcached"
echo "📌 Need data persistence → Redis"
echo "📌 Large-scale distributed caching → Redis Cluster"
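To make the data-structure gap concrete, here is a minimal redis-py sketch (an illustrative example assuming a local Redis instance; it is not part of the scripts above) of a sorted-set leaderboard, something Memcached's flat key-value model cannot express without extra application logic.
python
# leaderboard_example.py - illustrative sketch of Redis sorted sets
import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

# Record scores; ZADD keeps members ordered by score automatically
r.zadd("game:leaderboard", {"alice": 120, "bob": 95, "carol": 140})

# Increment a player's score atomically
r.zincrby("game:leaderboard", 10, "bob")

# Top 3 players with their scores, highest first
top3 = r.zrevrange("game:leaderboard", 0, 2, withscores=True)
print(top3)  # e.g. [('carol', 140.0), ('alice', 120.0), ('bob', 105.0)]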
1.2 Cache Architecture Design Patterns
Multi-tier cache architecture:
text
┌───────────────────┐     ┌───────────────────┐     ┌───────────────────┐
│ Application cache │     │ Distributed cache │     │     Database      │
│ - Local cache     │────▶│ - Redis           │────▶│ - MySQL           │
│ - Guava/Caffeine  │     │ - Memcached       │     │ - PostgreSQL      │
└───────────────────┘     └───────────────────┘     └───────────────────┘
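As a rough sketch of how a read flows through these tiers, the following illustrative Python snippet (assuming a local Redis instance and a hypothetical load_from_db() helper) checks an in-process dictionary first, then Redis, and only then the database, refilling both cache layers on the way back.
python
# multi_tier_cache.py - illustrative two-tier read-through lookup
import time
import redis

redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)
local_cache = {}          # {key: (value, expires_at)} - simple in-process L1 cache
LOCAL_TTL = 30            # seconds
REDIS_TTL = 300           # seconds


def load_from_db(key):
    """Hypothetical database lookup standing in for MySQL/PostgreSQL."""
    return f"value-for-{key}"


def get(key):
    # 1. In-process cache (fastest, per-instance)
    entry = local_cache.get(key)
    if entry and entry[1] > time.time():
        return entry[0]

    # 2. Distributed cache (shared across instances)
    value = redis_client.get(key)
    if value is None:
        # 3. Database (slowest); populate the distributed cache
        value = load_from_db(key)
        redis_client.setex(key, REDIS_TTL, value)

    # Refill the local tier on the way back
    local_cache[key] = (value, time.time() + LOCAL_TTL)
    return value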
2. Redis Configuration and Optimization in Practice
2.1 Redis Installation and Basic Configuration
Redis installation script:
bash
#!/bin/bash
# install-redis.sh
set -e

echo "Installing Redis..."
echo ""

# Install build dependencies
sudo apt update
sudo apt install -y build-essential tcl

# Download the Redis sources
cd /tmp
REDIS_VERSION="7.2.0"
wget https://download.redis.io/releases/redis-${REDIS_VERSION}.tar.gz
tar xzf redis-${REDIS_VERSION}.tar.gz
cd redis-${REDIS_VERSION}

# Compile and install
make -j$(nproc)
sudo make install

# Create the system user and directories
sudo adduser --system --group --no-create-home redis
sudo mkdir -p /var/lib/redis
sudo mkdir -p /var/log/redis
sudo chown redis:redis /var/lib/redis
sudo chown redis:redis /var/log/redis

# Write the configuration file
sudo mkdir -p /etc/redis
sudo tee /etc/redis/redis.conf > /dev/null <<'EOF'
# Basic settings
bind 127.0.0.1 ::1
port 6379
timeout 0
tcp-keepalive 300
# Run in the foreground; the systemd unit below supervises the process
daemonize no
pidfile /var/run/redis/redis-server.pid
loglevel notice
logfile /var/log/redis/redis-server.log
databases 16
# Memory management
maxmemory 2gb
maxmemory-policy allkeys-lru
# Persistence
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis
# AOF
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# Slow query log
slowlog-log-slower-than 10000
slowlog-max-len 128
# Performance tuning
tcp-backlog 511
latency-monitor-threshold 1
EOF

sudo chown redis:redis /etc/redis/redis.conf

# Create the systemd service
sudo tee /etc/systemd/system/redis.service > /dev/null <<'EOF'
[Unit]
Description=Redis In-Memory Data Store
After=network.target
[Service]
User=redis
Group=redis
ExecStart=/usr/local/bin/redis-server /etc/redis/redis.conf
ExecStop=/usr/local/bin/redis-cli shutdown
Restart=always
Type=simple
RuntimeDirectory=redis
[Install]
WantedBy=multi-user.target
EOF

# Create the runtime directory and set permissions
sudo mkdir -p /var/run/redis
sudo chown redis:redis /var/run/redis

# Start the service
sudo systemctl daemon-reload
sudo systemctl enable redis
sudo systemctl start redis

echo ""
echo "Redis installation complete!"
echo "Check status:    sudo systemctl status redis"
echo "Test connection: redis-cli ping"

2.2 Redis Performance Optimization
Production tuning configuration:
ini
# /etc/redis/redis-prod.conf

# Network tuning
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 65536
timeout 0
tcp-keepalive 300
# requirepass YourStrongPassword   # strongly recommended when binding to 0.0.0.0

# General
daemonize yes
pidfile /var/run/redis/redis-server.pid
loglevel notice
logfile /var/log/redis/redis-server.log
databases 16

# Memory tuning
maxmemory 16gb
maxmemory-policy allkeys-lru
maxmemory-samples 10

# Persistence - choose according to business requirements
# Option 1: RDB snapshots (higher performance)
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb

# Option 2: AOF log (higher durability)
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes

# Performance tuning
hz 10
dynamic-hz yes
lazyfree-lazy-eviction yes
lazyfree-lazy-expire yes
lazyfree-lazy-server-del yes
replica-lazy-flush yes

# Slow query log
slowlog-log-slower-than 10000
slowlog-max-len 128

# Clients
maxclients 10000
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Latency monitoring
latency-monitor-threshold 1
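After rolling out a tuned configuration, it is worth confirming at runtime that the settings actually took effect. The following is a small redis-py sketch (assuming the server is reachable locally without a password; adjust the connection parameters to match your deployment):
python
# check_redis_config.py - sketch: verify a few tuned settings at runtime
import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

# CONFIG GET returns a dict of parameter -> current value
for param in ("maxmemory", "maxmemory-policy", "appendonly", "slowlog-log-slower-than"):
    print(param, "=", r.config_get(param)[param])

# INFO memory exposes live usage and fragmentation
mem = r.info("memory")
print("used_memory_human:", mem["used_memory_human"])
print("mem_fragmentation_ratio:", mem["mem_fragmentation_ratio"])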
2.3 Redis High Availability
Redis Sentinel configuration:
bash
#!/bin/bash
# setup-redis-sentinel.sh

# Sentinel configuration file
sudo tee /etc/redis/sentinel.conf > /dev/null <<'EOF'
port 26379
daemonize yes
pidfile "/var/run/redis/sentinel.pid"
logfile "/var/log/redis/sentinel.log"

# Monitor the master node
sentinel monitor mymaster 127.0.0.1 6379 2
sentinel down-after-milliseconds mymaster 30000
sentinel parallel-syncs mymaster 1
sentinel failover-timeout mymaster 180000

# Security
sentinel auth-pass mymaster YourPassword123

# Notification scripts
sentinel notification-script mymaster /etc/redis/notify.sh
sentinel client-reconfig-script mymaster /etc/redis/reconfig.sh
EOF

# Create the notification script
sudo tee /etc/redis/notify.sh > /dev/null <<'EOF'
#!/bin/bash
# Sentinel notification script
echo "[$(date)] Sentinel Notification: $1 $2 $3 $4 $5 $6 $7" >> /var/log/redis/sentinel-notify.log
case $1 in
    +sdown)
        echo "Subjectively down: $2" >> /var/log/redis/sentinel-notify.log
        ;;
    -sdown)
        echo "Recovered from subjective down: $2" >> /var/log/redis/sentinel-notify.log
        ;;
    +odown)
        echo "Objectively down: $2" >> /var/log/redis/sentinel-notify.log
        ;;
    +switch-master)
        echo "Master switched: $2 $3 $4" >> /var/log/redis/sentinel-notify.log
        # Add alerting logic here (email, SMS, etc.)
        ;;
esac
EOF

sudo chmod +x /etc/redis/notify.sh

# Note: sentinel.conf also references /etc/redis/reconfig.sh; create it the same way or remove that line.
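On the application side, clients should discover the current master through Sentinel instead of hard-coding its address. A minimal redis-py sketch, assuming the Sentinel above on 127.0.0.1:26379 and the same example password:
python
# sentinel_client.py - sketch: discover the master through Sentinel
from redis.sentinel import Sentinel

sentinel = Sentinel([("127.0.0.1", 26379)], socket_timeout=0.5)

# Ask Sentinel where the current master of "mymaster" is
print("current master:", sentinel.discover_master("mymaster"))

# Connections that follow failovers automatically
master = sentinel.master_for("mymaster", socket_timeout=0.5, password="YourPassword123")
replica = sentinel.slave_for("mymaster", socket_timeout=0.5, password="YourPassword123")

master.set("failover_test", "ok")
print(replica.get("failover_test"))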
3. Memcached Configuration and Optimization in Practice
3.1 Memcached Installation and Configuration
Memcached installation script:
bash
#!/bin/bash
# install-memcached.sh
set -e

echo "Installing Memcached..."
echo ""

# Install Memcached
sudo apt update
sudo apt install -y memcached libmemcached-tools

# Tuned configuration
sudo tee /etc/memcached.conf > /dev/null <<'EOF'
# Basic settings
-d
logfile /var/log/memcached.log
-m 2048
-p 11211
-u memcache
-l 0.0.0.0

# Performance tuning
-c 1024
-t 8
-R 20
-C

# Memory allocation
-n 72
-f 1.25

# Connection tuning
-B auto
-k
-vv
EOF

# Restart the service
sudo systemctl restart memcached
sudo systemctl enable memcached

echo ""
echo "Memcached installation complete!"
echo "Check status:    sudo systemctl status memcached"
echo "Test connection: echo 'stats' | nc localhost 11211"
3.2 Memcached Performance Tuning
Advanced tuning configuration:
ini
# /etc/memcached-optimized.conf

# Memory
-m 4096        # 4 GB of cache memory
-M             # return an error when memory is exhausted instead of evicting via LRU

# Connections
-c 65536       # maximum simultaneous connections
-t 16          # worker threads, ideally equal to the number of CPU cores
-R 50          # maximum requests per event per connection
-B auto        # auto-negotiate the protocol

# Network
-l 0.0.0.0     # listen on all interfaces
-p 11211       # TCP port
-U 11211       # UDP port
-s /var/run/memcached/memcached.sock   # Unix socket
-a 0766        # socket permissions

# Memory allocation
-n 72          # minimum chunk size
-f 1.25        # growth factor

# Logging and monitoring
-vv            # verbose logging
# -v           # normal logging
# -d           # run as a daemon
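To verify that the tuning is paying off, the same `memcache` client library used later in this article can pull the server's live statistics. A minimal sketch, assuming Memcached is listening on localhost:11211:
python
# memcached_stats.py - sketch: watch hit rate and evictions
import memcache

mc = memcache.Client(["localhost:11211"])

# get_stats() returns a list of (server, stats_dict) pairs
for server, stats in mc.get_stats():
    hits = int(stats.get("get_hits", 0))
    misses = int(stats.get("get_misses", 0))
    total = hits + misses
    hit_rate = (hits * 100.0 / total) if total else 0.0
    print(f"{server}: hit rate {hit_rate:.2f}%, "
          f"evictions {stats.get('evictions')}, "
          f"connections {stats.get('curr_connections')}")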
4. Cache Integration in Applications
4.1 Python Integration
Redis Python client configuration:
python
# redis_client.py
import redis
import json
import time
from functools import wraps


class RedisCache:
    def __init__(self, host='localhost', port=6379, password=None, db=0):
        self.redis_client = redis.Redis(
            host=host,
            port=port,
            password=password,
            db=db,
            decode_responses=True,
            socket_connect_timeout=5,
            socket_timeout=5,
            retry_on_timeout=True,
            max_connections=100
        )

    def cache_result(self, timeout=300, key_prefix=""):
        """Caching decorator."""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                # Build the cache key
                cache_key = f"{key_prefix}:{func.__name__}:{str(args)}:{str(kwargs)}"
                # Try the cache first
                cached_result = self.redis_client.get(cache_key)
                if cached_result is not None:
                    return json.loads(cached_result)
                # Cache miss: run the function and store the result
                result = func(*args, **kwargs)
                self.redis_client.setex(
                    cache_key,
                    timeout,
                    json.dumps(result, default=str)
                )
                return result
            return wrapper
        return decorator

    def get_or_set(self, key, default_func, timeout=300):
        """Return the cached value, or compute and cache it."""
        cached = self.redis_client.get(key)
        if cached is not None:
            return json.loads(cached)
        result = default_func()
        self.redis_client.setex(key, timeout, json.dumps(result, default=str))
        return result


# Usage example
cache = RedisCache()


@cache.cache_result(timeout=600, key_prefix="user")
def get_user_profile(user_id):
    """Fetch a user profile - cached for 10 minutes."""
    # Simulate a database query
    time.sleep(0.1)
    return {
        "id": user_id,
        "name": f"User{user_id}",
        "email": f"user{user_id}@example.com",
        "created_at": "2024-01-01"
    }


def get_active_users():
    """Fetch the list of active users."""
    return cache.get_or_set(
        "active_users",
        lambda: ["user1", "user2", "user3"],  # simulate a database query
        timeout=300
    )

Memcached Python client configuration:
python
# memcached_client.py
import memcache
import pickle


class MemcachedCache:
    def __init__(self, servers=['localhost:11211']):
        self.mc = memcache.Client(servers, debug=0)

    def set(self, key, value, timeout=300):
        """Store a value in the cache."""
        return self.mc.set(key, pickle.dumps(value), timeout)

    def get(self, key):
        """Fetch a value from the cache."""
        cached = self.mc.get(key)
        if cached:
            return pickle.loads(cached)
        return None

    def delete(self, key):
        """Delete a cached value."""
        return self.mc.delete(key)

    def increment(self, key, delta=1):
        """Increment a counter (the key must already hold a plain integer value)."""
        return self.mc.incr(key, delta)

    def cache_html(self, key, html_content, timeout=300):
        """Cache an HTML fragment."""
        return self.set(f"html:{key}", html_content, timeout)


# Usage example
cache = MemcachedCache()

# Cache an HTML fragment
cache.cache_html("homepage_header", "<header>...</header>", 3600)

# Counter use case
cache.increment("page_views", 1)

4.2 PHP Integration
Redis PHP integration:
php
<?php
// redis_cache.php

class RedisCache {
    private $redis;

    public function __construct($host = '127.0.0.1', $port = 6379, $password = null) {
        $this->redis = new Redis();
        $this->redis->connect($host, $port, 2.5); // 2.5-second timeout
        if ($password) {
            $this->redis->auth($password);
        }
    }

    /**
     * Store a value in the cache
     */
    public function set($key, $value, $ttl = 3600) {
        return $this->redis->setex($key, $ttl, serialize($value));
    }

    /**
     * Fetch a value from the cache
     */
    public function get($key) {
        $data = $this->redis->get($key);
        return $data ? unserialize($data) : null;
    }

    /**
     * Delete a cached value
     */
    public function delete($key) {
        return $this->redis->del($key);
    }

    /**
     * Cache the result of a database query
     */
    public function cacheQuery($key, $queryCallback, $ttl = 3600) {
        $cached = $this->get($key);
        if ($cached !== null) {
            return $cached;
        }
        $result = $queryCallback();
        $this->set($key, $result, $ttl);
        return $result;
    }

    /**
     * Fetch multiple cached values at once
     */
    public function mget($keys) {
        $data = $this->redis->mget($keys);
        return array_map(function($item) {
            return $item ? unserialize($item) : null;
        }, $data);
    }
}

// Usage example
$cache = new RedisCache();

// Cache a user record
$user = $cache->cacheQuery('user:123', function() {
    // Simulate a database query
    return [
        'id' => 123,
        'name' => 'Zhang San',
        'email' => 'zhangsan@example.com'
    ];
}, 600); // cache for 10 minutes

// Cache a page fragment
$cache->set('page:home:header', '<div>Header Content</div>', 1800);
?>

5. Advanced Caching Strategies
5.1 Cache Penetration Protection
Cache penetration occurs when requests repeatedly ask for keys that exist in neither the cache nor the database, so every request falls through to the backing store. Two common defenses are caching negative results for a short time (sketched below) and rejecting obviously invalid keys with a Bloom filter.
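A minimal sketch of the negative-result approach, assuming a local Redis instance and a hypothetical query_db() lookup; misses are stored under a sentinel value with a short TTL so repeated requests for non-existent keys never reach the database:
python
# null_value_cache.py - sketch: cache negative lookups with a short TTL
import json
import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

NULL_SENTINEL = "__NULL__"   # marks "looked it up, nothing there"
NULL_TTL = 60                # keep negative results only briefly
HIT_TTL = 600


def query_db(user_id):
    """Hypothetical database lookup; returns None when the user does not exist."""
    return None


def get_user(user_id):
    key = f"user:{user_id}"
    cached = r.get(key)
    if cached == NULL_SENTINEL:
        return None                      # known miss, skip the database
    if cached is not None:
        return json.loads(cached)

    row = query_db(user_id)
    if row is None:
        r.setex(key, NULL_TTL, NULL_SENTINEL)   # remember the miss
        return None
    r.setex(key, HIT_TTL, json.dumps(row))
    return row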
Bloom filter implementation:
python
# bloom_filter.py
import redis
import hashlib
import math


class BloomFilter:
    def __init__(self, redis_client, key, expected_elements=1000000, false_positive_rate=0.01):
        self.redis = redis_client
        self.key = key
        self.expected_elements = expected_elements
        self.false_positive_rate = false_positive_rate
        # Derive the optimal parameters
        self.size = self._calculate_size(expected_elements, false_positive_rate)
        self.hash_count = self._calculate_hash_count(self.size, expected_elements)

    def _calculate_size(self, n, p):
        """Size of the bit array: m = -n * ln(p) / (ln 2)^2"""
        return int(-(n * math.log(p)) / (math.log(2) ** 2))

    def _calculate_hash_count(self, m, n):
        """Number of hash functions: k = (m / n) * ln 2"""
        return int((m / n) * math.log(2))

    def _hashes(self, item):
        """Generate hash_count bit positions via double hashing."""
        hash1 = int(hashlib.md5(item.encode()).hexdigest(), 16)
        hash2 = int(hashlib.sha1(item.encode()).hexdigest(), 16)
        for i in range(self.hash_count):
            yield (hash1 + i * hash2) % self.size

    def add(self, item):
        """Add an element to the Bloom filter."""
        for hash_val in self._hashes(item):
            self.redis.setbit(self.key, hash_val, 1)

    def exists(self, item):
        """Check whether an element might be present."""
        for hash_val in self._hashes(item):
            if not self.redis.getbit(self.key, hash_val):
                return False
        return True


# Usage example
redis_client = redis.Redis()
bloom = BloomFilter(redis_client, "user_bloom_filter")

# Add the IDs of users that exist
bloom.add("user_123")
bloom.add("user_456")

# Check whether a user might exist
if bloom.exists("user_123"):
    print("User may exist")
else:
    print("User definitely does not exist")

5.2 Cache Avalanche Protection
Tiered caching with randomized expiry:
python
# cache_avalanche_protection.py
import random
import time
import threading

from redis_client import RedisCache


class AvalancheProtectedCache(RedisCache):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_with_jitter(self, key, value, base_timeout=300, jitter_range=60):
        """Set a cache entry with random TTL jitter to avoid mass expiry."""
        jitter = random.randint(-jitter_range, jitter_range)
        timeout = max(60, base_timeout + jitter)  # enforce a minimum TTL of 1 minute
        return self.redis_client.setex(key, timeout, value)

    def get_with_fallback(self, key, fallback_func, base_timeout=300):
        """Read through the cache with graceful degradation."""
        try:
            cached = self.redis_client.get(key)
            if cached:
                return cached
            # Cache miss: call the original function
            result = fallback_func()
            # Refresh the cache
            self.set_with_jitter(key, result, base_timeout)
            return result
        except Exception as e:
            # Cache service failure: fall back to the original function
            print(f"Cache service error: {e}, falling back to the source")
            return fallback_func()


# Hot keys: long TTL plus background refresh
class HotDataCache(AvalancheProtectedCache):
    def get_hot_data(self, key, data_fetcher, refresh_threshold=60):
        """Caching strategy for hot keys."""
        # Read from the cache
        data = self.redis_client.get(key)
        if data:
            # Refresh in the background when the TTL is about to run out
            ttl = self.redis_client.ttl(key)
            if ttl < refresh_threshold:
                self._refresh_background(key, data_fetcher)
            return data
        # Cache miss: fetch synchronously
        fresh_data = data_fetcher()
        self.redis_client.setex(key, 3600, fresh_data)  # expire after 1 hour
        return fresh_data

    def _refresh_background(self, key, data_fetcher):
        """Refresh the cache in a background thread."""
        def refresh():
            try:
                fresh_data = data_fetcher()
                self.redis_client.setex(key, 3600, fresh_data)
            except Exception as e:
                print(f"Background cache refresh failed: {e}")

        thread = threading.Thread(target=refresh)
        thread.daemon = True
        thread.start()

5.3 Distributed Locks
Redis distributed lock:
python
# distributed_lock.py
import redis
import time
import uuid


class RedisDistributedLock:
    def __init__(self, redis_client, lock_key, timeout=10):
        self.redis = redis_client
        self.lock_key = lock_key
        self.timeout = timeout
        self.identifier = str(uuid.uuid4())

    def acquire(self, block=True, block_timeout=None):
        """Acquire the lock."""
        start_time = time.time()
        while True:
            # Try to take the lock (SET key value NX EX timeout)
            if self.redis.set(self.lock_key, self.identifier, nx=True, ex=self.timeout):
                return True
            if not block:
                return False
            # Give up after block_timeout seconds
            if block_timeout and (time.time() - start_time) > block_timeout:
                return False
            # Back off briefly before retrying
            time.sleep(0.1)

    def release(self):
        """Release the lock."""
        # A Lua script keeps the check-and-delete atomic
        lua_script = """
        if redis.call("get", KEYS[1]) == ARGV[1] then
            return redis.call("del", KEYS[1])
        else
            return 0
        end
        """
        result = self.redis.eval(lua_script, 1, self.lock_key, self.identifier)
        return result == 1

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()


# Usage example
def process_with_lock():
    redis_client = redis.Redis()
    with RedisDistributedLock(redis_client, "order_lock", timeout=30):
        # Do the work that requires the lock
        print("Lock acquired, running the critical section...")
        time.sleep(5)
        print("Done")

6. Performance Monitoring and Tuning
6.1 Cache Monitoring Scripts
Redis monitoring dashboard:
bash
#!/bin/bash
# redis-monitor.sh

echo "=== Redis Performance Monitor ==="
echo "Time: $(date)"
echo ""

REDIS_CLI="redis-cli"

# Basic information
echo "1. Redis server info:"
$REDIS_CLI info server | grep -E "(redis_version|redis_mode|os|process_id)"
echo ""

# Memory usage
echo "2. Memory usage:"
$REDIS_CLI info memory | grep -E "(used_memory|used_memory_rss|mem_fragmentation_ratio|maxmemory)"
echo ""

# Hit-rate statistics
echo "3. Hit rate:"
$REDIS_CLI info stats | grep -E "(keyspace_hits|keyspace_misses)"
# strip the trailing carriage return that INFO lines carry
hits=$($REDIS_CLI info stats | grep "keyspace_hits:" | cut -d: -f2 | tr -d '\r')
misses=$($REDIS_CLI info stats | grep "keyspace_misses:" | cut -d: -f2 | tr -d '\r')
if [ "$hits" -gt 0 ] || [ "$misses" -gt 0 ]; then
    total=$((hits + misses))
    hit_rate=$(echo "scale=2; $hits * 100 / $total" | bc)
    echo "   Hit rate: ${hit_rate}%"
fi
echo ""

# Slow query log
echo "4. Slow log:"
$REDIS_CLI slowlog get | head -10
echo ""

# Client connections
echo "5. Client connections:"
$REDIS_CLI info clients | grep -E "(connected_clients|blocked_clients)"
echo ""

# Keyspace analysis
echo "6. Keyspace:"
$REDIS_CLI info keyspace
echo ""

Memcached monitoring script:
bash
#!/bin/bash
# memcached-monitor.sh

echo "=== Memcached Performance Monitor ==="
echo "Time: $(date)"
echo ""

echo "stats" | nc localhost 11211 | grep -E "(uptime|curr_connections|cmd_get|cmd_set|get_hits|get_misses|bytes_read|bytes_written|evictions)"

# Compute the hit rate (strip the protocol's trailing carriage returns)
hits=$(echo "stats" | nc localhost 11211 | grep "get_hits" | awk '{print $3}' | tr -d '\r')
misses=$(echo "stats" | nc localhost 11211 | grep "get_misses" | awk '{print $3}' | tr -d '\r')
if [ "$hits" -gt 0 ] || [ "$misses" -gt 0 ]; then
    total=$((hits + misses))
    hit_rate=$(echo "scale=2; $hits * 100 / $total" | bc)
    echo "Hit rate: ${hit_rate}%"
fi

6.2 Performance Benchmarking
Redis benchmark:
python
# redis_benchmark.py
import redis
import time
import threading
import statistics


class RedisBenchmark:
    def __init__(self, redis_client):
        self.redis = redis_client

    def benchmark_set(self, key_count=10000, value_size=100):
        """Benchmark SET operations."""
        print(f"Starting SET benchmark: {key_count} keys, {value_size}-byte values")
        value = 'x' * value_size
        start_time = time.time()
        for i in range(key_count):
            self.redis.set(f"bench_key_{i}", value)
        duration = time.time() - start_time
        ops_per_sec = key_count / duration
        print(f"SET benchmark done: {duration:.2f} s, {ops_per_sec:.2f} ops/s")
        return ops_per_sec

    def benchmark_get(self, key_count=10000):
        """Benchmark GET operations."""
        print(f"Starting GET benchmark: {key_count} keys")
        start_time = time.time()
        for i in range(key_count):
            self.redis.get(f"bench_key_{i}")
        duration = time.time() - start_time
        ops_per_sec = key_count / duration
        print(f"GET benchmark done: {duration:.2f} s, {ops_per_sec:.2f} ops/s")
        return ops_per_sec

    def benchmark_concurrent(self, thread_count=10, ops_per_thread=1000):
        """Benchmark a concurrent SET+GET workload."""
        print(f"Starting concurrency benchmark: {thread_count} threads, {ops_per_thread} operations each")
        results = []
        threads = []

        def worker(thread_id):
            start_time = time.time()
            for i in range(ops_per_thread):
                key = f"concurrent_{thread_id}_{i}"
                self.redis.set(key, f"value_{i}")
                self.redis.get(key)
            duration = time.time() - start_time
            results.append(ops_per_thread * 2 / duration)  # SET + GET

        # Create and start the worker threads
        for i in range(thread_count):
            thread = threading.Thread(target=worker, args=(i,))
            threads.append(thread)
            thread.start()

        # Wait for all threads to finish
        for thread in threads:
            thread.join()

        avg_ops = statistics.mean(results)
        print(f"Concurrency benchmark done: average {avg_ops:.2f} ops/s per thread")
        return avg_ops


# Run the benchmarks
if __name__ == "__main__":
    redis_client = redis.Redis()
    benchmark = RedisBenchmark(redis_client)
    benchmark.benchmark_set()
    benchmark.benchmark_get()
    benchmark.benchmark_concurrent()

7. Production Best Practices
7.1 Configuration Checklist
Redis production environment checks:
bash
#!/bin/bash
# redis-production-checklist.sh

echo "=== Redis Production Checklist ==="
echo "Time: $(date)"
echo ""

# 1. Configuration
echo "1. Configuration:"
redis-cli config get bind | grep -v "^$"
redis-cli config get protected-mode | grep -v "^$"
redis-cli config get requirepass | grep -v "^$"
echo ""

# 2. Memory
echo "2. Memory:"
redis-cli info memory | grep -E "(used_memory_human|maxmemory_human|mem_fragmentation_ratio)"
echo ""

# 3. Persistence
echo "3. Persistence:"
redis-cli info persistence | grep -E "(rdb_last_save_time|aof_enabled|aof_last_rewrite_time_sec)"
echo ""

# 4. Replication
echo "4. Replication:"
redis-cli info replication | grep -E "(role|connected_slaves|master_link_status)"
echo ""

# 5. Security
echo "5. Security:"
echo "   Password set:   $(redis-cli config get requirepass | tail -1)"
echo "   Bind address:   $(redis-cli config get bind | tail -1)"
echo "   Protected mode: $(redis-cli config get protected-mode | tail -1)"
echo ""

echo "Checklist complete"
7.2 Backup and Recovery
Redis data backup:
bash
#!/bin/bash
# redis-backup.sh
set -e

BACKUP_DIR="/backup/redis"
DATE=$(date +%Y%m%d_%H%M%S)
RETENTION_DAYS=7

echo "Starting Redis backup..."
echo "Time: $(date)"
echo ""

# Create the backup directory
mkdir -p $BACKUP_DIR/$DATE

# Trigger an RDB snapshot
echo "Running RDB backup..."
redis-cli save

# Give the snapshot a moment to finish flushing
sleep 2

# Copy the RDB file
if [ -f /var/lib/redis/dump.rdb ]; then
    cp /var/lib/redis/dump.rdb $BACKUP_DIR/$DATE/dump_$DATE.rdb
    echo "RDB backup complete: $BACKUP_DIR/$DATE/dump_$DATE.rdb"
else
    echo "Warning: RDB file not found"
fi

# If AOF is enabled, back up the AOF file as well
if redis-cli config get appendonly | grep -q "yes"; then
    if [ -f /var/lib/redis/appendonly.aof ]; then
        cp /var/lib/redis/appendonly.aof $BACKUP_DIR/$DATE/appendonly_$DATE.aof
        echo "AOF backup complete: $BACKUP_DIR/$DATE/appendonly_$DATE.aof"
    fi
fi

# Back up the configuration file
cp /etc/redis/redis.conf $BACKUP_DIR/$DATE/redis.conf.backup

# Prune old backups (keep the backup root itself)
find $BACKUP_DIR -mindepth 1 -type d -mtime +$RETENTION_DAYS -exec rm -rf {} \;

echo ""
echo "Backup complete: $BACKUP_DIR/$DATE"

Summary
With the hands-on guidance in this article, you can build a high-performance caching system:
Key takeaways:
Choose the right tool: pick Redis or Memcached based on business requirements
Configure sensibly: tune memory, network, and persistence parameters
Apply advanced strategies: protect against cache penetration, avalanche, and hotspot breakdown
Monitor and alert: build a thorough monitoring pipeline
Design for high availability: configure master-replica replication and failover
Performance optimization essentials:
Hit rate: keep it above 95%
Memory usage: set maxmemory and the eviction policy appropriately
Network latency: tune the client connection pool
Persistence trade-off: choose RDB and/or AOF based on business requirements
Best practices:
Manage client connections with a connection pool (see the sketch below)
Implement cache degradation and fallback strategies
Run performance benchmarks regularly
Establish a solid backup and recovery process
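As a brief illustration of the connection-pool recommendation above, here is a minimal redis-py sketch (an illustrative example, not from the original configuration) that shares a single pool across the application instead of opening a new connection per request:
python
# connection_pool_example.py - sketch: share one Redis connection pool
import redis

# Create the pool once at application startup
pool = redis.ConnectionPool(
    host="localhost",
    port=6379,
    db=0,
    max_connections=100,       # cap concurrent connections to the server
    socket_connect_timeout=5,
    socket_timeout=5,
    decode_responses=True,
)


def get_redis():
    # Clients built from the same pool reuse its connections
    return redis.Redis(connection_pool=pool)


r = get_redis()
r.set("pool_test", "ok")
print(r.get("pool_test"))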
With systematic cache architecture design and careful performance tuning, you can significantly improve application performance and provide the business with stable, efficient data access.
