1. Caching Overview
1.1 Why Caching Matters
Caching is a key technique for improving web application performance:
- Reduces database load: avoids repeated queries
- Lowers response times: data is returned quickly
- Increases concurrency: less contention for shared resources
- Saves bandwidth: less data travels over the network
- Improves user experience: pages load faster
1.2 Cache Layers in OpenResty
┌─────────────────┐
│ Browser cache   │
├─────────────────┤
│ CDN cache       │
├─────────────────┤
│ Nginx cache     │
├─────────────────┤
│ Lua shared dict │
├─────────────────┤
│ Redis cache     │
├─────────────────┤
│ Database cache  │
└─────────────────┘
1.3 Cache Strategy Types
- Cache-Aside: the application manages the cache explicitly
- Read-Through: the cache layer loads data automatically on a miss
- Write-Through: writes go to the cache and the database at the same time
- Write-Behind: writes are flushed to the database asynchronously
- Refresh-Ahead: entries that are about to expire are refreshed proactively (a minimal sketch follows below)
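Only Cache-Aside and Write-Through are demonstrated later in this article, so here is a minimal, hedged sketch of Refresh-Ahead on top of a shared dict. It assumes the my_cache zone defined in section 2.1, a recent OpenResty (the shared-dict ttl() API), and a hypothetical loader function supplied by the caller; it is an illustration, not part of the modules below.

    -- Refresh-Ahead sketch (loader is your own function, e.g. a DB query)
    local cjson = require "cjson"
    local dict = ngx.shared.my_cache

    local function refresh(premature, key, loader, ttl)
        if premature then return end
        local ok, value = pcall(loader, key)
        if ok and value then
            dict:set(key, cjson.encode(value), ttl)
        end
    end

    local function get_refresh_ahead(key, loader, ttl, refresh_margin)
        ttl = ttl or 300
        refresh_margin = refresh_margin or 30   -- refresh this many seconds before expiry
        local raw = dict:get(key)
        if raw then
            -- if the entry is close to expiring, reload it in the background
            local remaining = dict:ttl(key)     -- requires OpenResty >= 1.11.2.3
            if remaining and remaining > 0 and remaining < refresh_margin then
                ngx.timer.at(0, refresh, key, loader, ttl)
            end
            return cjson.decode(raw)
        end
        -- cold miss: load synchronously once
        local value = loader(key)
        if value then
            dict:set(key, cjson.encode(value), ttl)
        end
        return value
    end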
2. Lua Shared Memory Cache
2.1 Configuring Shared Memory
# nginx.conf
http {
    # Define shared memory zones
    lua_shared_dict my_cache     100m;   # general-purpose cache
    lua_shared_dict user_cache   50m;    # user data cache
    lua_shared_dict config_cache 10m;    # configuration cache
    lua_shared_dict rate_limit   10m;    # rate-limiting counters
    lua_shared_dict stats        20m;    # statistics cache

    # other configuration...
}
2.2 Basic Cache Operations
-- Cache management module (e.g. lua/cache.lua)
local cache = {}
local cjson = require "cjson"

-- Shared memory zone instance
local my_cache = ngx.shared.my_cache

-- Set a cache entry
function cache.set(key, value, ttl)
    ttl = ttl or 300  -- default: 5 minutes

    local serialized_value
    if type(value) == "table" then
        serialized_value = cjson.encode(value)
    else
        serialized_value = tostring(value)
    end

    local success, err, forcible = my_cache:set(key, serialized_value, ttl)
    if not success then
        ngx.log(ngx.ERR, "failed to set cache: ", err)
        return false, err
    end

    if forcible then
        ngx.log(ngx.WARN, "cache set with forcible eviction for key: ", key)
    end

    return true
end

-- Get a cache entry
function cache.get(key)
    local value, flags = my_cache:get(key)
    if not value then
        return nil, "not found"
    end

    -- Try to decode JSON; fall back to the raw string
    local ok, decoded = pcall(cjson.decode, value)
    if ok then
        return decoded
    else
        return value
    end
end

-- Delete a cache entry
function cache.delete(key)
    my_cache:delete(key)
    return true
end

-- Atomic increment
function cache.incr(key, value, init, ttl)
    value = value or 1
    init = init or 0
    ttl = ttl or 300

    -- note: the 4th argument is the shared dict's init_ttl and only
    -- applies when the key is created by this call
    local new_value, err = my_cache:incr(key, value, init, ttl)
    if not new_value then
        ngx.log(ngx.ERR, "failed to incr cache: ", err)
        return nil, err
    end

    return new_value
end

-- Cache statistics
function cache.stats()
    local capacity = my_cache:capacity()
    local free_space = my_cache:free_space()
    local used_space = capacity - free_space

    return {
        capacity = capacity,
        used = used_space,
        free = free_space,
        usage_percent = math.floor((used_space / capacity) * 100)
    }
end

-- Flush all entries
function cache.flush_all()
    my_cache:flush_all()
    return true
end

-- List keys (up to max_count)
function cache.get_keys(max_count)
    max_count = max_count or 1024
    return my_cache:get_keys(max_count)
end

return cache
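A minimal usage sketch, assuming the module above is saved as lua/cache.lua and that lua_package_path includes that directory:

    location /cache-demo {
        content_by_lua_block {
            local cache = require "cache"

            -- write a table, then read it back
            cache.set("greeting", { msg = "hello" }, 60)
            local value = cache.get("greeting")

            ngx.say(value and value.msg or "miss")
        }
    }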
2.3 Advanced Caching Patterns
-- Advanced cache manager
local advanced_cache = {}
local cache = require "cache"  -- the basic cache module defined above
local cjson = require "cjson"

-- Cache-Aside pattern
function advanced_cache.get_user(user_id)
    local cache_key = "user:" .. user_id

    -- 1. Try the cache first
    local cached_user, err = cache.get(cache_key)
    if cached_user then
        ngx.log(ngx.INFO, "Cache hit for user: ", user_id)
        return cached_user
    end

    -- 2. Cache miss: load from the database
    ngx.log(ngx.INFO, "Cache miss for user: ", user_id)
    local user_data = get_user_from_db(user_id)  -- placeholder database query function
    if user_data then
        -- 3. Populate the cache
        cache.set(cache_key, user_data, 600)  -- cache for 10 minutes
        return user_data
    end

    return nil, "User not found"
end

-- Write-Through pattern
function advanced_cache.update_user(user_id, user_data)
    local cache_key = "user:" .. user_id

    -- 1. Update the database
    local success, err = update_user_in_db(user_id, user_data)  -- placeholder database update function
    if not success then
        return false, err
    end

    -- 2. Update the cache
    cache.set(cache_key, user_data, 600)
    return true
end

-- Two-level cache
function advanced_cache.get_with_fallback(key, loader_func, l1_ttl, l2_ttl)
    l1_ttl = l1_ttl or 60    -- L1 cache: 1 minute
    l2_ttl = l2_ttl or 600   -- L2 cache: 10 minutes

    local l1_key = "l1:" .. key
    local l2_key = "l2:" .. key

    -- 1. Check the L1 (short-lived) cache
    local value = cache.get(l1_key)
    if value then
        ngx.log(ngx.INFO, "L1 cache hit for key: ", key)
        return value
    end

    -- 2. Check the L2 (long-lived) cache
    value = cache.get(l2_key)
    if value then
        ngx.log(ngx.INFO, "L2 cache hit for key: ", key)
        -- Promote the entry to L1
        cache.set(l1_key, value, l1_ttl)
        return value
    end

    -- 3. Both levels missed: call the loader
    ngx.log(ngx.INFO, "Cache miss for key: ", key)
    value = loader_func(key)
    if value then
        -- Populate both L1 and L2
        cache.set(l1_key, value, l1_ttl)
        cache.set(l2_key, value, l2_ttl)
    end

    return value
end

-- Cache warm-up
function advanced_cache.warmup(keys_and_loaders)
    local success_count = 0
    local total_count = #keys_and_loaders

    for _, item in ipairs(keys_and_loaders) do
        local key = item.key
        local loader = item.loader
        local ttl = item.ttl or 300

        -- Skip keys that are already cached
        local existing = cache.get(key)
        if not existing then
            local value = loader(key)
            if value then
                cache.set(key, value, ttl)
                success_count = success_count + 1
            end
        else
            success_count = success_count + 1
        end
    end

    ngx.log(ngx.INFO, "Cache warmup completed: ", success_count, "/", total_count)
    return success_count, total_count
end

-- Batch get
function advanced_cache.mget(keys, loader_func)
    local results = {}
    local missing_keys = {}

    -- 1. Read everything we can from the cache
    for _, key in ipairs(keys) do
        local value = cache.get(key)
        if value then
            results[key] = value
        else
            table.insert(missing_keys, key)
        end
    end

    -- 2. Load whatever is missing
    if #missing_keys > 0 and loader_func then
        local loaded_data = loader_func(missing_keys)
        for key, value in pairs(loaded_data) do
            results[key] = value
            cache.set(key, value, 300)  -- cache for 5 minutes
        end
    end

    return results
end

return advanced_cache
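A short usage sketch for get_with_fallback, to be run from content_by_lua_block or similar; fetch_product_from_db is a hypothetical loader, not part of the module:

    local advanced_cache = require "advanced_cache"

    -- hypothetical loader: queries the database for one product
    local function fetch_product_from_db(key)
        -- ... real DB access goes here ...
        return { id = key, name = "demo product" }
    end

    -- L1 for 30 seconds, L2 for 10 minutes
    local product = advanced_cache.get_with_fallback("product:42", fetch_product_from_db, 30, 600)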
2.4 Guarding Against Cache Penetration and Avalanche
-- Cache protection module
local cache_guard = {}
local cache = require "cache"
local cjson = require "cjson"

-- Prevent cache penetration (negative caching)
function cache_guard.get_with_null_cache(key, loader_func, ttl, null_ttl)
    ttl = ttl or 300
    null_ttl = null_ttl or 60  -- cache "not found" results for a shorter time

    local value = cache.get(key)

    -- Was a null marker cached?
    if value == "__NULL__" then
        ngx.log(ngx.INFO, "Null cache hit for key: ", key)
        return nil
    end

    if value then
        return value
    end

    -- Load the data
    value = loader_func(key)
    if value then
        cache.set(key, value, ttl)
    else
        -- Cache the null marker so missing keys do not hammer the backend
        cache.set(key, "__NULL__", null_ttl)
    end

    return value
end

-- Prevent cache avalanche (randomized TTL)
function cache_guard.set_with_jitter(key, value, base_ttl, jitter_range)
    base_ttl = base_ttl or 300
    jitter_range = jitter_range or 60  -- jitter window in seconds

    -- Add random jitter so entries do not all expire at once
    local jitter = math.random(-jitter_range, jitter_range)
    local actual_ttl = base_ttl + jitter

    return cache.set(key, value, actual_ttl)
end

-- Prevent cache breakdown (hot key expiry) with a shared-dict lock
local locks = ngx.shared.locks or ngx.shared.my_cache

function cache_guard.get_with_lock(key, loader_func, ttl, lock_timeout)
    ttl = ttl or 300
    lock_timeout = lock_timeout or 10

    -- Try the cache first
    local value = cache.get(key)
    if value then
        return value
    end

    local lock_key = "lock:" .. key

    -- Try to acquire the lock
    local ok, err = locks:add(lock_key, 1, lock_timeout)
    if not ok then
        if err == "exists" then
            -- Someone else is loading; wait briefly, then retry the cache
            ngx.sleep(0.01)  -- wait 10ms
            value = cache.get(key)
            if value then
                return value
            end
            -- Still nothing: report that the resource is being loaded
            return nil, "Resource is being loaded"
        else
            ngx.log(ngx.ERR, "Failed to acquire lock: ", err)
            return nil, err
        end
    end

    -- We hold the lock: load the data
    local success, result = pcall(loader_func, key)

    -- Release the lock
    locks:delete(lock_key)

    if success and result then
        cache.set(key, result, ttl)
        return result
    else
        ngx.log(ngx.ERR, "Failed to load data for key: ", key)
        return nil, "Failed to load data"
    end
end

-- Hot key detection and protection
local hot_keys = ngx.shared.stats or ngx.shared.my_cache

function cache_guard.get_with_hotkey_protection(key, loader_func, ttl)
    ttl = ttl or 300
    local access_key = "access:" .. key
    local hot_threshold = 100  -- access-count threshold
    local time_window = 60     -- time window in seconds

    -- Count this access
    local access_count = hot_keys:incr(access_key, 1, 0, time_window) or 0

    -- Is this a hot key?
    if access_count > hot_threshold then
        ngx.log(ngx.WARN, "Hot key detected: ", key, " (access count: ", access_count, ")")

        -- Use a longer TTL for hot data
        ttl = ttl * 3

        -- Hot data could also be replicated to additional cache instances
        local hot_key = "hot:" .. key
        local hot_value = cache.get(hot_key)
        if hot_value then
            return hot_value
        end
    end

    return cache_guard.get_with_lock(key, loader_func, ttl)
end

return cache_guard
3. Redis Cache Integration
3.1 Redis Cache Layer
-- Redis cache manager (e.g. lua/redis_cache.lua)
local redis_cache = {}
local redis = require "resty.redis"
local cjson = require "cjson"

-- Redis connection settings
local redis_config = {
    host = "127.0.0.1",
    port = 6379,
    timeout = 5000,              -- ms
    pool = "redis_cache_pool",
    pool_size = 100,
    keepalive_timeout = 30000,   -- ms
    keepalive_pool = 50
}

-- Acquire a Redis connection
local function get_redis()
    local red = redis:new()
    red:set_timeout(redis_config.timeout)

    local ok, err = red:connect(redis_config.host, redis_config.port, {
        pool = redis_config.pool,
        pool_size = redis_config.pool_size
    })
    if not ok then
        ngx.log(ngx.ERR, "failed to connect to redis: ", err)
        return nil, err
    end

    return red
end

-- Return a Redis connection to the keepalive pool
local function close_redis(red)
    if not red then
        return
    end

    local ok, err = red:set_keepalive(redis_config.keepalive_timeout, redis_config.keepalive_pool)
    if not ok then
        ngx.log(ngx.ERR, "failed to set redis keepalive: ", err)
        red:close()
    end
end

-- Set a cache entry
function redis_cache.set(key, value, ttl)
    local red, err = get_redis()
    if not red then
        return false, err
    end

    local serialized_value
    if type(value) == "table" then
        serialized_value = cjson.encode(value)
    else
        serialized_value = tostring(value)
    end

    local ok, err
    if ttl then
        ok, err = red:setex(key, ttl, serialized_value)
    else
        ok, err = red:set(key, serialized_value)
    end

    close_redis(red)

    if not ok then
        ngx.log(ngx.ERR, "failed to set redis cache: ", err)
        return false, err
    end

    return true
end

-- Get a cache entry
function redis_cache.get(key)
    local red, err = get_redis()
    if not red then
        return nil, err
    end

    local value, err = red:get(key)
    close_redis(red)

    -- report real errors before treating the result as a miss
    if not value then
        ngx.log(ngx.ERR, "failed to get redis cache: ", err)
        return nil, err
    end

    if value == ngx.null then
        return nil, "not found"
    end

    -- Try to decode JSON; fall back to the raw string
    local ok, decoded = pcall(cjson.decode, value)
    if ok then
        return decoded
    else
        return value
    end
end

-- Delete a cache entry
function redis_cache.delete(key)
    local red, err = get_redis()
    if not red then
        return false, err
    end

    local ok, err = red:del(key)
    close_redis(red)

    if not ok then
        ngx.log(ngx.ERR, "failed to delete redis cache: ", err)
        return false, err
    end

    return ok > 0
end

-- Batch get
function redis_cache.mget(keys)
    local red, err = get_redis()
    if not red then
        return nil, err
    end

    local values, err = red:mget(unpack(keys))
    close_redis(red)

    if not values then
        ngx.log(ngx.ERR, "failed to mget redis cache: ", err)
        return nil, err
    end

    local result = {}
    for i, value in ipairs(values) do
        if value ~= ngx.null then
            local ok, decoded = pcall(cjson.decode, value)
            result[keys[i]] = ok and decoded or value
        end
    end

    return result
end

-- Pipelined commands
function redis_cache.pipeline(operations)
    local red, err = get_redis()
    if not red then
        return nil, err
    end

    -- Start the pipeline
    red:init_pipeline()

    -- Queue the commands
    for _, op in ipairs(operations) do
        local cmd = op.cmd
        local args = op.args or {}
        red[cmd](red, unpack(args))
    end

    -- Commit the pipeline
    local results, err = red:commit_pipeline()
    close_redis(red)

    if not results then
        ngx.log(ngx.ERR, "failed to execute redis pipeline: ", err)
        return nil, err
    end

    return results
end

-- Atomic increment (a Redis Lua script keeps the read-modify-write atomic)
function redis_cache.atomic_incr(key, increment, initial, ttl)
    local red, err = get_redis()
    if not red then
        return nil, err
    end

    increment = increment or 1
    initial = initial or 0
    ttl = ttl or 0  -- 0 means "no expiry"

    local script = [[
        local ttl = tonumber(ARGV[3])
        local current = redis.call('GET', KEYS[1])
        if current == false then
            redis.call('SET', KEYS[1], ARGV[2])
            if ttl and ttl > 0 then
                redis.call('EXPIRE', KEYS[1], ttl)
            end
            return tonumber(ARGV[2])
        else
            local new_value = redis.call('INCRBY', KEYS[1], ARGV[1])
            if ttl and ttl > 0 then
                redis.call('EXPIRE', KEYS[1], ttl)
            end
            return new_value
        end
    ]]

    local result, err = red:eval(script, 1, key, increment, initial, ttl)
    close_redis(red)

    if not result then
        ngx.log(ngx.ERR, "failed to atomic incr: ", err)
        return nil, err
    end

    return result
end

return redis_cache
3.2 Multi-Level Cache Architecture
-- Multi-level cache manager
local multi_cache = {}
local local_cache = require "cache"         -- Lua shared-memory cache
local redis_cache = require "redis_cache"   -- Redis cache
local cjson = require "cjson"

-- Cache level configuration
local cache_levels = {
    {
        name = "local",
        cache = local_cache,
        ttl_factor = 0.1,   -- local TTL is 10% of the base TTL
        priority = 1
    },
    {
        name = "redis",
        cache = redis_cache,
        ttl_factor = 1.0,   -- Redis TTL equals the base TTL
        priority = 2
    }
}

-- Multi-level get
function multi_cache.get(key, loader_func, base_ttl)
    base_ttl = base_ttl or 300

    -- Probe each level in priority order
    for _, level in ipairs(cache_levels) do
        local value, err = level.cache.get(key)
        if value then
            ngx.log(ngx.INFO, "Cache hit at level: ", level.name, " for key: ", key)

            -- Backfill the higher-priority (faster) levels
            for i = 1, level.priority - 1 do
                local higher_level = cache_levels[i]
                local ttl = math.floor(base_ttl * higher_level.ttl_factor)
                higher_level.cache.set(key, value, ttl)
            end

            return value
        end
    end

    -- Every level missed: call the loader
    if loader_func then
        ngx.log(ngx.INFO, "All cache miss for key: ", key, ", loading from source")
        local value = loader_func(key)
        if value then
            -- Write the value into every level
            for _, level in ipairs(cache_levels) do
                local ttl = math.floor(base_ttl * level.ttl_factor)
                level.cache.set(key, value, ttl)
            end
        end
        return value
    end

    return nil, "not found"
end

-- Multi-level set
function multi_cache.set(key, value, base_ttl)
    base_ttl = base_ttl or 300
    local success_count = 0

    for _, level in ipairs(cache_levels) do
        local ttl = math.floor(base_ttl * level.ttl_factor)
        local ok, err = level.cache.set(key, value, ttl)
        if ok then
            success_count = success_count + 1
        else
            ngx.log(ngx.WARN, "Failed to set cache at level: ", level.name, ", error: ", err)
        end
    end

    return success_count > 0
end

-- Multi-level delete
function multi_cache.delete(key)
    local success_count = 0

    for _, level in ipairs(cache_levels) do
        local ok, err = level.cache.delete(key)
        if ok then
            success_count = success_count + 1
        end
    end

    return success_count
end

-- Aggregated statistics
function multi_cache.stats()
    local stats = {}

    for _, level in ipairs(cache_levels) do
        if level.cache.stats then
            stats[level.name] = level.cache.stats()
        end
    end

    return stats
end

-- Warm-up
function multi_cache.warmup(warmup_data)
    local total = 0
    local success = 0

    for key, item in pairs(warmup_data) do
        total = total + 1
        local value = item.value
        local ttl = item.ttl or 300

        if multi_cache.set(key, value, ttl) then
            success = success + 1
        end
    end

    ngx.log(ngx.INFO, "Multi-cache warmup completed: ", success, "/", total)
    return success, total
end

return multi_cache
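A minimal usage sketch for the multi-level get, again with a hypothetical loader (load_article_from_db is illustrative only):

    local multi_cache = require "multi_cache"

    -- hypothetical loader, called only when both cache levels miss
    local function load_article_from_db(key)
        return { id = key, title = "cached article" }
    end

    -- the local level keeps the value ~30s, Redis ~300s (base_ttl * ttl_factor)
    local article = multi_cache.get("article:1001", load_article_from_db, 300)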
4. HTTP Cache Control
4.1 Setting HTTP Cache Headers
-- HTTP cache control module
local http_cache = {}
local multi_cache = require "multi_cache"  -- used by the middleware below

-- Set cache-control headers
function http_cache.set_cache_headers(cache_type, max_age, options)
    options = options or {}

    if cache_type == "public" then
        ngx.header["Cache-Control"] = "public, max-age=" .. max_age
    elseif cache_type == "private" then
        ngx.header["Cache-Control"] = "private, max-age=" .. max_age
    elseif cache_type == "no-cache" then
        ngx.header["Cache-Control"] = "no-cache, no-store, must-revalidate"
        ngx.header["Pragma"] = "no-cache"
        ngx.header["Expires"] = "0"
        return
    end

    -- ETag
    if options.etag then
        ngx.header["ETag"] = '"' .. options.etag .. '"'
    end

    -- Last-Modified
    if options.last_modified then
        ngx.header["Last-Modified"] = ngx.http_time(options.last_modified)
    end

    -- Vary
    if options.vary then
        ngx.header["Vary"] = options.vary
    end

    -- Expires
    if max_age > 0 then
        ngx.header["Expires"] = ngx.http_time(ngx.time() + max_age)
    end
end

-- Handle conditional requests
function http_cache.check_conditional_request(etag, last_modified)
    local if_none_match = ngx.var.http_if_none_match
    local if_modified_since = ngx.var.http_if_modified_since

    -- ETag check
    if if_none_match and etag then
        if if_none_match == '"' .. etag .. '"' or if_none_match == "*" then
            ngx.status = 304
            ngx.header["ETag"] = '"' .. etag .. '"'
            ngx.exit(304)
        end
    end

    -- Last-Modified check
    if if_modified_since and last_modified then
        local client_time = ngx.parse_http_time(if_modified_since)
        if client_time and client_time >= last_modified then
            ngx.status = 304
            ngx.header["Last-Modified"] = ngx.http_time(last_modified)
            ngx.exit(304)
        end
    end
end

-- Generate an ETag from the response body
function http_cache.generate_etag(content)
    local resty_md5 = require "resty.md5"
    local str = require "resty.string"

    local md5 = resty_md5:new()
    md5:update(content)
    local digest = md5:final()

    return str.to_hex(digest)
end

-- Cache middleware (run in the access or content phase)
function http_cache.middleware(cache_config)
    return function()
        local uri = ngx.var.uri
        local args = ngx.var.args or ""
        local cache_key = uri .. "?" .. args

        -- Check the cache
        local cached_response = multi_cache.get("http:" .. cache_key)
        if cached_response then
            -- Replay the cached headers, skipping length/connection headers
            -- because ngx.say below produces its own framing
            for name, value in pairs(cached_response.headers) do
                local lname = string.lower(name)
                if lname ~= "content-length" and lname ~= "transfer-encoding" and lname ~= "connection" then
                    ngx.header[name] = value
                end
            end

            -- Handle conditional requests
            if cached_response.etag then
                http_cache.check_conditional_request(cached_response.etag, cached_response.last_modified)
            end

            ngx.status = cached_response.status
            ngx.say(cached_response.body)
            ngx.exit(cached_response.status)
        end

        -- Cache miss: let the request continue; the response is captured
        -- later by cache_response (see below)
    end
end

-- Cache the response. This is meant for the body filter phase: note that
-- ngx.arg[1] only holds the current body chunk, so large responses need to
-- be accumulated before calling this.
function http_cache.cache_response(cache_key, ttl)
    local status = ngx.status
    local headers = ngx.resp.get_headers()

    -- Only cache successful responses
    if status == 200 then
        local body = ngx.arg[1]
        local cached_response = {
            status = status,
            headers = headers,
            body = body,
            etag = headers["ETag"],
            last_modified = headers["Last-Modified"] and ngx.parse_http_time(headers["Last-Modified"]),
            cached_at = ngx.time()
        }
        multi_cache.set("http:" .. cache_key, cached_response, ttl)
    end
end

return http_cache
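How this module hooks into nginx is not spelled out above, and the Redis level cannot be written from the body-filter or log phases (cosockets are disabled there). The following is a hedged wiring sketch under those constraints: the middleware serves hits from the access phase, the body filter only accumulates the response into ngx.ctx, and the actual cache write is deferred to a zero-delay timer scheduled from the log phase. The location name and backend upstream are assumptions.

    location /cached-api/ {
        access_by_lua_block {
            local http_cache = require "http_cache"
            -- serves the response from cache and short-circuits on a hit
            http_cache.middleware()()
        }

        proxy_pass http://backend;

        body_filter_by_lua_block {
            -- accumulate the body; the cache write happens later
            local chunk = ngx.arg[1]
            ngx.ctx.resp_body = (ngx.ctx.resp_body or "") .. (chunk or "")
        }

        log_by_lua_block {
            if ngx.status == 200 then
                local multi_cache = require "multi_cache"
                local cache_key = "http:" .. ngx.var.uri .. "?" .. (ngx.var.args or "")
                local cached = {
                    status = ngx.status,
                    headers = ngx.resp.get_headers(),
                    body = ngx.ctx.resp_body,
                    cached_at = ngx.time()
                }
                -- write from a zero-delay timer so the Redis cosocket is usable
                ngx.timer.at(0, function(premature)
                    if not premature then
                        multi_cache.set(cache_key, cached, 300)
                    end
                end)
            end
        }
    }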
4.2 Static Asset Caching
# Static asset caching in nginx.conf
location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
    # Long expiry for static assets
    expires 1y;
    add_header Cache-Control "public, immutable";

    # Serve pre-compressed files when available
    gzip_static on;

    # ETag support
    etag on;

    # Skip access logging
    access_log off;

    # CORS
    add_header Access-Control-Allow-Origin *;

    # Return 404 if the file does not exist
    try_files $uri =404;
}

# Dynamic content caching
location /api/ {
    # Cache-control headers
    header_filter_by_lua_block {
        local http_cache = require "http_cache"

        -- Pick a cache policy per endpoint
        local uri = ngx.var.uri
        if string.match(uri, "/api/users/") then
            -- user data: cache privately for 5 minutes
            http_cache.set_cache_headers("private", 300)
        elseif string.match(uri, "/api/config/") then
            -- configuration: cache publicly for 1 hour
            http_cache.set_cache_headers("public", 3600)
        elseif string.match(uri, "/api/stats/") then
            -- statistics: never cache
            http_cache.set_cache_headers("no-cache", 0)
        end
    }

    # Proxy to the backend
    proxy_pass http://backend;
}
5. Performance Optimization Strategies
5.1 Cache Performance Monitoring
-- Cache performance monitoring module
local cache_monitor = {}
local stats_cache = ngx.shared.stats

-- Record a cache hit
function cache_monitor.record_hit(cache_type, key)
    local hit_key = "hit:" .. cache_type
    local total_key = "total:" .. cache_type

    stats_cache:incr(hit_key, 1, 0, 3600)
    stats_cache:incr(total_key, 1, 0, 3600)

    -- Track per-key hits to spot hot keys
    local key_hit = "key_hit:" .. key
    local count = stats_cache:incr(key_hit, 1, 0, 3600)
    if count and count > 100 then  -- hot-key threshold
        ngx.log(ngx.WARN, "Hot key detected: ", key, " (hits: ", count, ")")
    end
end

-- Record a cache miss
function cache_monitor.record_miss(cache_type, key)
    local miss_key = "miss:" .. cache_type
    local total_key = "total:" .. cache_type

    stats_cache:incr(miss_key, 1, 0, 3600)
    stats_cache:incr(total_key, 1, 0, 3600)
end

-- Aggregate hit/miss statistics
function cache_monitor.get_stats()
    local stats = {}

    for cache_type in pairs({local_cache = true, redis = true}) do
        local hits = stats_cache:get("hit:" .. cache_type) or 0
        local misses = stats_cache:get("miss:" .. cache_type) or 0
        local total = hits + misses

        stats[cache_type] = {
            hits = hits,
            misses = misses,
            total = total,
            hit_rate = total > 0 and (hits / total * 100) or 0
        }
    end

    return stats
end

-- Hot-key report
function cache_monitor.get_hot_keys(limit)
    limit = limit or 10
    local keys = stats_cache:get_keys(1000)
    local hot_keys = {}

    for _, key in ipairs(keys) do
        if string.match(key, "^key_hit:") then
            local count = stats_cache:get(key) or 0
            local actual_key = string.sub(key, 9)  -- strip the "key_hit:" prefix
            table.insert(hot_keys, {
                key = actual_key,
                hits = count
            })
        end
    end

    -- Sort by hit count, descending
    table.sort(hot_keys, function(a, b) return a.hits > b.hits end)

    -- Keep the top N
    local result = {}
    for i = 1, math.min(limit, #hot_keys) do
        table.insert(result, hot_keys[i])
    end

    return result
end

-- Performance report
function cache_monitor.generate_report()
    local stats = cache_monitor.get_stats()
    local hot_keys = cache_monitor.get_hot_keys(5)

    local report = {
        timestamp = ngx.time(),
        cache_stats = stats,
        hot_keys = hot_keys,
        memory_usage = {
            my_cache = ngx.shared.my_cache:capacity() - ngx.shared.my_cache:free_space(),
            stats = ngx.shared.stats:capacity() - ngx.shared.stats:free_space()
        }
    }

    return report
end

return cache_monitor
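record_hit and record_miss are never called by the cache modules above, so they have to be wired in at the call site. A hedged sketch that wraps the basic shared-dict cache with hit/miss accounting (the "local_cache" label matches the keys expected by get_stats):

    -- wrap the basic shared-dict cache with hit/miss accounting
    local cache = require "cache"
    local cache_monitor = require "cache_monitor"

    local function monitored_get(key)
        local value, err = cache.get(key)
        if value then
            cache_monitor.record_hit("local_cache", key)
        else
            cache_monitor.record_miss("local_cache", key)
        end
        return value, err
    end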
5.2 Cache Optimization Recommendations
-- Cache optimization analyzer
local cache_optimizer = {}
local monitor = require "cache_monitor"
local local_cache = require "cache"          -- used by auto_optimize
local redis_cache = require "redis_cache"    -- used by auto_optimize

-- Analyze cache performance
function cache_optimizer.analyze_performance()
    local stats = monitor.get_stats()
    local suggestions = {}

    for cache_type, stat in pairs(stats) do
        -- Hit rate too low
        if stat.hit_rate < 70 then
            table.insert(suggestions, {
                type = "low_hit_rate",
                cache = cache_type,
                current_rate = stat.hit_rate,
                suggestion = "Consider increasing TTL or reviewing cache keys"
            })
        end

        -- Very high request volume
        if stat.total > 10000 then
            table.insert(suggestions, {
                type = "high_volume",
                cache = cache_type,
                total_requests = stat.total,
                suggestion = "Consider implementing cache warming or increasing cache size"
            })
        end
    end

    -- Check hot keys
    local hot_keys = monitor.get_hot_keys(10)
    if #hot_keys > 0 then
        for _, hot_key in ipairs(hot_keys) do
            if hot_key.hits > 1000 then
                table.insert(suggestions, {
                    type = "hot_key",
                    key = hot_key.key,
                    hits = hot_key.hits,
                    suggestion = "Consider implementing key sharding or local caching"
                })
            end
        end
    end

    return suggestions
end

-- Automatic optimization
function cache_optimizer.auto_optimize()
    local suggestions = cache_optimizer.analyze_performance()
    local actions_taken = {}

    for _, suggestion in ipairs(suggestions) do
        if suggestion.type == "hot_key" then
            -- Pull hot keys into the local cache
            local key = suggestion.key
            local value = redis_cache.get(key)
            if value then
                local_cache.set(key, value, 60)  -- cache locally for 1 minute
                table.insert(actions_taken, "Added local cache for hot key: " .. key)
            end
        elseif suggestion.type == "low_hit_rate" and suggestion.cache == "local_cache" then
            -- Enlarging the shared dict requires a configuration change, so only recommend it
            table.insert(actions_taken, "Recommend increasing local cache size")
        end
    end

    return actions_taken
end

return cache_optimizer
5.3 Cache Management API
# Cache management API (nginx.conf)
location /cache-admin {
    content_by_lua_block {
        local cjson = require "cjson"
        local monitor = require "cache_monitor"
        local optimizer = require "cache_optimizer"
        local multi_cache = require "multi_cache"
        local local_cache = require "cache"

        local method = ngx.var.request_method
        local uri = ngx.var.uri

        -- Response headers
        ngx.header.content_type = "application/json"

        if method == "GET" then
            if uri == "/cache-admin/stats" then
                -- Cache statistics
                local stats = monitor.get_stats()
                ngx.say(cjson.encode(stats))
            elseif uri == "/cache-admin/hot-keys" then
                -- Hot keys
                local hot_keys = monitor.get_hot_keys(20)
                ngx.say(cjson.encode(hot_keys))
            elseif uri == "/cache-admin/report" then
                -- Performance report
                local report = monitor.generate_report()
                ngx.say(cjson.encode(report))
            elseif uri == "/cache-admin/suggestions" then
                -- Optimization suggestions
                local suggestions = optimizer.analyze_performance()
                ngx.say(cjson.encode(suggestions))
            else
                ngx.status = 404
                ngx.say(cjson.encode({error = "Not found"}))
            end
        elseif method == "POST" then
            if uri == "/cache-admin/optimize" then
                -- Run automatic optimization
                local actions = optimizer.auto_optimize()
                ngx.say(cjson.encode({actions = actions}))
            elseif uri == "/cache-admin/clear" then
                -- Clear caches
                ngx.req.read_body()
                local body = ngx.req.get_body_data()
                local data = body and cjson.decode(body) or {}

                if data.cache_type == "all" then
                    local_cache.flush_all()
                    -- Flushing Redis should be handled with care; omitted in this example
                    ngx.say(cjson.encode({message = "All caches cleared"}))
                elseif data.key then
                    multi_cache.delete(data.key)
                    ngx.say(cjson.encode({message = "Key deleted: " .. data.key}))
                else
                    ngx.status = 400
                    ngx.say(cjson.encode({error = "Invalid request"}))
                end
            else
                ngx.status = 404
                ngx.say(cjson.encode({error = "Not found"}))
            end
        else
            ngx.status = 405
            ngx.say(cjson.encode({error = "Method not allowed"}))
        end
    }
}
6. Summary
Caching is at the core of OpenResty performance tuning; a well-designed caching strategy can improve application performance substantially:
6.1 Best Practices
- Multi-level caching: combine local and distributed caches
- Sensible TTLs: set expiry times based on how the data changes
- Cache warm-up: preload hot data ahead of traffic
- Monitoring and alerting: track cache performance in real time
- Protection mechanisms: guard against cache penetration, breakdown, and avalanche
6.2 Key Points for Performance Tuning
- Choose the appropriate cache tier for each kind of data
- Design cache keys carefully (a small key-builder sketch follows below)
- Compress large cached values
- Use connection pools
- Monitor cache hit rates
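As a hedged illustration of cache key design, a small helper that namespaces and versions keys, keeps them readable, and hashes anything too long to stay a reasonable shared-dict key; the names here are illustrative and not part of the modules above:

    local function build_cache_key(namespace, version, parts)
        -- e.g. build_cache_key("user", "v2", {42, "profile"}) -> "user:v2:42:profile"
        local key = namespace .. ":" .. version .. ":" .. table.concat(parts, ":")
        if #key > 200 then
            -- very long keys (e.g. full query strings) are hashed to keep them bounded
            key = namespace .. ":" .. version .. ":" .. ngx.md5(key)
        end
        return key
    end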
6.3 Caveats
- Data consistency between the cache and the source of truth
- Controlling memory usage
- Cache update and invalidation strategy
- Failure and recovery handling
- Security considerations
With systematic cache design and tuning, you can build web systems that are both high-performance and highly available.