# nginx.conf
http {
    # you do not need to configure the following line when you
    # use LuaRocks or opm.
    lua_package_path "/path/to/lua-resty-mlcache/lib/?.lua;;";

    # 'on' is already the default for this directive. If 'off', the L1 cache
    # will be ineffective since the Lua VM will be re-created for every
    # request. This is fine during development, but ensure production is 'on'.
    lua_code_cache on;

    # shared memory zone used as the L2 cache
    lua_shared_dict cache_dict 1m;

    init_by_lua_block {
        local mlcache = require "resty.mlcache"

        local cache, err = mlcache.new("my_cache", "cache_dict", {
            lru_size = 500,    -- size of the L1 (Lua VM) cache
            ttl      = 3600,   -- 1h ttl for hits
            neg_ttl  = 30,     -- 30s ttl for misses
        })
        if err then
            error("could not create mlcache: " .. err)
        end

        -- we put our instance in the global table for brevity in this
        -- example, but prefer an upvalue in one of your own modules, as
        -- recommended by ngx_lua (see the module sketch after this
        -- configuration)
        _G.cache = cache
    }

    server {
        listen 8080;

        location / {
            content_by_lua_block {
                -- 'db' stands for your database client here; it is not
                -- defined in this snippet (a sketch of a concrete callback
                -- follows the configuration below)
                local function callback(username)
                    -- this only runs *once* until the key expires, so
                    -- do expensive operations like connecting to a remote
                    -- backend here, e.g. querying a MySQL server
                    return db:get_user(username) -- { name = "John Doe", email = "john@example.com" }
                end

                -- this call tries L1 (Lua VM cache) and L2 (shared dict)
                -- before running the callback (L3, the database); the
                -- returned value is then stored in L2 and L1 for subsequent
                -- requests
                local user, err = cache:get("my_key", nil, callback, "John Doe")

                ngx.say(user.name) -- "John Doe"
            }
        }
    }
}
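
The comment in init_by_lua_block recommends keeping the mlcache instance as an upvalue in one of your own modules rather than in _G. A minimal sketch of that approach, assuming a hypothetical my_cache.lua module placed on the lua_package_path (not part of the original configuration):

-- my_cache.lua (hypothetical module name)
local mlcache = require "resty.mlcache"

local cache, err = mlcache.new("my_cache", "cache_dict", {
    lru_size = 500,
    ttl      = 3600,
    neg_ttl  = 30,
})
if err then
    error("could not create mlcache: " .. err)
end

return cache

A content_by_lua_block would then obtain the instance with local cache = require "my_cache" instead of reading _G.cache; require() only evaluates the module once per Lua VM, so every request in a worker reuses the same instance.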
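
The db:get_user call in the callback above is only a placeholder. As a rough illustration of "querying a MySQL server in this callback", here is a sketch using the lua-resty-mysql client; the host, credentials, table and column names are assumptions made for the example, and how callback errors are surfaced by get() is described in the mlcache documentation:

-- sketch of a database-backed callback using lua-resty-mysql
-- (assumed to be installed); connection details below are placeholders
local mysql = require "resty.mysql"

local function get_user_from_db(username)
    local db, err = mysql:new()
    if not db then
        return nil, "failed to instantiate mysql: " .. err
    end

    db:set_timeout(1000) -- 1 second

    local ok, err = db:connect({
        host     = "127.0.0.1",
        port     = 3306,
        database = "app",
        user     = "app",
        password = "secret",
    })
    if not ok then
        return nil, "failed to connect: " .. err
    end

    local rows, err = db:query("SELECT name, email FROM users WHERE username = "
                               .. ngx.quote_sql_str(username))
    if not rows then
        return nil, "bad query: " .. err
    end

    -- put the connection back into the cosocket connection pool
    db:set_keepalive(10000, 100)

    return rows[1] -- nil when the user does not exist (cached as a miss)
end

Such a function could then be passed to cache:get() in place of the inline callback, so the database is only queried when both cache levels miss or the entry has expired.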
Source: https://www.cnblogs.com/justart/p/12384920.html