前面26、27、28讲到的博文环境即可,上报kafka ,只需在应用层nginx上操作(192.168.0.16,192.168.0.17)
请求上报kafka 其实很简单,大致思路是:
yum install -y unzip
cd /usr/local/servers/ && wget https://github.com/doujiang24/lua-resty-kafka/archive/master.zip
unzip master.zip
cp -rf lua-resty-kafka-master/lib/resty/kafka /usr/local/test/lualib/resty/
/usr/local/servers/nginx/sbin/nginx -s reload
vim /usr/local/test/lua/test.lua
代码如下:
-- Kafka producer client library (lua-resty-kafka)
local producer = require("resty.kafka.producer")
-- JSON encode/decode library
local cjson = require("cjson")
-- Kafka cluster broker list
local broker_list = {
{ host = "192.168.0.16", port = 9092},
{ host = "192.168.0.17", port = 9092},
{ host = "192.168.0.18", port = 9092}
}
-- Object describing the current request; reported to Kafka as JSON
local log_obj = {}
-- Custom module/source tag for this report
log_obj["request_module"] = "product_detail_info"
-- Request headers
log_obj["headers"] = ngx.req.get_headers()
-- URI query-string arguments
log_obj["uri_args"] = ngx.req.get_uri_args()
-- Force nginx to read the request body so get_body_data() below can return it.
-- NOTE(review): ngx.req.read_body() returns nothing, so the "body" key is
-- never actually set in log_obj; the body text is captured via "body_data".
log_obj["body"] = ngx.req.read_body()
-- HTTP protocol version of the request
log_obj["http_version"] = ngx.req.http_version()
-- Request method (GET/POST/...)
log_obj["method"] = ngx.req.get_method()
-- Raw (unparsed) request header string.
-- NOTE(review): the key "raw_reader" looks like a typo for "raw_header";
-- kept as-is because downstream consumers may already depend on it.
log_obj["raw_reader"] = ngx.req.raw_header()
-- Parsed request body content as a string (requires the read_body() call above)
log_obj["body_data"] = ngx.req.get_body_data()
-- JSON-encode the report payload
local message = cjson.encode(log_obj)
local uri_args = ngx.req.get_uri_args()
local product_id = uri_args["productId"]
local shop_id = uri_args["shopId"]
-- Create a Kafka producer; producer_type = "async" buffers messages and
-- flushes them in the background so the request itself is not blocked.
local async_producer = producer:new(broker_list, {producer_type = "async"})
-- send(topic, key, message): the key (product_id) routes every message for
-- the same product to the same partition, preserving per-product ordering.
local ok, err = async_producer:send("access-log", product_id, message)
-- Report failure: log and abort this handler
if not ok then
ngx.log(ngx.ERR, "kafka send err:", err)
return
end
-- Shared-dict cache (lua_shared_dict test_cache) for product/shop info
local cache_ngx = ngx.shared.test_cache
local product_cache_key = "product_info_"..product_id
local shop_cache_key = "shop_info_"..shop_id
local product_cache = cache_ngx:get(product_cache_key)
local shop_cache = cache_ngx:get(shop_cache_key)
-- On cache miss, fetch product info from the backend and cache it for 10 min
if product_cache == "" or product_cache == nil then
local http = require("resty.http")
local httpc = http.new()
local resp, err = httpc:request_uri("http://192.168.0.3:81",{
method = "GET",
path = "/getProductInfo?productId="..product_id
})
-- BUGFIX: request_uri returns nil on failure; indexing resp.body without
-- this guard crashes the handler when the backend is unreachable.
if not resp then
ngx.log(ngx.ERR, "getProductInfo request err:", err)
return
end
product_cache = resp.body
cache_ngx:set(product_cache_key, product_cache, 10 * 60)
end
-- On cache miss, fetch shop info from the backend and cache it for 10 min
if shop_cache == "" or shop_cache == nil then
local http = require("resty.http")
local httpc = http.new()
local resp, err = httpc:request_uri("http://192.168.0.3:81",{
method = "GET",
path = "/getShopInfo?shopId="..shop_id
})
-- BUGFIX: same nil-response guard as the product fetch above
if not resp then
ngx.log(ngx.ERR, "getShopInfo request err:", err)
return
end
shop_cache = resp.body
cache_ngx:set(shop_cache_key, shop_cache, 10 * 60)
end
-- Decode the cached JSON payloads into Lua tables
local product_cache_json = cjson.decode(product_cache)
local shop_cache_json = cjson.decode(shop_cache)
-- Template context combining product and shop fields
local context = {
productId = product_cache_json.id,
productName = product_cache_json.name,
productPrice = product_cache_json.price,
productPictureList = product_cache_json.pictureList,
productSecification = product_cache_json.secification,
productService = product_cache_json.service,
productColor = product_cache_json.color,
productSize = product_cache_json.size,
shopId = shop_cache_json.id,
shopName = shop_cache_json.name,
shopLevel = shop_cache_json.level,
shopRate = shop_cache_json.rate
}
-- Render the product detail page with lua-resty-template
local template = require("resty.template")
template.render("product.html", context)
vim /usr/local/servers/nginx/conf/nginx.conf
在 http 部分添加以下内容,如下图:
resolver 8.8.8.8;
配置nginx dns resolver
(注:以上操作 nginx 应用服务器(192.168.0.16,192.168.0.17)都需要进行)
advertised.host.name = 本机ip
vim /usr/local/kafka/config/server.properties
配置advertised.host.name
/usr/local/servers/nginx/sbin/nginx -t && /usr/local/servers/nginx/sbin/nginx -s reload
cd /usr/local/kafka && nohup bin/kafka-server-start.sh config/server.properties &
cd /usr/local/kafka && bin/kafka-topics.sh --zookeeper my-cache1:2181,my-cache2:2181,my-cache3:2181 --topic access-log --replication-factor 1 --partitions 1 --create
bin/kafka-console-consumer.sh --zookeeper my-cache1:2181,my-cache2:2181,my-cache3:2181 --topic access-log --from-beginning
nginx请求
shell 打开kafka 消费端查看请求上报kafka 数据
完毕,利用nginx + lua 实现请求流量上报kafka就到这里了。
以上就是本章内容,如有不对的地方,请多多指教,谢谢!