local _M = {}

local http = require("resty.http")
local json = require("cjson.safe")

local utils = require("core.utils")

local ngx = ngx

local ngx_timer_every = ngx.timer.every
local ngx_timer_at = ngx.timer.at
local ngx_worker_exiting = ngx.worker.exiting

local log = ngx.log
local ERR = ngx.ERR
local INFO = ngx.INFO


-- Round-robin selection over the configured nacos address list.
-- Wraps _M.choose_index back to 1 once it runs past the end of
-- _M.nacos_addresses_array, then advances it for the next call.
local function choose_nacos_address()

    local addresses = _M.nacos_addresses_array

    if _M.choose_index > #addresses then
        _M.choose_index = 1
    end

    local picked = addresses[_M.choose_index]
    _M.choose_index = _M.choose_index + 1

    return picked
end


-- Query nacos for the healthy instance list of one service.
-- Uses a short-timeout HTTP client against a round-robin-selected
-- nacos address. Returns the lua-resty-http response table (or nil)
-- plus an error string on failure.
local function send_req_service_info(service_name)

    local httpc = http.new()

    -- connect / send / read timeouts in milliseconds
    httpc:set_timeouts(1000, 1000, 2000)

    local nacos_address = choose_nacos_address()

    -- only attach the identity header when one is configured;
    -- a nil table key ({[nil] = v}) would raise "table index is nil"
    local headers
    if _M.nacos_identity_key then
        headers = {
            [_M.nacos_identity_key] = _M.nacos_identity_val,
        }
    end

    -- escape the configured values so special characters in service,
    -- group, namespace or cluster names cannot corrupt the query string
    local res, err = httpc:request_uri(nacos_address, {
        method = "GET",
        path = "/nacos/v1/ns/instance/list",
        query = "serviceName=" .. ngx.escape_uri(service_name)
                .. "&groupName=" .. ngx.escape_uri(_M.nacos_group_name)
                .. "&namespaceId=" .. ngx.escape_uri(_M.nacos_namespace)
                .. "&clusters=" .. ngx.escape_uri(_M.nacos_clusters)
                .. "&healthOnly=true",
        headers = headers,
    })

    return res, err

end


-- Fetch the healthy server list for service_name from nacos.
-- Retries once on failure. On success returns three values:
--   server_list    map of "ip:port" -> instance weight
--   lastRefTime    revision stamp from the nacos response
--   host count     number of healthy instances
-- Returns nothing on any failure (logged at ERR level).
local function get_address(service_name)

    local res, err = send_req_service_info(service_name)

    if not res or res.status ~= 200 then
        -- retry once before giving up
        res, err = send_req_service_info(service_name)
    end

    if not res then
        log(ERR, "service name 【"..service_name.."】 failed to get server list from nacos. ", err)
        return
    end

    if res.status == 200 then
        local list_inst_resp = json.decode(res.body)

        -- cjson.safe returns nil on malformed JSON; bail out instead of
        -- indexing a nil value below
        if not list_inst_resp then
            log(ERR, "service name 【"..service_name.."】 nacos response body is not valid json")
            return
        end

        local hosts = list_inst_resp.hosts
        if not hosts then
            return {}, 0, 0
        end

        local server_list = {}
        for _, inst in pairs(hosts) do
            local key = inst.ip .. ":" .. inst.port
            server_list[key] = inst.weight
        end

        return server_list, list_inst_resp.lastRefTime, #hosts
    end

    log(ERR, res.body)

    return
end


-- 将服务同步到storage中存储
local function sync_to_storage(chunk) 

    for i = 1, #chunk do
        
        local service_name = chunk[i].name

        local server_list, revision, servers_length = get_address(service_name)
        
        if not server_list then
            log(ERR, "service_name:" .. service_name .. " get server address is nil")
            goto continue
        end

        local server_info_json = _M.storage:get(service_name)
        local server_info = json.decode(server_info_json)
        local unq = server_info.unq

        local server_list_in_json = json.encode(server_list);
        local newUnq = ngx.md5(server_list_in_json)

        if newUnq == unq then
            goto continue
        end
        
        local new_server_info = {
            server_list = server_list,
            revision = revision,
            servers_length = servers_length,
            unq = newUnq,
        }

        server_info_json = json.encode(new_server_info)

        _M.storage:set(service_name, server_info_json)

        log(INFO, "sync_to_storage work_id【"..ngx.worker.id().."】"..service_name.." upstream: " .. server_info_json .. " , revision: " .. revision)

        ::continue::

    end

end


-- 将storage中的服务同步到service_balancer
local function subscribe(chunk) 

    for i = 1, #chunk do

        local service_name = chunk[i].name
        local server_info_json = _M.storage:get(service_name)

        if not server_info_json then
            -- server_info_json 为nil说明还没有初始化
            goto continue
        end

        local server_info = json.decode(server_info_json)

        local service_balancer = _M.services_balancer[service_name]

        local newUnq = server_info.unq
        local unq = service_balancer.unq

        -- 相等说明ip和port没有变化
        if newUnq == unq then
            goto continue
        end

        local len = server_info.servers_length

        service_balancer.len = len
        service_balancer.unq = newUnq

        if len <= 0 then
            goto continue
        end

        local server_list = server_info.server_list

        if 0 == service_balancer.init then
            -- 第一次初始化
            service_balancer.init = 1
            service_balancer.balancer = require("balancer").new(service_name, server_list, service_balancer.btype)
        else
            -- 重新初始化
            service_balancer.balancer = require("balancer").renew(service_name, server_list)
        end

        ::continue::

    end

end

-- 将group_services中的services对应的服务地址提取出来
local function to_server_list(services)

    if not services then
        return 
    end

    local server_list = {}
    local len = 0

    for i = 1, #services do

        local service = services[i]
        local name = service.name
        local weight = service.weight

        local server_info_json = _M.storage:get(name)

        if not server_info_json then
            goto continue
        end

        local server_info = json.decode(server_info_json)

        local info_server_list = server_info.server_list

        if not info_server_list then
            goto continue
        end

        for key, val in pairs(info_server_list) do
            server_list[key] = weight
            len = len + 1
        end

        ::continue::
    end

    return len, server_list

end

-- 将storage中的地址信息同步到组服务中
local function subscribe_group() 

    local group_services = _M.group_services

    if not group_services or #group_services < 0 then
        return 
    end

    for i = 1, #group_services do

        local group = group_services[i]
        local group_name = group.name
        local services = group.services

        local len, server_list = to_server_list(services)

        if not server_list then
            -- server_list 为nil说明还没有初始化
            goto continue
        end

        local server_list_in_json = json.encode(server_list)
        local service_balancer = _M.services_balancer[group_name]

        local newUnq = ngx.md5(server_list_in_json)
        local unq = service_balancer.unq

        -- 相等说明ip和port没有变化
        if newUnq == unq then
            goto continue
        end

        service_balancer.len = len
        service_balancer.unq = newUnq

        if len <= 0 then
            goto continue
        end

        if 0 == service_balancer.init then
            -- 第一次初始化
            service_balancer.init = 1
            service_balancer.balancer = require("balancer").new(group_name, server_list, service_balancer.btype)
        else
            -- 重新初始化
            service_balancer.balancer = require("balancer").renew(group_name, server_list)
        end

        ::continue::

    end

end


-- Timer callback: push one chunk of services into shared storage.
-- premature is set by nginx when the timer fires during shutdown.
local function work0_sync_chunk(premature, chunk)

    if premature or ngx_worker_exiting() then
        return
    end

    sync_to_storage(chunk)
end

-- workid=0异步将服务地址同步到storage
local function work0_sync_to_storage(premature) 
    if premature or ngx_worker_exiting() then
        return
    end

    local chunk_services = _M.chunk_services

    for i = 1, #chunk_services do
        
        local chunk = chunk_services[i]
        
        local ok, err = ngx_timer_at(0, work0_sync_chunk, chunk)

        if not ok then
            log(ERR, "work0_sync_to_storage->work0_sync_chunk index ["..i.."] err: ", err)
        end
    end

end 

-- 所有work定时同步 storage中的服务地址
local function every_work_subscribe(premature) 

    if premature or ngx_worker_exiting() then
        return
    end

    local chunk_services = _M.chunk_services

    for i = 1, #chunk_services do
        
        local chunk = chunk_services[i]
        
        subscribe(chunk)

    end

    subscribe_group()

end


-- 启动或reload 需要优先执行的方法
local function bootstrap(premature)
    
    if premature or ngx_worker_exiting() then
        return
    end

    local chunk_services = _M.chunk_services

    local sucs = 0

    for i = 1, #chunk_services do
        
        local chunk = chunk_services[i]

        sync_to_storage(chunk)

        subscribe(chunk)

        sucs = sucs + 1

    end

    subscribe_group()

    log(INFO, "bootstrap sucs: " .. sucs)

end


-- 对_M.services的服务初始化相关数据到 _M.services_balancer 和 _M.storage 
local function initialize() 

    local services = _M.services

    local services_balancer = {}

    -- 初始化services
    for i = 1, #services do

        local service = services[i]
        
        local service_name = service.name;

        local val = {
            balancer = nil,
            btype = service.balance_type,
            revision = 0,
            len = 0, -- 服务数量
            init = 0,
            unq = "0",
            name = service_name
        }

        services_balancer[service_name] = val
        
        local server_info = {
            server_list = {},
            revision = 0,
            servers_length = 0,
            unq = "0"
        }

        local server_info_json = json.encode(server_info)

        local ok, err = _M.storage:set(service_name, server_info_json)
        
        if not ok then
            log(ERR, "initialize_services _M.storage set service_name 【"..service_name.."】err: ", err)
        end

    end
    

    log(INFO, "work_id【"..ngx.worker.id().."】 initialize services ...")

    -- 初始化group_services
    local group_services = _M.group_services
    
    if group_services and #group_services > 0 then
        
        for i = 1, #group_services do

            local service = group_services[i]
            
            local service_name = service.name;
    
            local val = {
                balancer = nil,
                btype = service.balance_type,
                revision = 0,
                len = 0, -- 服务数量
                init = 0,
                unq = "0",
                name = service_name,
            }
    
            services_balancer[service_name] = val
            
        end

        log(INFO, "work_id【"..ngx.worker.id().."】 initialize group_services ...")

    end

    -- 将services 转换成 chunk_array
    local chunk_services = utils.array_to_chunkarray(_M.services, _M.services_chunk_size)
    _M.chunk_services = chunk_services

    _M.services_balancer = services_balancer

end

-- 目前balance_type只支持 roundrobin
-- conf = {
--   services_storage = 定义全局字典用于存储服务列表,
--   nacos_addresses = "http://127.0.0.1:8848", 多个使用逗号分割
--   nacos_group_name = "",
--   nacos_clusters = "",
--   nacos_identity_key = "",
--   nacos_identity_val = "",
--   nacos_namespace = "",
--   services = {{name="a", balance_type="chash"}}, -- name必须全局唯一
--   services_chunk_size = 5, 将services拆分成多个作为一个组去并发查询
--   pull_sec = 5, 每隔多少秒去nacos拉取服务
--   group_services = {  -- 将在services中不同的service组合在一起进行负载访问
--      {
--          name = "service-admin-group", -- name必须全局唯一
--          balance_type = "roundrobin",
--          services = {{name="a", weight = 8}, {name="b", weight = 2}}
--      }
--   } 
-- }
-- 1.启动或reload，每个work进程需要先去nacos将服务地址拉取同步到storage中，在将storage中的地址同步到每个work的缓存中
-- 2.workid = 0 开启定时任务，定时从nacos中拉取服务地址存在storage中
-- 3.每个work开启定时任务，从storage中拉取服务地址同步到每个work的缓存中
-- Entry point: validates conf, stores settings on _M, seeds the local
-- balancer state and shared storage, then arms the timers:
-- 1. every worker runs bootstrap once (nacos -> storage -> local cache)
-- 2. worker 0 periodically pulls nacos into storage
-- 3. every worker periodically refreshes its cache from storage
function _M.init(conf)

    -- services must be configured before anything can be pulled
    if not conf.services then
        log(ERR, "failed to start watch services is nil")
        return
    end

    -- the shared dict is mandatory: workers exchange state through it,
    -- and a nil storage would only crash later inside timer callbacks
    if not conf.services_storage then
        log(ERR, "failed to start watch services_storage is nil")
        return
    end

    -- at least one nacos address is required to pull anything
    if not conf.nacos_addresses then
        log(ERR, "failed to start watch nacos_addresses is nil")
        return
    end

    -- storage record format:
    -- key:   a name from services or group_services
    -- value: json string {server_list, revision, servers_length, unq}
    _M.storage = conf.services_storage
    _M.services = conf.services
    _M.group_services = conf.group_services
    _M.nacos_addresses = conf.nacos_addresses
    _M.nacos_namespace = conf.nacos_namespace or ""
    _M.nacos_group_name = conf.nacos_group_name or "DEFAULT_GROUP"
    _M.nacos_clusters = conf.nacos_clusters or ""
    _M.nacos_identity_key = conf.nacos_identity_key
    _M.nacos_identity_val = conf.nacos_identity_val
    _M.services_chunk_size = conf.services_chunk_size or 5
    _M.pull_sec = conf.pull_sec or 5
    _M.nacos_addresses_array = utils.dotToArray(conf.nacos_addresses) -- split address string into an array
    _M.choose_index = 1 -- next round-robin index into nacos_addresses_array

    -- seed _M.services_balancer and _M.storage (incl. group_services)
    initialize()

    -- every worker bootstraps its own local cache once
    local ok, err = ngx_timer_at(0, bootstrap)
    if not ok then
        log(ERR, "bootstrap start err: " .. err)
    end

    -- only worker 0 polls the registry and writes into shared storage
    if ngx.worker.id() == 0 then
        log(INFO, "start timer sync services...")
        local ok0, err0 = ngx_timer_every(_M.pull_sec, work0_sync_to_storage)
        if not ok0 then
            log(ERR, "work0_sync_to_storage start err:", err0)
        end
    end

    -- offset so workers read storage after worker 0 has refreshed it
    local every_work_pull_sec = _M.pull_sec + 3

    -- every worker periodically refreshes its cache from storage
    local okn, errn = ngx_timer_every(every_work_pull_sec, every_work_subscribe)
    if not okn then
        log(ERR, "every_work_subscribe start err:", errn)
    end

end


-- Pick one upstream server ("ip:port") for service_name via its
-- balancer. Returns nil when the service is unknown or currently has
-- no healthy servers.
function _M.pick_server(service_name)

    local server_balancer = _M.services_balancer[service_name]

    -- an unknown service previously crashed on the log line below;
    -- fail soft so the balancer phase can fall through gracefully
    if not server_balancer then
        log(ERR, "pick peer service_name:" .. service_name .. " is not configured")
        return nil
    end

    log(INFO, "pick peer service_name:"..service_name .." server_balancer ".. server_balancer.name .." workId:" .. ngx.worker.id())

    if server_balancer.len <= 0 then
        return nil
    end

    return server_balancer.balancer:find(service_name)

end

return _M

