import gc
import json
import pickle
import time
import traceback
import zlib
from datetime import datetime
from uuid import uuid4

import requests

from global_ver import sceneMap, predict_logger
from src.util.common_util import is_port_listening, clean_floats, format_size, get_total_size, split_data


def predict_by_scene(final_list, scene_type, public_dict, save_queue, stream_map, compress="pickle"):
    """Send one scene's records to its prediction service and enqueue anomalies.

    Posts ``final_list`` to the model endpoint for ``scene_type`` (payload encoded
    per ``compress``: "zlib", "pickle", or plain JSON), accumulates running 0/1
    prediction counters in ``public_dict``, tags every record with its prediction
    under ``'predict_res'``, and puts the records predicted "1" onto ``save_queue``
    together with ``stream_map``.

    Returns [] when the service port is down, the cleaned input is empty, or the
    service returns no predictions; otherwise returns None (callers ignore it).
    """
    port = sceneMap[scene_type]['port']
    if not is_port_listening(port):
        predict_logger.info(f"[预测队列][{scene_type}]端口未启动")
        return []
    final_list = clean_floats(final_list)
    if len(final_list) == 0:
        predict_logger.info(f"[预测队列][{scene_type}]数据为0，不参与预测")
        return []
    # NOTE(review): requests.post has no timeout — a hung service blocks this
    # worker forever; confirm whether a timeout is acceptable before adding one.
    url = f"http://10.28.6.144:{port}/model_predict"
    if compress == "zlib":
        data = {
            "datas": final_list,
            "model_type": scene_type,
            "requestCode": f"{uuid4()}"
        }
        predict_logger.info(f"[预测队列][{scene_type}]开始zlib压缩")
        compressed_text = zlib.compress(json.dumps(data).encode('utf-8'))
        startpredict = datetime.now()
        predict_logger.info(
            f"[预测队列][{scene_type}]准备传给预测，场景:{scene_type}|数据大小:{format_size(len(compressed_text))}|数据长度:{len(final_list)}|当前时间:{startpredict}")
        res = requests.post(url, data=compressed_text)
        endTime = datetime.now()
        predict_logger.info(
            f"[预测队列][{scene_type}]发送到收到耗时：{(endTime - startpredict).total_seconds()}s {(endTime - startpredict).total_seconds() * 1000 * 1000}us")
        predict_logger.info(
            f"[预测队列][{scene_type}]服务器返回,每条平均:█████:{(endTime - startpredict).total_seconds() * 1000 * 1000 / len(final_list)}us")
    elif compress == "pickle":
        # Pickle branch posts the raw list only (no model_type / requestCode
        # envelope) — presumably the server infers the model from the port;
        # TODO confirm against the service implementation.
        compressed_text = pickle.dumps(final_list)
        startpredict = datetime.now()
        res = requests.post(url, data=compressed_text)
        endTime = datetime.now()
        predict_logger.info(
            f"[预测队列][{scene_type}]∨∨∨∨∨∨∨∨∨∨∨∨ 数据大小:{format_size(len(compressed_text))}|数据长度:{len(final_list)}｜耗时{(endTime - startpredict).total_seconds()}s {(endTime - startpredict).total_seconds() * 1000 * 1000}us")
    else:
        # Fallback: uncompressed JSON body.
        startpredict = datetime.now()
        predict_logger.info(
            f"[预测队列][{scene_type}]准备传给预测|数据大小:{format_size(get_total_size(final_list))}|数据数量:{len(final_list)}|当前时间:{startpredict}")
        res = requests.post(url,
                            json={
                                "datas": final_list,
                                "model_type": scene_type,
                                "requestCode": f"{uuid4()}"})
        endTime = datetime.now()
        predict_logger.info(
            f"[预测队列][{scene_type}]发送到收到耗时：{(endTime - startpredict).total_seconds()}s {(endTime - startpredict).total_seconds() * 1000 * 1000}us")
        predict_logger.info(
            f"[预测队列][{scene_type}]服务器返回,每条平均:█████:{(endTime - startpredict).total_seconds() * 1000 * 1000 / len(final_list)}us")
    list_all = res.json()['prediction']
    if not list_all:
        # Guard: an empty prediction list would raise ZeroDivisionError in the
        # ratio logging below (and leave records untagged).
        predict_logger.info(f"[预测队列][{scene_type}]服务器返回预测结果为空")
        return []
    # Predictions may arrive as ints or strings; compare via f-string like the rest of the file.
    len1 = sum(1 for item in list_all if f'{item}' == '1')
    len0 = sum(1 for item in list_all if f'{item}' == '0')

    # Accumulate per-scene running totals. `.get(..., 0)` replaces the duplicated
    # key-exists branches and tolerates a partially initialised dict.
    public_dict[f'{scene_type}_all'] = public_dict.get(f'{scene_type}_all', 0) + len(list_all)
    public_dict[f'{scene_type}_0'] = public_dict.get(f'{scene_type}_0', 0) + len0
    public_dict[f'{scene_type}_1'] = public_dict.get(f'{scene_type}_1', 0) + len1
    predict_logger.info(
        f"[预测队列][{scene_type}]本次预测结果|0:{len0}|1:{len1}|共:{len(list_all)}|异常占比:{round((len1 / len(list_all)) * 100, 4)}%|正常占比:{round((len0 / len(list_all)) * 100, 4)}%")
    all0 = public_dict[f'{scene_type}_0']
    all1 = public_dict[f'{scene_type}_1']
    total = public_dict[f'{scene_type}_all']  # renamed: `all` shadowed the builtin
    predict_logger.info(
        f"[预测队列][{scene_type}]累积预测结果|0:{all0}|1:{all1}|共:{total}|异常占比:{round((all1 / total) * 100, 4)}%|正常占比:{round((all0 / total) * 100, 4)}%|")
    # Tag each input record with its prediction (order is assumed to match the request).
    for i in range(len(final_list)):
        final_list[i]['predict_res'] = list_all[i]
    predict_logger.info(f"[预测队列][{scene_type}]预测完毕，当前预测队列数量：{save_queue.qsize()}\n")
    origin_data_list = [item for item in final_list if f"{item['predict_res']}" == "1"]
    if len(origin_data_list) != 0:
        save_queue.put((origin_data_list, stream_map, scene_type))


def predict_direct(data_list, stream_map, scene, global_dict, save_queue):
    """Split a batch by protocol into the four scenes and predict each enabled one.

    ``scene`` names the enabled scenes (membership is tested with ``in``); only
    those subsets are sent to their prediction service via predict_by_scene,
    which accumulates counters in ``global_dict`` and enqueues anomalies on
    ``save_queue``.
    """
    # `start_time`, not `time`: the original name shadowed the stdlib time module.
    start_time = datetime.now()
    normal, webshell, dns, cs = split_data(data_list, is_df=False, use_protocol=True)
    for scene_name, subset in (("normal", normal), ("webshell", webshell), ("dns", dns), ("cs", cs)):
        if scene_name in scene:
            predict_by_scene(subset, scene_name, global_dict, save_queue, stream_map)
    predict_logger.info(f"[预测队列]请求预测接口总耗时:{datetime.now() - start_time}\n\n")


def start_predict_queue(predict_queue, save_queue, shared_dict):
    """Worker loop: pull prediction jobs off ``predict_queue`` until a None sentinel.

    Each job is a 5-tuple (records, stream_map, scene, start_time, end_time);
    it is handed to predict_direct, which pushes anomalous records onto
    ``save_queue`` and accumulates counters in ``shared_dict``.
    """
    try:
        while True:
            predict_logger.info("[预测队列]开始等待")
            job = predict_queue.get()
            # A None job is the shutdown sentinel.
            if job is None:
                predict_logger.info("[预测队列]已停止")
                break
            batch, streams, scene, t_start, t_end = job
            predict_logger.info(
                f"[预测队列]当前数据的时间：{t_start.strftime('%Y-%m-%d %H:%M:%S')}-{t_end.strftime('%Y-%m-%d %H:%M:%S')}")
            predict_direct(batch, streams, scene, shared_dict, save_queue)
            # Reclaim the (potentially large) batch before waiting on the next one.
            gc.collect()
            predict_logger.info(f"[预测队列]队列数量:{predict_queue.qsize()}")
            time.sleep(1)
    except Exception:
        predict_logger.info("[预测队列]报错")
        traceback.print_exc()