import gc
import time
import traceback
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime

import pandas as pd
from xbase_util.add_column_util import handle_dns
from xbase_util.common_util import build_es_expression, extract_session_fields, split_data_by_num
from xbase_util.es_db_util import EsDb
from xbase_util.handle_features_util import handle_uri, handle_ua
from global_ver import es_req, global_geo_util, base_config, sceneMap, capture_logger, error_logger
from src.util.package import PacketUtil
from src.util.session import scroll_slice_method_test


def circle_capture(expression, start_time, end_time, manager):
    """Pull sessions from ES for [start_time, end_time], enrich their fields,
    and read the matching pcap payloads through a process pool.

    Args:
        expression: Arkime filter expression used to build the ES query.
        start_time: window start as a "%Y-%m-%d %H:%M:%S" string.
        end_time: window end as a "%Y-%m-%d %H:%M:%S" string.
        manager: multiprocessing manager handed to EsDb for shared state.

    Returns:
        (final_list, stream_map): enriched session records plus their pcap
        stream data; ([], {}) when nothing matched or on any error.
    """
    # NOTE(review): this local was previously named `time`, shadowing the
    # stdlib `time` module imported at the top of the file.
    step_start = datetime.now()
    config_session_size = 5000
    config_session_alive = "10m"
    config_start_time = start_time
    config_end_time = end_time
    config_expression = expression
    # Tuning notes, "threads:minutes-range" observed per configuration:
    # with 80 threads
    # 15:8-20
    # with 12 processes
    # 150:4-11
    # 160:5-13
    # 120:4-11
    # 100:3-11.7
    # 80:3.58-10.9
    # 70:6-13

    # (machine not idle)
    # 12-80:8-20
    # 12-120:9-20
    # 12-70:3-12 (training)
    # 12-70:3-10 (not training)
    config_pcap_thread_in_process = 12
    config_processor_count = 70
    try:
        # Step 1: build the bounded ES query and scroll all matching sessions.
        es_expression = build_es_expression(size=config_session_size,
                                            start_time=datetime.strptime(config_start_time,
                                                                         "%Y-%m-%d %H:%M:%S"),
                                            end_time=datetime.strptime(config_end_time, "%Y-%m-%d %H:%M:%S"),
                                            arkime_expression=config_expression, bounded_type="last")
        session_list = scroll_slice_method_test(es_expression, config_session_alive)
        capture_logger.info(f"[提数队列]第一步，获取session {len(session_list)}条，耗时:{datetime.now() - step_start}")
        step_start = datetime.now()
        if not session_list:
            return [], {}
        # Step 2: flatten every session into a full field dict (adds geo info).
        data_list = extract_session_fields(session_list, global_geo_util)
        capture_logger.info(f"[提数队列]第二步，根据session获取全部字段耗时:{datetime.now() - step_start}")
        step_start = datetime.now()
        # Step 3: derive feature columns from URI, User-Agent and DNS host.
        df = handle_uri(pd.DataFrame(data_list), use_tqdm=False)
        df = handle_ua(df, use_tqdm=False)
        df = handle_dns(df, isDataFrame=True, use_tqdm=False)
        data_list = df.to_dict(orient='records')
        capture_logger.info(f"[提数队列]第三步，处理URI、UA、DNS_HOST耗时:{datetime.now() - step_start}")
        step_start = datetime.now()
        # Step 4: fan the records out to worker processes that read the pcaps.
        global_esDb = EsDb(es_req, manager)
        final_list = []
        stream_map = {}
        chunks = split_data_by_num(data_list, config_processor_count)
        with ProcessPoolExecutor(max_workers=config_processor_count) as executor:
            capture_logger.info("[提数队列]开始多进程读取pcap")
            futures = []
            for idx, chunk in enumerate(chunks):
                capture_logger.info(f"[提数队列]给Core {idx + 1}/{len(chunks)} 分到的数据{len(chunk)}条")
                futures.append(executor.submit(PacketUtil.package_session_process,
                                               config_pcap_thread_in_process,
                                               chunk,
                                               base_config['path_prefix'],
                                               global_esDb))
            for core_index, future_item in enumerate(futures):
                # NOTE(review): the per-chunk result names previously reused
                # `session_list` and shadowed the builtin `map`.
                chunk_sessions, chunk_stream_map = future_item.result()
                final_list.extend(chunk_sessions)
                stream_map.update(chunk_stream_map)
                capture_logger.info(f"[提数队列]当前已处理{len(final_list)}条，内核进度{core_index + 1}/{len(futures)}")
        capture_logger.info(f"[提数队列]第四步，共:{len(final_list)}读取pcap耗时:{datetime.now() - step_start}")
        # Release the large intermediates before returning to keep peak memory
        # down; previously the empty-result early return skipped this cleanup.
        del session_list
        del df
        del chunks
        del data_list
        gc.collect()
        if not final_list:
            return [], {}
        return final_list, stream_map
    except Exception as e:
        traceback.print_exc()
        error_logger.info(f"[提数队列]报错：{e}")
        return [], {}


def start_capture_queue(capture_queue, predict_queue, manager):
    """Worker loop: consume capture tasks, fetch/enrich data via
    circle_capture, and push non-empty results onto predict_queue.

    Args:
        capture_queue: task queue yielding (start_time, end_time, exp, scene)
            tuples — datetime bounds, optional expression string, scene id.
            A None item is the shutdown sentinel and stops the loop.
        predict_queue: output queue receiving
            (data_list, stream_map, scene, start_time, end_time).
        manager: multiprocessing manager forwarded to circle_capture.
    """
    try:
        while True:
            capture_logger.info("[提数队列]开始等待")
            param = capture_queue.get()
            if param is None:
                capture_logger.info("[提数队列]队列任务结束")
                break
            start_time, end_time, exp, scene = param
            capture_logger.info(f"[提数队列]本次时间范围：start_time:{start_time}  end_time:{end_time}")
            t = datetime.now()
            data_list, stream_map = circle_capture(
                # Fall back to the configured base expression when the task
                # carries no explicit one. (Was `sceneMap[f'expression']` —
                # a pointless f-string with no placeholders.)
                expression=exp if exp is not None else sceneMap['expression']['base_expression'],
                start_time=start_time.strftime("%Y-%m-%d %H:%M:%S"),
                end_time=end_time.strftime("%Y-%m-%d %H:%M:%S"),
                manager=manager,
            )
            capture_logger.info(
                f"[提数队列]获取数据{len(data_list) if data_list is not None else 0}条总耗时:{datetime.now() - t}")
            if not data_list:
                continue
            predict_queue.put((data_list, stream_map, scene, start_time, end_time))
            capture_logger.info(f"[提数队列]队列数量:{capture_queue.qsize()}")
            time.sleep(1)
            gc.collect()
    except Exception as e:
        traceback.print_exc()
        # Include the exception in the log (was caught but dropped), matching
        # circle_capture's error-reporting style.
        capture_logger.info(f"[提数队列]获取数据队列报错：{e}")