import copy
import io
import multiprocessing
import os
import traceback
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
from os import cpu_count

import numpy as np
import pandas as pd
from scapy.utils import rdpcap, wrpcap
from tqdm import tqdm
from xbase_util.common_util import extract_session_fields, split_data_by_num, entropy, get_dns_domain, \
    process_origin_pos, get_statistic_fields
from xbase_util.es_db_util import EsDb
from xbase_util.handle_features_util import handle_uri, handle_ua
from xbase_util.pcap_util import process_session_id_disk_simple, reassemble_session_pcap, reassemble_tcp_pcap

from src.col_bean.dns_domain_enum import ColDnsDomainEnum
from src.constant import field_map, pcap_path_prefix, ColEntropyFieldEnum, ColTimeFieldEnum, req, hide_log, server, \
    project_root_path
from src.extract.packet_fill_util import fill_geo, fill_session_before_split_packet, fill_single_packet, \
    fill_session_after_single, fill_statistic_field, fill_single_empty
from src.model.model_common_util import remove_df
from src.util.common_util import printx
from src.util.config_manager import ConfigManager
from src.value_bean.value_dns_domain_enum import ValueDnsDomainEnum


def get_session_from_es(scroll_exp, alive: str):
    """
    Scroll-fetch session documents from Elasticsearch.

    :param scroll_exp: ES query expression for the initial scroll request.
    :param alive: scroll keep-alive duration (e.g. "30m").
    :return: list of all hit documents; [] when the first request fails or
             returns no data.
    """
    printx(req.clear_all_scroll())
    try:
        res = req.start_scroll(exp=scroll_exp, scroll=alive, index="arkime_sessions3-*")
        res = res.json()
        scroll_id = res['_scroll_id']
        results = res['hits']['hits']
        if not results:
            printx("首次没有数据")
            return []
    except Exception:
        printx("首次请求报错:")
        traceback.print_exc()
        return []
    consecutive_errors = 0
    while True:
        origin_res = req.scroll_by_id(scroll_id=scroll_id, scroll=alive)
        try:
            res = origin_res.json()
            current_data = res['hits']['hits']
            if not current_data:
                # Empty page: scroll is exhausted.
                break
            scroll_id = res['_scroll_id']
            results += current_data
            consecutive_errors = 0
        except Exception as e:
            printx(origin_res.text)
            traceback.print_exc()
            printx(f"[session]查询出错：{e},将继续查询", is_error=True)
            # Bound the retries: the old unconditional `continue` could spin
            # forever if the scroll context expired or ES kept returning
            # non-JSON responses.
            consecutive_errors += 1
            if consecutive_errors >= 10:
                printx("[session]连续失败次数过多，停止查询", is_error=True)
                break
    return results


def get_session_thread_chunk_session2(data, process_index, process_count, thread_index, thread_count):
    """
    Worker that enriches a chunk of session records with pcap-derived fields.

    DNS sessions get domain/entropy fields; HTTP sessions are TCP-reassembled
    and may expand into several output records (one per request/response
    pair); everything else is passed through with empty single-packet fields.

    :param data: list of session dicts (a DataFrame is converted to records).
    :param process_index: index of the owning process (log output only).
    :param process_count: total number of processes (log output only).
    :param thread_index: index of this thread (log output only).
    :param thread_count: total number of threads (log output only).
    :return: list of enriched session dicts.
    """
    copied_session_list = []
    if isinstance(data, pd.DataFrame):
        data = data.to_dict(orient='records')
    # One Manager per chunk: multiprocessing.Manager() spawns a helper
    # process, so creating it per-session (as before) was needlessly costly.
    manager = multiprocessing.Manager()
    strlog = ""
    for index, s in enumerate(data):
        session = copy.deepcopy(s)
        strlog = strlog + f"id:{session['id']}"
        if hide_log:
            printx(
                f"进程:{process_index}/{process_count},线程:{thread_index}/{thread_count}，session:{index}/{len(data)}")
        fill_geo(session)
        byte_array, obj = process_session_id_disk_simple(session['id'], session['node'],
                                                         process_origin_pos(session['packetPos']),
                                                         EsDb(req, manager),
                                                         pcap_path_prefix)
        if byte_array is None:
            strlog = strlog + '没有byte_array,添加1\n'
            fill_single_empty(session)
            # 7月8号要求：没有pcap包就不获取session了
            copied_session_list.append(session)
        elif 'dns' in session['protocol']:
            pcap = rdpcap(io.BytesIO(byte_array))
            dns_domain = get_dns_domain(pcap)
            wrpcap(os.path.join(project_root_path, 'pcap', f"{session['id']}.pcap"), pcap)
            if dns_domain is None or dns_domain == "":
                dns_domain = ValueDnsDomainEnum.dns_no_domain.value
                dns_domain_entropy = 0.0
            else:
                dns_domain_entropy = entropy(dns_domain)
            session.update({ColDnsDomainEnum.dns_domain.value: dns_domain,
                            ColEntropyFieldEnum.dns_domain.value: dns_domain_entropy})
            fill_single_empty(session)
            copied_session_list.append(session)
            strlog = strlog + 'dns,添加1\n'
        elif 'http' in session['protocol']:
            try:
                pcap = rdpcap(io.BytesIO(byte_array))
                wrpcap(os.path.join(project_root_path, 'pcap', f"{session['id']}.pcap"), pcap)
                session.update(get_statistic_fields(pcap))
                tcp = reassemble_tcp_pcap(pcap)
                all_packets = reassemble_session_pcap(tcp, f"{session['source.ip']}:{session['source.port']}",
                                                      session_id=session['id'])
                fill_session_before_split_packet(session, all_packets)
                if len(all_packets) == 1:
                    req_body_text = all_packets[0]['req_body']
                    req_body_bytes = all_packets[0]['req_body_bytes']
                    res_body_text = all_packets[0]['res_body']
                    res_body_bytes = all_packets[0]['res_body_bytes']
                    req_header = all_packets[0]['req_header']
                    res_header = all_packets[0]['res_header']
                    fill_single_packet(session, req_body_text, res_body_text, req_body_bytes, res_body_bytes,
                                       all_packets[0]['req_body_parse_err'], all_packets[0]['res_body_parse_err'],
                                       session['firstPacket'], session['lastPacket'], req_header, res_header)
                    fill_session_after_single([session],
                                              req_body_bytes,
                                              res_body_bytes,
                                              req_body_bytes + res_body_bytes,
                                              time_diff_mean=session['lastPacket'] - session['firstPacket'])
                    copied_session_list.append(session)
                    strlog = strlog + 'http1个包,添加1\n'
                else:
                    session_req_body = b''
                    session_res_body = b''
                    session_all_body = b''
                    temp_copied_session_list = []
                    req_time_list, res_time_list, time_tiff_list = [], [], []
                    strlog = strlog + f'http包数量,将要添加{len(all_packets)}\n'
                    for pkt in all_packets:
                        copied_session = copy.deepcopy(session)
                        req_time_list.append(pkt['req_time'])
                        if pkt['res_time'] != 0:
                            res_time_list.append(pkt['res_time'])
                            time_tiff_list.append(pkt['res_time'] - pkt['req_time'])
                        req_body_text = pkt['req_body']
                        req_body_bytes = pkt['req_body_bytes']
                        session_req_body += req_body_bytes
                        res_body_text = pkt['res_body']
                        res_body_bytes = pkt['res_body_bytes']
                        session_res_body += res_body_bytes
                        # BUGFIX: was `session_all_body += session_req_body +
                        # session_res_body`, which re-appended the running
                        # totals on every packet and duplicated earlier
                        # bodies; only this packet's bodies belong here
                        # (matches the single-packet branch above).
                        session_all_body += req_body_bytes + res_body_bytes
                        fill_single_packet(copied_session, req_body_text, res_body_text, req_body_bytes,
                                           res_body_bytes, pkt['req_body_parse_err'],
                                           pkt['res_body_parse_err'], pkt['req_time'], pkt['res_time'],
                                           pkt['req_header'],
                                           pkt['res_header'])
                        temp_copied_session_list.append(copied_session)
                    req_time_diff_list = sorted(req_time_list)
                    req_time_diff = [req_time_diff_list[i + 1] - req_time_diff_list[i] for i in
                                     range(len(req_time_diff_list) - 1)]
                    res_time_diff_list = sorted(res_time_list)
                    res_time_diff = [res_time_diff_list[i + 1] - res_time_diff_list[i] for i in
                                     range(len(res_time_diff_list) - 1)]
                    # Guard against np.mean([]) -> NaN (single request, or no
                    # responses at all).
                    req_time_diff_mean = float(np.mean(req_time_diff)) if req_time_diff else 0.0
                    res_time_diff_mean = float(np.mean(res_time_diff)) if res_time_diff else 0.0
                    time_diff_mean = float(np.mean(time_tiff_list)) if time_tiff_list else 0.0
                    fill_session_after_single(temp_copied_session_list, session_req_body, session_res_body,
                                              session_all_body,
                                              req_time_diff_mean=req_time_diff_mean,
                                              res_time_diff_mean=res_time_diff_mean,
                                              time_diff_mean=time_diff_mean)
                    copied_session_list.extend(temp_copied_session_list)
                    strlog = strlog + f'http多个包,添加{len(temp_copied_session_list)}\n'
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; the session is kept un-enriched.
                traceback.print_exc()
                printx(f"reassemble_session_pcap报错 {session['id']}", is_error=True)
                copied_session_list.append(session)
                strlog = strlog + f'http报错,添加1\n'
        else:
            fill_single_empty(session)
            copied_session_list.append(session)
            strlog = strlog + f'{session["protocol"]},添加1\n'
    if not hide_log:
        if len(copied_session_list) < len(data):
            print(f"copied_session_list/data：{len(copied_session_list)}/{len(data)}  {strlog}")
    return copied_session_list


# entrance
def get_session_process_chunk_session(process_index, process_count, chunks):
    """
    处理session的进程
    :param data:
    :param process_index:
    :param process_count:
    :return:
    """
    with tqdm(total=len(chunks)) as pbs:
        if server == 140:
            max_workers = 5
        else:
            max_workers = 5
        sessions = []
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = []
            for thread_index, chunk in enumerate(chunks):
                futures.append(
                    executor.submit(get_session_thread_chunk_session2, chunk, process_index, process_count,
                                    thread_index,
                                    len(chunks)))

            for p, future in enumerate(futures):
                try:
                    sessions.extend(future.result())
                    if hide_log:
                        pbs.set_description(f"进程:{process_index:02d}")
                        pbs.update()
                except Exception:
                    traceback.print_exc()
            pbs.close()
    # print(f"返回{len(sessions)}")
    return sessions


def get_session(es_exp, config: ConfigManager, use_process=True) -> pd.DataFrame:
    """
    Fetch sessions from ES, enrich them with pcap-derived fields, and return
    a cleaned DataFrame. Geo is skipped here (need_geo=False) and filled later.

    :param es_exp: ES query expression.
    :param config: configuration manager used when filling statistic fields.
    :param use_process: when True, distribute chunks over a process pool;
                        otherwise process everything in the current thread.
    :return: DataFrame of enriched sessions; empty DataFrame when no data.
    """
    sessions = extract_session_fields(get_session_from_es(es_exp, "30m"), None, need_geo=False, check_dangerous=False)
    for session in sessions:
        session.update(field_map)
        session.update({ColTimeFieldEnum.session_time_diff.value: session['lastPacket'] - session['firstPacket']})
    chunks = [item for item in split_data_by_num(sessions, cpu_count()) if len(item) != 0]
    sessions_result = []
    if use_process:
        # Both branches of the former `if server == 140` check assigned 20,
        # so the check was dead code.
        max_workers = 20
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = []
            for process_index, chunk in enumerate(chunks):
                child_chunks = [item for item in split_data_by_num(chunk, 15) if len(item) != 0]
                futures.append(
                    executor.submit(get_session_process_chunk_session, process_index, len(chunks), child_chunks))
            for future in futures:
                try:
                    sessions_result.extend(future.result())
                except Exception:
                    # Print the failure detail instead of silently dropping
                    # the whole chunk with only a one-line notice.
                    traceback.print_exc()
                    printx("子进程异常:", is_error=True)
    else:
        sessions_result.extend(get_session_thread_chunk_session2(sessions, 0, 0, 0, 0))
    if len(sessions_result) == 0:
        return pd.DataFrame()
    fill_statistic_field(sessions_result, config, is_update_cache=True, is_abnormal=False)
    df = pd.DataFrame(sessions_result)
    df = remove_df(df)
    df = handle_uri(df, use_tqdm=False)
    df = handle_ua(df, use_tqdm=False)
    return df
