import multiprocessing
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Pool

import numpy as np
import pandas as pd
from scapy.all import *
from xbase_util.common_util import check_path, process_origin_pos, get_packets_percentage, get_statistic_fields, \
    get_res_status_code_list, get_cookie_end_with_semicolon_count, get_ua_duplicate_count, get_dns_domain, \
    get_dns_domain_suffix, split_samples, split_process, date2s
from xbase_util.db.bean import ConfigBean
from xbase_util.packet_util import get_all_columns, get_detail_by_package
from xbase_util.pcap_util import process_session_id_disk_simple, reassemble_tcp_pcap, reassemble_session_pcap
from xbase_util.xbase_constant import statisticHeader, src_dst_header, abnormal_features_column, plain_body_columns

from base.bean.beans import ProcedureEnum
from base.bean.status_enum import StatusEnum
from base.util.common_util import split_data_by_chunk
from base.util.file_util import gen_catalogue_path
from base.util.redis_util import UpdateStatusParams
from global_ver import out_folder


class PacketUtil:
    """Static helpers for reading pcap/session data, computing per-session
    statistic fields, and persisting results to CSV.

    NOTE(review): several names used below (``traceback``, ``datetime``,
    ``io``, ``os``) are not imported explicitly in this module — they appear
    to be provided by ``from scapy.all import *``. Confirm before touching
    the import block.
    """

    @staticmethod
    def save_packets(origin_data, file_path, file_lock):
        """
        Persist the per-session records (built after reading the pcap data)
        to a CSV file, appending when the file already exists and has content.

        :param origin_data: list of per-session dicts to write
        :param file_path: target CSV path
        :param file_lock: cross-process lock guarding the target file
        :return: None
        """
        print(f"开始保存:{len(origin_data)}")
        with file_lock:
            import os
            # csv_file_path and file_path are the same value; both names are
            # used interchangeably below.
            csv_file_path = file_path
            file_exists = os.path.isfile(csv_file_path)
            # Write the header row only for a brand-new (or empty) file.
            write_header = not file_exists or os.path.getsize(csv_file_path) == 0
            df = pd.DataFrame(origin_data)
            all_columns = get_all_columns(contains_packet_column=True,
                                          contains_src_dst_column=True,
                                          contains_statistic_column=True,
                                          contains_features_column=True, contains_plain_body_column=True,
                                          contains_abnormal_features_column=True, )
            # Force a stable column set and order; missing columns become ''.
            df = df.reindex(columns=all_columns, fill_value='')
            # packetPos is only needed while locating pcap data on disk —
            # it is dropped from the persisted output.
            df.drop(columns=['packetPos'], inplace=True)
            all_columns.remove('packetPos')
            # Aggregate packet-size statistics per (destination.ip, destination.port).
            df['same_src_dst_size_mean'] = df.groupby(['destination.ip', 'destination.port'])[
                'packet_size_mean'].transform('mean')
            df['same_src_dst_size_var'] = df.groupby(['destination.ip', 'destination.port'])[
                'packet_size_mean'].transform('var')
            # Single-member groups produce NaN variance — normalize to the -1 sentinel.
            df['same_src_dst_size_var'] = df['same_src_dst_size_var'].fillna(-1)
            df['same_src_dst_size_mean'] = df['same_src_dst_size_mean'].fillna(-1)
            if write_header:

                df.to_csv(check_path(file_path), index=False, columns=all_columns)
            else:
                df.to_csv(check_path(csv_file_path), mode='a', index=False, header=False, columns=all_columns)

    @staticmethod
    def get_typed(all_packages, publicField):
        """
        Expand a session that contains multiple request/response packages
        into one detailed record per package.

        :param all_packages: reassembled request/response package dicts
        :param publicField: session-level fields shared by every package
        :return: list of detail records; [] on any error (error is printed)
        """
        try:
            return [
                get_detail_by_package(publicField, package['req_header'], package['req_body'], package['res_header'],
                                      package['res_body']) for package in all_packages]
        except Exception as e:
            traceback.print_exc()
            print(f"get_typed:{e}")
            return []

    @staticmethod
    def thread_in_process(base_config,
                          index_session, session, per_process,
                          start_time,
                          log, queue, esdb, showLog):
        """
        Worker run per session inside a thread: reads the session's pcap
        data and returns one or more complete records with the statistic
        fields filled in.

        :param base_config: configuration dict (pcap path prefix etc.)
        :param index_session: position of this session in the batch
        :param session: raw session dict (mutated in place)
        :param per_process: the whole batch handled by this process (used
            only for progress logging)
        :param start_time: overall start time, for elapsed-time logging
        :param log: log prefix string
        :param queue: status queue (may be falsy); receives error updates
        :param esdb: file-cache helper used when reading pcap files
        :param showLog: whether to print progress logs
        :return: list of record dicts (one per reassembled package for
            tcp/http sessions, otherwise a single record)
        """
        initial_time = datetime.now()
        protocol = session['protocol']
        read_packet_log = ""
        # Pre-seed every statistic/src-dst column with a sentinel:
        # -1 for numeric-suffixed fields, "" for everything else.
        for h in src_dst_header + statisticHeader:
            if h.endswith("_var") or h.endswith("_mean") or h.endswith("_count") or h.endswith(
                    "_average") or h.endswith("_rate") or h.endswith("_min") or h.endswith("_max") or h.endswith(
                "_length") or h.endswith("_percentage"):
                session.update({h: -1})
            else:
                session.update({h: ""})
        pos = process_origin_pos(session['packetPos'])
        # Defaults for DNS/HTTP-derived fields; overwritten below when the
        # protocol matches and packets can be read.
        session.update({
            "dns_base_domain": "",
            "dns_base_domain_length": -1,
            "dns_domain_suffix": "",
            "dns_domain_suffix_length": -1,
            "dns_domain": "",
            "dns_domain_length": -1,
            "dns_domain_length_mean": -1,
            "dns_domain_length_var": -1,
            "req_res_period_mean": -1,
            "req_res_period_var": -1,
            "status_code_1x_count": -1,
            "status_code_2x_count": -1,
            "status_code_3x_count": -1,
            "status_code_4x_count": -1,
            "status_code_5x_count": -1,
            "req_bytes_percentage": get_packets_percentage(session, True),
            "res_bytes_percentage": get_packets_percentage(session, False),
            "cookie_end_with_semicolon_count": -1,
            "ua_duplicate_count": -1,
        })
        for plain_body in plain_body_columns:
            session.update({plain_body: ""})
        for abnormal_features in abnormal_features_column:
            session.update({abnormal_features: False})
        # No packet positions — nothing to read; return the session as-is.
        if len(pos) == 0:
            print("pos len is 0")
            return [PacketUtil.edit_session_and_save(session=session)]

        try:
            # Only tcp+http and dns sessions need the raw packet stream.
            need_read_stream = ("tcp" in protocol and "http" in protocol) or "dns" in protocol
            if need_read_stream:
                stream, packet_list = process_session_id_disk_simple(id=session['id'], node=session['node'],
                                                                     packet_pos=pos, esdb=esdb,
                                                                     pcap_path_prefix=base_config['pcap'][
                                                                         'path_prefix'])
                if stream is None:
                    return [PacketUtil.edit_session_and_save(session=session)]
                packets_scapy = rdpcap(io.BytesIO(stream))
                if packets_scapy is None or len(packets_scapy) == 0:
                    return [PacketUtil.edit_session_and_save(session=session)]
                if "tcp" in protocol and "http" in protocol:
                    session.update(get_statistic_fields(packets_scapy))
                    # Packets whose key equals skey are requests (from source),
                    # others are responses.
                    skey = f"{session['source.ip']}:{session['source.port']}"
                    all_packets = reassemble_session_pcap(reassemble_tcp_pcap(packets_scapy), skey,
                                                          session_id=session['id'])
                    if len(all_packets) != 0:
                        all_req_size = [item['req_size'] for item in all_packets if item['key'] == skey]
                        all_res_size = [item['res_size'] for item in all_packets if item['key'] != skey]
                        num_1, num_2, num_3, num_4, num_5 = get_res_status_code_list(all_packets)
                        # Approximate request-header field count by counting ':'.
                        req_header_count_list = [req['req_header'].count(":") for req in all_packets]
                        # Inter-arrival gaps between consecutive requests.
                        request_flattened_time = [item['req_time'] for item in all_packets]
                        request_time_diffs = [request_flattened_time[i + 1] - request_flattened_time[i] for i in
                                              range(len(request_flattened_time) - 1)]
                        # NOTE(review): `or 0` only replaces a falsy 0.0; NaN
                        # (nanmean/nanvar of an empty diff list, e.g. a single
                        # package) is truthy and passes through — confirm intended.
                        request_mean_diff = round(np.nanmean(request_time_diffs), 5) or 0
                        request_variance_diff = round(np.nanvar(request_time_diffs), 5) or 0
                        # Inter-arrival gaps between consecutive responses.
                        response_flattened_time = [item['res_time'] for item in all_packets]
                        response_time_diffs = [response_flattened_time[i + 1] - response_flattened_time[i] for i in
                                               range(len(response_flattened_time) - 1)]
                        response_mean_diff = round(np.nanmean(response_time_diffs), 5) or 0
                        response_variance_diff = round(np.nanvar(response_time_diffs), 5) or 0

                        # Request→response latency, only where both timestamps exist.
                        time_period = [(abs(item['res_time'] - item['req_time'])) for item in
                                       all_packets if item['res_time'] != 0 and item['req_time'] != 0]

                        session.update({
                            "all_req_packet_size_mean": round(np.nanmean(all_req_size), 5) or 0,
                            "all_req_packet_size_var": round(np.nanvar(all_req_size), 5) or 0,
                            "all_req_packet_time_period_mean": request_mean_diff,
                            "all_req_packet_time_period_var": request_variance_diff,
                            "all_res_packet_size_mean": round(np.nanmean(all_res_size), 5) or 0,
                            "all_res_packet_size_var": round(np.nanvar(all_res_size), 5) or 0,
                            "all_res_packet_time_period_mean": response_mean_diff,
                            "all_res_packet_time_period_var": response_variance_diff,
                            "req_header_count_mean": round(np.nanmean(req_header_count_list), 5) or 0,
                            "req_header_count_var": round(np.nanvar(req_header_count_list), 5) or 0,
                            "req_res_period_mean": round(np.nanmean(time_period), 5) or 0,
                            "req_res_period_var": round(np.nanvar(time_period), 5) or 0,
                            "status_code_1x_count": num_1,
                            "status_code_2x_count": num_2,
                            "status_code_3x_count": num_3,
                            "status_code_4x_count": num_4,
                            "status_code_5x_count": num_5,
                            "cookie_end_with_semicolon_count": get_cookie_end_with_semicolon_count(all_packets),
                            "ua_duplicate_count": get_ua_duplicate_count(all_packets),
                        })
                        if showLog:
                            print(
                                f"{log}第{index_session + 1}/{len(per_process)}个session;本条session耗时：{datetime.now() - initial_time};总耗时：{str(datetime.now() - start_time)};{read_packet_log}")
                        # One record per reassembled request/response package.
                        return PacketUtil.get_typed(all_packets, publicField=session)
                elif "dns" in protocol:
                    dns_domain = get_dns_domain(packets_scapy)
                    dns_domain_length = len(dns_domain)
                    dns_domain_suffix = get_dns_domain_suffix(dns_domain)
                    dns_base_domain = dns_domain.replace(f".{dns_domain_suffix}", "")
                    dns_base_domain = f"{dns_base_domain}"
                    # NOTE(review): np.mean/np.var of the scalar
                    # dns_domain_length are identity and 0 respectively —
                    # confirm a per-label list was intended here.
                    session.update({
                        "dns_base_domain": dns_base_domain,
                        "dns_base_domain_length": len(dns_base_domain),
                        "dns_domain_suffix": dns_domain_suffix,
                        "dns_domain_suffix_length": len(dns_domain_suffix),
                        "dns_domain": dns_domain,
                        "dns_domain_length": dns_domain_length,
                        "dns_domain_length_mean": round(np.mean(dns_domain_length), 5),
                        "dns_domain_length_var": round(np.var(dns_domain_length), 5)})
                    if showLog:
                        print(
                            f"{log}第{index_session + 1}/{len(per_process)}个session;本条session耗时：{datetime.now() - initial_time};总耗时：{str(datetime.now() - start_time)};{read_packet_log}")
                    return [PacketUtil.edit_session_and_save(session=session)]
            else:
                if showLog:
                    print(
                        f"{log}第{index_session + 1}/{len(per_process)}个session;本条session耗时：{datetime.now() - initial_time};总耗时：{str(datetime.now() - start_time)};{read_packet_log}")
                return [PacketUtil.edit_session_and_save(session=session)]
        except Exception as e:
            # Report the failure on the status queue (best-effort) and fall
            # through to returning the partially-filled session.
            if queue:
                queue.put(
                    UpdateStatusParams(status=StatusEnum.error.value, procedure=ProcedureEnum.pcap.value, error=f'{e}'))
            print(f"错误:{e}")
            traceback.print_exc()
        # Reached when tcp/http reassembly yielded no packets, or on error.
        return [PacketUtil.edit_session_and_save(session=session)]

    @staticmethod
    def package_session_process(config: ConfigBean, per_process, start_time, log, base_config, queue, esdb,
                                showLog):
        """
        Process one batch of sessions inside a worker process: fan the
        sessions out over a thread pool running ``thread_in_process`` and
        return the flattened list of result records ([] on error).
        """
        try:
            with ThreadPoolExecutor(max_workers=config.pcap_thread_in_process) as pool:
                futures = []
                for index_session, session in enumerate(per_process):
                    future = pool.submit(PacketUtil.thread_in_process,
                                         base_config,
                                         index_session, session, per_process,
                                         start_time, log, queue, esdb, showLog)
                    futures.append(future)
                res = []
                for index, future in enumerate(futures):
                    res.extend(future.result())
                return res
        except Exception as e:
            traceback.print_exc()
            return []

    @staticmethod
    def edit_session_and_save(session):
        """Ensure ``initRTT`` and ``length`` exist (default 0) and return
        the (mutated) session dict."""
        session["initRTT"] = session.get("initRTT", 0)
        session["length"] = session.get("length", 0)
        return session

    @staticmethod
    def get_and_save_pcap(session, esdb, path_prefix, log):
        """
        Fetch the raw pcap stream for one session and write it to
        ``out_folder/extract_pcap/<session id>.pcap``. Best-effort: any
        exception is printed and swallowed.
        """
        print(log)
        try:
            stream, packet_list = process_session_id_disk_simple(session['id'],
                                                                 session['node'],
                                                                 process_origin_pos(session['packetPos']),
                                                                 esdb,
                                                                 path_prefix)
            if stream is not None:
                # Only write when the identifying fields are present and non-empty.
                if 'source.ip' in session and 'id' in session and 'source.port' in session:
                    session_id = session['id']
                    if session['source.ip'] != '' and session['source.port'] != '' and session_id != '':
                        print("开始写入")
                        with open(check_path(os.path.join(out_folder, "extract_pcap", f"{session_id}.pcap")),
                                  'wb') as f:
                            f.write(stream)
        except Exception as e:
            traceback.print_exc()

    @staticmethod
    def extract_pcap_thread(data_list, esdb, path_prefix, process_index):
        """
        Extract one chunk of sessions using a 10-thread pool; blocks until
        every ``get_and_save_pcap`` call completes.

        NOTE(review): the "/12" in the log string is hard-coded and may not
        match the actual number of chunks — confirm against extract_pcap.
        """
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = []
            for index, session in enumerate(data_list):
                future = executor.submit(PacketUtil.get_and_save_pcap,
                                         session,
                                         esdb,
                                         path_prefix, f"|进程：{process_index + 1}/12|线程进度:{index}/{len(data_list)}")
                futures.append(future)
            for item in futures:
                item.result()

    @staticmethod
    def extract_pcap(config: ConfigBean, path_prefix, esdb):
        """
        Read every session CSV under the catalogue's ``session/`` folder,
        split the rows into chunks (``split_data_by_chunk(..., 5)`` —
        presumably chunk size/count 5; confirm helper semantics) and extract
        each session's pcap in parallel across up to 12 processes.
        """
        session_id_file = f"{gen_catalogue_path(config.catalogue)}session/"
        df = pd.DataFrame()
        for index_path, path in enumerate(os.listdir(session_id_file)):
            print(f"[pcap]读取文件：{index_path + 1}/{len(os.listdir(session_id_file))}")
            df = pd.concat([df, pd.read_csv(f"{session_id_file}{path}", delimiter=',')], ignore_index=True)
        df = df.fillna("")
        data_list = df[['id', 'node', 'packetPos', 'source.ip', 'source.port']].to_dict(orient='records')
        data_list = split_data_by_chunk(data_list, 5)
        with ProcessPoolExecutor(max_workers=12) as pool:
            futures = []
            for index, item_list in enumerate(data_list):
                future = pool.submit(PacketUtil.extract_pcap_thread, item_list, esdb, path_prefix, index)
                futures.append(future)
            for item in futures:
                item.result()
        print("写入完毕")

    @staticmethod
    def get_pcap(config: ConfigBean, base_config, queue, esdb) -> bool:
        """
        Main pcap-processing entry point: loads the session CSVs, splits
        them into subsections, and processes each subsection with a pool of
        worker processes (each running a thread pool per session batch).
        Results are flushed to numbered CSV files as they accumulate.

        :param config: job configuration (process/thread counts, split sizes)
        :param base_config: raw configuration dict (pcap path prefix)
        :param queue: status queue receiving progress/error updates
        :param esdb: file-cache helper used when reading pcap files
        :return: True on success, False when the session folder is missing
        """
        print(f"当前pcap包位置用：{base_config['pcap']['path_prefix']}")
        queue.put(
            UpdateStatusParams(status=StatusEnum.processing.value, process=0.4, procedure=ProcedureEnum.pcap.value))
        session_id_file = f"{gen_catalogue_path(config.catalogue)}session/"
        pcap_file = f"{gen_catalogue_path(config.catalogue)}pcap/"
        if not os.path.exists(session_id_file):
            print(f"[pcap]{session_id_file} file not found")
            queue.put(
                UpdateStatusParams(status=StatusEnum.error.value, error=f"{session_id_file} file not found",
                                   procedure=ProcedureEnum.pcap.value,
                                   process=0.5,
                                   execEndTime=date2s(datetime.now())))
            queue.put(None)
            return False
        df = pd.DataFrame()
        for index_path, path in enumerate(os.listdir(session_id_file)):
            print(f"[pcap]读取文件：{index_path + 1}/{len(os.listdir(session_id_file))}")
            df = pd.concat([df, pd.read_csv(f"{session_id_file}{path}", delimiter=',')], ignore_index=True)
        df = df.fillna("")
        sample = df.to_dict(orient='records')
        start_time = datetime.now()
        num_subsections, subsection_sizes = split_samples(sample, config.pcap_per_subsection)
        start_index = 0
        file_index = 0
        # Manager lock so worker processes serialize CSV writes.
        manager = multiprocessing.Manager()
        file_lock = manager.Lock()
        final_list = []
        for ind, item in enumerate(subsection_sizes):
            subsection_item = sample[start_index:start_index + item]
            res = split_process(subsection_item, config.pcap_process)
            split_process_res = [x for x in res if x != 0]
            params = []
            subsection_index = 0
            # Slice the subsection into one contiguous batch per process.
            for index, item2 in enumerate(split_process_res):
                per_process = subsection_item[subsection_index:subsection_index + item2]
                params.append((
                    config,
                    per_process,
                    start_time,
                    f"第{ind + 1}/{len(subsection_sizes)}个分段；第{index + 1}/{len(split_process_res)}个进程;",
                    base_config, queue, esdb
                    , False
                ))
                subsection_index += item2
            pool = Pool(processes=len(split_process_res))
            results = [pool.apply_async(PacketUtil.package_session_process, argument) for argument in params]
            for index, result in enumerate(results):
                final_list.extend(result.get())
            pool.close()
            pool.join()
            print(f"[pcap]本次分段执行完毕:{len(final_list)}")
            # NOTE(review): each pass writes the ENTIRE final_list and then
            # drops only splitNumber entries, so the remainder is re-written
            # into the next file — confirm whether final_list[:splitNumber]
            # was intended here.
            while len(final_list) >= int(config.splitNumber):
                file_index = file_index + 1
                print(f"[pcap]保存到{file_index}.csv")
                PacketUtil.save_packets(origin_data=final_list, file_path=f"{pcap_file}{file_index}.csv",
                                        file_lock=file_lock)
                final_list = final_list[int(config.splitNumber):]
            start_index += item
        print("[pcap]全部分段执行完毕，再次检测")
        # Flush whatever is left over after the last subsection.
        if len(final_list) != 0:
            file_index = file_index + 1
            print(f"[pcap]最后还有{len(final_list)}个，写入{file_index}.csv")
            PacketUtil.save_packets(origin_data=final_list, file_path=f"{pcap_file}{file_index}.csv",
                                    file_lock=file_lock)

        print("[pcap]包分段处理完毕，记得启动 arkime capture")
        queue.put(UpdateStatusParams(status=StatusEnum.finished.value, procedure=ProcedureEnum.pcap.value, process=0.5))
        return True
