import json
import os
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import timedelta
from multiprocessing import cpu_count, Pool

import pandas as pd
from elasticsearch import Elasticsearch

from base.constant import out_folder
from base.util.common_util import build_expression
from base.util.file_util import get_config
from set_label.imperva import get_dangerous_from_imperva
from set_label.splunk import get_dangerous_ips


def split_list_into_chunks(lst, num_chunks):
    """Split a sliceable sequence into at most *num_chunks* contiguous chunks.

    Works on anything supporting ``len()`` and positional slicing (lists,
    pandas DataFrames, ...). Uses integer arithmetic: the previous float-step
    version could emit an extra chunk due to truncation error (e.g. a
    10-element list split into 3 produced 4 chunks) and produced empty
    chunks when ``num_chunks > len(lst)``.

    :param lst: sequence to split (must support ``len()`` and slicing)
    :param num_chunks: desired number of chunks, must be > 0
    :return: list of contiguous, order-preserving slices covering *lst*
    :raises ValueError: if ``num_chunks`` is not positive
    """
    if num_chunks <= 0:
        raise ValueError("num_chunks must be positive")
    base, extra = divmod(len(lst), num_chunks)
    chunks = []
    start = 0
    for i in range(num_chunks):
        # The first `extra` chunks carry one additional element so sizes
        # differ by at most one.
        size = base + (1 if i < extra else 0)
        if size == 0:
            break  # no empty trailing chunks when num_chunks > len(lst)
        chunks.append(lst[start:start + size])
        start += size
    return chunks


def process_chunk(param):
    """Look up one suspicious flow in Arkime/Elasticsearch and return its hits.

    :param param: tuple ``(index, total, process_index, traffic1, es)`` where
        ``traffic1`` is a ``(label, row)`` pair as produced by
        ``DataFrame.iterrows()`` and ``es`` is a connected Elasticsearch
        client. ``index``/``total``/``process_index`` are used for progress
        logging only.
    :return: list of raw ES hit dicts (empty when nothing matched)
    """
    # Fix: the original unpacked the second element into a local named `all`,
    # shadowing the builtin.
    index, total, process_index, traffic1, es = param
    traffic = traffic1[1]
    # IDS records carry the true source address; WAF records place the client
    # address in X-Forwarded-For, hence the ip.xff field. Only this field
    # differs between the two query variants.
    src_field = "ip.src" if traffic['type'] == "ids" else "ip.xff"
    exp = build_expression(
        size=100, start_time=None, end_time=None,
        arkime_expression=(
            f"{src_field} == {traffic['SIP']} && port.src == {traffic['S_PORT']}"
            f" && ip.dst == {traffic['DIP']} && port.dst == {traffic['D_PORT']}"
        ))

    matched_traffic = []
    response = es.search(body=exp)
    if response['hits']['total']['value'] > 0:
        print(f"{traffic['type']}有数据")
        matched_traffic.extend(response['hits']['hits'])
    else:
        print(f"{traffic['type']}没数据")
    print(f"分段进程：{process_index}|线程：{index}/{total}")
    return matched_traffic


def process_method(process_index, chunks, config_yaml):
    """Worker-process entry point: query Arkime for every flow in *chunks*.

    Builds its own Elasticsearch client (one per process) and fans the rows
    of *chunks* out to a 10-thread pool, collecting whatever each lookup
    matched. Failures of individual lookups are logged and skipped.

    :param process_index: ordinal of this worker process (logging only)
    :param chunks: pandas DataFrame slice of suspicious flows
    :param config_yaml: parsed configuration holding the ``es`` settings
    :return: flat list of matched ES hits from all rows
    """
    print(f"开启进程{process_index}")
    warnings.filterwarnings('ignore')
    es_conf = config_yaml['es']
    auth = (es_conf['account'], es_conf['password']) if es_conf['use_http_auth'] else None
    client = Elasticsearch(es_conf['url'],
                           basic_auth=auth,
                           verify_certs=False, timeout=1200)
    total = len(chunks)
    matched = []
    with ThreadPoolExecutor(max_workers=10) as pool:
        pending = [
            pool.submit(process_chunk, (i, total, process_index, row, client))
            for i, row in enumerate(chunks.iterrows())
        ]
        for fut in as_completed(pending):
            try:
                matched.extend(fut.result())
            except Exception as e:
                print(f"进程{process_index}中的任务执行出错: {e}")
    return matched


def get_all_dangerous_session(config_yaml):
    """Collect all suspicious sessions (IDS + WAF) and persist their session ids.

    Pulls flagged traffic from splunk (IDS) and imperva (WAF), tags each
    record's origin, then matches every flow against Arkime/Elasticsearch in
    parallel — one worker process per CPU, each running its own thread pool —
    and finally saves the matched session ids to the configured output file.

    :param config_yaml: parsed configuration with ``session`` and ``es`` keys
    """
    print("[获取异常session]开始")
    session_id_file = f"{out_folder}{os.sep}{config_yaml['session']['time_session_out']}"
    start_time = config_yaml['session']["start_time"]
    end_time = config_yaml['session']["end_time"]
    print(f"[获取异常session]时间范围：{start_time}----{end_time}")
    dangerous_ids = get_dangerous_ips(config_yaml, "")
    dangerous_waf = get_dangerous_from_imperva(start_time, end_time)
    # Tag the origin so process_chunk can pick the right Arkime source-IP
    # field (ip.src for IDS vs ip.xff for WAF).
    dangerous_waf['type'] = 'waf'
    dangerous_ids['type'] = 'ids'
    dangerous_all = pd.concat([dangerous_ids, dangerous_waf])
    print(f"[获取异常session]ids({len(dangerous_ids)})|waf({len(dangerous_waf)})|总计{len(dangerous_all)}条")
    chunks_with_extra_param = [(index, chunk, config_yaml) for index, chunk in
                               enumerate(split_list_into_chunks(dangerous_all, cpu_count()))]
    res = []
    # Fix: the original never close()d/join()ed the Pool, leaking worker
    # processes; the context manager guarantees cleanup.
    with Pool(processes=cpu_count()) as pool:
        async_results = [pool.apply_async(process_method, args) for args in chunks_with_extra_param]
        for result in async_results:
            res.extend(result.get())
    # Local import — presumably avoids a circular dependency at module load;
    # TODO confirm before hoisting to the top of the file.
    from session.session import save_session_id
    save_session_id(origin_list=res, session_id_file=session_id_file)
    print("[获取异常session]获取完毕")