import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from datetime import timedelta
from multiprocessing import cpu_count

import pandas as pd
from xbase_util.common_util import s2date, split_data_by_num, extract_session_fields, build_es_expression
from xbase_util.handle_features_util import handle_ua, handle_uri

from src.bean.splunk_config_enum import SplunkConfigEnum
from src.col_bean.dangerous_enum import ColDangerousEnum
from src.col_bean.time_field_enum import ColTimeFieldEnum
from src.constant import req, field_map, project_root_path, server, hide_log
from src.extract.packet_fill_util import fill_statistic_field
from src.extract.session import get_session_process_chunk_session
from src.util.common_util import printx, get_all_threats, remove_df
from src.util.config_manager import ConfigManager


def search_with_cross_minutes(time, arkime_expression, initial_minutes=1):
    """
    Search for session data matching a threat, widening the time window until
    something matches or the widest window has been tried.

    The window half-width steps through 1 -> 5 -> 8 minutes starting at
    ``initial_minutes``; a start value other than 1 or 5 gets a single attempt.

    :param time: center of the search window (datetime)
    :param arkime_expression: Arkime filter expression to match sessions
    :param initial_minutes: initial window half-width, in minutes
    :return: list of matching ES hits, or [] when nothing was found
    """
    cross_minutes = initial_minutes
    while cross_minutes <= 8:
        exp = build_es_expression(
            size=100,
            start_time=time - timedelta(minutes=cross_minutes),
            end_time=time + timedelta(minutes=cross_minutes),
            arkime_expression=arkime_expression
        )
        response = req.search(body=exp).json()
        try:
            current_count = response['hits']['total']['value']
        except Exception:
            # Malformed response (e.g. an ES error payload): log it and give
            # up. The original code looped forever here because cross_minutes
            # was never advanced after a parse failure.
            printx("报错", is_error=True)
            printx(response, is_error=True)
            return []
        if current_count >= 1:
            return response['hits']['hits']
        # Nothing found: widen the window and retry.
        if cross_minutes == 1:
            cross_minutes = 5
        elif cross_minutes == 5:
            cross_minutes = 8
        else:
            break
    return []


def get_abnormal_session_thread_chunk_session(index, total, traffic, process_index):
    """
    Resolve the ES session hits that correspond to one threat record and tag
    each hit with the threat's metadata.

    :param index: position of this record within the chunk (for logging)
    :param total: number of records in the chunk (for logging)
    :param traffic: threat record dict (SIP/DIP/ports/THREAT_TIME/...)
    :param process_index: index of the owning worker process (for logging)
    :return: list of annotated hits; [] on any failure
    """
    # NOTE(review): logging only when hide_log is truthy looks inverted —
    # confirm the intended meaning of the hide_log flag with its author.
    if hide_log:
        printx(f"进程:{process_index:02d}的第{index:02d}/{total}个数据")
    try:
        threat_time = s2date(f"{traffic['THREAT_TIME']}")
        traffic['THREAT_TIME'] = threat_time
        expression = (
            f"(ip.src == {traffic['SIP']}||ip.xff == {traffic['SIP']})"
            f" && port.src == {int(traffic['S_PORT'])}"
            f" && ip.dst == {traffic['DIP']}"
            f" && port.dst == {int(traffic['D_PORT'])}"
        )
        hits = search_with_cross_minutes(threat_time, expression, initial_minutes=5)
        annotation = {
            'traffic_type': traffic['type'],
            'PROTOCOL': traffic['PROTOCOL'],
            'DENY_METHOD': traffic['DENY_METHOD'],
            'THREAT_SUMMARY': traffic['THREAT_SUMMARY'],
            'SEVERITY': traffic['SEVERITY'],
            'isDangerous': True,
        }
        for hit in hits:
            hit.update(annotation)
        return hits
    except Exception as e:
        printx(f'{e}', is_error=True)
        printx(f"报错get_abnormal_session_thread_chunk_session：{traffic}", is_error=True)
        return []


def process_method(process_index, chunks):
    """
    Worker entry point for one process: fan this chunk's threat records out to
    a small thread pool and collect every matched session hit.

    :param process_index: index of this worker process (for logging)
    :param chunks: list of threat records assigned to this process
    :return: flat list of all hits found for the chunk
    """
    printx(f"开启进程{process_index + 1}")
    res = []
    # Both branches of the original ``if server == 140`` assigned 5, so the
    # dead conditional is collapsed into a single constant.
    max_workers = 5
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(get_abnormal_session_thread_chunk_session,
                            index, len(chunks), chunk_item, process_index)
            for index, chunk_item in enumerate(chunks)
        ]
        for future in futures:
            try:
                res.extend(future.result())
            except Exception:
                # Best-effort: a single failed lookup must not kill the chunk.
                continue
    return res


def get_abnormal_session(config, time_start: str, time_end: str):
    """
    Build a DataFrame of sessions matched to threat records in the interval
    [time_start, time_end].

    Pipeline:
      1. Pull threat records from Splunk.
      2. Fan out across processes (threads inside each) to find every
         threat's session hits.
      3. Normalize field names, fetch full session details, fill statistic
         fields, and post-process URI/UA features.

    :param config: ConfigManager with runtime configuration
    :param time_start: inclusive start, e.g. "2025/08/09 00:00:00"
    :param time_end: inclusive end, same format
    :return: processed DataFrame, or None when nothing matched
    """
    dangerous = get_all_threats(host=SplunkConfigEnum.host.value,
                                port=SplunkConfigEnum.port.value,
                                username=SplunkConfigEnum.username.value,
                                password=SplunkConfigEnum.password.value,
                                scheme=SplunkConfigEnum.scheme.value,
                                start_time=s2date(time_start),
                                end_time=s2date(time_end),
                                use_df=False)
    split_list = split_data_by_num(dangerous, cpu_count())
    sessions = []
    # Both branches of the original ``if server == 140`` assigned 20, so the
    # dead conditional is collapsed into a single constant.
    max_workers = 20
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(process_method, index, chunk)
                   for index, chunk in enumerate(split_list) if len(chunk) != 0]
        for future in futures:
            try:
                sessions.extend(future.result())
            except Exception:
                # Best-effort: one failed worker should not abort the run.
                pass
    sessions = extract_session_fields(sessions, None, need_geo=False, check_dangerous=True)
    # Remap the raw threat keys onto the column-enum names used downstream;
    # iteration order matches the original assignment order so the resulting
    # key (and later DataFrame column) order is unchanged.
    rename_map = {
        'DENY_METHOD': ColDangerousEnum.deny_method.value,
        'PROTOCOL': ColDangerousEnum.protocol.value,
        'SEVERITY': ColDangerousEnum.severity.value,
        'THREAT_SUMMARY': ColDangerousEnum.threat_summary.value,
        'traffic_type': ColDangerousEnum.traffic_type.value,
        'isDangerous': ColDangerousEnum.is_dangerous.value,
    }
    for session in sessions:
        session.update(field_map)
        session[ColTimeFieldEnum.session_time_diff.value] = session['lastPacket'] - session['firstPacket']
        for old_key, new_key in rename_map.items():
            session[new_key] = session.pop(old_key)
    chunks = [item for item in split_data_by_num(sessions, cpu_count()) if len(item) != 0]
    sessions_result = []
    with ProcessPoolExecutor(max_workers=8) as executor:
        futures = []
        for process_index, chunk in enumerate(chunks):
            child_chunks = [item for item in split_data_by_num(chunk, 15) if len(item) != 0]
            futures.append(executor.submit(get_session_process_chunk_session,
                                           process_index, len(chunks), child_chunks))
        for future in futures:
            try:
                sessions_result.extend(future.result())
            except Exception:
                pass
    if not sessions_result:
        return None
    fill_statistic_field(sessions_result, config, is_update_cache=False, is_abnormal=True)
    df = pd.DataFrame(sessions_result)
    df = remove_df(df)
    df = handle_uri(df, use_tqdm=False)
    df = handle_ua(df, use_tqdm=False)
    return df


def extract_abnormal_entrance(config: ConfigManager, year: int = 2025,
                              month: int = 8, days=None) -> None:
    """
    Extract abnormal-session CSVs for a list of days, writing each under
    ``<project_root>/data/abnormal_<month>_<day>.csv``.

    The previously hard-coded date range (2025-08, days 9..16) is kept as the
    default, so existing callers are unaffected.

    :param config: ConfigManager with runtime configuration
    :param year: year of the days to extract
    :param month: month of the days to extract
    :param days: iterable of day strings; defaults to ['9'..'16']
    """
    if days is None:
        days = ['9', '10', '11', '12', '13', '14', '15', '16']
    for day in days:
        start_time = f"{year}/{month:02d}/{day} 00:00:00"
        end_time = f"{year}/{month:02d}/{day} 23:59:59"
        df = get_abnormal_session(config, start_time, end_time)
        if df is None:
            # No sessions matched for that day; skip writing an empty file.
            continue
        path = os.path.join(project_root_path, 'data', f"abnormal_{month}_{day}.csv")
        df.to_csv(path, index=False)