import math
import os
import re
from collections import Counter
from urllib.parse import urlparse

import joblib
import numpy as np
import pandas as pd
from termcolor import cprint
from xbase_util.common_util import s2date
from xbase_util.dangerous_util import get_splunk_pa, get_splunk_waf

from src.bean.cache_value_enum import CacheValueEnum
from src.bean.config_column_enum import ConfigColumnEnum
from src.constant import VulnerabilityEnum, project_root_path, req, drop_columns, same_value, high_dup_cols, \
    file_suffix_keyword, content_type_has_binary, remove_missing_data_when_train
from src.util.cache_value_util import CacheValueUtil
from src.util.config_manager import ConfigManager
from src.value_bean.value_dangerous_enum import ValueDangerousEnum


def get_internal_address(ip):
    """
    判断是否内部ip，否则返回None
    :param ip:
    :return:
    """
    ip_match = r"^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|0?[0-9]?[1-9]|0?[1-9]0)\.)(?:(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){2}(?:25[0-4]|2[0-4][0-9]|1[0-9][0-9]|0?[0-9]?[1-9]|0?[1-9]0)$"
    if re.match(ip_match, ip):
        # 分割IP地址
        octets = ip.split('.')
        first_octet = int(octets[0])
        second_octet = int(octets[1])
        # 判断是否是本地地址
        if ip == "10.28.0.0" or ip.startswith("10.28.0.") or ip.startswith("10.28.0.0/16"):
            return "LOCAL_ADDRESS"
        # 判断是否是VPN地址
        if ip.startswith("10.28.15"):
            return "VPN_ADDRESS"
        # 判断是否是分支机构地址
        if (192 <= first_octet <= 195) or (first_octet == 192 and 144 <= second_octet <= 255):
            return "DEPARTMENT_ADDRESS"
        return None
    else:
        return None


def text_to_frequency_vector(text):
    """
    Count keyword occurrences in *text* for every vulnerability category.

    :param text: free-form text; tokenized case-insensitively on \\w+ runs.
    :return: dict with, per VulnerabilityEnum member, "<name>_count" (list of
        per-keyword occurrence counts, in t.value order) and "<name>_exist"
        (True when at least one keyword of the category occurs).
    """
    # Tokenize and count once up front — the previous version rebuilt the
    # Counter over the whole text for every keyword of every category.
    word_counts = Counter(re.findall(r'\w+', text.lower()))
    str_map = {}
    for t in list(VulnerabilityEnum):
        count_list = [word_counts.get(word, 0) for word in t.value]
        str_map[f"{t.name}_count"] = count_list
        # Counts are non-negative ints, so truthiness equals "!= 0".
        str_map[f"{t.name}_exist"] = any(count_list)
    return str_map


def build_es_statistic_expression(columns, start_time, end_time, max_count=100, bounded_type="bounded"):
    """
    Build an Elasticsearch query body with a terms aggregation per column.

    :param columns: field names; each becomes a terms aggregation.
    :param start_time: datetime-like with .timestamp(), or None to skip.
    :param end_time: datetime-like with .timestamp(), or None to skip.
    :param max_count: terms aggregation `size` (max buckets per field).
    :param bounded_type: "bounded" filters firstPacket >= start and
        lastPacket <= end independently; "last" requires both bounds and
        filters on lastPacket only.
    :return: request body dict (size 0, 300s timeout, profiling off).
    """
    aggregations = {
        column: {"terms": {"field": column, "size": max_count}}
        for column in columns
    }
    # Renamed from `filter`, which shadowed the builtin.
    filters = []
    if bounded_type == "bounded":
        if start_time:
            filters.append({"range": {"firstPacket": {"gte": round(start_time.timestamp() * 1000)}}})
        if end_time:
            filters.append({"range": {"lastPacket": {"lte": round(end_time.timestamp() * 1000)}}})
    elif bounded_type == "last" and start_time and end_time:
        filters.append({"range": {
            "lastPacket": {"gte": round(start_time.timestamp() * 1000), "lte": round(end_time.timestamp() * 1000)}}})
    return {"size": 0, "timeout": "300s", "query": {"bool": {"filter": filters}}, "aggregations": aggregations,
            "profile": False}


def get_all_threats(host, port, username, password, end_time, start_time, scheme='https', use_df=True):
    """
    Fetch IDS (NEW_PA + PA) and WAF threat records from Splunk for the given
    time window and merge them into one table tagged with a 'type' column.

    :param host: Splunk host.
    :param port: Splunk port.
    :param username: Splunk username.
    :param password: Splunk password.
    :param end_time: window end (forwarded as-is to the Splunk helpers).
    :param start_time: window start (forwarded as-is).
    :param scheme: Splunk URL scheme, 'https' by default.
    :param use_df: True -> return a pandas DataFrame; False -> list of dicts.
        NOTE(review): when nothing is found an empty *list* is returned in
        both cases — callers expecting a DataFrame must handle that.
    :return: merged threat records ('type' is 'ids' for PA/NEW_PA, 'waf' for WAF).
    """
    # Two IDS pulls: newly-seen alerts and the regular PA feed.
    threat_new_pa = pd.DataFrame(get_splunk_pa(start_time=start_time, end_time=end_time,
                                               splunk_host=host,
                                               splunk_port=port,
                                               splunk_scheme=scheme,
                                               splunk_username=username,
                                               splunk_password=password,
                                               pa_type="NEW_PA"
                                               ))
    threat_pa = pd.DataFrame(get_splunk_pa(start_time=start_time, end_time=end_time,
                                           splunk_host=host,
                                           splunk_port=port,
                                           splunk_scheme=scheme,
                                           splunk_username=username,
                                           splunk_password=password,
                                           pa_type="PA"
                                           ))

    printx(f"异常数量pa:{len(threat_pa)} new_pa:{len(threat_new_pa)}")
    threat_new_pa['type'] = 'ids'
    threat_pa['type'] = 'ids'
    threat_waf = pd.DataFrame(get_splunk_waf(start_time=start_time, end_time=end_time,
                                             splunk_host=host,
                                             splunk_port=port,
                                             splunk_scheme=scheme,
                                             splunk_username=username,
                                             splunk_password=password))
    threat_waf['type'] = 'waf'
    printx(f"异常数量waf:{len(threat_waf)}")
    # Row-wise concat; indexes are reset so the merged frame has a clean range index.
    dangerous_all = pd.concat([
        threat_new_pa.reset_index(drop=True),
        threat_pa.reset_index(drop=True),

        threat_waf.reset_index(drop=True)], axis=0)
    if len(dangerous_all) == 0:
        return []
    # Normalize empty strings in the categorical columns to the sentinel
    # "no value" enum so downstream encoding never sees ''.
    dangerous_all['DENY_METHOD'] = dangerous_all['DENY_METHOD'].apply(
        lambda x: ValueDangerousEnum.abnormal_no_value.value if x == '' else x)
    dangerous_all['PROTOCOL'] = dangerous_all['PROTOCOL'].apply(
        lambda x: ValueDangerousEnum.abnormal_no_value.value if x == '' else x)
    dangerous_all['SEVERITY'] = dangerous_all['SEVERITY'].apply(
        lambda x: ValueDangerousEnum.abnormal_no_value.value if x == '' else x)
    dangerous_all['THREAT_SUMMARY'] = dangerous_all['THREAT_SUMMARY'].apply(
        lambda x: ValueDangerousEnum.abnormal_no_value.value if x == '' else x)
    # Annoyingly the Splunk results contain NaN in the flow-tuple columns;
    # keep only rows where source/destination IP and port are all present.
    dangerous_all = dangerous_all[
        (~dangerous_all['SIP'].isna()) & (~dangerous_all['DIP'].isna()) & (~dangerous_all['S_PORT'].isna()) & (
            ~dangerous_all['D_PORT'].isna())]
    if use_df:
        return dangerous_all
    else:
        return dangerous_all.to_dict('records')


def printx(a, is_error=False):
    """Print *a* in cyan, or in red when *is_error* is set."""
    color = 'red' if is_error else 'cyan'
    cprint(a, color=color)


def printc(a, color='green'):
    """Print *a* via termcolor in the given color (green by default)."""
    cprint(a, color=color)


def input_is_yes(text) -> bool:
    """Prompt the user; an empty answer, 'y'/'Y' or any-case 'yes' counts as yes."""
    answer = input(f"{text} Y/y/yes[enter] N/n/not/<Any key>:")
    return answer == '' or answer.lower() in ('y', 'yes')


def init_cache():
    """
    Interactively initialize the statistics cache: optionally delete the
    cached pickle so it gets rebuilt, otherwise load and display it.

    :return: tuple of (cache dict — empty if missing/deleted, cache file path).
    """
    tag_name = '[初始化]'
    printx(f'{tag_name}初始化：删除缓存数据需要重新更新缓存，确保有缓存值才可以继续获取数据')
    data_path = os.path.join(project_root_path, 'model', 'cache.pkl')
    data_cache = {}
    if not os.path.exists(data_path):
        printx(f"{tag_name}当前没有缓存数据")
        return data_cache, data_path
    # The prompt is deliberately phrased as "keep?" — yes means load, no means delete.
    if input_is_yes(f"{tag_name}是否 不 删除"):
        data_cache = joblib.load(data_path)
        printx(f"{tag_name}本地：{data_cache}")
    else:
        os.remove(data_path)
        printx(f"{tag_name}已删除缓存数据")
    return data_cache, data_path


def update_cache(config, start_time, end_time, is_incremental_save=True, is_abnormal=False):
    """
    Query Elasticsearch term aggregations for every cached column and refresh
    the (count, mean, std) statistics stored in the config data cache.

    :param config: ConfigManager exposing get_config/set_config for the data
        cache and its on-disk path.
    :param start_time: window start string, parsed by s2date.
    :param end_time: window end string, parsed by s2date.
    :param is_incremental_save: when True, merge the window's statistics with
        the cached ones via the parallel variance merge (Chan et al.);
        otherwise overwrite the cached values.
    :param is_abnormal: forwarded to CacheValueUtil to select the
        normal/abnormal cache slot.
    """
    for col in list(CacheValueEnum):
        if col is CacheValueEnum.session_per_packet_time_mean:
            printx(f"[初始化]session_per_packet_time_mean不是常用值，需要用新的统计方法")
            continue
        exp = build_es_statistic_expression([col.value], s2date(start_time), s2date(end_time),
                                            max_count=65535,
                                            bounded_type='last')
        buckets = req.search(
            body=exp, index="arkime_sessions3-*").json()['aggregations'][
            col.value]['buckets']
        # Weighted moments over the term buckets: each bucket contributes its
        # key (the numeric field value) doc_count times.
        bCount = sum(bucket['doc_count'] for bucket in buckets)
        if bCount == 0:
            # Empty window: the previous version divided by zero here.
            printx(f"{col.value}: no buckets in window, skipped", is_error=True)
            continue
        bSum = sum(bucket['doc_count'] * bucket['key'] for bucket in buckets)
        bSumSq = sum(bucket['doc_count'] * bucket['key'] ** 2 for bucket in buckets)
        bMean = bSum / bCount
        # Population variance as E[x^2] - E[x]^2. The previous implementation
        # accumulated doc_count * (key - running_mean)^2 while the running
        # mean was still converging, which is not the variance of the data
        # and systematically underestimated it.
        var = max(bSumSq / bCount - bMean ** 2, 0.0)  # clamp float rounding error
        std = math.sqrt(var)
        if is_incremental_save:
            aCount, aMean, aStd = CacheValueUtil.get_cache_value(config.get_config(ConfigColumnEnum.data_cache.value),
                                                                 col, is_abnormal)
            # Parallel variance merge (Chan et al.): combine cached batch A
            # with the freshly computed batch B.
            newCount = aCount + bCount
            newMean = (aCount * aMean + bMean * bCount) / newCount
            aVar = aStd ** 2
            delta = bMean - aMean
            newVar = (aCount * aVar + bCount * var + (delta ** 2 * aCount * bCount) / newCount) / newCount
            newStd = math.sqrt(newVar)
            data_cache = CacheValueUtil.update_cache_value(col, config.get_config(ConfigColumnEnum.data_cache.value),
                                                           (newCount, newMean, newStd), is_abnormal)
        else:
            data_cache = CacheValueUtil.update_cache_value(col, config.get_config(ConfigColumnEnum.data_cache.value),
                                                           (bCount, bMean, std), is_abnormal)
        config.set_config(ConfigColumnEnum.data_cache.value, data_cache)
    CacheValueUtil.save_data_cache(config.get_config(ConfigColumnEnum.data_cache.value),
                                   config.get_config(ConfigColumnEnum.data_path.value))


def update_local_cache(config: ConfigManager) -> None:
    """Interactively ask for a time window and refresh the cache, either incrementally or from scratch."""
    if not input_is_yes("[初始化]是否更新缓存？"):
        printx(f'[初始化]未主动更新缓存')
        return
    start_time = input(f'[初始化]请输入开始时间(YYYY/mm/dd HH:MM:SS|YYYY-mm-dd HH:MM:SS):')
    end_time = input(f'[初始化]请输入结束时间(YYYY/mm/dd HH:MM:SS|YYYY-mm-dd HH:MM:SS):')
    incremental = input_is_yes("[初始化]是否增量更新？")
    update_cache(config, start_time, end_time, is_incremental_save=incremental, is_abnormal=False)


def remove_df(data):
    """
    Drop rows whose key HTTP columns are missing or placeholder values
    (0, "" or "/") when remove_missing_data_when_train is enabled; otherwise
    return *data* unchanged. Prints how many rows were removed.
    """
    if not remove_missing_data_when_train:
        return data
    required = ["server.bytes", "req_uri", "res_status_code", "req_method"]
    before = len(data)
    # A row is bad when any required column holds a placeholder or is NaN.
    bad = data[required].isin([0, "", "/"]).any(axis=1) | data[required].isna().any(axis=1)
    data = data[~bad]
    print(f"删除了：{before - len(data)}条数据")
    return data


def is_valid_datetime(text):
    """Return True when *text* matches "YYYY-mm-dd HH:MM:SS" or "YYYY/mm/dd HH:MM:SS" exactly."""
    pattern = r'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}$'
    return re.match(pattern, text) is not None


def is_file_exist(path):
    """Return True when *path* refers to an existing regular file."""
    return os.path.isfile(path)


def is_multi_file_exist(path):
    """
    Check a comma-separated list of file paths.

    :param path: paths joined by ',', e.g. "a.txt,b.txt".
    :return: True only when every listed path is an existing regular file.
    """
    # all(...) over a generator replaces the old `not (False in [list])`
    # anti-idiom and short-circuits on the first missing file.
    return all(os.path.isfile(file_path) for file_path in path.split(','))


def is_int(text):
    """Return True when *text* parses as a base-10 integer."""
    try:
        int(text)
    except ValueError:
        return False
    return True


def is_int_between(text, param):
    """Return True when int(text) lies in the closed interval [param[0], param[1]]; False for unparseable text."""
    try:
        value = int(text)
    except ValueError:
        return False
    low, high = param[0], param[1]
    return low <= value <= high


def is_float(text):
    """Return True when *text* parses as a float (plain ints, 'inf' and 'nan' included)."""
    try:
        float(text)
    except ValueError:
        return False
    return True


def while_input(tip, fun, param=None):
    """
    Keep prompting with *tip* until the answer passes validation.

    :param tip: prompt text shown to the user.
    :param fun: validator; None accepts anything, otherwise called as
        fun(answer) or fun(answer, param).
    :param param: optional extra argument forwarded to the validator.
    :return: the first accepted answer string.
    """
    while True:
        try:
            answer = input(tip)
            if fun is None:
                return answer
            accepted = fun(answer) if param is None else fun(answer, param)
            if accepted:
                return answer
            printx('输入有误，请重新输入', is_error=True)
        except Exception as e:
            # Any validator failure is reported and the user is asked again.
            printx(f"{e}", is_error=True)
            printx('输入有误，请重新输入', is_error=True)


def exclude_dropped_columns(value):
    """
    Filter out columns that are scheduled for removal.

    :param value: original list of column names.
    :return: the names not present in drop_columns, same_value or high_dup_cols.
    """
    # Build the exclusion set once. The previous version re-evaluated
    # list(set(drop_columns + same_value + high_dup_cols)) for every element
    # and then did O(n) list membership tests against it.
    excluded = set(drop_columns + same_value + high_dup_cols)
    return [item for item in value if item not in excluded]


def loop_exe(op_map, config: ConfigManager) -> None:
    """
    Show a numbered menu of operations and run the chosen one, looping until
    the user picks the entry whose func is None (the exit entry).
    """
    while True:
        printc("——" * 30, 'light_red')
        for idx, entry in enumerate(op_map, start=1):
            printc(f"{idx}. {entry.key}", "magenta")
        choice = int(while_input("请输入执行序号:", is_int_between, (1, len(op_map))))
        selected = op_map[choice - 1]
        if selected.func is None:
            return
        selected.func(config)


def is_file_suffix(url):
    """
    Heuristic: does *url* point at a file? Empty or root-only paths are never
    files; otherwise report whether any known file-suffix keyword occurs in
    the URL string.
    """
    text = f'{url}'
    try:
        if urlparse(text).path in ('', '/'):
            return False
    except ValueError:
        # Unparseable URL — fall through to the keyword scan.
        pass
    return any(keyword in text for keyword in file_suffix_keyword)


def content_type_is_binary(x):
    """Return True when the content-type string contains any keyword from content_type_has_binary."""
    for keyword in content_type_has_binary:
        if keyword in x:
            return True
    return False


def delete_row_if_true(df, col, delete_percentage: float):
    """
    Randomly drop *delete_percentage* of the rows where df[col] is True and
    print a short report of what was removed.

    :param df: source DataFrame.
    :param col: name of a boolean column selecting deletable rows.
    :param delete_percentage: fraction (0..1) of the True rows to drop.
    :return: a new DataFrame with the sampled rows removed, or *df* unchanged
        when the rounded delete count is zero.
    """
    printx(f"要删除的字段{col}")
    printx(f"当前总数:{len(df)}")
    total_rows = len(df)
    true_rows = df[df[col]]
    true_count = len(true_rows)
    delete_count = int(round(delete_percentage * true_count))
    if delete_count == 0:
        print("无删除（delete_count == 0）")
        return df
    # Sample row labels without replacement from the True subset only.
    chosen = np.random.choice(true_rows.index, delete_count, replace=False)
    result = df.drop(chosen)
    print(f"True数量: {true_count}")
    print(f"删除数量: {delete_count}")
    print(f"删除占True比例: {delete_count / true_count:.2%}")
    print(f"剩余总数: {len(result)}")
    print(f"删除占总体比例: {delete_count / total_rows:.2%}")
    return result
