import pandas as pd
from joblib import Parallel, delayed
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.preprocessing import MultiLabelBinarizer

from src.bean.vector_name_enum import VectorNameEnum
from src.bean.x_typing import PRE_HANDLE_TYPE
from src.constant import global_geo_util
from src.model.model_common_util import save_vector, load_vectors, \
    convert_to_int_list, parse_str_to_list
from src.util.common_util import printx
from src.value_bean.value_geo_field_enum import ValueGeoFieldEnum


def convert_to_str(obj):
    """Coerce *obj* into a comma-joined string.

    Tries to parse *obj* as a list (via ``parse_str_to_list``) and join
    the items with commas. If parsing fails, falls back to ``str(obj)``,
    mapping NaN/None-like scalars to the empty string.
    """
    try:
        v = ','.join(parse_str_to_list(obj))
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        # Parse failed: treat as a plain scalar. pd.isna assumes a scalar
        # here (non-scalars are expected to parse above) — TODO confirm.
        v = str(obj) if not pd.isna(obj) else ''
    return v


def convert_to_float(x):
    """Best-effort float conversion via the string form of *x*; 0.0 on failure."""
    text = f'{x}'
    try:
        result = float(text)
    except ValueError:
        result = 0.0
    return result


def handle_request_referer(x):
    """Parse a stringified referer list into a list of URLs without query strings.

    Strips list brackets and quote characters from the string form of *x*,
    splits on commas, drops empty/whitespace-only entries, and truncates
    each referer at the first ``?`` so only the path part remains.
    """
    cleaned = f"{x}".replace("]", '').replace("[", '').replace('"', '').replace("'", '')
    # Fix: filter AFTER stripping so whitespace-only tokens (e.g. "a, ,b")
    # are dropped instead of surviving as empty strings.
    referer_list = [referer.strip() for referer in cleaned.split(",") if referer.strip()]
    # Keep only the part before the first '?' (drop the query string).
    return [referer.split("?", 1)[0] for referer in referer_list]


def xffip(x):
    """Resolve the first IP of an X-Forwarded-For value to (country_iso, province_name, city_name).

    Cleans list brackets/quotes from the string form of *x*, takes the
    first comma-separated entry, and looks it up in the GeoIP city
    database. Any missing field or failed lookup yields the no-value
    sentinel in that position.
    """
    no_value = ValueGeoFieldEnum.db_no_value.value
    parts = f"{x}".replace("]", '').replace("[", '').replace('"', '').replace("'", '').split(",")
    # str.split always returns at least one element, so the old
    # `len(parts) == 0` branch was dead code. Strip the entry so values
    # like "ip1, ip2" (space after comma) still resolve.
    ip = parts[0].strip()
    if not ip:
        return no_value, no_value, no_value
    try:
        response = global_geo_util.reader.city(ip)
        country_iso = no_value if response.country.iso_code is None else response.country.iso_code
        province_name = no_value if response.subdivisions.most_specific.name is None else response.subdivisions.most_specific.name
        city_name = no_value if response.city.name is None else response.city.name
        return country_iso, province_name, city_name
    except Exception:  # narrowed from bare except; lookup failures fall back to sentinels
        return no_value, no_value, no_value


# Shared hashing vectorizer used by process_col: character n-grams (3-5,
# word-boundary aware) hashed into a fixed 10-dimensional, L2-normalised
# feature space. alternate_sign=False keeps all feature values >= 0.
# NOTE(review): 10 features is very small for char n-grams, so hash
# collisions are expected — confirm this is intentional.
vect = HashingVectorizer(
    n_features=10,
    analyzer='char_wb',
    ngram_range=(3, 5),
    alternate_sign=False,
    norm='l2',
    lowercase=True
)


def convert_to_int_list_partition(s: pd.Series) -> pd.Series:
    """Map each element of *s* to a list of ints.

    Each element is parsed into a list via ``parse_str_to_list``; items
    that cannot be converted to ``int`` are silently skipped.
    """
    def convert_element(obj):
        int_list = []
        for item in parse_str_to_list(obj):
            try:
                int_list.append(int(item))
            except (ValueError, TypeError):
                # Narrowed from bare except: skip non-numeric items only,
                # never swallow KeyboardInterrupt/SystemExit.
                pass
        return int_list

    return s.map(convert_element)


def process_col(col_data, col_name, i, all):
    """Hash-vectorize one text-array column into 10 numeric feature columns."""
    printx(f"[预处理]多进程处理文本数组:{i + 1}/{all}")
    as_text = col_data.apply(convert_to_str)
    hashed = vect.transform(as_text).toarray()
    feature_names = [f"{col_name}_{j}" for j in range(10)]
    return pd.DataFrame(hashed, columns=feature_names).reset_index(drop=True)


def pre_handle_array_v2(df, columns, model_id, pre_handle_type: PRE_HANDLE_TYPE):
    """Pre-process array-typed columns of *df* into numeric features.

    String-array columns (everything in *columns* that is not a known
    numeric-count column or ``http.xffIp``) are hash-vectorized into 10
    features each, in parallel. Numeric-count columns are multi-hot
    encoded with ``MultiLabelBinarizer``; the fitted binarizers are saved
    during training and loaded otherwise. The original array columns are
    dropped before returning.

    Note: *df* is also mutated in place (referer parsing, xffIp geo split)
    before the transformed copy is returned.
    """
    num_array = [item for item in df.columns if item in [
        'deserialize_count', 'xxe_attack_count', 'sql_injection_count', 'sensitive_data_count',
        'cmd_injection_count', 'log_injection_count', 'http.statuscode', 'xss_attack_count']]
    str_array = [item for item in columns if item not in num_array + ['http.xffIp']]
    printx("[预处理]http.request-referer")
    df['http.request-referer'] = df['http.request-referer'].progress_apply(handle_request_referer)
    printx("[预处理]拆分xffIp")
    df['http.xffIp'] = df['http.xffIp'].progress_apply(xffip)
    printx("[预处理]拆分xffIp获取xffIp.country_iso")
    df['xffIp.country_iso'] = df['http.xffIp'].progress_apply(lambda x: x[0])
    printx("[预处理]拆分xffIp获取xffIp.province_name")
    df['xffIp.province_name'] = df['http.xffIp'].progress_apply(lambda x: x[1])
    printx("[预处理]拆分xffIp获取xffIp.city_name")
    df['xffIp.city_name'] = df['http.xffIp'].progress_apply(lambda x: x[2])
    geo_cols = ['xffIp.country_iso', 'xffIp.province_name', 'xffIp.city_name']
    str_array += geo_cols
    dfs = Parallel(n_jobs=-1)(
        delayed(process_col)(df[col], col, i, len(str_array))
        for i, col in enumerate(str_array)
    )
    df = pd.concat([df.reset_index(drop=True), *dfs], axis=1)
    printx("[预处理]开始用MultiLabelBinarizer处理数值数组类型")
    column_map = {}
    if pre_handle_type != PRE_HANDLE_TYPE.train:
        # Inference/eval: reuse the binarizers that were fitted at train time.
        column_map = load_vectors(VectorNameEnum.v2_array_count.value, model_id)

    def process_numeric_col(col, col_data, pre_handle_type, column_map):
        # Multi-hot encode one numeric-array column; returns the encoded
        # frame plus a {column: binarizer} mapping for the caller to merge.
        printx(f"[预处理]多进程处理数值 {col}")
        col_data = col_data.apply(convert_to_int_list)
        if pre_handle_type == PRE_HANDLE_TYPE.train:
            # Training: fit a fresh binarizer and return it for saving.
            column_map = {}
            mlb = MultiLabelBinarizer()
            multi_hot = mlb.fit_transform(col_data)
            column_map[col] = mlb
        else:
            # Otherwise use the binarizer supplied via column_map.
            mlb = column_map[col]
            multi_hot = mlb.transform(col_data)
        df_multi = pd.DataFrame(multi_hot, columns=[f"{col}_{c}" for c in mlb.classes_])
        return df_multi, column_map

    results = Parallel(n_jobs=-1)(
        delayed(process_numeric_col)(col, df[col], pre_handle_type, column_map)
        for col in num_array
    )
    df_list = [df.reset_index(drop=True)]
    # Renamed from `map` to avoid shadowing the builtin.
    for res, col_map in results:
        column_map.update(col_map)
        df_list.append(res.reset_index(drop=True))
    df = pd.concat(df_list, axis=1)
    if pre_handle_type == PRE_HANDLE_TYPE.train:
        save_vector(column_map, VectorNameEnum.v2_array_count.value, model_id)
    df = df.drop(columns=num_array + str_array + ['http.xffIp'])
    return df
