import json
import time
import traceback
from itertools import chain
from urllib.parse import urlparse, parse_qs

import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm

from src.bean.column_type_enum import ColumnTypeEnum
from src.bean.x_typing import PRE_HANDLE_TYPE
from src.model.common.hdbscan import scan
from src.model.model_common_util import fill_num, fill_bool_with_num, drop_and_get_cols, parse_str_to_list
from src.model.v2.pre_handle_array import pre_handle_array_v2
from src.model.v2.pre_handle_str import pre_handle_str_v2
from src.util.common_util import content_type_is_binary, delete_row_if_true, is_file_suffix, printx


def extract_uri_path(uris):
    """Extract the unique URL path components from a serialized list of URIs.

    Args:
        uris: A value that ``parse_str_to_list`` can turn into a list of URI
            strings. A scheme is optional; ``http://`` is prepended when
            missing so ``urlparse`` splits netloc/path correctly.

    Returns:
        A JSON-encoded, deduplicated list of path strings.
    """
    paths = []
    for uri in parse_str_to_list(uris):
        # Without a scheme, urlparse treats "host/path" as all-path.
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            paths.append(urlparse(uri).path)
        except ValueError:
            # Best-effort: skip malformed URIs (e.g. bad port/IPv6 literal)
            # instead of failing the whole batch.
            pass
    return json.dumps(list(set(paths)))


def extract_uri_domain(uri_list):
    """Extract the unique network locations (domains) from a list of URIs.

    Args:
        uri_list: A value that ``parse_str_to_list`` can turn into a list of
            URI strings. ``http://`` is prepended when a scheme is missing.

    Returns:
        A JSON-encoded, deduplicated list of netloc strings.
    """
    domains = []
    for uri in parse_str_to_list(uri_list):
        # Without a scheme, urlparse leaves netloc empty.
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            domains.append(urlparse(uri).netloc)
        except ValueError:
            # Best-effort: skip malformed URIs instead of failing the batch.
            pass
    return json.dumps(list(set(domains)))


def extract_uri_params(uri_list):
    """Extract unique "key=value" query parameters from a list of URIs.

    Only the first value of a repeated query key is kept (``value[0]``).

    Args:
        uri_list: A value that ``parse_str_to_list`` can turn into a list of
            URI strings. ``http://`` is prepended when a scheme is missing.

    Returns:
        A JSON-encoded, deduplicated list of "key=value" strings collected
        across all URIs.
    """
    per_uri_params = []
    for uri in parse_str_to_list(uri_list):
        # Without a scheme, urlparse misplaces the query string.
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            query_map = parse_qs(urlparse(uri).query)
            if query_map:
                per_uri_params.append(
                    [f"{key}={value[0]}" for key, value in query_map.items()])
        except ValueError:
            # Best-effort: skip malformed URIs instead of failing the batch.
            pass
    return json.dumps(list(set(chain(*per_uri_params))))


def extract_all(uri):
    """Return the (domain, path, params) JSON strings for one URI field."""
    domains = extract_uri_domain(uri)
    paths = extract_uri_path(uri)
    params = extract_uri_params(uri)
    return domains, paths, params


# def split_uri_to_cols(df):
# 19.994048357009888
#     new_cols = df['http.uri'].progress_apply(extract_all).apply(pd.Series)
#     new_cols.columns = ['http.uri_domain', 'http.uri_path', 'http.uri_params']
#     return pd.concat([df, new_cols], axis=1).reset_index(drop=True)


def split_uri_to_cols(df):
    """Append 'http.uri_domain', 'http.uri_path' and 'http.uri_params' columns.

    Runs ``extract_all`` over every value of the 'http.uri' column in
    parallel across all cores, then concatenates the three resulting
    columns onto a copy of ``df`` with a reset index.
    """
    # ~10.5s measured, vs ~20s for the single-threaded progress_apply version.
    extracted = Parallel(n_jobs=-1)(
        delayed(extract_all)(value) for value in df['http.uri']
    )
    uri_cols = pd.DataFrame(
        extracted,
        columns=['http.uri_domain', 'http.uri_path', 'http.uri_params'],
    )
    return pd.concat([df.reset_index(drop=True), uri_cols], axis=1)


# def split_uri_to_cols(df):
#     import swifter
#     print(swifter)
#     # 20.92842698097229
#     new_cols = df['http.uri'].swifter.apply(extract_all).apply(pd.Series)
#     new_cols.columns = ['http.uri_domain', 'http.uri_path', 'http.uri_params']
#     return pd.concat([df, new_cols], axis=1).reset_index(drop=True)


def v2_file_pre_handle(df, model_id, handle_type: PRE_HANDLE_TYPE, old_model=None) -> (
        pd.DataFrame, str, pd.DataFrame):
    """V2 preprocessing pipeline: filter, URI split, per-type column handling.

    Steps, in order: drop rows flagged as file-suffix / binary content
    (95% of them when training, 100% when predicting), split 'http.uri'
    into domain/path/params columns, classify remaining columns by type,
    then apply the per-type handlers (string, numeric fill, array, bool)
    and finally HDBSCAN-based noise reduction.

    Args:
        df: Input DataFrame; must contain 'req_uri',
            'http.response-content-type' and 'http.uri' columns.
        model_id: Model identifier forwarded to the fill/array handlers.
        handle_type: PRE_HANDLE_TYPE.predict or a training mode; controls
            how aggressively flagged rows are dropped.
        old_model: Optional previously-trained model passed to ``fill_num``.

    Returns:
        Tuple of (processed DataFrame, cluster result from ``scan``, and
        df2 — a deep copy taken after row filtering but before URI/column
        processing).

    Raises:
        Exception: Any failure inside the pipeline is printed with a full
            traceback and then re-raised.
    """
    # df2 is the DataFrame snapshot taken after the file-suffix rows are removed
    try:
        start_time = time.time()
        origin_time = start_time
        printx(f"[预处理]删除95%的文件后缀、95%的二进制数据，数据量:{len(df)}")
        tqdm.pandas()
        df['is_file_suffix'] = df['req_uri'].progress_apply(is_file_suffix)
        df['content_type_is_binary'] = df['http.response-content-type'].progress_apply(content_type_is_binary)
        if handle_type == PRE_HANDLE_TYPE.predict:
            # Prediction: drop every flagged row.
            printx("预测删除所有的文件后缀True")
            df = delete_row_if_true(df, 'is_file_suffix', 1)
            df = delete_row_if_true(df, 'content_type_is_binary', 1)
        else:
            # Training: keep 5% of flagged rows as samples.
            printx("训练删除95%的文件后缀True")
            df = delete_row_if_true(df, 'is_file_suffix', 0.95)
            df = delete_row_if_true(df, 'content_type_is_binary', 0.95)
        df = df.drop(columns=['is_file_suffix', 'content_type_is_binary'])
        df2 = df.copy(deep=True)
        printx(f"[预处理]删除95%的文件耗时：{time.time() - start_time}")
        start_time = time.time()
        printx(f"[预处理]已删除，数据量:{len(df)}")
        printx("[预处理]提取uri的参数、路径、域名")
        df = split_uri_to_cols(df)
        printx(f"[预处理]提取uri的参数耗时：{time.time() - start_time}")
        start_time = time.time()

        printx("[预处理]删除一些字段后确定每种类型字段")
        df, num_cols, str_cols, bool_cols, array_cols = drop_and_get_cols(df)
        printx(f"[预处理]数值字段：{len(num_cols)}")
        printx(f"[预处理]文本字段：{len(str_cols)}")
        printx(f"[预处理]布尔字段：{len(bool_cols)}")
        printx(f"[预处理]数组字段：{len(array_cols)}")
        df = pre_handle_str_v2(df, str_cols)
        printx(
            f"[预处理]处理文本数据耗时：{time.time() - start_time}")  # 14.660828828811646 12.385878562927246
        start_time = time.time()
        printx('[预处理]用-1填充数值字段')
        # fill_num mutates df in place.
        fill_num(df,
                 num_cols + ColumnTypeEnum.v2_new_number_col.value, handle_type, model_id,
                 type_='fill_na_-1',
                 old_model=old_model)
        printx(f"[预处理]填充数值耗时：{time.time() - start_time}")  # 0.12473511695861816
        start_time = time.time()
        printx('[预处理]开始处理数组字段')
        df = pre_handle_array_v2(
            df,
            array_cols + ColumnTypeEnum.v2_new_array_col.value,
            model_id,
            handle_type)
        printx(f"[预处理]处理数组耗时：{time.time() - start_time}")  # 14.76184344291687
        start_time = time.time()
        printx('[预处理]填充布字段')
        # fill_bool_with_num mutates df in place.
        fill_bool_with_num(df, bool_cols)
        printx(f"[预处理]处理逻辑值耗时：{time.time() - start_time}")  # 0.9069101810455322
        start_time = time.time()
        printx('[预处理]开始聚类降噪')
        df, cluster = scan(df, handle_type)
        printx(f"[预处理]降噪耗时：{time.time() - start_time}")
        printx(f"[预处理]总耗时：{time.time() - origin_time}")
        return df, cluster, df2
    except Exception:
        # Previously the exception was swallowed and the function implicitly
        # returned None, so callers unpacking the 3-tuple crashed with an
        # unrelated TypeError. Print the traceback for the logs, then
        # re-raise so the real failure propagates.
        traceback.print_exc()
        raise
