import json
import os
from itertools import chain
from urllib.parse import urlparse, parse_qs

import joblib
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, f1_score, \
    recall_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer

from constant import vector_path, use_drop_cols, use_same_value_cols, use_high_dup_cols, use_geo_cols, \
    predict_or_train_exclude_statistic_cols, predict_or_train_exclude_geo_cols
from src.columns.col_provider import pre_handle_drop_columns, pre_handle_same_value, pre_handle_high_dup_cols, pre_handle_geo_cols, exclude_cols_statistic, exclude_cols_geo
from src.columns.cols import all_cols
from src.util.interact_util import while_input, is_float, is_int_between

# Module-wide text hasher shared by text_to_vec and process_str_array.
# HashingVectorizer is stateless (no fit step), so it needs no persistence
# and is safe to share; every input string maps to a fixed 10-dim vector.
vect = HashingVectorizer(
    n_features=10,  # output dimensionality per text column
    analyzer='char_wb',  # character n-grams padded to word boundaries
    ngram_range=(3, 5),  # use 3- to 5-character grams
    alternate_sign=False,  # keep all hashed feature values non-negative
    norm='l2',  # L2-normalize each output row
    lowercase=True
)


# Single-pass translation table: every separator-ish character becomes a
# comma. Replaces a chain of 11 .replace() calls with one C-level pass.
_SEPARATOR_TABLE = str.maketrans({ch: ',' for ch in "][\\\" /=-.;'"})


def parse_str_to_list(obj):
    """Normalize *obj* into a flat list of non-empty string tokens.

    A list is stringified element-wise and returned as-is. Anything else
    is rendered via f-string formatting (so None -> "None"), every
    separator character (brackets, quotes, backslash, space, slash,
    equals, dash, dot, semicolon, apostrophe) is turned into a comma in
    one translate pass, and the result is split on commas with empty
    tokens dropped.

    :param obj: list or any printable value.
    :return: list of non-empty string tokens.
    """
    if isinstance(obj, list):
        return [str(item) for item in obj]
    normalized = f"{obj}".translate(_SEPARATOR_TABLE)
    return [token for token in normalized.split(",") if token]


def convert_to_str(obj):
    """Render *obj* as a single comma-joined string of its tokens."""
    tokens = parse_str_to_list(obj)
    return ','.join(tokens)


def fill_str_with_blank(df, str_cols):
    """Fill NaNs with the literal 'blank' and cast the columns to str.

    Mutates *df* in place and also returns it for chaining.
    NOTE(review): ``progress_apply`` requires ``tqdm.pandas()`` to have
    been registered elsewhere — confirm at the call site.
    """
    for name in str_cols:
        filled = df[name].fillna('blank')
        df[name] = filled.progress_apply(str)
    return df


def text_to_vec(text):
    """Hash a single value into a flat 10-element float list via the shared vectorizer."""
    row = vect.transform([str(text)])
    return row.toarray().flatten().tolist()


def process_str(series, col, i, all):
    """Vectorize one text column into a 10-column numeric DataFrame.

    :param series: pandas Series of raw text values.
    :param col: source column name, used as the output column prefix.
    :param i: zero-based progress index (display only).
    :param all: total column count (display only; name kept for callers
        even though it shadows the builtin).
    :return: DataFrame with columns ``{col}_0 .. {col}_9``.
    """
    print(f"[预处理]文本字段 {i + 1}/{all} {col} ")
    idf_max_features = 10
    hashed = series.apply(text_to_vec)
    out_columns = [f'{col}_{j}' for j in range(idf_max_features)]
    return pd.DataFrame(hashed.tolist(), columns=out_columns)


def process_str_array(col_data, col_name, i, all):
    """Vectorize a column of string arrays into 10 hashed numeric columns.

    Each cell is flattened to a comma-joined string first, then hashed
    with the shared vectorizer. Index is reset so the result can be
    concatenated positionally with other frames.
    """
    print(f"[预处理]多进程处理文本数组:{i + 1}/{all}")
    joined = col_data.apply(convert_to_str)
    matrix = vect.transform(joined).toarray()
    frame = pd.DataFrame(matrix, columns=[f"{col_name}_{k}" for k in range(10)])
    return frame.reset_index(drop=True)


def extract_uri_path(uris):
    """Extract the deduplicated URL paths from a raw URI field.

    Each token is given an ``http://`` scheme if missing so urlparse
    treats the leading segment as the netloc rather than the path.

    :param uris: raw value holding one or more URIs (any shape that
        parse_str_to_list accepts).
    :return: JSON array string of unique paths (order unspecified,
        since it comes from a set).
    """
    paths = []
    for uri in parse_str_to_list(uris):
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            paths.append(urlparse(uri).path)
        except ValueError:
            # urlparse raises ValueError on malformed input (e.g. bad
            # IPv6 netloc); was a bare except that also swallowed
            # KeyboardInterrupt/SystemExit. Best-effort: skip the token.
            pass
    return json.dumps(list(set(paths)))


def extract_uri_domain(uri_list):
    """Extract the deduplicated network locations (domains) from a raw URI field.

    Mirrors extract_uri_path: tokens get an ``http://`` scheme when
    missing so urlparse puts the host into ``netloc``.

    :param uri_list: raw value holding one or more URIs.
    :return: JSON array string of unique netlocs (set order, unspecified).
    """
    domains = []
    for uri in parse_str_to_list(uri_list):
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            domains.append(urlparse(uri).netloc)
        except ValueError:
            # Narrowed from a bare except: only skip tokens urlparse
            # genuinely rejects, never control-flow exceptions.
            pass
    return json.dumps(list(set(domains)))


def extract_uri_params(uri_list):
    """Extract unique ``key=value`` query parameters across all URIs.

    Only the first value of each repeated query key is kept (parse_qs
    returns value lists).

    :param uri_list: raw value holding one or more URIs.
    :return: JSON array string of unique "key=value" strings
        (set order, unspecified).
    """
    param_groups = []
    for uri in parse_str_to_list(uri_list):
        if not uri.startswith(('http://', 'https://')):
            uri = 'http://' + uri
        try:
            query_map = parse_qs(urlparse(uri).query)
        except ValueError:
            # Narrowed from a bare except; try body reduced to the one
            # call that can actually raise. Skip unparseable tokens.
            continue
        if query_map:
            param_groups.append([f"{key}={values[0]}" for key, values in query_map.items()])
    return json.dumps(list(set(chain(*param_groups))))


def extract_all(uri):
    """Run all three URI extractors on one raw value.

    :return: tuple of JSON strings ``(domains, paths, params)``.
    """
    domains = extract_uri_domain(uri)
    paths = extract_uri_path(uri)
    params = extract_uri_params(uri)
    return domains, paths, params


def convert_to_int_list(obj):
    """Best-effort parse of *obj*'s tokens into a list of ints.

    Non-numeric tokens are silently dropped.

    :param obj: any value parse_str_to_list accepts.
    :return: list of ints (possibly empty).
    """
    result = []
    for token in parse_str_to_list(obj):
        try:
            result.append(int(token))
        except ValueError:
            # Tokens are always str, so int() can only raise ValueError;
            # narrowed from a bare except that hid real errors.
            pass
    return result


def process_numeric_col(col, col_data, is_train, column_map):
    """Multi-hot encode a numeric-list column with a MultiLabelBinarizer.

    :param col: column name, used for output prefixes and as the
        column_map key.
    :param col_data: pandas Series of raw values parseable into int lists.
    :param is_train: True fits a fresh binarizer; False reuses the one
        stored in *column_map*.
    :param column_map: mapping of column name -> fitted binarizer
        (only read when not training).
    :return: (encoded DataFrame with ``{col}_{class}`` columns, column_map).
    """
    print(f"[预处理]多进程处理数值 {col}")
    int_lists = col_data.apply(convert_to_int_list)
    if is_train:
        # Training returns a freshly built column_map for persistence.
        mlb = MultiLabelBinarizer()
        encoded = mlb.fit_transform(int_lists)
        column_map = {col: mlb}
    else:
        # Prediction reuses the binarizer fitted at training time.
        mlb = column_map[col]
        encoded = mlb.transform(int_lists)
    out_columns = [f"{col}_{c}" for c in mlb.classes_]
    return pd.DataFrame(encoded, columns=out_columns), column_map


def predict_sample(df):
    """Split *df* into train/test and rebalance the training portion.

    Interactively asks for the test-set fraction and the sampling
    strategy (1 = over-sampling, anything else = under-sampling).

    :param df: DataFrame containing an ``is_dangerous`` label column.
    :return: (x_resampled, y_resampled, x_test, y_test, test_size, sample_type).
    """
    print("[训练]开始读取、合并数据")
    features = df.drop(columns=['is_dangerous'])
    labels = df["is_dangerous"]
    print("[训练]划分训练集和测试集")
    test_size = float(while_input("请输入测试集占比(建议0.2):", is_float))
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=test_size, random_state=42, stratify=labels
    )
    sample_type = while_input("请输入采样类型[1:过采样 其他:欠采样]:", is_int_between, (1, 2))
    if f"{sample_type}" == '1':
        print("[训练]开始过采样")
        sampler = RandomOverSampler(random_state=42)
    else:
        print("[训练]开始欠采样")
        sampler = RandomUnderSampler(random_state=42)
    x_resampled, y_resampled = sampler.fit_resample(x_train, y_train)
    return x_resampled, y_resampled, x_test, y_test, test_size, sample_type


def test_evaluate(new_model, X_test, y_test):
    """Evaluate a fitted model on the held-out test set and print a report.

    :param new_model: fitted estimator exposing ``predict``.
    :param X_test: test features.
    :param y_test: true labels.
    :return: (accuracy, precision, recall, f1).
    """
    print("[测试集评估]计算测试集结果")
    y_pred = new_model.predict(X_test)
    print("[测试集评估]混淆矩阵:")
    print(confusion_matrix(y_test, y_pred))
    print("[测试集评估]分类报告:")
    print(classification_report(y_test, y_pred))
    # Collect the scalar metrics, then report them together.
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    print(f"[测试集评估]准确率: {accuracy}")
    print(f"[测试集评估]精确率: {precision}")
    print(f"[测试集评估]F1分数: {f1}")
    print(f"[测试集评估]召回率: {recall}")
    return accuracy, precision, recall, f1


def save_vectory(m):
    """Persist *m* to ``vector_path``, creating the parent directory if needed."""
    parent_dir = os.path.dirname(vector_path)
    os.makedirs(parent_dir, exist_ok=True)
    joblib.dump(m, vector_path)


def load_vectory():
    """Load the object persisted at ``vector_path``.

    :return: the deserialized object saved by save_vectory.
    :raises FileNotFoundError: if nothing was saved at vector_path.
    """
    # Dropped the os.makedirs call that was copy-pasted from
    # save_vectory: creating the directory on a read path cannot make
    # the load succeed and only masked the fact that nothing was saved.
    return joblib.load(vector_path)


def loop_exe(op_map) -> None:
    """Show a numbered menu in a loop and run the chosen operation.

    An entry whose ``func`` is None acts as the exit option.

    :param op_map: sequence of objects with ``key`` (label) and
        ``func`` (zero-arg callable or None) attributes.
    """
    while True:
        print("——" * 30)
        for position, entry in enumerate(op_map, start=1):
            print(f"{position}. {entry.key}")
        choice = int(while_input("请输入执行序号:", is_int_between, (1, len(op_map))))
        selected = op_map[choice - 1]
        if selected.func is None:
            return
        selected.func()


def get_required_columns():
    """Return the origin column names, minus any groups disabled by config flags."""
    # Each (use-flag, columns) pair: when the flag is off, the group is excluded.
    flag_groups = (
        (use_drop_cols, pre_handle_drop_columns),
        (use_same_value_cols, pre_handle_same_value),
        (use_high_dup_cols, pre_handle_high_dup_cols),
        (use_geo_cols, pre_handle_geo_cols),
    )
    excluded = set()
    for enabled, group in flag_groups:
        if not enabled:
            excluded.update(group)
    return [
        entry['col'] for entry in all_cols
        if entry['is_origin_col'] and entry['col'] not in excluded
    ]


def get_exclude_cols():
    """Return the columns to exclude at predict/train time, per config flags."""
    excluded = []
    if predict_or_train_exclude_statistic_cols:
        excluded.extend(exclude_cols_statistic)
    if predict_or_train_exclude_geo_cols:
        excluded.extend(exclude_cols_geo)
    return excluded
