import os
import re
from datetime import datetime
from typing import Optional

import joblib
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from pandas import DataFrame
from sklearn.impute import SimpleImputer
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, precision_score, f1_score, \
    recall_score
from xbase_util.common_util import check_path, date2s

from src.bean.column_type_enum import ColumnTypeEnum
from src.bean.database_file_enum import DatabaseFileEnum
from src.bean.increasing_param import IncreasingParam
from src.bean.model_database_enum import ModelDatabaseEnum
from src.bean.vector_name_enum import VectorNameEnum
from src.bean.x_typing import IncreasingFuncType, FILL_NUM_TYPE, PRE_HANDLE_TYPE
from src.col_bean.dangerous_enum import ColDangerousEnum
from src.constant import vector_file_path, project_root_path
from src.model.common.model_list import print_model_list_entrance
from src.util.common_util import printx, while_input, is_int, exclude_dropped_columns, \
    input_is_yes, is_int_between, remove_df
from src.util.config_manager import ConfigManager


# def multi_delim_tokenizer(text):
#     return [t.strip() for t in re.split(r'[/|,]+', text) if t]


def parse_str_to_list(obj):
    """Normalize *obj* into a list of non-empty string tokens.

    Lists are stringified element-wise; strings are split on any of the
    delimiter characters ``] [ \\ " space / = - . ; '`` plus commas;
    anything else yields an empty list.
    """
    if isinstance(obj, list):
        return [str(i) for i in obj]
    if isinstance(obj, str):
        # One C-level str.translate pass replaces the original chain of
        # eleven .replace() calls, mapping every delimiter to a comma.
        delims = str.maketrans({ch: "," for ch in "][\\\" /=-.;'"})
        return [item for item in obj.translate(delims).split(",") if item]
    return []


def convert_to_int_list(obj):
    """Parse *obj* (string or list) into a list of ints.

    Tokens come from :func:`parse_str_to_list`; non-numeric tokens are
    skipped by design.
    """
    int_list = []
    for item in parse_str_to_list(obj):
        try:
            int_list.append(int(item))
        except ValueError:
            # Narrowed from a bare `except:` — tokens are always strings,
            # so only a parse failure (ValueError) can occur here, and a
            # bare except would also swallow KeyboardInterrupt.
            pass
    return int_list


def __get_base_vector():
    """Load the persisted vector store from disk, or an empty dict when absent."""
    if not os.path.exists(vector_file_path):
        return {}
    return joblib.load(vector_file_path)


def load_vectors(encoder_type, model_id):
    """Return the saved encoder of *encoder_type* for *model_id*, or None when missing."""
    printx(f"加载编码器的模型id:{model_id}")
    store = __get_base_vector()
    per_model = store.setdefault(f"{model_id}", {})
    return per_model.get(encoder_type)


def save_vector(sub_vectors, encoder_type, model_id):
    """Persist *sub_vectors* under (*model_id*, *encoder_type*) in the vector store."""
    store = __get_base_vector()
    store.setdefault(f"{model_id}", {})[encoder_type] = sub_vectors
    joblib.dump(store, check_path(vector_file_path))


def get_model(config):
    """
    Interactively select a trained model from the config.

    Returns the matching model record, or None when no model has been
    trained or the entered id does not exist.
    """
    model_list = config.get_model_list()
    if not model_list:
        printx("没有训练过模型")
        return None
    print_model_list_entrance(config)
    num = while_input("请输入要预测的模型id:", is_int)
    selected = next(
        (m for m in model_list if f'{m[ModelDatabaseEnum.id.value]}' == f'{num}'),
        None,
    )
    if selected is None:
        printx("模型不存在")
        return None
    return selected


def split_req_uri(uri):
    """Split a request URI into its unique path/extension segments.

    Quotes are stripped, the string is split on '/' and '.', empty
    segments are dropped, and segment order is NOT preserved (set
    semantics).
    """
    # Bug fix: single quotes were previously replaced with the letter "l"
    # (`.replace("\'", "l")`) instead of being removed, corrupting
    # segments such as 'admin' into ladminl.
    cleaned = f"{uri}".replace("\"", "").replace("'", "")
    return list(set(seg for seg in re.split(r'[/.]', cleaned) if seg))


def drop_and_get_cols(df):
    """Drop excluded columns and duplicate rows, then classify the survivors.

    :param df: input DataFrame.
    :return: (df, num_cols, str_cols, bool_cols, array_cols) where each
        list holds the surviving columns of that declared type.
    """
    keep_cols = exclude_dropped_columns(df.columns.tolist())
    keep_cols = list(set(df.columns) & set(keep_cols))
    df = df.drop(columns=df.columns.difference(keep_cols))
    try:
        df = df.drop_duplicates(subset=keep_cols, keep='first')
    except Exception:
        # Best-effort de-duplication: drop_duplicates raises TypeError when
        # cells hold unhashable values (e.g. lists in array columns); keep
        # the data unchanged then.  Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    num_cols = list(set(df.columns) & set(exclude_dropped_columns(ColumnTypeEnum.number_col.value)))
    str_cols = list(set(df.columns) & set(exclude_dropped_columns(ColumnTypeEnum.str_col.value)))
    bool_cols = list(set(df.columns) & set(exclude_dropped_columns(ColumnTypeEnum.bool_col.value)))
    array_cols = list(set(df.columns) & set(exclude_dropped_columns(ColumnTypeEnum.array_col.value)))
    return df, num_cols, str_cols, bool_cols, array_cols


def save_model(config, model_id, model, train_time, files, sample_type, iterations, depth, learning_rate, test_size, f1,
               recall,
               accuracy, precision, selected_metric, use_best_model, is_grid_search, cluster: str):
    """Prompt the user and, when confirmed, persist the model file and its
    database record (name, description, hyper-parameters and metrics)."""
    if not input_is_yes("[首次训练]是否保存模型?"):
        return
    model_name = while_input("[首次训练]请输入模型名称:", None)
    model_description = while_input("[首次训练]请输入模型描述:", None)
    current_model_path = os.path.join(project_root_path, 'model', f"model_{model_id}.pkl")
    model.save_model(current_model_path)
    record = {
        ModelDatabaseEnum.id.value: model_id,
        ModelDatabaseEnum.model_path.value: current_model_path,
        ModelDatabaseEnum.model_name.value: model_name,
        ModelDatabaseEnum.model_description.value: model_description,
        ModelDatabaseEnum.train_time.value: train_time,
        ModelDatabaseEnum.model_data_files.value: files,
        ModelDatabaseEnum.increasing_base_on.value: "无",
        ModelDatabaseEnum.sample_type.value: '过采样' if f"{sample_type}" == '1' else "欠采样",
        # Hyper-parameters and evaluation metrics of this training run.
        ModelDatabaseEnum.iterations.value: iterations,
        ModelDatabaseEnum.depth.value: depth,
        ModelDatabaseEnum.learning_rate.value: learning_rate,
        ModelDatabaseEnum.test_size.value: test_size,
        ModelDatabaseEnum.f1.value: f1,
        ModelDatabaseEnum.recall.value: recall,
        ModelDatabaseEnum.accuracy.value: accuracy,
        ModelDatabaseEnum.precision.value: precision,
        ModelDatabaseEnum.eval_metric.value: selected_metric,
        ModelDatabaseEnum.use_best_model.value: use_best_model,
        ModelDatabaseEnum.cluster.value: cluster,
        ModelDatabaseEnum.is_grid_search.value: is_grid_search,
    }
    config.database.append(record)
    config.db_save()


def fill_num(df, num_cols, pre_handle_type: PRE_HANDLE_TYPE, model_id, old_model, type_: FILL_NUM_TYPE):
    """Fill missing values in the numeric columns of *df*.

    For the constant strategies ("fill_na_-1" / "fill_na") empty strings
    and NaN are replaced with -1 or 0 and the columns are cast to int.
    Otherwise a mean SimpleImputer is used: fitted and persisted during
    training, loaded and re-applied during prediction and incremental
    training.

    :param df: input DataFrame (mutated in place in the imputer branches).
    :param num_cols: names of the numeric columns to process.
    :param pre_handle_type: train / predict / increase mode selector.
    :param model_id: id of the current model (keys the persisted vectors).
    :param old_model: base model record; in increase mode its id locates
        the previously fitted imputer.
    :param type_: fill strategy ("fill_na_-1", "fill_na", or mean-impute).
    :return: the processed DataFrame.
    """
    cols = [col for col in df.columns if col in num_cols]
    if type_ in ["fill_na_-1", "fill_na"]:
        fill_val = -1 if type_ == "fill_na_-1" else 0
        df.loc[:, cols] = (
            df[cols]
            .replace('', np.nan)  # empty strings become NaN first
            .fillna(fill_val)  # then fill the gaps with the constant
            .astype(int)  # finally cast to int
        )
    else:
        if pre_handle_type == PRE_HANDLE_TYPE.train:
            # Fit on the training data and persist both the fitted encoder
            # and the exact column list it was fitted on.
            df_pandas = df[cols]
            imputer = SimpleImputer(strategy='mean')
            imputed = imputer.fit_transform(df_pandas)
            save_vector(imputer, VectorNameEnum.v2_imputer_encoder.value, model_id)
            save_vector(cols, VectorNameEnum.v2_imputer_columns.value, model_id)
            df_imputed = pd.DataFrame(imputed, columns=cols, index=df_pandas.index)
            df[cols] = df_imputed
        elif pre_handle_type in [PRE_HANDLE_TYPE.predict, PRE_HANDLE_TYPE.increase]:
            # Predict reuses this model's own imputer; increase starts from
            # the base (old) model's imputer.
            old_model_id = model_id if pre_handle_type == PRE_HANDLE_TYPE.predict else old_model[
                ModelDatabaseEnum.id.value]
            imputer = load_vectors(VectorNameEnum.v2_imputer_encoder.value, old_model_id)
            saved_columns = pd.Index(load_vectors(VectorNameEnum.v2_imputer_columns.value, old_model_id))
            df_pandas = df[saved_columns]
            df_transformed = imputer.transform(df_pandas)
            df_imputed = pd.DataFrame(df_transformed, columns=saved_columns, index=df_pandas.index)
            df[saved_columns] = df_imputed
            if pre_handle_type == PRE_HANDLE_TYPE.increase:
                # NOTE(review): this re-saves the old imputer under the new
                # model id but records the *current* `cols`, not the
                # `saved_columns` the imputer was actually fitted on —
                # confirm this mismatch is intentional.
                save_vector(imputer, VectorNameEnum.v2_imputer_encoder.value, model_id)
                save_vector(cols, VectorNameEnum.v2_imputer_columns.value, model_id)
    return df


def fill_bool_with_num(df, bool_cols):
    """Convert each listed column of *df* in place to 0/1 ints.

    A cell becomes 1 when its string form is "True" or "1", else 0.
    """
    total = len(bool_cols)
    for idx, name in enumerate(bool_cols, start=1):
        printx(f"[预处理]逻辑值:{idx}/{total}  {name}")
        df[name] = df[name].astype(str).isin(["True", "1"]).astype(int)


def split_df_by_chunk(df, chunk_size=1000):
    """
    Split a DataFrame into consecutive chunks of at most *chunk_size* rows.

    :param df: source pandas DataFrame
    :param chunk_size: maximum number of rows per chunk
    :return: list of independent chunk copies (empty list for empty input)
    """
    total = len(df)
    if total == 0:
        return []
    chunks = []
    start = 0
    while start < total:
        chunks.append(df.iloc[start:start + chunk_size].copy())
        start += chunk_size
    return chunks


def get_old_model(config):
    """Pick the base model for incremental training.

    Offers the most recent model first; otherwise lets the user choose
    one by id.  Returns None when no model exists or the id is unknown.
    """
    model_list = config.get_model_list()
    if not model_list:
        printx("没有训练过模型")
        return None
    latest = model_list[-1]
    if input_is_yes(f"是否使用上一次的模型{latest[ModelDatabaseEnum.model_name.value]}"):
        return latest
    print_model_list_entrance(config)
    num = while_input("请输入要增量的模型id:", is_int)
    matched = [m for m in model_list if f'{m[ModelDatabaseEnum.id.value]}' == f'{num}']
    if not matched:
        printx("模型不存在")
        return None
    return matched[0]


def is_depth_int(text):
    """Validate *text* as a usable CatBoost tree depth (integer in [1, 16]).

    The previous check only enforced the upper bound, so zero and
    negative values were accepted even though CatBoost rejects them at
    fit time; the lower bound closes that gap.
    """
    try:
        return 1 <= int(text) <= 16
    except ValueError:
        return False


def get_df_by_path(file_path_list, origin_predict_file=''):
    """Load and merge CSV files, collecting per-file metadata records.

    :param file_path_list: paths of the CSV files to load.
    :param origin_predict_file: original predict file recorded per entry.
    :return: (merged DataFrame, list of per-file metadata dicts).
    """
    files = []
    frames = []
    for path in file_path_list:
        data = remove_df(pd.read_csv(path))
        files.append({
            DatabaseFileEnum.path.value: path,
            DatabaseFileEnum.abnormal_count.value: len(data[data[ColDangerousEnum.is_dangerous.value] == True]),
            DatabaseFileEnum.normal_count.value: len(data[data[ColDangerousEnum.is_dangerous.value] == False]),
            DatabaseFileEnum.start_time.value: '',
            DatabaseFileEnum.end_time.value: '',
            DatabaseFileEnum.expression.value: '',
            DatabaseFileEnum.origin_predict_file.value: origin_predict_file,
        })
        frames.append(data.reset_index(drop=True))
    if not frames:
        # No input files: preserve the original empty-DataFrame result.
        return pd.DataFrame(), files
    # Concatenate once after the loop instead of per iteration — the old
    # pd.concat-in-loop pattern copied the accumulated frame every time
    # (quadratic in total rows).
    df = pd.concat(frames, ignore_index=True)
    return df, files


def test_evaluate(new_model, X_test, y_test):
    """Evaluate *new_model* on the test split, printing the confusion
    matrix, classification report and headline metrics.

    :return: (accuracy, precision, recall, f1)
    """
    printx("[测试集评估]计算测试集结果")
    y_pred = new_model.predict(X_test)
    printx("[测试集评估]混淆矩阵:")
    printx(confusion_matrix(y_test, y_pred))
    printx("[测试集评估]分类报告:")
    printx(classification_report(y_test, y_pred))
    # Same computation and print order as before, expressed as a table.
    scores = {
        "准确率": accuracy_score(y_test, y_pred),
        "精确率": precision_score(y_test, y_pred),
        "F1分数": f1_score(y_test, y_pred),
        "召回率": recall_score(y_test, y_pred),
    }
    for label, value in scores.items():
        printx(f"[测试集评估]{label}: {value}")
    return scores["准确率"], scores["精确率"], scores["召回率"], scores["F1分数"]


def extract_predict_error(df2, y_pred):
    """Collect the rows of *df2* whose prediction in *y_pred* disagrees
    with the labelled `is_dangerous` column.

    :param df2: original labelled DataFrame, row-aligned with *y_pred*.
    :param y_pred: predicted labels; '1' means dangerous/abnormal.
    :return: (errors_df, normal_predicted_abnormal_count,
        abnormal_predicted_normal_count); an empty DataFrame and
        (None, None) when there are no misclassified rows.
    """
    printx("[文件预测]正在筛选预测错误的数据...")
    incorrect_list = []
    for i, pred in enumerate(y_pred):
        # String-compare both sides so bools, ints and strings all work.
        is_dangerous = str(df2.iloc[i]["is_dangerous"]).lower()
        pred_label = '1' if f'{pred}' == '1' else '0'
        if (is_dangerous in ['true', '1'] and pred_label == '0') or (
                is_dangerous in ['false', '0'] and pred_label == '1'):
            incorrect_list.append(df2.iloc[i])
    if len(incorrect_list) == 0:
        printx("[文件预测]无预测错误的数据，将不进行增量训练", is_error=True)
        return pd.DataFrame(), None, None
    data = pd.DataFrame(incorrect_list)
    # NOTE(review): these counts compare against Python booleans, while
    # the loop above tolerated string labels like "True"/"1" — if the
    # column holds strings, both counts are 0 here; confirm the dtype.
    normal_data_predict_as_abnormal = len(data[data['is_dangerous'] == False])
    abnormal_data_predict_as_normal = len(data[data['is_dangerous'] == True])
    printx(
        f"[文件预测]预测错误的数据共:{len(data)},其中正常数据预测为异常有:{normal_data_predict_as_abnormal},异常数据预测成正常有:{abnormal_data_predict_as_normal}")
    return data, normal_data_predict_as_abnormal, abnormal_data_predict_as_normal


def save_increasing_data(data, n2a, a2n, still_save=False):
    """Write mispredicted rows to a timestamped CSV for incremental training.

    :param data: DataFrame of mispredicted rows.
    :param n2a: count of normal rows predicted as abnormal.
    :param a2n: count of abnormal rows predicted as normal.
    :param still_save: when True, save even if the increment condition
        (errors in both directions) is not met.
    :return: the saved file path, or None when saving was skipped.
    """
    condition_unmet = n2a == 0 or a2n == 0
    if condition_unmet and not still_save:
        printx("[文件预测]不符合增量条件\n", is_error=True)
        return None
    stamp = date2s(datetime.now(), '%m_%d_%H_%M')
    out_path = os.path.join(project_root_path, "data", f"increasing_{stamp}.csv")
    data.to_csv(out_path, index=False)
    printx(f"[文件预测]已保存预测错误的数据到: {out_path}")
    return out_path


def common_predict(config: ConfigManager, df: DataFrame, df2: DataFrame, old_model: dict, file_path_list,
                   increasing_method: Optional[IncreasingFuncType] = None):
    """Run a saved CatBoost model over *df* and offer follow-up actions
    on the misclassified rows (view / save / incremental training).

    :param config: application config manager.
    :param df: preprocessed feature DataFrame (may include is_dangerous,
        which is excluded before prediction).
    :param df2: original labelled DataFrame, row-aligned with *df*.
    :param old_model: database record of the model to load.
    :param file_path_list: source files, forwarded to the increment step.
    :param increasing_method: optional callback performing the actual
        incremental training.
    """
    model = CatBoostClassifier()
    model.load_model(old_model[ModelDatabaseEnum.model_path.value])
    # Predict on every column except the label itself.
    x_data = df[[item for item in df.columns.tolist() if item != 'is_dangerous']]
    y_pred = model.predict(x_data)
    result_normal = sum(1 for item in y_pred if f'{item}' == '0')
    result_abnormal = sum(1 for item in y_pred if f'{item}' == '1')
    printx(f"[文件预测]文件预测结果：异常有{result_abnormal}个|正常有{result_normal}个")
    if result_abnormal <= 0 or result_normal <= 0:
        # All-one-class predictions leave nothing to compare against.
        printx("[文件预测]预测结果中没有正常或异常数据")
        return
    num = int(while_input("预测错误的数据选项 1:查看|2:保存|3:增量:", is_int_between, (1, 3)))
    data, normal_data_predict_as_abnormal, abnormal_data_predict_as_normal = extract_predict_error(df2, y_pred)
    if num == 2:
        # Force-save regardless of the increment condition.
        save_increasing_data(data, normal_data_predict_as_abnormal, abnormal_data_predict_as_normal, still_save=True)
    if num == 3:
        predict_file = save_increasing_data(data, normal_data_predict_as_abnormal, abnormal_data_predict_as_normal)
        if increasing_method is not None:
            increasing_method(config, IncreasingParam(origin_file=file_path_list, file=predict_file,
                                                      model=old_model))
        else:
            printx("(未进行增量操作)")


def print_vector_entrance(config: ConfigManager) -> None:
    """Print every stored model id with its number of saved encoders."""
    store = __get_base_vector()
    for model_key, encoders in store.items():
        printx(f"模型id:{model_key} 字段数量:{len(encoders)}")


def print_vector_entrance_detail(config: ConfigManager) -> None:
    """Print each stored model id together with its full encoder payload."""
    for model_key, payload in __get_base_vector().items():
        printx(f"模型id:{model_key}")
        printx(f"模型:{payload}")
