# -*- coding:utf-8 -*-
"""
通用工具函数 - 不依赖TensorFlow和Keras
包含数据预处理、可视化、工具函数等
"""

import time
import os
import math
import itertools
from functools import wraps
import numpy as np
import pandas as pd
from sklearn import __version__ as sklearn_version
from packaging import version
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import (accuracy_score,
                             balanced_accuracy_score,
                             precision_score,
                             f1_score,
                             recall_score,
                             roc_auc_score)
from sklearn.decomposition import PCA
from sklearn.utils import shuffle
from imblearn.metrics import geometric_mean_score
import matplotlib.pyplot as plt
import psutil
from pandarallel import pandarallel
import gwlsa_settings as GS

import warnings
# Suppress pandarallel's Windows troubleshooting notice, which is printed on
# every pandarallel.initialize() call and clutters the logs.
warnings.filterwarnings('ignore', message="You are on Windows. If you detect any issue with pandarallel, be sure you checked out the Troubleshooting page:")

def check_data_settings_folder():
    '''
    Ensure that every output folder configured in gwlsa_settings exists,
    creating any that are missing.

    The original code repeated the same `False==os.path.exists(...)` guard
    five times; the folders are now handled in a single loop. Behavior is
    unchanged: existing folders are left untouched, missing ones are created
    (including intermediate directories, thanks to exist_ok=True).
    '''
    output_folders = (GS.PREDICTED_Y_DIR,
                      GS.FPR_TPR_DIR,
                      GS.SAVED_MODEL_DIR,
                      GS.SAVED_PLOTS,
                      GS.gwlsa_train_outputs)
    for folder in output_folders:
        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)


def _read_and_concat_csv(data_folder, filename_lst, file_encoding):
    '''
    Read every CSV file in *filename_lst* from *data_folder*, drop rows
    containing NaN, and return the vertically concatenated DataFrame
    (None when filename_lst is empty).
    '''
    combined_df = None
    for csv_filename in filename_lst:
        file_path = data_folder + '/' + csv_filename
        t_df = pd.read_csv(file_path, encoding=file_encoding)
        t_df.dropna(inplace=True)
        if combined_df is None:
            combined_df = t_df
        else:
            combined_df = pd.concat([combined_df, t_df])
    return combined_df


def read_trainValTest_fromCSV(data_folder=None,
                              train_csv_filename_lst=None,
                              val_csv_filename_lst=None,
                              test_csv_filename_lst=None,
                              drop_columname=None,
                              encoding='gb2312',
                              is_shuffle=True,
                              whether_scaled=True,
                              scaled_base='x_train',
                              onlyReturn_scaled_allData=False):
    '''
    Read the train/validation/test CSV files and (optionally) scale the data.

    Parameters
    ----------
    data_folder : str, optional
        Folder holding the CSV files; defaults to GS.DATA_LOAD_DIR.
    train_csv_filename_lst, val_csv_filename_lst, test_csv_filename_lst : list, optional
        CSV file names for each split; default to the lists in gwlsa_settings.
    drop_columname : str or list, optional
        Feature column(s) to exclude from X.
    encoding : str, optional
        CSV file encoding; defaults to 'gb2312'. When None, falls back to
        GS.net_params['csv_encoding'].
    is_shuffle : bool, optional
        Whether to shuffle each split after loading.
    whether_scaled : bool, optional
        Whether to standard-scale X.
    scaled_base : str, optional
        Which data the scaler statistics are fitted on: 'x_train' or 'x'
        (the concatenation of all splits).
    onlyReturn_scaled_allData : bool, optional
        When True, return only the (scaled X, Y, geoId) triple for the full
        data set.

    Returns
    -------
    When onlyReturn_scaled_allData is False:
        (x_all, y_all, geoId_all), (x_train, y_train, geoId_train),
        (x_val, y_val, geoId_val), (x_test, y_test, geoId_test), nb_features
    Otherwise:
        (x_all, y_all, geoId_all)
    '''
    # Fall back to the defaults configured in gwlsa_settings.
    if data_folder is None:
        data_folder = GS.DATA_LOAD_DIR
    if train_csv_filename_lst is None:
        train_csv_filename_lst = GS.TRAIN_CSV_FILENAME_LIST
    if val_csv_filename_lst is None:
        val_csv_filename_lst = GS.VAL_CSV_FILENAME_LIST
    if test_csv_filename_lst is None:
        test_csv_filename_lst = GS.TEST_CSV_FILENAME_LIST

    # Columns to keep; copy because entries may be removed below.
    x_columns = GS.x_column_names.copy()
    if drop_columname and isinstance(drop_columname, str):
        x_columns.remove(drop_columname)
    elif drop_columname and isinstance(drop_columname, list):
        for drop_col in drop_columname:
            x_columns.remove(drop_col)
    y_column = GS.y_column_name
    nb_features = len(x_columns)

    # BUGFIX: the original check was `if encoding is None: file_encoding = encoding`,
    # which (a) ignored any caller-supplied encoding and (b) replaced the
    # configured encoding with None when encoding was None. The intent is:
    # use the caller's encoding when given, otherwise the configured default.
    file_encoding = GS.net_params['csv_encoding']
    if encoding is not None:
        file_encoding = encoding

    # Read the three splits (the reading loop is shared via the helper above).
    train_df = _read_and_concat_csv(data_folder, train_csv_filename_lst, file_encoding)
    val_df = _read_and_concat_csv(data_folder, val_csv_filename_lst, file_encoding)
    test_df = _read_and_concat_csv(data_folder, test_csv_filename_lst, file_encoding)

    # Shuffle each split independently.
    if is_shuffle:
        train_df = shuffle(train_df)
        val_df = shuffle(val_df)
        test_df = shuffle(test_df)

    # Keep only the required columns.
    geoId_train_ss = train_df[GS.id_column]
    x_train_df = train_df[x_columns]
    y_train_ss = train_df[y_column]

    geoId_val_ss = val_df[GS.id_column]
    x_val_df = val_df[x_columns]
    y_val_ss = val_df[y_column]

    geoId_test_ss = test_df[GS.id_column]
    x_test_df = test_df[x_columns]
    y_test_ss = test_df[y_column]

    # Concatenated full data set (train + val + test).
    X = pd.concat([x_train_df, x_val_df, x_test_df])
    Y = pd.concat([y_train_ss, y_val_ss, y_test_ss])
    geoId_ss = pd.concat([geoId_train_ss, geoId_val_ss, geoId_test_ss])

    X_arr, Y_arr, geoId_arr = X.values, Y.values, geoId_ss.values
    x_train_arr, y_train_arr, geoId_train_arr = x_train_df.values, y_train_ss.values, geoId_train_ss.values
    x_val_arr, y_val_arr, geoId_val_arr = x_val_df.values, y_val_ss.values, geoId_val_ss.values
    x_test_arr, y_test_arr, geoId_test_arr = x_test_df.values, y_test_ss.values, geoId_test_ss.values

    if whether_scaled and scaled_base == 'x_train':
        # Scale every split with statistics fitted on x_train.
        _, x_arr_scaled, _ = scale_X(x_train_arr, X_arr)
        _, x_train_scaled, _ = scale_X(x_train_arr, x_train_arr)
        _, x_val_scaled, _ = scale_X(x_train_arr, x_val_arr)
        _, x_test_scaled, _ = scale_X(x_train_arr, x_test_arr)
    elif whether_scaled and scaled_base == 'x':
        # Scale every split with statistics fitted on the full data set.
        _, x_arr_scaled, _ = scale_X(X_arr, X_arr)
        _, x_train_scaled, _ = scale_X(X_arr, x_train_arr)
        _, x_val_scaled, _ = scale_X(X_arr, x_val_arr)
        _, x_test_scaled, _ = scale_X(X_arr, x_test_arr)
    else:
        # No scaling requested (or unknown scaled_base): return raw arrays.
        x_arr_scaled = X_arr
        x_train_scaled = x_train_arr
        x_val_scaled = x_val_arr
        x_test_scaled = x_test_arr

    if not onlyReturn_scaled_allData:
        return ((x_arr_scaled, Y_arr, geoId_arr),
                (x_train_scaled, y_train_arr, geoId_train_arr),
                (x_val_scaled, y_val_arr, geoId_val_arr),
                (x_test_scaled, y_test_arr, geoId_test_arr),
                nb_features)
    else:
        return (x_arr_scaled, Y_arr, geoId_arr)


def scale_X(x_base, x_procesed, scaling_type='standard'):
    '''
    Standard-scale two arrays using statistics fitted on x_base.

    Returns (scaled_x_base, scaled_x_processed, scaler). For any
    scaling_type other than 'standard' the inputs are returned unchanged
    and scaler is None.
    '''
    if scaling_type != 'standard':
        return x_base, x_procesed, None
    fitted_scaler = StandardScaler().fit(x_base)
    base_scaled = fitted_scaler.transform(x_base)
    processed_scaled = fitted_scaler.transform(x_procesed)
    return base_scaled, processed_scaled, fitted_scaler


def get_pca_X(x_train, x_test, nb_pca_components=1):
    '''
    Fit a PCA with nb_pca_components on x_train and project both x_train
    and x_test into that component space.
    Returns (x_train_pca, x_test_pca, fitted_pca).
    '''
    pca_model = PCA(n_components=nb_pca_components)
    train_projected = pca_model.fit_transform(x_train)
    test_projected = pca_model.transform(x_test)
    return train_projected, test_projected, pca_model


def reshape_lstm_X(X, time_steps, nb_features):
    '''Reshape a 2-D sample matrix into (samples, time_steps, nb_features) for LSTM input.'''
    nb_samples = X.shape[0]
    return X.reshape((nb_samples, time_steps, nb_features))


def reshape_dnn_X(X, nb_features, channels=1):
    '''Reshape a 2-D sample matrix into (samples, nb_features, channels) for DNN/CNN input.'''
    nb_samples = X.shape[0]
    return X.reshape((nb_samples, nb_features, channels))


def prepare_X(X,
              nb_pca_or_nb_features,
              channels=1,
              do_pca=True,
              x_reshape_format=None):
    '''
    Standard-scale X, optionally apply PCA, then reshape for a network type.

    Parameters
    ----------
    X : 2D array
        Raw feature matrix.
    nb_pca_or_nb_features : int
        Number of features in X, or the number of PCA components to keep
        when do_pca is True.
    channels : int, optional
        Channel count (used as time_steps for the LSTM layout).
    do_pca : bool, optional
        Whether to apply PCA after scaling.
    x_reshape_format : str
        One of '3d_channel_first'/'lstm' -> (samples, time_steps, nb_features),
        '3d_channel_last'/'dnn'/'cnn' -> (samples, nb_features, channels),
        '2d'/'bp' -> (samples, nb_features).

    Returns
    -------
    (new_x, scaler, pca) where pca is None when do_pca is False.

    Raises
    ------
    Exception when x_reshape_format is None; ValueError for any other
    unsupported value (previously this fell through and raised NameError).
    '''
    # step 1: standard-scale X
    scaler = StandardScaler().fit(X)
    scaled_x = scaler.transform(X)
    # step 2: optional PCA transform
    x_pca = scaled_x
    pca = None
    if do_pca:
        pca = PCA(n_components=nb_pca_or_nb_features)
        x_pca = pca.fit_transform(scaled_x)
    if x_reshape_format is None:
        raise Exception('x_reshape_format必须设置正确的值。可选值为：3d_channel_first, 3d_channel_last, 2d')
    # step 3: reshape X
    if x_reshape_format == '3d_channel_first' or x_reshape_format == 'lstm':
        # LSTM input: (samples, time_steps, nb_features)
        time_steps = channels
        new_x = reshape_lstm_X(x_pca, time_steps, nb_pca_or_nb_features)
    elif x_reshape_format == '3d_channel_last' or x_reshape_format == 'dnn' or x_reshape_format == 'cnn':
        # DNN/CNN input: (samples, nb_features, channels)
        nb_features = nb_pca_or_nb_features
        new_x = reshape_dnn_X(x_pca, nb_features, channels)
    elif x_reshape_format == '2d' or x_reshape_format == 'bp':
        # BP input: keep the 2-D (samples, nb_features) shape
        new_x = x_pca
    else:
        # BUGFIX: an unrecognised value used to fall through and raise
        # NameError on the return below; fail fast with a clear message.
        raise ValueError(f'Unsupported x_reshape_format: {x_reshape_format!r}')
    return new_x, scaler, pca


def prepare_X_use_scaler_pca(X, nb_pca_or_nb_features, channels,
                             scaler, pca, x_reshape_format=None):
    '''
    Transform X with ALREADY-FITTED scaler/PCA objects, then reshape it.

    Parameters
    ----------
    X : 2D array
        Raw feature matrix.
    nb_pca_or_nb_features : int
        Feature (or PCA component) count used for reshaping.
    channels : int
        Channel count (used as time_steps for the LSTM layout).
    scaler : fitted scaler object (e.g. StandardScaler)
        Applied via .transform().
    pca : fitted PCA object or None
        Applied via .transform() when not None.
    x_reshape_format : str
        Same options as prepare_X: '3d_channel_first'/'lstm',
        '3d_channel_last'/'dnn'/'cnn', '2d'/'bp'.

    Returns
    -------
    The reshaped array.

    Raises
    ------
    Exception when x_reshape_format is None; ValueError for any other
    unsupported value (previously this fell through and raised NameError).
    '''
    scaled_x = scaler.transform(X)
    x_pca = scaled_x
    if pca is not None:
        # BUGFIX: was pca.fit_transform(scaled_x), which REFITTED the PCA on
        # the new data and defeated the purpose of reusing the fitted
        # transformer (train/inference projections would disagree).
        x_pca = pca.transform(scaled_x)

    if x_reshape_format is None:
        raise Exception('x_reshape_format必须设置正确的值。可选值为：3d_channel_first, 3d_channel_last, 2d')

    # Reshape X for the requested network type.
    if x_reshape_format == '3d_channel_first' or x_reshape_format == 'lstm':
        # LSTM input: (samples, time_steps, nb_features)
        time_steps = channels
        new_x = reshape_lstm_X(x_pca, time_steps, nb_pca_or_nb_features)
    elif x_reshape_format == '3d_channel_last' or x_reshape_format == 'dnn' or x_reshape_format == 'cnn':
        # DNN/CNN input: (samples, nb_features, channels)
        nb_features = nb_pca_or_nb_features
        new_x = reshape_dnn_X(x_pca, nb_features, channels)
    elif x_reshape_format == '2d' or x_reshape_format == 'bp':
        # BP input: keep the 2-D (samples, nb_features) shape
        new_x = x_pca
    else:
        # BUGFIX: fail fast instead of NameError on the return below.
        raise ValueError(f'Unsupported x_reshape_format: {x_reshape_format!r}')
    return new_x


def prepare_data(x_train, x_test, y_train,
                 nb_pca_or_nb_features,
                 channels=1,
                 do_pca=True,
                 x_reshape_format=None,
                 return_transformer=False,
                 y_to_onehot=True):
    '''
    Scale, (optionally) PCA-transform and reshape train/test data.

    x_train, x_test: 2D array -> reshaped array.
    y_train: 1D array -> 2D one-hot array (when y_to_onehot is True).
    When return_transformer=False returns: new_x_train, new_y_train, new_x_test;
    when return_transformer=True returns: new_x_train, new_y_train, new_x_test, scaler, pca.

    Parameters
    ----------
    x_train : 2D array
        Training features.
    x_test : 2D array
        Test features.
    y_train : 1D array
        Training labels.
    nb_pca_or_nb_features : int
        Number of features in X, or the number of PCA output components
        when do_pca is True.
    channels : int, optional
        Channel count. The default is 1.
    do_pca : bool, optional
        Whether to apply a PCA transform after scaling.
    x_reshape_format : string, optional
        Output layout. '3d_channel_first'/'lstm' ->
        (samples, channels/timesteps, nb_features), for LSTM networks;
        '3d_channel_last'/'dnn'/'cnn' -> (samples, nb_features, channels),
        for DNN/CNN networks; '2d'/'bp' -> (samples, nb_features), for BP
        networks.
    return_transformer : bool, optional
        When True (default False) also return the fitted scaler and PCA.
    y_to_onehot : bool, optional
        When True (default) one-hot encode y_train.

    Returns
    -------
    new_x_train : 3D or 2D array
        Reshaped training X.
    new_y_train : 2D array (one-hot) or the original y_train
    new_x_test : 3D or 2D array
        Reshaped test X.
    scaler : StandardScaler (only when return_transformer=True)
    pca : PCA or None (only when return_transformer=True)

    Raises
    ------
    Exception when x_reshape_format is None; ValueError for any other
    unsupported value (previously this fell through and raised NameError).
    '''
    # step 1: standard-scale X (statistics fitted on x_train)
    scaled_x_train, scaled_x_test, scaler = scale_X(x_train, x_test)

    # step 2: optional PCA transform of X
    x_train_pca, x_test_pca = scaled_x_train, scaled_x_test
    if do_pca:
        x_train_pca, x_test_pca, pca = get_pca_X(scaled_x_train, scaled_x_test, nb_pca_or_nb_features)
    else:
        pca = None
    if x_reshape_format is None:
        raise Exception('x_reshape_format必须设置正确的值。可选值为：3d_channel_first, 3d_channel_last, 2d')

    # step 3: reshape X
    if x_reshape_format == '3d_channel_first' or x_reshape_format == 'lstm':
        # LSTM input: (samples, time_steps, nb_features)
        time_steps = channels
        new_x_train = reshape_lstm_X(x_train_pca, time_steps, nb_pca_or_nb_features)
        new_x_test = reshape_lstm_X(x_test_pca, time_steps, nb_pca_or_nb_features)
    elif x_reshape_format == '3d_channel_last' or x_reshape_format == 'dnn' or x_reshape_format == 'cnn':
        # DNN/CNN input: (samples, nb_features, channels)
        nb_features = nb_pca_or_nb_features
        new_x_train = reshape_dnn_X(x_train_pca, nb_features, channels)
        new_x_test = reshape_dnn_X(x_test_pca, nb_features, channels)
    elif x_reshape_format == '2d' or x_reshape_format == 'bp':
        # BP input: keep the 2-D (samples, nb_features) shape
        new_x_train = x_train_pca
        new_x_test = x_test_pca
    else:
        # BUGFIX: an unrecognised value used to fall through and raise
        # NameError at the return below; fail fast with a clear message.
        raise ValueError(f'Unsupported x_reshape_format: {x_reshape_format!r}')

    # step 4: one-hot encode the training labels
    if y_to_onehot:
        new_y_train = get_onehot(y_train)
    else:
        new_y_train = y_train

    # ---------- return the prepared data ----------
    if not return_transformer:
        return new_x_train, new_y_train, new_x_test
    else:
        return new_x_train, new_y_train, new_x_test, scaler, pca


def read_XY_fromCSV(nb_features, csvpath='../wanzhouLandslides.csv', delimiter=',', enc='bytes'):
    '''
    Load a feature matrix X and label vector Y from a delimited text file.

    The file is expected to hold nb_features feature columns followed by one
    label column (any trailing GeoID column is ignored); the first row is
    treated as a header and skipped.
    '''
    data = np.genfromtxt(csvpath, delimiter=delimiter, skip_header=1, encoding=enc)
    features = data[:, :nb_features]
    labels = data[:, nb_features]
    return features, labels


def plot_singleROC_1DArray(y_pred_prob, y_test, plotROC=True, title='ROC curve'):
    '''
    Compute ROC-curve data from 1-D arrays and optionally plot it.

    Parameters
    ----------
    y_pred_prob : 1-D array
        Predicted probability of the positive class for the test samples.
    y_test : 1-D array
        Actual class labels of the test samples.
    plotROC : bool
        Whether to draw the ROC curve. The default is True.
    title : str
        Title of the ROC plot.

    Returns
    -------
    (auc_val, fpr, tpr) : float plus two arrays
        The AUC value and the false/true positive rate arrays.
    '''
    false_pos_rate, true_pos_rate, _thresholds = roc_curve(y_test, y_pred_prob)
    auc_val = auc(false_pos_rate, true_pos_rate)

    if plotROC:
        plt.figure()
        plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
        plt.plot(false_pos_rate, true_pos_rate,
                 label='ROC Curve (AUC = {:.3f})'.format(auc_val))
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title(title)
        plt.legend(loc='best')
        plt.show()
    return auc_val, false_pos_rate, true_pos_rate

def plot_singleROC_2DArray(y_pred_prob, y_test, plotROC=True, title='ROC curve'):
    '''
    Compute (and optionally plot) the ROC curve from 2-D arrays.

    y_test is the one-hot encoding of the labels with shape
    (samples, nb_class); for the landslide case nb_class = 2 (binary).
    y_pred_prob is the result of model.predict_prob: for a binary problem
    each row is (prob_class0, prob_class1) with prob_class0 + prob_class1 = 1.

    Parameters
    ----------
    y_pred_prob : 2-D array
        Predicted probability scores (typically predict_prob output).
    y_test : 2-D array
        Actual labels, one-hot encoded.
    plotROC : bool, optional
        Whether to draw the ROC curve. The default is True.
    title : str
        Title of the ROC plot.

    Returns
    -------
    auc_val : float
        The AUC value.
    '''
    # Extract the positive-class (column 1) scores and truths.
    positive_scores = [row[1] for row in y_pred_prob]
    positive_truth = [row[1] for row in y_test]
    false_pos_rate, true_pos_rate, _thresholds = roc_curve(positive_truth, positive_scores)
    auc_val = auc(false_pos_rate, true_pos_rate)
    print("AUC : {0:.3f}".format(auc_val))
    if plotROC:
        plt.figure()
        plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
        plt.plot(false_pos_rate, true_pos_rate,
                 label='ROC Curve (AUC = {:.3f})'.format(auc_val))
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title(title)
        plt.legend(loc='best')
        plt.show()
    return auc_val

def plotLoss(history):
    '''
    Plot the training and validation loss curves recorded during training.

    Parameters
    ----------
    history : History object
        Object whose .history dict contains 'loss' and 'val_loss' lists
        (e.g. the return value of a Keras fit call).

    Returns
    -------
    None.
    '''
    loss_records = history.history
    plt.plot(loss_records['loss'])
    plt.plot(loss_records['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train','validation'], loc='upper left')
    plt.show()

def getPCA_data(x_train, x_test, out_components='mle'):
    '''
    PCA-transform train/test features and append a trailing channel axis.

    out_components may be an int or 'mle' (automatic component selection).
    Returns the transformed arrays reshaped to (samples, components, 1).
    '''
    pca_model = PCA(n_components=out_components)
    pca_model.fit(x_train)
    train_projected = pca_model.transform(x_train)
    test_projected = pca_model.transform(x_test)
    train_with_channel = train_projected.reshape((*train_projected.shape, 1))
    test_with_channel = test_projected.reshape((*test_projected.shape, 1))
    return train_with_channel, test_with_channel

def get_onehot(y):
    '''
    One-hot encode a 1-D label array.

    Parameters
    ----------
    y : 1-D array
        Label array containing the two classes 0 and 1.

    Returns
    -------
    y_onehot : 2-D array
        The one-hot encoding of y.
    '''
    column_y = y.reshape(y.size, 1)
    # scikit-learn 1.2 renamed OneHotEncoder's `sparse` parameter to
    # `sparse_output`; pick the right keyword for the installed version.
    if version.parse(sklearn_version) < version.parse("1.2"):
        encoder = OneHotEncoder(sparse=False, categories='auto')
    else:
        encoder = OneHotEncoder(sparse_output=False, categories='auto')
    return encoder.fit_transform(column_y)

def timer(func):
    '''
    Decorator that reports the wall-clock time spent in each call of *func*.

    The wrapped function's metadata (name, docstring) is preserved via
    functools.wraps and its return value is passed through unchanged.
    '''
    @wraps(func)  # preserve the decorated function's original name and docstring
    def call_func(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        spend_seconds = end_time - start_time
        print(f'完成函数{func.__name__}的调用，耗时如下：')
        if spend_seconds < 60:
            print("spend time: {0:0.2f} seconds".format(spend_seconds))
        else:
            # BUGFIX: hours/minutes come from float floor-division, so they
            # used to print as e.g. "1.0 hours 5.0 minutes"; cast to int so
            # the breakdown reads naturally.
            hours = int(spend_seconds // 3600)
            left_seconds = spend_seconds % 3600
            mins = int(left_seconds // 60)
            left_seconds2 = left_seconds % 60
            print(f"spend time: {hours} hours {mins} minutes {left_seconds2:0.0f} seconds")
            print("spend time: {0:0.2f} seconds".format(spend_seconds))
        print()
        return result
    return call_func

def save_results(GeoID, y_pred, y_predprob, result_file, print_info=False):
    '''
    Save the GeoID, predicted class and predicted probability columns to a
    CSV file, one row per sample.

    Parameters
    ----------
    GeoID : 1-D array
        Geographic identifier column.
    y_pred : 1-D array
        Predicted class labels.
    y_predprob : 1-D array
        Predicted landslide probabilities.
    result_file : str
        Output file path.
    print_info : bool
        Whether to print the saved-file path.

    Returns
    -------
    None.
    '''
    rows = np.vstack((GeoID, y_pred, y_predprob)).T
    np.savetxt(result_file,
               rows,
               header='GeoID, y_pred_class, y_pred_prob',
               fmt='%d,%d,%0.3f',
               delimiter=',')
    if print_info:
        print(f'Saving {result_file} Done!')

def evaluate_metrics(y_test, y_pred, y_pred_prob, model_name, print_results=False, return_AIC=None, return_R2=True):
    '''
    Compute classification metrics (AUC, macro F1, macro recall, balanced
    accuracy, geometric mean, accuracy) plus optional AIC and R2.

    return_AIC is either None or a dict like
    {'return_AIC': True, 'n': sample_count, 'k': param_count}.
    Returns a dict with keys auc / f1 / recall / aic / r2; aic and r2
    default to the sentinel -99999 when not computed.
    '''
    auc_val = roc_auc_score(y_test, y_pred_prob)
    f1_val = f1_score(y_test, y_pred, average='macro')
    recall_val = recall_score(y_test, y_pred, average='macro')
    balanced_val = balanced_accuracy_score(y_test, y_pred)
    geometric_val = geometric_mean_score(y_test, y_pred)
    acc_val = accuracy_score(y_test, y_pred)

    # Sentinel value meaning "not computed".
    AIC_val = -99999
    r2_val = -99999
    if return_AIC is not None and return_AIC.get('return_AIC', False):
        AIC_val = evaluate_AIC(y_test, y_pred,
                               return_AIC.get('n', 1),
                               return_AIC.get('k', 1))
    if return_R2:
        r2_val = R2(y_test, y_pred_prob)

    if print_results:
        print(f'{model_name} performance in evaluate_metrics:')
        print(f'AUC:{auc_val:.3f} - F1 score:{f1_val:.3f} - Recall {recall_val:.3f} ')
        print(f'Accuracy {acc_val:.3f} - Balanced accuracy: {balanced_val:.3f} - Geometric mean {geometric_val:.3f}')
        if return_R2:
            print(f'R2: {r2_val:.3f}')
        if return_AIC is not None:
            print(f'AIC: {AIC_val:.3f}')
        print('-'*30)

    return {'auc': auc_val, 'f1': f1_val, 'recall': recall_val, 'aic': AIC_val, 'r2': r2_val}

def __SSR(y_true, y_pred):
    '''Sum of squared residuals between the predictions and the ground truth (module-private helper).'''
    return np.sum((y_pred - y_true) ** 2)

def evaluate_AIC(y_true, y_pred, n, k):
    '''
    Akaike Information Criterion computed from the sum of squared residuals:
    n * ln(2*pi*SSR/n) + n + k.
    '''
    # Sum of squared residuals, computed inline (same as the __SSR helper).
    ssr = np.sum((y_pred - y_true) ** 2)
    return n * math.log(ssr / n * 2 * math.pi) + n + k

def AICc(y_true, y_pred, n, S):
    '''
    Corrected AIC from the sum of squared residuals:
    n * (ln(2*pi*SSR/n) + (n + S) / (n - S - 2)).
    '''
    # Sum of squared residuals, computed inline (same as the __SSR helper).
    ssr = np.sum((y_pred - y_true) ** 2)
    return n * (math.log(ssr / n * 2 * math.pi) + (n + S) / (n - S - 2))

def R2(y_true, y_prob):
    '''
    Coefficient of determination: 1 - SS_res / SS_tot, where SS_res is the
    squared prediction error and SS_tot the total variance around the mean.
    '''
    ss_res = np.sum((y_prob - y_true) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1 - ss_res / ss_tot

def evaluate_metrics_plotROC(y_test_1d, y_pred, y_pred_prob, title='', show_roc=False, print_results=True, return_AIC_params=None):
    '''
    Evaluate classification metrics and (optionally) plot the ROC curve.

    NOTE: y_test_1d must be a 1-D label array — NOT the one-hot-encoded
    2-D form of y_test.
    '''
    flat_y_test = y_test_1d.ravel()
    metrics_dict = evaluate_metrics(flat_y_test, y_pred, y_pred_prob, title,
                                    print_results=print_results,
                                    return_AIC=return_AIC_params)
    plot_singleROC_1DArray(y_pred_prob, flat_y_test, plotROC=show_roc, title=title)
    return metrics_dict

def round_dict(d, digits=3):
    '''
    Round every float inside a two-level nested dict, in place.

    Parameters
    ----------
    d : dict
        Maps names to dicts whose values are sequences of floats, e.g.:
        {  'lr': {1: (0.6062435208878383, 0.0, 0.0)},
           'rf': {1: (0.524179358082276, 0.06091493750758403, 0.06091493750758403)} }
        Each inner sequence is replaced by a list of values rounded to
        *digits* decimal places.
    digits : int
        Number of decimal places to keep.

    Returns
    -------
    The same (mutated) dict.
    '''
    for outer_key, inner_dict in d.items():
        for inner_key, float_seq in inner_dict.items():
            d[outer_key][inner_key] = [round(v, digits) for v in float_seq]
    return d

def cm2inch(value):
    '''
    Convert a length from centimetres to inches (1 inch = 2.54 cm).
    :param value: length in cm
    :return: the same length expressed in inches
    '''
    CM_PER_INCH = 2.54
    return value / CM_PER_INCH


def repeat_and_sum_dicts(times):
    """
    Decorator factory: run the wrapped function `times` times and return a
    dict whose values are the per-key sums over all runs.

    The wrapped function must return a dict with summable values.
    :param times: number of repetitions
    """
    def decorator(func):
        @wraps(func)  # BUGFIX: preserve func's metadata, consistent with `timer`
        def wrapper(*args, **kwargs):
            # Accumulate each key's value across all repetitions.
            sum_dict = {}
            for _ in range(times):
                result_dict = func(*args, **kwargs)
                for key, value in result_dict.items():
                    sum_dict[key] = sum_dict.get(key, 0) + value
            return sum_dict

        return wrapper

    return decorator

########## The code below applies to LR, RF and LightGBM ##########
################################################
def print_performance(y_test, y_pred, y_pred_prob, prefix_info=''):
    '''
    Print AUC / F1 / recall for a classifier and return its ROC-curve data.

    Note: balanced accuracy, geometric mean and plain accuracy are also
    computed here (as in the original) but are not part of the printed
    summary. Returns (fpr, tpr, auc_value).
    '''
    roc_fpr, roc_tpr, _ = roc_curve(y_test, y_pred_prob)
    auc_value = roc_auc_score(y_test, y_pred_prob)
    macro_f1 = f1_score(y_test, y_pred, average='macro')
    recall_value = recall_score(y_test, y_pred)
    balanced_accuracy = balanced_accuracy_score(y_test, y_pred)
    geometric_mean = geometric_mean_score(y_test, y_pred)
    plain_accuracy = accuracy_score(y_test, y_pred)
    summary = f'{prefix_info}: auc:{auc_value:.3f}, f1-score: {macro_f1:.3f}, recall: {recall_value:.3f}'
    print(summary)
    return roc_fpr, roc_tpr, auc_value

def plot_confusion_matrix(cm, classes, ax,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print the confusion matrix and draw it onto the given axes.
    Pass `normalize=True` when `cm` holds normalized (float) values so the
    cell labels use a float format instead of an integer one.
    """
    print(cm)
    print('')
    ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.set_title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.sca(ax)  # make `ax` the current axes before setting y ticks
    plt.yticks(tick_marks, classes)
    cell_fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White labels on dark cells, black labels on light cells.
        ax.text(col, row, format(cm[row, col], cell_fmt),
                horizontalalignment="center",
                color="white" if cm[row, col] > thresh else "black")
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')

def plot_all_confusion_matrix(cm_lgb, cm_wlgb):
    '''Draw the LightGBM and WLightGBM confusion matrices side by side.'''
    fig, axes = plt.subplots(ncols=2)
    plot_confusion_matrix(cm_lgb, classes=[0, 1], ax=axes[0], title='LightGBM')
    plot_confusion_matrix(cm_wlgb, classes=[0, 1], ax=axes[1], title='WLightGBM')
    plt.show()


def plot_roc(fpr_lgb, tpr_lgb, auc_lgb, fpr_wlgb, tpr_wlgb, auc_wlgb):
    '''Plot the ROC curves of LightGBM and WLightGBM on a single figure.'''
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    curves = ((fpr_lgb, tpr_lgb, 'LightGBM (AUC=%0.3f)' % (auc_lgb)),
              (fpr_wlgb, tpr_wlgb, 'WLightGBM(AUC=%0.3f)' % (auc_wlgb)))
    for fpr_vals, tpr_vals, curve_label in curves:
        plt.plot(fpr_vals, tpr_vals, label=curve_label, lw=2)
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.show()

def flatten_neighbours(df, x_columns, y_column, id_column, neighbours_column='neighbours'):
    """
        [Deprecated: prefer the parallel version flatten_neighbours_parallel]
        Flatten each element of *neighbours_column* (itself a small DataFrame)
        and append the flattened values as extra feature columns.
    Parameters
    ----------
    df: DataFrame
        Source DataFrame; its neighbours_column cells each hold a small DataFrame.
    x_columns: list
        Feature column names.
    y_column: str
        Label column name.
    id_column: str
        Identifier column marking each point's geographic location.
    neighbours_column: str
        Name of the neighbours column; defaults to 'neighbours'.

    Returns
    -------
    tuple
        (id_data, combined_X, y_data): the identifier Series, the combined
        feature DataFrame, and the label Series.
    """
    def _flatten_one(neighbour_frame):
        # Flatten one neighbour DataFrame's values into a 1-D Series so each
        # element becomes its own column after concatenation.
        return pd.Series(neighbour_frame.values.flatten())

    # Flatten each cell of the neighbours column (non-DataFrame cells yield
    # an empty Series).
    neighbours_flattened = df[neighbours_column].apply(
        lambda cell: _flatten_one(cell[x_columns]) if isinstance(cell, pd.DataFrame) else pd.Series([]))

    main_df_x = df[x_columns]
    print('main_df_x.shape:', main_df_x.shape)
    print('neighbours_flattened.shape:', neighbours_flattened.shape)

    # Append the flattened neighbour features to the main feature columns.
    combined_X = pd.concat([main_df_x, neighbours_flattened], axis=1)

    y_data = df[y_column]
    id_data = df[id_column]

    print('combined_X.shape:', combined_X.shape)
    print()
    return id_data, combined_X, y_data

@timer
def flatten_neighbours_parallel(df, x_columns, y_column, id_column, neighbours_column='neighbours',
                                show_progress=False, print_df_info=False):
    """
        [Parallel version] Flatten each element of *neighbours_column*
        (itself a small DataFrame) and append the flattened values as extra
        feature columns.
    Parameters
    ----------
    df: DataFrame
        Source DataFrame; its neighbours_column cells each hold a small DataFrame.
    x_columns: list
        Feature column names.
    y_column: str
        Label column name.
    id_column: str
        Identifier column marking each point's geographic location.
    neighbours_column: str
        Name of the neighbours column; defaults to 'neighbours'.
    show_progress: bool
        Whether to display a progress bar. Defaults to False.
    print_df_info: bool
        Whether to print shape information about the DataFrames involved.

    Returns
    -------
        DataFrame
        (id_data, combined_X, y_data): the identifier Series, the combined
        feature DataFrame, and the label Series.
    """
    # Initialise pandarallel with roughly 80% of the logical CPUs.
    logic_workers = psutil.cpu_count(logical=True)
    use_workers = int(logic_workers * 0.8)
    pandarallel.initialize(progress_bar=show_progress, verbose=0, nb_workers=use_workers)

    def _flatten_one(neighbour_frame):
        # pandas is imported here so worker processes have it in scope.
        import pandas as pd
        return pd.Series(neighbour_frame.values.flatten())

    # Flatten each cell of the neighbours column in parallel (non-DataFrame
    # cells yield an empty Series).
    neighbours_flattened = df[neighbours_column].parallel_apply(
        lambda cell: _flatten_one(cell[x_columns]) if isinstance(cell, pd.DataFrame) else pd.Series([]))

    main_df_x = df[x_columns]
    if print_df_info:
        print('main_df_x.shape:', main_df_x.shape)
        print('neighbours_flattened.shape:', neighbours_flattened.shape)

    # Append the flattened neighbour features to the main feature columns.
    combined_X = pd.concat([main_df_x, neighbours_flattened], axis=1)

    y_data = df[y_column]
    id_data = df[id_column]

    if print_df_info:
        print('combined_X.shape:', combined_X.shape)
        print()
    return id_data, combined_X, y_data