# coding=utf-8

import pandas as pd
import numpy as np
import lightgbm as lgb
import os
import xgboost as xgb
import sys
from sklearn import __version__
from sklearn.ensemble import RandomForestClassifier as RF
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc  ###计算roc和auc
from sklearn.metrics import confusion_matrix
from sklearn.model_selection  import learning_curve
from sklearn.model_selection import ShuffleSplit
import datetime
from imblearn.combine import SMOTETomek
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from collections import Counter

# sklearn < 0.21 ships joblib as sklearn.externals.joblib; newer releases expect
# the standalone `joblib` package, so choose the import location by installed version.
old_version = ["0.19.0","0.19.1","0.19.2","0.20.0","0.20.1","0.20.2","0.20.3"]
if __version__ in old_version:
    from sklearn.externals import joblib
else:
    import joblib

class model_train(object):
    def __init__(self, work_dir="", dataframe=pd.DataFrame(), filename="", mode="", label_name="label", F1_mode="weighted",
                 Issmote=False, IsKfold=True, KFcount=5, SmoteType='SMOTETomek', OverRatio=0.3, UnderRatio=0.9):
        """
        ####################################################################################################
         function : initial the model train class
         return   : None
         ---------------------------------------parameter ---------------------------------------
         work_dir : directory holding the model output; when empty, a path derived from the
                    running script's name (sys.argv[0] without extension) is used instead
         dataframe : the training data, must contain positive sample and negative sample,
                     plus an 'msisdn' column which becomes the index
         filename : used only when dataframe is empty; a single CSV path, or a list of two
                    paths where the first file is the positive sample and the second the
                    negative sample (label columns 1/0 are assigned automatically)
         mode : optional choice from "lightgbm, randomforest, xgboost, all"
                default is lightgbm
                all will output all models
         label_name: the label name of dataframe
         F1_mode : `average` mode forwarded to sklearn's f1_score
         Issmote / IsKfold / KFcount / SmoteType / OverRatio / UnderRatio :
                    resampling and cross-validation settings consumed by later methods
         ----------------------------------------------------------------------------------------
         history :
         huangkunyang : create   '2021-10-19'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        # NOTE(review): `dataframe=pd.DataFrame()` is a mutable default argument; it is only
        # read here (never mutated in place) so behaviour is correct, but a None default
        # would be the safer idiom.
        self.__help__ = "哈哈，model_train help编辑中。。。。。"
        self.F1_mode = F1_mode   # averaging mode for the F1 score
        self.Issmote = Issmote   # whether to resample (extend) the training data
        self.IsKfold = IsKfold   # whether to run K-fold cross validation
        self.KFcount = KFcount   # number of folds for cross validation
        self.SmoteType = SmoteType  # resampling mode ('SMOTETomek' or 'pipeline')
        self.OverRatio = OverRatio  # over-sampling ratio
        self.UnderRatio = UnderRatio  # under-sampling ratio
        self.sample_df_ori_train = pd.DataFrame()  # original (un-resampled) training split
        self.sample_df_ori_verify = pd.DataFrame()  # original (un-resampled) verification split

        # parameter completeness checks -------------
        if dataframe.empty:
            print("dataframe is empty")
            if filename == "":
                raise RuntimeError("parameter filename is empty, data needs to entered while initial, programe will exit automatically")
            else:
                if type(filename) == list:
                    print("warning, if enter a filename list, first file needs to be positive sample, second negative")
                    df_p = self._read_file(filename[0])
                    df_n = self._read_file(filename[1])
                    df_n["label"] = 0
                    df_p["label"] = 1
                    # restrict the merged frame to the positive file's columns
                    # NOTE(review): assumes the negative file contains at least those columns;
                    # a missing column raises a KeyError below — confirm the input contract
                    column_list = df_p.columns
                    self._sample = pd.concat([df_p, df_n], axis=0)
                    self._sample = self._sample[column_list]
                    if "msisdn" in self._sample.columns:
                        self._sample = self._sample.set_index("msisdn")
                    else:
                        raise RuntimeError("msisdn is not in dataframe read from %s or %s, please check the training data" %(filename[0], filename[1]))
                else:
                    self._sample = self._read_file(filename)
                    if "msisdn" in self._sample.columns:
                        self._sample = self._sample.set_index("msisdn")
                    else:
                        raise RuntimeError("msisdn is not in dataframe read from %s, please check the training data" %(filename))
        else:
            self._sample = dataframe
            if "msisdn" in self._sample.columns:
                self._sample = self._sample.set_index("msisdn")
            else:
                raise RuntimeError("msisdn is not in dataframe your entered, please check the training data")

        if label_name not in self._sample.columns:
            raise RuntimeError("label_name %s is not in dataframe columns, please give parameter with model_train(label_name='xxx')" %label_name)
        else:
            self.label_name = label_name

        # reorder columns so label_name is always the last column --------------
        # (later code relies on iloc[:, -1] being the label)
        if self.label_name != self._sample.columns[-1]:
            column = list(self._sample.columns)
            column.remove(self.label_name)
            column.append(self.label_name)
            self._sample = self._sample[column]

        # build the output paths -----------
        # NOTE(review): with an empty work_dir this joins "" with the script name, i.e. a
        # path relative to the current working directory — confirm that is intended
        if work_dir == "":
            config_path = os.path.join(work_dir, sys.argv[0].split(".")[0])
            self._config_path_set(config_path, mode)
        else:
            config_path = work_dir
            self._config_path_set(config_path, mode)

    def _config_path_set(self, config_path, mode):
        if not os.path.exists(config_path):
            raise RuntimeError("%s does not exist, please give parameter a correct model config path" % config_path)

        else:
            model_path = os.path.join(config_path, "model")
            if not os.path.exists(model_path):
                os.mkdir(model_path)

            self.figure_filename = os.path.join(model_path, "graph")
            if not os.path.exists(self.figure_filename):
                os.mkdir(self.figure_filename)

            now = datetime.datetime.now()
            if mode == "":
                self._model_file = os.path.join(model_path, "model-%s.model" % now.strftime("%Y%m%d-%H%M%S"))
            else:
                self._model_file = os.path.join(model_path, mode + "-%s.model" % now.strftime("%Y%m%d-%H%M%S"))

    def _read_file(self, filename):
        """
        ####################################################################################################
             function : read file from the input filename
             return   : None
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2021-08-16'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        dataframe = pd.read_csv(filename)
        print("file %s has been read successfully with %d logs" % (filename, dataframe.shape[0]))
        return dataframe

    def sampling(self, x, y, ratio=1, random_state=None,
                 SmoteType='SMOTETomek', OverRatio=0.3, UnderRatio=0.9):
        """
        ####################################################################################################
             function : extend / reduce the sample set via SMOTE-based resampling
             return   : one resampled dataframe (features + 'label' column), indexed by 'msisdn'
             ---------------------------------------parameter ---------------------------------------
             x : feature dataframe (indexed by msisdn)
             y : label series aligned with x (0 = negative, 1 = positive)
             ratio : SMOTETomek target — int = multiple of the positive count,
                     float = share of the negative count, otherwise 'auto'
             random_state : random seed
             SmoteType : type of sampling ('SMOTETomek' or 'pipeline')
             OverRatio : over-sampling ratio, valid range (0, 1]
             UnderRatio : under-sampling ratio, valid range (0, 1]
             raises : RuntimeError on invalid ratios, unknown SmoteType, or when positives
                      already outnumber negatives
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-10'      <huangkunyang@eversec.cn>
             fuxin : update   '2023-05-17'      <fuxin@eversec.cn>
             fuxin : update   '2023-07-25'      <fuxin@eversec.cn>
        ####################################################################################################
        """
        print("*" * 10, "sampling", "*" * 10)
        _negative_df = x[y == 0]   # negative samples
        _positive_df = x[y == 1]   # positive samples

        negative_cnt = int(_negative_df.shape[0])  # number of negatives
        positive_cnt = int(_positive_df.shape[0])  # number of positives

        # resampling only makes sense when negatives outnumber positives
        if negative_cnt > positive_cnt:
            original_ratio = float(positive_cnt) / float(negative_cnt)
        else:
            raise RuntimeError('原始正样本量级大于负样本，不建议进行采样')

        # upper bound of synthesisable samples: C(n, 2) positive pairs
        line = positive_cnt * (positive_cnt - 1) / 2

        _temp_positive_df = _positive_df.copy()
        _temp_positive_y = y[y.index.isin(_temp_positive_df.index)]
        _temp_negative_y = y[~y.index.isin(_temp_positive_df.index)]

        if (positive_cnt * ratio) > line:
            # requested volume exceeds the SMOTE limit: duplicate the positives first
            # (fixed: Series.append was removed in pandas 2.0 — use pd.concat instead)
            _positive_df = pd.concat([_positive_df, _temp_positive_df])
            _positive_df_y = pd.concat([_temp_positive_y, _temp_positive_y])

            x = pd.concat([_negative_df, _positive_df])
            y = pd.concat([_temp_negative_y, _positive_df_y])
        x = x.reset_index()
        col = x.columns

        if SmoteType == 'SMOTETomek':
            if type(ratio) == int:
                # int ratio: oversample positives to `ratio` times their count
                ratio_temp = {0: int(_negative_df.shape[0]), 1: int(_positive_df.shape[0] * ratio)}
                print(ratio_temp)
            elif type(ratio) == float:
                # float ratio: target the positives at negatives * ratio
                if int(_negative_df.shape[0] * ratio) > _positive_df.shape[0]:
                    ratio_temp = {0: int(_negative_df.shape[0]), 1: int(_negative_df.shape[0] * ratio)}
                else:
                    ratio_temp = {0: int(_negative_df.shape[0]), 1: _positive_df.shape[0]}
            else:
                ratio_temp = 'auto'
            sampling = SMOTETomek(random_state=random_state, sampling_strategy=ratio_temp)

        elif SmoteType == 'pipeline':
            if OverRatio > 1 or OverRatio <= 0 or UnderRatio > 1 or UnderRatio <= 0:
                raise RuntimeError('OverRatio、UnderRatio两个参数的正确取值区间为 (0, 1]，请重新取值')
            elif OverRatio < original_ratio and original_ratio > 0:
                raise RuntimeError('上采样比例过低，低于原始比例值,请重新取值')
            elif UnderRatio < OverRatio and original_ratio > 0:
                raise RuntimeError('pipeline模式下UnderRatio 需要大于OverRatio，请重新取值')
            else:
                if UnderRatio == OverRatio:
                    print('当前UnderRatio == OverRatio，下采样失效')
                elif (negative_cnt * OverRatio) + ((negative_cnt * OverRatio) / UnderRatio) < (
                        negative_cnt + positive_cnt):
                    print('原始样本数为：', (negative_cnt + positive_cnt))
                    print('预计采样后的样本数约为：%.0f' % ((negative_cnt * OverRatio) + ((negative_cnt * OverRatio) / UnderRatio)))
                    print('当前采样会导致样本数量总体降低，在此进行提醒')
                else:
                    pass
                # over-sampling step
                over = SMOTE(sampling_strategy=OverRatio, random_state=random_state)
                # under-sampling step (fixed: the `ratio` keyword was removed from
                # imbalanced-learn — the supported keyword is `sampling_strategy`)
                under = RandomUnderSampler(random_state=random_state,
                                           sampling_strategy=UnderRatio)
                # chain both steps into one resampler
                steps = [
                    ('1', over),
                    ("0", under)]
                sampling = Pipeline(steps=steps)
        else:
            raise RuntimeError("SmoteType当前必须指定为[SMOTETomek|pipeline],请检查model_train(SmoteType)参数是否指定正确")

        print('样本扩展前 %s' % Counter(y))
        x_smote_resampled, y_smote_resampled = sampling.fit_resample(x, y)
        print('样本扩展后 %s' % Counter(y_smote_resampled))
        x_smote_resampled = pd.DataFrame(x_smote_resampled, columns=col)  # back to a named dataframe
        y_smote_resampled = pd.DataFrame(y_smote_resampled, columns=['label'])  # rebuilt label column
        smote_resampled = pd.concat([x_smote_resampled, y_smote_resampled], axis=1)  # join features + label
        smote_resampled = smote_resampled.drop_duplicates()
        smote_resampled = smote_resampled.set_index('msisdn')
        return smote_resampled

    @staticmethod
    def draw_learning_rate(model_tmp, X_train, Y_train, figure_filename, n):
        """
        ####################################################################################################
             function : plot and save a learning curve (training vs. cross-validated accuracy)
             return   : None
             ---------------------------------------parameter ---------------------------------------
             model_tmp : estimator to evaluate
             X_train : training feature matrix
             Y_train : training labels
             figure_filename : directory the figure is written into
             n : suffix appended to the output file name
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2022-07-10'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        splitter = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
        sizes, tr_scores, te_scores = learning_curve(model_tmp, X_train, Y_train, cv=splitter, n_jobs=1,
                                                     random_state=8764, train_sizes=np.linspace(0.1, 1, 10))

        tr_mu = np.mean(tr_scores, axis=1)
        tr_sd = np.std(tr_scores, axis=1)
        te_mu = np.mean(te_scores, axis=1)
        te_sd = np.std(te_scores, axis=1)

        # training accuracy with a one-std band
        plt.plot(sizes, tr_mu, color='blue', marker='o', markersize=5, label='training accuracy')
        plt.fill_between(sizes, tr_mu + tr_sd, tr_mu - tr_sd, alpha=0.15, color='blue')
        # cross-validated accuracy with a one-std band
        plt.plot(sizes, te_mu, color='green', linestyle='--', marker='s', markersize=5, label='test accuracy')
        plt.fill_between(sizes, te_mu + te_sd, te_mu - te_sd, alpha=0.15, color='green')
        plt.grid()

        plt.xlabel('Number of training samples')
        plt.ylabel('Accuracy')
        plt.legend(loc='lower right')

        # NOTE(review): keeps the historical "traing_curve_" spelling so existing
        # consumers of the output file still find it
        plt.savefig(figure_filename + "/" + "traing_curve_"+ n + ".png", dpi=400)
        plt.close()

    @staticmethod
    def draw_roc(model_tmp, X_test, Y_test, figure_filename, n):
        """
        ####################################################################################################
             function : draw and save the ROC curve of a fitted classifier on a test split
             return   : None
             ---------------------------------------parameter ---------------------------------------
             model_tmp : fitted classifier exposing predict_proba
             X_test : feature matrix of the test split
             Y_test : true labels of the test split
             figure_filename : directory the figure is written into
             n : the series in kfold (used as a file-name suffix)
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2022-07-10'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        # fixed: the previous predict() call was unused — only class probabilities
        # are needed to draw a ROC curve
        y_score = model_tmp.predict_proba(X_test)
        # last probability column is the positive-class score; FPR on x, TPR on y
        fpr, tpr, thresholds = roc_curve(Y_test, y_score[:, -1], pos_label=1)
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, color='darkorange',
                 lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
        plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(figure_filename +"/"+ "roc_"+ n + ".png", dpi=400)
        plt.close()

    @staticmethod
    def draw_feature_importance(model, column_name,figure_filename, n):
        """
        ####################################################################################################
             function : to draw feature importance graph
             return   : None
             ---------------------------------------parameter ---------------------------------------
             model : the fitted model exposing feature_importances_
             column_name : full column list with the label last (the label entry is skipped)
             figure_filename : directory the figure is written into
             n : the series in kfold (used as a file-name suffix)
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2022-07-10'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        # fixed: building the frame through a single np.array coerced the importances to
        # strings, making the sort lexicographic and the `!= 0` filter a no-op
        feature_df = pd.DataFrame({"feature": list(column_name[:-1]),
                                   "value": np.asarray(model.feature_importances_, dtype=float)})
        feature_df = feature_df.sort_values(by="value", ascending=False)
        feature_df = feature_df[feature_df["value"] != 0]
        plt.bar(feature_df["feature"], feature_df["value"])
        plt.xlabel('feature')
        plt.ylabel('importance value')
        lenth = len(feature_df["feature"])
        # fixed: tick positions step by 4, so labels must be sliced with the same stride,
        # otherwise matplotlib rejects the mismatched lengths
        plt.xticks(range(0, lenth, 4), list(feature_df["feature"])[::4], ha="right", rotation=20, fontsize=7)
        plt.title('feature importance')
        # fixed: legend removed (no labelled artists — it only emitted a warning) and the
        # output file name was missing the "." before the png extension
        plt.savefig(figure_filename + "/" + "feature_importance_try_"+ n + ".png", dpi=400)
        plt.close()

    def judge_model(self, model, x_original, y_original, set_name):
        """
        ####################################################################################################
             function : print and return the classification metrics of *model* on one dataset
             return   : (accuracy, f1, precision, recall, auc)
             ---------------------------------------parameter ---------------------------------------
             model : fitted classifier
             x_original : the original predict dataset (features)
             y_original : the original label of the dataset
             set_name : dataset's name, only used in the printed report
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-13'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        y_pred = model.predict(x_original)
        score = accuracy_score(y_original, y_pred)  # accuracy
        F1_score = f1_score(y_original, y_pred, average=self.F1_mode)  # F1
        pre_score = precision_score(y_original, y_pred)  # precision
        re_score = recall_score(y_original, y_pred)  # recall

        # fixed: AUC was computed from hard 0/1 predictions, which degenerates the ROC
        # curve; use the positive-class probability when the model provides it
        if hasattr(model, "predict_proba"):
            y_score = model.predict_proba(x_original)[:, -1]
        else:
            y_score = y_pred
        fpr, tpr, thresholds = roc_curve(y_original, y_score, pos_label=1)
        auc_score = auc(fpr, tpr)  # AUC
        print("-------%s-------" % set_name)
        print(confusion_matrix(y_original, y_pred, labels=None, sample_weight=None))
        print('model_accuracy_score:', score)
        print('model_f1score:', F1_score)
        print('model_precision_score:', pre_score)
        print('model_recall_score:', re_score)
        print('model_auc_score:', auc_score)
        return (score, F1_score, pre_score, re_score, auc_score)

    @staticmethod
    def drop_samiliar_feature(sample_df, thd=0.95):
        """
        ####################################################################################################
             function : read file from the input filename
             return   : None
         ---------------------------------------parameter ---------------------------------------
             sample_df : feature data without label information
             thd : the limited related value, [0,1]
         ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2021-08-16'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        correlation_df = sample_df.corr().fillna(0)
        print(correlation_df)
        column_list = []
        count = 0
        for column in correlation_df.columns:
            if column not in column_list:
                temp_df = correlation_df[(correlation_df[column] > thd) | (correlation_df[column] < -thd)]
                if column in temp_df.index:
                    temp_df.drop(column, axis=0, inplace=True)
                column_list = column_list + list(temp_df.index)
                count += 1
        print(sample_df.shape)
        sample_df.drop(column_list, axis=1, inplace=True)
        return sample_df

    @staticmethod
    def drop_same_value_feature(df):
        """
        ####################################################################################################
             function : check and drop sample value feauter
             return   : solved dataframe
             ---------------------------------------parameter ---------------------------------------
             df :  dataframe
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2022-08-13'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        dataframe = df.copy()
        data_new = dataframe.copy()
        drop_column_list = []
        for column in dataframe.columns:
            nrow = dataframe.shape[0]
            column_number = dataframe[dataframe[column] == dataframe[column].iloc[0]].shape[0]
            if nrow == column_number:
                drop_column_list.append(column)
                data_new = data_new.drop(column, axis=1)
        print("删除特征%d 个" %len(drop_column_list))
        return data_new

    def data_solve(self, df, random_state, set_thd, IsSmote, SmoteRatio, SmoteType, OverRatio,UnderRatio):
        """
        ####################################################################################################
             function : solve the training date with split, sampling operation
             return   : sample_df_train, sample_df_verify
             ---------------------------------------parameter ---------------------------------------
             df : original dataframe with label, label value = 1 or 0
             random_state :
             set_thd :
             IsSmote :
             SmoteRatio :
             SmoteType :
             OverRatio :
             UnderRatio :
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
             fuxin : update   '2023-05-17'      <fuxin@eversec.cn>
        ####################################################################################################
        """
        # print(SmoteType, OverRatio,UnderRatio)
        print("*" * 30,"dataset solve", "*" * 30)
        sample_df_ori = df.copy()
        sample_df_ori = sample_df_ori.drop_duplicates()
        sample_df_ori.fillna(0, inplace=True)
        # 数据集处理 ----------------------------
        # 数据集切分
        positive_df = sample_df_ori[sample_df_ori.label == 1].copy()
        negative_df = sample_df_ori[sample_df_ori.label == 0].copy()
        positive_df = positive_df.sample(frac=1, random_state=random_state)
        negative_df = negative_df.sample(frac=1, random_state=random_state)
        print("原始正负样本数量  {1：%d || 0:%d}" %(positive_df.shape[0], negative_df.shape[0]))

        # 将正负样本按set_thd的比例分为训练集与验证集
        length_p = positive_df.shape[0]
        length_n = negative_df.shape[0]
        self.sample_df_ori_train = pd.concat([positive_df.iloc[:int(length_p * set_thd), :], negative_df.iloc[:int(length_n * set_thd), :]])   # 训练集切分
        self.sample_df_ori_verify = pd.concat([positive_df.iloc[int(length_p * set_thd):, :], negative_df.iloc[int(length_n * set_thd):, :]])   # 验证集切分
        sample_df_ori_train = self.sample_df_ori_train.copy()
        sample_df_ori_verify = self.sample_df_ori_verify.copy()

        print("原始训练集样本", sample_df_ori_train.shape)
        print("原始验证集样本", sample_df_ori_verify.shape)
        # 是否进行采样
        if IsSmote:
            # 扩展训练集 ----
            sample_df_train = self.sampling(sample_df_ori_train.iloc[:, :-1], sample_df_ori_train.iloc[:,-1],
                                             SmoteRatio, random_state=random_state,
                                             SmoteType=SmoteType, OverRatio=OverRatio,UnderRatio=UnderRatio)

            # 扩展验证集 -----
            sample_df_verify = self.sampling(sample_df_ori_verify.iloc[:, :-1], sample_df_ori_verify.iloc[:, -1],
                                               SmoteRatio, random_state=random_state,
                                               SmoteType=SmoteType, OverRatio=OverRatio,UnderRatio=UnderRatio)
        else:
            sample_df_train = sample_df_ori_train
            sample_df_verify = sample_df_ori_verify

        print("|Train_Set: 0: %d | 1 : %d|" % (sample_df_train[sample_df_train["label"] == 0].shape[0], sample_df_train[sample_df_train["label"] == 1].shape[0]))
        print("|Verify_Set: 0 : %d | 1: %d|" % (sample_df_verify[sample_df_verify["label"] == 0].shape[0], sample_df_verify[sample_df_verify["label"] == 1].shape[0]))

        sample_df_train = sample_df_train.sample(frac=1, random_state=random_state)
        sample_df_verify = sample_df_verify.sample(frac=1, random_state=random_state)
        return sample_df_train, sample_df_verify

    def training(self, sample_df_train, search_time, judge_score_mode, para_dict, mode_type=""):
        """
        ####################################################################################################
             function : repeatedly fit a model of the requested type and keep the best run
             return   : the best fitted model (None when search_time <= 0)
             ---------------------------------------parameter ---------------------------------------
             sample_df_train : training data, label in the last column
             search_time : number of fit attempts
             judge_score_mode : "auc" to select on training AUC, anything else on accuracy
             para_dict : keyword parameters forwarded to the model constructor
             mode_type : 'randomforest' | 'xgboost' | 'lightgbm'
             raises : RuntimeError on an unrecognised mode_type
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        print("*" * 30,"training", "*" * 30)
        best_score = 0
        best_mode = None  # fixed: was unbound when no round ever beat best_score
        X_train, Y_train = sample_df_train.iloc[:, :-1], sample_df_train.iloc[:, -1]
        while search_time > 0:
            if mode_type == 'randomforest':
                _model = RF(**para_dict)
            elif mode_type == 'xgboost':
                _model = xgb.XGBClassifier(**para_dict)
            elif mode_type == 'lightgbm':
                _model = lgb.LGBMClassifier(**para_dict)
            else:
                # fixed: the message previously lacked the % substitution
                raise RuntimeError("parameter mode_type >>%s<< is unrecognised, please check" % mode_type)

            _model.fit(X_train, Y_train)

            (train_score, train_F1_score, train_pre_score,
             train_re_score, train_auc_score) = self.judge_model(_model, X_train, Y_train, "train")

            # fixed: "auc" mode previously compared the accuracy, and every other mode
            # left the comparison value undefined (NameError)
            if judge_score_mode == "auc":
                standard_score = train_auc_score
            else:
                standard_score = train_score

            if best_mode is None or standard_score > best_score:
                best_score = standard_score
                best_mode = _model

            search_time -= 1
        return best_mode

    def KFold(self, para_dict, dataframe, random_state, nfold=5, shuffle=False):
        """
        ####################################################################################################
             function : run K-fold cross validation with a LightGBM classifier
             return   : standard deviation of the per-fold test AUC scores
             ---------------------------------------parameter ---------------------------------------
             para_dict : keyword parameters for lgb.LGBMClassifier
             dataframe : samples with the label in the last column
             random_state : seed for the pre-shuffle (and the fold shuffle when enabled)
             nfold : number of folds
             shuffle : whether sklearn's KFold shuffles before splitting
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        print("*" * 30,"KFold", "*" * 30)
        dataframe = dataframe.sample(frac=1, random_state=random_state)
        X_set, Y_set = dataframe.iloc[:,:-1], dataframe.iloc[:,-1]
        # fixed: sklearn raises ValueError when random_state is set while shuffle=False
        # (the default here), so only forward the seed when shuffling is requested
        if shuffle:
            kf = KFold(n_splits=nfold, shuffle=True, random_state=random_state)
        else:
            kf = KFold(n_splits=nfold)
        i = 1
        auc_score_list = []
        for train_index, test_index in kf.split(X_set, Y_set):
            print("------- 第%d折训练 -------" % i)
            X_train = X_set.iloc[train_index, :]
            Y_train = Y_set.iloc[train_index]
            X_test = X_set.iloc[test_index, :]
            Y_test = Y_set.iloc[test_index]

            _model = lgb.LGBMClassifier(**para_dict)
            model = _model.fit(X_train, Y_train)
            self.judge_model(model, X_train, Y_train, "train")  # report fit on the training fold

            score, F1_score, pre_score, re_score, auc_score = self.judge_model(model, X_test, Y_test, "test")  # fold metrics
            auc_score_list.append(auc_score)
            i += 1

        std_score = np.array(auc_score_list).std()
        print("KFold %d 折测试标准差为：%.2f " % (nfold, std_score))
        return std_score

    def verify(self, __mode, sample_df_verify, IsSmote):
        """
        ####################################################################################################
             function : evaluate a trained model on the verification split(s)
             return   : None (metrics are printed by judge_model)
             ---------------------------------------parameter ---------------------------------------
             __mode : trained model to evaluate
             sample_df_verify : (possibly resampled) verification set, label in the last column
             IsSmote : when true, the resampled set is scored first; the untouched
                       original verification split is always scored afterwards
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        print("*" * 30,"verify", "*" * 30)
        if IsSmote:
            print("***** 采样后验证集结果 *****")
            features = sample_df_verify.iloc[:, :-1]
            labels = sample_df_verify.iloc[:, -1]
            self.judge_model(__mode, features, labels, "verify")

        print("***** 采样前原始验证集结果 *****")
        original_set = self.sample_df_ori_verify
        self.judge_model(__mode, original_set.iloc[:, :-1], original_set.iloc[:, -1], "verify")

    def general_training_reault_output(self, __mode):
        """
        ####################################################################################################
             function : solve the training date with split, sampling operation
             return   : sample_df_train, sample_df_verify
             ---------------------------------------parameter ---------------------------------------
             __mode :
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        print("*" * 30,"training general_output", "*" * 30)
        X_verify_ori, Y_verify_ori = self.sample_df_ori_verify.iloc[:,:-1], self.sample_df_ori_verify.iloc[:,-1]
        print("************ 特征权重 *************")
        index = X_verify_ori.columns
        df = pd.DataFrame(__mode.feature_importances_.T, index=index, columns=["value"])
        df.sort_values(by="value", ascending=False, inplace=True)
        print("权重为非0的特征有%d 个 详细列表如下：" % df[df.values != 0].shape[0])
        print(list(df[df.values != 0].index))
        print("权重为0的特征有 %d 个特征" % len(df[df.values == 0].index))
        print("-----特征重要性前10为-------")
        print(df.head(10))

    def atuo_update_config(self, importance=0):
        """
        ####################################################################################################
             function : automatically update the configuration file (placeholder; requires the
                        use-config-file mode to be enabled)
             return   : None
             ---------------------------------------parameter ---------------------------------------
             importance : reserved importance threshold for the future implementation
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-06-12'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        # Intentionally a no-op until the config-update logic is implemented.
        return None

    def lightgbm(self,
                 # lightGBM pass-through hyper-parameters
                 boosting_type='gbdt', class_weight=None,importance_type='split', learning_rate=0.1, max_bin=16,
                 max_depth=5, metrics='auc', silent=True, feature_fraction=1,min_split_gain=0.0, objective='binary',
                 colsample_bytree=1, n_estimators=100, n_jobs=-1, num_leaves=14, random_state=None, reg_alpha=0.7,
                 reg_lambda=0.5, subsample=1.0, subsample_freq=0, is_unbalance=True,
                 # private hyper-parameters of this method
                 nfold=5, IsSmote=False, SmoteRatio='auto', set_thd=0.8, search_time=1,
                 judge_score_mode="auc", ISKFold=True, SmoteType='SMOTETomek', OverRatio=0.3, UnderRatio=0.9):
        """
        ####################################################################################################
             function : train and evaluate a LightGBM classifier on the prepared sample set
             return   : the trained model object
             ---------------------------------------parameter ---------------------------------------
             boosting_type ... is_unbalance : forwarded unchanged to the LightGBM estimator
             nfold : number of folds for the optional cross-validation step
             IsSmote / SmoteRatio / SmoteType / OverRatio / UnderRatio : resampling configuration
             set_thd : train/verify split threshold
             search_time : number of hyper-parameter search rounds
             judge_score_mode : metric used to pick the best model ("auc")
             ISKFold : run K-fold cross-validation when True
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
        ####################################################################################################
        """
        print("=" * 40, "lightgbm training start", "=" * 40)
        # 1. Data preparation: split + optional resampling --------------------------
        sample_df_train, sample_df_verify = self.data_solve(df=self._sample.copy(), random_state=random_state,
                                                            set_thd=set_thd, IsSmote=IsSmote, SmoteRatio=SmoteRatio,
                                                            SmoteType=SmoteType, OverRatio=OverRatio,
                                                            UnderRatio=UnderRatio)

        # 2. Model training -----------------
        # 2.1 Collect the pass-through hyper-parameters.
        para_dict = {"boosting_type":boosting_type, "class_weight":class_weight, "importance_type":importance_type,
                     "learning_rate":learning_rate, "max_bin":max_bin, "max_depth":max_depth, "metrics":metrics,
                     "silent":silent, "feature_fraction":feature_fraction,"min_split_gain":min_split_gain,
                     "objective":objective, "colsample_bytree":colsample_bytree, "n_estimators":n_estimators,
                     "n_jobs":n_jobs, "num_leaves":num_leaves,"random_state":random_state, "reg_alpha":reg_alpha,
                     "reg_lambda":reg_lambda,"subsample":subsample, "subsample_freq":subsample_freq,
                     "is_unbalance":is_unbalance}

        __mode = self.training(sample_df_train=sample_df_train, search_time=search_time,
                               judge_score_mode=judge_score_mode, para_dict=para_dict,mode_type='lightgbm')

        # 2.2 Optional K-fold cross-validation.
        # BUG FIX: previously passed ``para_dict`` (a plain dict) as KFold's model argument;
        # KFold fits its first argument directly (``_model.fit(...)``), and the sibling
        # randomforest/xgboost trainers pass the fitted model — do the same here.
        if ISKFold:
            self.KFold(__mode, sample_df_train, random_state=random_state, nfold=nfold)

        # 3. Evaluation on the verification set(s) ------------------
        self.verify(__mode, sample_df_verify, IsSmote)

        # 4. Feature-importance summary ------------------
        self.general_training_reault_output(__mode)

        print("$" * 40, "lightgbm training end", "$" * 40)
        return __mode

    def randomforest(self,
                     # randomforest pass-through hyper-parameters
                     n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,
                     min_weight_fraction_leaf=0.0, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0,
                     bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False,
                     class_weight=None,
                     # private hyper-parameters of this method
                     nfold=5, IsSmote=False, SmoteRatio='auto', set_thd=0.8, search_time=1, judge_score_mode="auc",
                     ISKFold=True, SmoteType='SMOTETomek', OverRatio=0.3, UnderRatio=0.9):
        """
        ####################################################################################################
             function : train and evaluate a RandomForest classifier on the prepared sample set
             return   : the trained model object (also stored on self.mode / self.name)
             ---------------------------------------parameter ---------------------------------------
             n_estimators ... class_weight : forwarded unchanged to the RandomForest estimator
             nfold : number of folds for the optional cross-validation step
             IsSmote / SmoteRatio / SmoteType / OverRatio / UnderRatio : resampling configuration
             set_thd : train/verify split threshold
             search_time : number of hyper-parameter search rounds
             judge_score_mode : metric used to pick the best model ("auc")
             ISKFold : run K-fold cross-validation when True
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
             fuxin : update   '2023-04-17'      <fuxin@eversec.cn>
        ####################################################################################################
        """
        print("=" * 40, "randomforest training start", "=" * 40)
        # 1. Data preparation: split + optional resampling --------------------------
        sample_df_train, sample_df_verify = self.data_solve(df=self._sample.copy(), random_state=random_state,
                                                            set_thd=set_thd, IsSmote=IsSmote, SmoteRatio=SmoteRatio,
                                                            SmoteType = SmoteType, OverRatio= OverRatio,UnderRatio= UnderRatio)
        # 2. Model training -----------------
        # 2.1 Collect the pass-through hyper-parameters.
        para_dict = {'n_estimators':n_estimators, 'criterion':criterion, 'max_depth':max_depth,
                     'min_samples_split':min_samples_split, 'min_samples_leaf':min_samples_leaf,
                     'min_weight_fraction_leaf':min_weight_fraction_leaf, 'max_features':max_features,
                     'max_leaf_nodes':max_leaf_nodes, 'min_impurity_decrease':min_impurity_decrease,
                     'bootstrap':bootstrap, 'oob_score':oob_score, 'n_jobs': n_jobs, 'random_state':random_state,
                     'verbose':verbose, 'warm_start':warm_start,'class_weight':class_weight
                     }

        __mode = self.training(sample_df_train=sample_df_train, search_time=search_time,
                               judge_score_mode=judge_score_mode, para_dict=para_dict,mode_type='randomforest')

        # 2.2 Optional K-fold cross-validation.
        if ISKFold:
            self.KFold(__mode, sample_df_train, random_state=random_state, nfold=nfold)

        # 3. Evaluation on the verification set(s) ------------------
        self.verify(__mode, sample_df_verify, IsSmote)

        # 4. Feature-importance summary ------------------
        self.general_training_reault_output(__mode)

        print("$" * 40, "randomforest training end", "$" * 40)

        self.mode = __mode
        # BUG FIX: this trainer previously tagged itself as "xgboost" (copy-paste error).
        self.name = "randomforest"
        return __mode

    def xgboost(self,
                # xgboost pass-through hyper-parameters
                base_score=0.5, booster='gbtree', colsample_bylevel=1,
                colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain',
                interaction_constraints='', learning_rate=0.3, max_delta_step=0, max_depth=6,
                min_child_weight=1, missing=np.nan, monotone_constraints=None,
                n_estimators=100, n_jobs=12, num_parallel_tree=1, random_state=0,
                reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1,
                tree_method='exact', validate_parameters=1, verbosity=None,
                # private hyper-parameters of this method
                nfold=5, IsSmote=False, SmoteRatio='auto', set_thd=0.8, search_time=1, judge_score_mode="auc",
                ISKFold=True, SmoteType='SMOTETomek', OverRatio=0.3, UnderRatio=0.9):
        """
        ####################################################################################################
             function : train and evaluate an XGBoost classifier on the prepared sample set
             return   : the trained model object
             ---------------------------------------parameter ---------------------------------------
             base_score ... verbosity : forwarded unchanged to the XGBoost estimator
             nfold : number of folds for the optional cross-validation step
             IsSmote / SmoteRatio / SmoteType / OverRatio / UnderRatio : resampling configuration
             set_thd : train/verify split threshold
             search_time : number of hyper-parameter search rounds
             judge_score_mode : metric used to pick the best model ("auc")
             ISKFold : run K-fold cross-validation when True
             ----------------------------------------------------------------------------------------
             history :
             huangkunyang : create   '2023-04-17'      <huangkunyang@eversec.cn>
             fuxin : update   '2023-04-17'      <fuxin@eversec.cn>
        ############
        """

        print("=" * 40, "xgboost training start", "=" * 40)

        # Step 1: split the raw sample set and apply the optional resampling.
        train_df, verify_df = self.data_solve(df=self._sample.copy(), random_state=random_state,
                                              set_thd=set_thd, IsSmote=IsSmote, SmoteRatio=SmoteRatio,
                                              SmoteType=SmoteType, OverRatio=OverRatio,
                                              UnderRatio=UnderRatio)

        # Step 2: gather the pass-through hyper-parameters and train.
        xgb_params = dict(base_score=base_score, booster=booster, colsample_bylevel=colsample_bylevel,
                          colsample_bynode=colsample_bynode, colsample_bytree=colsample_bytree,
                          gamma=gamma, gpu_id=gpu_id, importance_type=importance_type,
                          interaction_constraints=interaction_constraints, learning_rate=learning_rate,
                          max_delta_step=max_delta_step, max_depth=max_depth,
                          min_child_weight=min_child_weight, missing=missing,
                          monotone_constraints=monotone_constraints, n_estimators=n_estimators,
                          n_jobs=n_jobs, num_parallel_tree=num_parallel_tree, random_state=random_state,
                          reg_alpha=reg_alpha, reg_lambda=reg_lambda, scale_pos_weight=scale_pos_weight,
                          subsample=subsample, tree_method=tree_method,
                          validate_parameters=validate_parameters, verbosity=verbosity)

        __mode = self.training(sample_df_train=train_df, search_time=search_time,
                               judge_score_mode=judge_score_mode, para_dict=xgb_params,
                               mode_type='xgboost')

        # Step 2.2: optional K-fold cross-validation of the fitted model.
        if ISKFold:
            self.KFold(__mode, train_df, random_state=random_state, nfold=nfold)

        # Step 3: evaluate on the verification set(s).
        self.verify(__mode, verify_df, IsSmote)

        # Step 4: print the feature-importance summary.
        self.general_training_reault_output(__mode)

        print("$" * 40, "xgboost training end", "$" * 40)
        return __mode

    def mode_save(self, model="", path=""):
        if path == "":
            filename = self._model_file
        else:
            if os.path.isdir(path):
                now = datetime.datetime.now()
                filename = os.path.join(path, "model-%s.model" % now.strftime("%Y%m%d-%H%M%S"))
            else:
                filename = path

        if model == "":
            raise RuntimeError("please give the parameter model a model to save")
        else:
            joblib.dump(value=model, filename=filename)

    def predict(self):
        # Stub: prediction is not implemented yet — announce and terminate (raises SystemExit).
        print("i am here")
        sys.exit()

if __name__ == "__main__":
    # Ad-hoc demo entry point: widen pandas console output for readable DataFrame prints.
    pd.set_option("display.max_columns", None)
    pd.set_option("display.width", 200)
    # NOTE(review): hard-coded Windows paths to local feature CSVs — adjust per environment.
    positive_Sample_name = "E:/工作内容/2022年/20220321_cn_telecom_type3/work_place/feature/positive_0408.csv"
    negative_Sample_name = "E:/工作内容/2022年/20220321_cn_telecom_type3/work_place/feature/negative_0408.csv"
    work_dir = "E:/工作内容/2022年/20220321_cn_telecom_type3/work_place"

    # filename receives [positive_csv, negative_csv]; presumably loaded in __init__ — confirm.
    model = model_train(work_dir= work_dir, filename=[positive_Sample_name, negative_Sample_name])
    # NOTE(review): mode_save() is called with its defaults (model=""), which per mode_save
    # raises RuntimeError — a training call (e.g. model.lightgbm()) appears to be missing here.
    model.mode_save()
