# 将数据分为测试集和训练集
from sklearn.model_selection import train_test_split
# 利用邻近点方式训练数据
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# 用于评价
import sklearn.metrics as metrics
import pandas as pd
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
## 集成学习
from sklearn import ensemble
# DT决策树
from sklearn.tree import DecisionTreeClassifier
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score

import pylab as plt


# K-nearest-neighbours classifier with simple repeated-holdout k selection
class KNN:
    def __init__(self, X, Y, k=5):
        """
        Initialise the KNN model wrapper.

        :param X: feature matrix (passed straight through to scikit-learn)
        :param Y: target labels
        :param k: number of neighbours used when train_model() is called
                  without a k_range (default 5)
        """
        self.X = X
        self.Y = Y
        # BUG FIX: the original assigned the literal 5 and ignored the k argument.
        self.k = k
        # accuracy values per run
        self.Accuracy = []
        # get_params() dicts collected during the search
        self.paramses = []
        # raw validation scores, one inner list per candidate k
        self.Scores = []
        # classification-report summaries
        self.summaries = []
        # mean validation score per candidate k
        # BUG FIX: these used to be (re)created inside split_X, which is
        # called once per holdout split, so the scores of every earlier
        # candidate k were wiped and argmax always picked k_range[0].
        self.mean_scores = []
        # the fitted models for each candidate k
        self.KNN_models = []
        # best model / params / k after train_model()
        self.bestmodel = None
        self.bestmodel_params = None
        self.best_k = None

    def split_X(self, test_percentage=0.2, random_seed=42):
        """
        Split the stored data into a training and a validation set.

        :param test_percentage: fraction of samples held out for validation
        :param random_seed: unused; kept for interface compatibility.  The
            original author deliberately left random_state unset so every
            call produces a fresh random split (see commented line below).
        :return: None (the four splits are stored on self)
        """
        # X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage, random_state=42)
        # https://blog.csdn.net/qq_39355550/article/details/82688014
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def train_model(self, k_range=None):
        """
        Train a KNN classifier.

        When k_range=[lo, hi] is given, every k in range(lo, hi) is scored
        on 5 independent random holdout splits; the k with the best mean
        validation score is refitted on the full data set.  Without a
        k_range a single model with self.k neighbours is fitted.

        :param k_range: optional [lo, hi) range of candidate k values
        :return: the fitted best model (also stored on self.bestmodel)
        """
        print('KNN模型参数训练中......')
        # reset search bookkeeping so repeated calls start fresh
        self.mean_scores = []
        self.KNN_models = []
        if k_range:
            for k in range(k_range[0], k_range[1]):
                params_current = []
                current_scores = []
                current_knn = []
                # score this k on 5 independent random holdout splits
                for split_nums in tqdm(range(5)):
                    self.split_X()
                    knn = KNeighborsClassifier(n_neighbors=k, n_jobs=10)
                    knn.fit(self.X_train, self.y_train)
                    params = knn.get_params()
                    score = knn.score(self.X_test, self.y_test)
                    current_scores.append(score)
                    params_current.append(params)
                    current_knn.append(knn)
                # record the five scores and their mean for this k
                # (the original also did a no-op "k += 1" here; removed)
                self.Scores.append(current_scores)
                self.mean_scores.append(np.mean(current_scores))
                self.paramses.append(params_current)
                self.KNN_models.append(current_knn)
            # pick the k with the highest mean validation score
            best_knn_index = np.argmax(self.mean_scores)
            best_knn_k = range(k_range[0], k_range[1])[best_knn_index]
            self.best_k = best_knn_k
            knn = KNeighborsClassifier(n_neighbors=best_knn_k)
            # refit on the full data set for later prediction
            best_knn = knn.fit(self.X, self.Y)
            self.bestmodel = best_knn
            self.bestmodel_params = knn.get_params()
            return self.bestmodel
        else:
            # no search requested: fit a single model with self.k neighbours
            knn = KNeighborsClassifier(n_neighbors=self.k)
            best_knn = knn.fit(self.X, self.Y)
            self.bestmodel = best_knn
            self.bestmodel_params = knn.get_params()
            return self.bestmodel
    #     # print(params)


# Logistic-regression classifier with penalty ('l1' vs 'l2') selection
class Logisitic:
    def __init__(self, X, Y):
        """
        Initialise the logistic-regression model wrapper.

        :param X: feature matrix
        :param Y: target labels
        """
        self.X = X
        self.Y = Y
        # accuracy values per run
        self.Accuracy = []
        # get_params() dicts collected during the search
        self.paramses = []
        # raw validation scores, one inner list per candidate penalty
        self.Scores = []
        # classification-report summaries
        self.summaries = []
        # the fitted models for each candidate penalty
        self.logistic_models = []
        # best regularisation penalty found by train_model()
        self.best_penalties = None
        # mean validation score per candidate penalty
        # BUG FIX: these used to be (re)created inside split_X, which is
        # called once per holdout split, so every earlier candidate's score
        # was wiped and argmax always picked 'l1'.
        self.mean_scores = []
        self.bestmodel = None
        self.bestmodel_params = None

    def split_X(self, test_percentage=0.2, random_seed=42):
        """
        Split the stored data into a training and a validation set.

        :param test_percentage: fraction of samples held out for validation
        :param random_seed: unused; kept for interface compatibility.  The
            original author deliberately left random_state unset so every
            call produces a fresh random split (see commented line below).
        :return: None (the four splits are stored on self)
        """
        # X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage, random_state=42)
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def train_model(self, C=1.0):
        """
        Train a logistic-regression model, selecting between l1 and l2
        regularisation by mean score over 5 random holdout splits, then
        refitting the winner on the full data set.

        :param C: positive float; smaller C means stronger regularisation
        :return: the fitted best model (also stored on self.bestmodel)
        """
        print('Logistic模型参数训练中......')
        # reset search bookkeeping so repeated calls start fresh
        self.mean_scores = []
        # candidate regularisation penalties (saga supports both)
        penalties = ['l1', 'l2']
        for penalty in penalties:
            params_current = []
            current_scores = []
            current_logisitic = []
            # score this penalty on 5 independent random holdout splits
            for split_nums in tqdm(range(5)):
                self.split_X()
                logisitic = LogisticRegression(penalty=penalty, C=C, n_jobs=10, solver='saga')
                logisitic.fit(self.X_train, self.y_train)
                params = logisitic.get_params()
                score = logisitic.score(self.X_test, self.y_test)
                current_scores.append(score)
                params_current.append(params)
                current_logisitic.append(logisitic)
            # record the five scores and their mean for this penalty
            self.Scores.append(current_scores)
            self.mean_scores.append(np.mean(current_scores))
            self.paramses.append(params_current)
            self.logistic_models.append(current_logisitic)

        # pick the penalty with the highest mean validation score
        best_index = np.argmax(self.mean_scores)
        best_penalty = penalties[best_index]
        self.best_penalties = best_penalty
        logisitic = LogisticRegression(penalty=best_penalty, C=C, n_jobs=10, solver='saga')
        # refit on the full data set for later prediction
        best_logisitic = logisitic.fit(self.X, self.Y)
        self.bestmodel = best_logisitic
        self.bestmodel_params = logisitic.get_params()
        return self.bestmodel


# Decision-tree classifier with max_depth selection
class DT:
    def __init__(self, X, Y):
        """
        Initialise the decision-tree model wrapper.

        :param X: feature matrix
        :param Y: target labels
        """
        self.X = X
        self.Y = Y
        # accuracy values per run
        self.Accuracy = []
        # get_params() dicts collected during the search
        self.paramses = []
        # raw validation scores, one inner list per candidate depth
        self.Scores = []
        # classification-report summaries
        self.summaries = []
        # the five fitted trees for each candidate depth
        self.All_DT = []
        # best max_depth found by train_model()
        self.best_deepth = None
        # mean validation score per candidate depth
        # BUG FIX: these used to be (re)created inside split_X, which is
        # called once per holdout split, so every earlier candidate's score
        # was wiped and argmax always picked deepth_max_range[0].
        self.mean_scores = []
        self.bestmodel = None
        self.bestmodel_params = None

    def split_X(self, test_percentage=0.2):
        """
        Split the stored data into a training and a validation set.

        :param test_percentage: fraction of samples held out for validation
        :return: None (the four splits are stored on self)
        """
        # X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage, random_state=42)
        # https://blog.csdn.net/qq_39355550/article/details/82688014
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def Change_Params(self, criterion='gini', splitter='best', min_samples_split=2):
        """
        Store tree hyper-parameters for later use.

        :param criterion: 'gini' or 'entropy' — split-quality measure
        :param splitter: 'best' or 'random' — split-selection strategy
        :param min_samples_split: minimum samples required to split a node
        """
        # BUG FIX: the original hard-coded 'gini' and ignored the argument.
        self.criterion = criterion
        self.splitter = splitter
        self.min_samples_split = min_samples_split

    def train_model(self, deepth_max_range=None):
        """
        Train a decision tree.

        When deepth_max_range=[lo, hi] is given, every max_depth in
        range(lo, hi) is scored on 5 random holdout splits and the best
        depth is refitted on the full data set; otherwise a single
        unbounded-depth tree is fitted.

        :param deepth_max_range: optional [lo, hi) range of candidate depths
        :return: the fitted best model (also stored on self.bestmodel)
        """
        print('DT决策数模型参数训练中......')
        # reset search bookkeeping so repeated calls start fresh
        self.mean_scores = []
        if deepth_max_range:
            for max_depth in range(deepth_max_range[0], deepth_max_range[1]):
                params_current = []
                current_scores = []
                current_DT = []
                # score this depth on 5 independent random holdout splits
                for split_nums in tqdm(range(5)):
                    self.split_X()
                    # NOTE(review): the original author reported problems when
                    # passing criterion/splitter here, so 'entropy' stays
                    # hard-coded and Change_Params values are currently unused.
                    dt_model = DecisionTreeClassifier(criterion='entropy', max_depth=max_depth)
                    dt_model.fit(self.X_train, self.y_train)
                    params = dt_model.get_params()
                    score = dt_model.score(self.X_test, self.y_test)
                    current_scores.append(score)
                    params_current.append(params)
                    current_DT.append(dt_model)
                # record the five scores and their mean for this depth
                self.Scores.append(current_scores)
                self.mean_scores.append(np.mean(current_scores))
                self.paramses.append(params_current)
                self.All_DT.append(current_DT)
            # pick the depth with the highest mean validation score
            best_DT_index = np.argmax(self.mean_scores)
            best_DT_depth = range(deepth_max_range[0], deepth_max_range[1])[best_DT_index]
            self.best_deepth = best_DT_depth
            best_DT = DecisionTreeClassifier(criterion='entropy', max_depth=best_DT_depth)
            # refit on the full data set for later prediction
            best_DT = best_DT.fit(self.X, self.Y)
            self.bestmodel = best_DT
            self.bestmodel_params = best_DT.get_params()
            return self.bestmodel
        else:
            # no search requested: fit a single unbounded-depth tree
            best_DT = DecisionTreeClassifier(criterion='entropy')
            best_DT.fit(self.X, self.Y)
            self.bestmodel = best_DT
            self.bestmodel_params = best_DT.get_params()
            return self.bestmodel


# Bagging ensemble model with n_estimators selection
class Bagging:
    def __init__(self, X, Y):
        """
        Initialise the bagging-ensemble wrapper.

        :param X: feature matrix
        :param Y: target labels
        """
        # default ensemble instance (kept from the original interface)
        self.bagging_model = ensemble.BaggingClassifier()
        self.X = X
        self.Y = Y
        # accuracy values per run
        self.Accuracy = []
        # get_params() dicts collected during the search
        self.paramses = []
        # raw validation scores, one inner list per candidate count
        self.Scores = []
        # classification-report summaries
        self.summaries = []
        # all fitted ensembles per candidate estimator count
        self.All_Ensemble = []
        # best n_estimators found by train_model()
        self.best_count = None
        # mean validation score per candidate count
        # BUG FIX: these used to be (re)created inside split_X, which is
        # called once per holdout split, wiping earlier candidates' scores.
        self.mean_scores = []
        self.bestmodel = None
        self.bestmodel_params = None

    def split_X(self, test_percentage=0.2, random_seed=42):
        """
        Split the stored data into a training and a validation set.

        :param test_percentage: fraction of samples held out for validation
        :param random_seed: unused; kept for interface compatibility.  The
            original author deliberately left random_state unset so every
            call produces a fresh random split (see commented line below).
        :return: None (the four splits are stored on self)
        """
        # X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage, random_state=42)
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=test_percentage)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def train_model(self, BaseClassfier_init_function_name, n_estimators_range=None):
        """
        Train the bagging ensemble.

        :param BaseClassfier_init_function_name: base-classifier class
            (e.g. KNeighborsClassifier); called with no arguments to build
            each base estimator
        :param n_estimators_range: optional [lo, hi) range of candidate
            estimator counts, each scored on 5 random holdout splits; the
            winner is refitted on the full data set
        :return: the fitted best model (also stored on self.bestmodel)
        """
        print('集成模型参数训练中......')
        # reset search bookkeeping so repeated calls start fresh
        self.mean_scores = []
        if n_estimators_range:
            for estimators_count in range(n_estimators_range[0], n_estimators_range[1]):
                # BUG FIX: these accumulators were created once OUTSIDE the
                # candidate loop in the original, so later candidates' means
                # included every earlier candidate's scores.
                params_current = []
                current_scores = []
                current_ensemble = []
                # score this count on 5 independent random holdout splits
                for split_nums in tqdm(range(5)):
                    self.split_X()
                    Ensemble_model = ensemble.BaggingClassifier(
                        # base classifier model
                        base_estimator=BaseClassfier_init_function_name(),
                        n_estimators=estimators_count,
                        n_jobs=10)
                    Ensemble_model.fit(self.X_train, self.y_train)
                    params = Ensemble_model.get_params()
                    score = Ensemble_model.score(self.X_test, self.y_test)
                    current_scores.append(score)
                    params_current.append(params)
                    current_ensemble.append(Ensemble_model)
                # record the five scores and their mean for this count
                self.Scores.append(current_scores)
                self.mean_scores.append(np.mean(current_scores))
                self.paramses.append(params_current)
                self.All_Ensemble.append(current_ensemble)
            # pick the estimator count with the highest mean validation score
            best_Ensemble_index = np.argmax(self.mean_scores)
            best_knn_estimators_count = range(n_estimators_range[0], n_estimators_range[1])[best_Ensemble_index]
            self.best_count = best_knn_estimators_count
            best_Ensemble = ensemble.BaggingClassifier(
                # base classifier model
                base_estimator=BaseClassfier_init_function_name(),
                n_estimators=best_knn_estimators_count,
                n_jobs=10)
            # refit on the full data set for later prediction
            best_Ensemble.fit(self.X, self.Y)
            self.bestmodel = best_Ensemble
            self.bestmodel_params = best_Ensemble.get_params()
            return self.bestmodel
        else:
            # no search requested: fit a single ensemble with the default count
            best_Ensemble = ensemble.BaggingClassifier(
                # base classifier model
                base_estimator=BaseClassfier_init_function_name(),
                n_jobs=10)
            best_Ensemble.fit(self.X, self.Y)
            self.bestmodel = best_Ensemble
            self.bestmodel_params = best_Ensemble.get_params()
            return self.bestmodel


# Model evaluation helper: confusion matrix, accuracy, and report
class Evaluate:
    def __init__(self, model, X, Y):
        """
        Run *model* on (X, Y), compute and print the confusion matrix,
        and keep the predictions for the other report methods.

        :param model: a fitted classifier exposing .predict()
        :param X: feature matrix
        :param Y: true labels
        """
        predictions = model.predict(X)
        # confusion matrix of true labels vs predictions
        self.cm = confusion_matrix(Y, predictions)
        print(self.cm)
        self.X = X
        self.Y = Y
        self.y_pred = predictions

    def Draw_confusion_matrix(self):
        """Render the stored confusion matrix as a heat-map figure."""
        fig, ax = plot_confusion_matrix(conf_mat=self.cm, figsize=(6, 6), cmap=plt.cm.Greens)
        plt.xlabel('预测', fontsize=18)
        plt.ylabel('实际', fontsize=18)
        plt.title('混淆矩阵', fontsize=18)

    def Get_accury(self):
        """Return the overall accuracy of the stored predictions."""
        return accuracy_score(self.Y, self.y_pred)

    def Print_report(self):
        """Print the per-class precision / recall / F1 report."""
        print(metrics.classification_report(self.Y, self.y_pred))

    def Get_confusion(self):
        """Return the stored confusion matrix."""
        return self.cm

