import math

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error as mae  # mean absolute error (MAE)
from sklearn.metrics import r2_score  # R square

def rmse(y_true, y_predict):
    """Root mean squared error of *y_predict* against *y_true*.

    Computed as sqrt(MSE) rather than ``mean_squared_error(..., squared=False)``:
    the ``squared`` keyword was deprecated in scikit-learn 1.4 and removed in
    1.6, so this form works on every sklearn version.  For the 1-D targets
    used in this file the two are identical.
    """
    return math.sqrt(mean_squared_error(y_true, y_predict))

def mse(y_true, y_predict):
    """Mean squared error of *y_predict* against *y_true* (thin sklearn wrapper)."""
    return mean_squared_error(y_true, y_predict)



class RandomForestRegressorModel():
    """Helper around sklearn's RandomForestRegressor.

    Provides k-fold cross-validated training (:meth:`trainByKfold`),
    evaluation of those folds (:meth:`modelEvaluate`), repeated train/test
    runs (:meth:`trainWithTest`), and feature-importance ranking
    (:meth:`importanceRank`).
    """

    def __init__(self, run_trainByKfold=0):
        # Becomes 1 once trainByKfold() has run; modelEvaluate() checks it.
        self.run_trainByKfold = run_trainByKfold
        self.afterTrainData = []  # [models, train_index, val_index] filled by trainByKfold()
        self.features = None  # feature DataFrame passed to trainByKfold()
        self.target = None  # target passed to trainByKfold()
        # Per-fold ground truth / predictions kept for later plotting; each
        # becomes a length-k list after modelEvaluate().
        self.y_train_true, self.y_train_predict, self.y_val_true, self.y_val_predict = [], [], [], []
        self.hasOneModel = 0  # reserved flag, not used by the methods below
        self.aModelWithAllTrainData = None  # reserved slot, not used below

    def trainByKfold(self, features, target, k=10, seed=2022, adjust_parameter=False,
                     optionParameters=None
                     ):
        '''
        Train one RandomForestRegressor per fold of a k-fold split.

        :param features: feature values (DataFrame)
        :param target: target values (DataFrame / Series)
        :param k: number of folds, default 10
        :param seed: random seed for both the split and the models, default 2022
        :param adjust_parameter: hyper-parameter tuning switch; tuning is not
                                 implemented, so True simply returns None
        :param optionParameters: hyper-parameter dict.
            NOTE(review): this dict is only defaulted below and is NEVER
            passed to RandomForestRegressor — every fold trains with sklearn
            defaults plus ``random_state``.  It is kept for interface
            compatibility; to activate it, pass it into the constructor call
            (beware: 'criterion' 'mse' was renamed 'squared_error' and
            'min_impurity_split' was removed in recent sklearn versions).
        :return: [models, train_index, val_index] — the k fitted models plus
                 per-fold train/validation row-index lists — or None when
                 adjust_parameter is True.
        '''
        if optionParameters is None:
            # Currently unused defaults; see the docstring NOTE above.
            optionParameters = {
                'n_estimators': 10,  # number of trees in the forest
                'criterion': 'mse',  # split-quality metric ('mse'/'mae' in old sklearn)
                'max_depth': None,  # grow until leaves are pure or min_samples_split is hit
                'min_samples_split': 2,  # min samples required to split an internal node
                'min_samples_leaf': 1,  # min samples required at a leaf
                'min_weight_fraction_leaf': 0.0,  # min weighted sample fraction per leaf
                'max_features': 'auto',  # features considered at each split
                'max_leaf_nodes': None,  # unlimited leaf count
                'min_impurity_split': 1e-07,  # legacy early-stopping threshold
                'bootstrap': True,  # sample rows with replacement
                'oob_score': False,  # no out-of-bag scoring
                'n_jobs': 1,  # parallel jobs
                'random_state': None,
                'verbose': 0,
                'warm_start': False,
            }
        self.features = features
        self.target = target
        self.run_trainByKfold = 1  # mark as trained so modelEvaluate() may run
        models, train_index, val_index = [], [], []

        kfold = KFold(n_splits=k, shuffle=True, random_state=seed)
        if not adjust_parameter:
            for train, val in kfold.split(self.features, self.target):
                rfr = RandomForestRegressor(random_state=seed)  # tuning would go here
                rfr.fit(self.features.iloc[train, :], self.target.iloc[train])
                models.append(rfr)
                train_index.append(list(train))
                val_index.append(list(val))
            # k fitted models plus the per-fold index lists (2-D lists).
            self.afterTrainData = [models, train_index, val_index]
            return self.afterTrainData
        else:
            # Hyper-parameter search is not implemented.
            return None

    def modelEvaluate(self):
        '''
        Compute per-fold RMSE / MAE / R2 on the train and validation splits of
        the models produced by trainByKfold().

        :return: dict with per-fold metric lists and their means; prints a
                 message and returns None when trainByKfold() has not run.
                 (Key spellings such as 'trian_r2_arry' are preserved as-is
                 for backward compatibility with existing callers.)
        '''
        if self.run_trainByKfold == 0:
            return print("模型还未训练！")
        elif self.run_trainByKfold == 1:  # trainByKfold() has produced models
            train_rmse_arry, val_rmse_arry, train_mae_arry, val_mae_arry, trian_r2_arry, val_r2_arry = [], [], [], [], [], []

            models_arry = self.afterTrainData[0]
            train_index_arry = self.afterTrainData[1]
            val_index_arry = self.afterTrainData[2]

            # BUG FIX: reset the plotting buffers so that calling
            # modelEvaluate() more than once does not accumulate duplicates.
            self.y_train_true, self.y_train_predict = [], []
            self.y_val_true, self.y_val_predict = [], []

            for i in range(len(models_arry)):
                y_train_predict = np.squeeze(models_arry[i].predict(self.features.iloc[train_index_arry[i]]))
                y_val_predict = np.squeeze(models_arry[i].predict(self.features.iloc[val_index_arry[i]]))
                train_rmse_arry.append(rmse(self.target.iloc[train_index_arry[i]], y_train_predict))  # train-split error
                val_rmse_arry.append(rmse(self.target.iloc[val_index_arry[i]], y_val_predict))
                train_mae_arry.append(mae(self.target.iloc[train_index_arry[i]], y_train_predict))
                val_mae_arry.append(mae(self.target.iloc[val_index_arry[i]], y_val_predict))

                # r2_score expects 1-D float arrays, hence np.array + squeeze.
                trian_r2_arry.append(
                    r2_score(np.squeeze(np.array(self.target.iloc[train_index_arry[i]], dtype='float')),
                             y_train_predict))
                val_r2_arry.append(
                    r2_score(np.squeeze(np.array(self.target.iloc[val_index_arry[i]], dtype='float')),
                             y_val_predict))

                # Keep per-fold true/predicted values for plotting.
                self.y_train_true.append(self.target.iloc[train_index_arry[i]])
                self.y_train_predict.append(y_train_predict)
                self.y_val_true.append(self.target.iloc[val_index_arry[i]])
                self.y_val_predict.append(y_val_predict)
            trian_r2_arry = pd.DataFrame(trian_r2_arry).iloc[:, 0]
            val_r2_arry = pd.DataFrame(val_r2_arry).iloc[:, 0]
            # Fold-averaged metrics.
            train_rmse_mean = np.mean(train_rmse_arry)
            val_rmse_mean = np.mean(val_rmse_arry)
            train_mae_mean = np.mean(train_mae_arry)
            val_mae_mean = np.mean(val_mae_arry)
            trian_r2_mean = np.mean(trian_r2_arry)
            val_r2_mean = np.mean(val_r2_arry)

            return {'train_rmse_arry': train_rmse_arry, 'val_rmse_arry': val_rmse_arry,
                    'train_mae_arry': train_mae_arry, 'val_mae_arry': val_mae_arry,
                    'trian_r2_arry': trian_r2_arry, 'val_r2_arry': val_r2_arry,
                    'train_rmse_mean': train_rmse_mean, 'val_rmse_mean': val_rmse_mean,
                    'train_mae_mean': train_mae_mean, 'val_mae_mean': val_mae_mean,
                    'trian_r2_mean': trian_r2_mean, 'val_r2_mean': val_r2_mean}

    def trainWithTest(self, features_train, target_train, features_test, target_test, trainNum=1, seed=2022, adjust_parameter=False,
                      optionParameters=None):
        '''
        Fit on a fixed train split and score on a fixed test split,
        repeated trainNum times, reporting each run's metrics plus their std.

        :param features_train: training features
        :param target_train: training targets
        :param features_test: test features
        :param target_test: test targets
        :param trainNum: number of training runs (seed varies per run)
        :param seed: base random seed; run i uses seed + i
        :param adjust_parameter: tuning switch (currently unused here)
        :param optionParameters: hyper-parameter dict (currently unused here)
        :return: dict of per-run metric lists (RMSE/MSE/MAE/R2 on train and
                 test) and the std of each list.
        '''
        train_rmse_arr, train_mse_arr, train_mae_arr, train_r2_arr, test_rmse_arr, test_mse_arr, test_mae_arr, test_r2_arr = [], [], [], [], [], [], [], []
        for i in range(trainNum):
            # BUG FIX: vary the seed per run (matching importanceRank);
            # a fixed seed would make all runs identical and every std 0.
            rfr = RandomForestRegressor(random_state=seed + i)  # tuning would go here
            rfr.fit(features_train, target_train)

            y_train_true = target_train
            # BUG FIX: predict() takes only the features — passing the target
            # as a second positional argument raised a TypeError.
            y_train_predict = rfr.predict(features_train)
            y_test_true = target_test
            y_test_predict = rfr.predict(features_test)
            train_rmse = rmse(y_train_true, y_train_predict)
            train_mse = mse(y_train_true, y_train_predict)
            train_mae = mae(y_train_true, y_train_predict)
            train_r2 = r2_score(y_train_true, y_train_predict)
            test_rmse = rmse(y_test_true, y_test_predict)
            test_mse = mse(y_test_true, y_test_predict)
            test_mae = mae(y_test_true, y_test_predict)
            test_r2 = r2_score(y_test_true, y_test_predict)

            train_rmse_arr.append(train_rmse)
            train_mse_arr.append(train_mse)
            train_mae_arr.append(train_mae)
            train_r2_arr.append(train_r2)
            test_rmse_arr.append(test_rmse)
            test_mse_arr.append(test_mse)
            test_mae_arr.append(test_mae)
            test_r2_arr.append(test_r2)
        # Spread of each metric across the trainNum runs.
        train_rmse_arr_std = np.std(train_rmse_arr)
        train_mse_arr_std = np.std(train_mse_arr)
        train_mae_arr_std = np.std(train_mae_arr)
        train_r2_arr_std = np.std(train_r2_arr)
        test_rmse_arr_std = np.std(test_rmse_arr)
        test_mse_arr_std = np.std(test_mse_arr)
        test_mae_arr_std = np.std(test_mae_arr)
        test_r2_arr_std = np.std(test_r2_arr)
        return {'train_rmse_arr': train_rmse_arr, 'train_rmse_arr_std': train_rmse_arr_std, 'train_mse_arr': train_mse_arr,
                'train_mse_arr_std': train_mse_arr_std, 'train_mae_arr': train_mae_arr, 'train_mae_arr_std': train_mae_arr_std,
                'train_r2_arr': train_r2_arr, 'train_r2_arr_std': train_r2_arr_std, 'test_rmse_arr': test_rmse_arr,
                'test_rmse_arr_std': test_rmse_arr_std, 'test_mse_arr': test_mse_arr, 'test_mse_arr_std': test_mse_arr_std,
                'test_mae_arr': test_mae_arr, 'test_mae_arr_std': test_mae_arr_std, 'test_r2_arr': test_r2_arr,
                'test_r2_arr_std': test_r2_arr_std}

    def importanceRank(self, features, target, seed=2022, trainNum=1):
        '''
        Average feature importances over trainNum random-forest fits.

        :param features: input features (DataFrame)
        :param target: input targets (DataFrame / Series)
        :param seed: base model seed; run i uses seed + i
        :param trainNum: number of training runs to average over
        :return: dict with
                 'rank_dic'  — {feature name: averaged importance}, ordered
                               most-important first,
                 'df_export' — per-run importances (one row per run), columns
                               ordered by that same ranking,
                 'r2'        — training R2 of the LAST run only.
        '''
        feature_important = []
        for i in range(trainNum):
            rfr = RandomForestRegressor(random_state=seed + i)  # tuning would go here
            rfr.fit(features, target)
            fi = rfr.feature_importances_  # in original column order
            r2 = rfr.score(features, target)  # overwritten each run; last run wins
            feature_important.append(fi)

        # Sum the per-run importance vectors, then average.
        sum_important = np.zeros(features.shape[1])
        for run_fi in feature_important:
            sum_important = sum_important + run_fi

        ava_important = sum_important / trainNum
        # Column indices sorted by averaged importance, most important first.
        desc_idx = np.argsort(ava_important)[::-1]
        rank = list(features.columns[desc_idx])
        # BUG FIX: pair each ranked column with ITS OWN averaged importance.
        # The old code indexed ava_important in original column order while
        # iterating columns in ranked order, attaching values to the wrong
        # features.
        rank_dic = {}
        for col, idx in zip(rank, desc_idx):
            rank_dic[col] = ava_important[idx]
        # BUG FIX: feature_important rows are in original column order, so
        # label them with the original names first, then reorder by rank —
        # the old code mislabeled the columns with the sorted names.
        df_export = pd.DataFrame(feature_important, columns=features.columns)[rank]

        return {'rank_dic': rank_dic, 'df_export': df_export, 'r2': r2}

# df = pd.read_excel(r'D:\jupyter code\物理特征提升带隙预测精度\第一次修改\表数据\11特征+标签数据训练集.xlsx')
# features = df.iloc[:,1:-1]
# target = df.iloc[:,-1]
#
# RandomForestRegressorModel=RandomForestRegressorModel()
# rank=RandomForestRegressorModel.importanceRank(features,target,trainNum=1)
# print(rank)