#  select  SVR in SVM
import os
import numpy
from pandas import read_csv
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold #交叉验证
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler

from ML_Methods.utils import create_dataset, set_label_dataset, mean_absolute_percentage_error, get_r2_numpy, score_R2

class SVRmodel:
    def __init__(self, file_path, spei_n, train_rate):
        '''
        SVR model wrapper for SPEI time-series prediction.

        Loads a two-column CSV (time, SPEI value), splits it into train/test
        partitions, scales it to [0, 1], and builds supervised (X, y) pairs
        with a look-back of 1 step.

        :param file_path: path to the CSV file (no header row)
        :param spei_n: SPEI accumulation period n (e.g. 12 for SPEI-12)
        :param train_rate: fraction of samples used for training (e.g. 0.9)
        '''
        self.spei_n = 'SPEI-' + str(spei_n)
        self.train_rate = train_rate

        # Load the series and use the date column as the index.
        dataframe = read_csv(file_path, header=None, names=('TIME', spei_n))
        dataframe = dataframe.set_index(['TIME'], drop=True)

        # Split into train / test partitions (e.g. train_rate=0.9).
        train_dataset, test_dataset = create_dataset(numpy.array(dataframe), train_rate)

        # Scale to [0, 1] (MinMaxScaler; MaxAbsScaler would give [-1, 1]).
        # BUG FIX: the scaler must be fitted on the TRAINING data only and
        # then *applied* to the test data. The original called fit_transform
        # on both partitions, refitting on the test set — that leaks test
        # statistics into preprocessing and scales the partitions
        # inconsistently.
        self.scaler = MinMaxScaler(feature_range=(0, 1))
        train_dataset = self.scaler.fit_transform(train_dataset)
        test_dataset = self.scaler.transform(test_dataset)

        # Build labelled (X, y) pairs with a window of 1 step.
        self.X_train, self.Y_train = set_label_dataset(train_dataset, 1)
        self.X_test, self.Y_test = set_label_dataset(test_dataset, 1)

    def train(self, search: bool = False, model=None, param_grid=None):
        '''
        Fit the SVR, either directly or via grid search.

        :param search: if False, fit the caller-provided ``model``;
            if True, run GridSearchCV over ``param_grid`` with a fresh SVR.
        :param model: an instantiated SVR (required when search is False)
        :param param_grid: dict or list of dicts for GridSearchCV
            (required when search is True)
        :return: None; the fitted estimator is stored in ``self.svr``
        '''
        if not search:
            # No search: caller must supply an instantiated model.
            if model is None:
                print('In train() parameter `model` must not be None')
                return
            self.svr = model
            # ravel() because sklearn expects a 1-D target vector.
            self.svr.fit(self.X_train, self.Y_train.ravel())
        else:
            # Grid search: caller must supply param_grid.
            if param_grid is None:
                print('In train() parameter `param_grid` must not be None')
                return
            # GridSearchCV finds the best hyperparameters.  Distance-style
            # metrics such as mean_squared_error are exposed negated by
            # sklearn, hence 'neg_mean_squared_error'.
            # return_train_score must be True in newer versions for the
            # train scores to be recorded in cv_results_.
            self.svr = GridSearchCV(estimator=SVR(), param_grid=param_grid,
                                    verbose=2, n_jobs=-1, refit=True,
                                    return_train_score=True,
                                    scoring='neg_mean_squared_error')
            # The actual search happens inside fit().
            self.svr.fit(self.X_train, self.Y_train.ravel())

    def predict(self):
        '''
        Predict on the test inputs and invert the scaling.

        Stores ``self.y_test_original`` and ``self.y_test_pred`` (both in
        original SPEI units) for later metric computation.
        '''
        # predict() returns 1-D; reshape to 2-D for inverse_transform.
        y_test_pred_temp = self.svr.predict(self.X_test).reshape(-1, 1)

        # Undo the MinMax scaling so metrics are reported in SPEI units.
        self.y_test_original = self.scaler.inverse_transform(self.Y_test)
        self.y_test_pred = self.scaler.inverse_transform(y_test_pred_temp)

    def print(self):
        '''
        Compute test-set metrics and print them as one CSV-style line:
        ``spei_n,train_rate,rmse,mae,mape,r2``.
        (NOTE: this method name shadows the builtin ``print`` inside the
        class body; kept for interface compatibility with existing callers.)
        '''
        predict_rmse = numpy.sqrt(mean_squared_error(self.y_test_original, self.y_test_pred))
        predict_mae = mean_absolute_error(self.y_test_original, self.y_test_pred)
        predict_mape = mean_absolute_percentage_error(self.y_test_original, self.y_test_pred)
        predict_r2 = r2_score(self.y_test_original, self.y_test_pred)
        print("%s,%s,%s,%s,%s,%s," % (self.spei_n, self.train_rate, predict_rmse,
                                      predict_mae, predict_mape, predict_r2))



WORK_PATH = os.getcwd() # current project working directory
# DATA_PATH = os.path.join(WORK_PATH, r'indices_caculate\result\ROW_SPEI-12\SPEI-12_52533.txt')

'''

C_list = [0.8, 1.0, 1.2] #默认值是1.0
kernel_list = ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed' ]
degree_list = [3, 4, 5] # poly函数的维度，默认是3
gamma_list = ['scale','auto', 0.001 ],# 'rbf','poly' 和'sigmoid'的核函数参数。默认是'auto'，则会选择1/n_features
coef0_list = [10e-5, 0.0, 10 ] #默认 0.0
'''


# 1) Grid-search the optimal model parameters using SPEI-12 with 90% training data

param_grid = [
    {
        'kernel' : ['sigmoid' ],
        'gamma' : ['scale','auto',1, 0.1, 0.01],
        'C' : [0.1, 1, 10, 100],
        'coef0': [10e-2, 10e-1, 10e1 ] # default 0.0
    },
    {
        'kernel' : ['poly' ],
        'gamma' : ['scale','auto', 1, 0.1, 0.01 ],
        'degree' : [3, 4, 5],
        'C' : [0.1, 1, 10, 100],
        'coef0': [0.01, 0.0] # default 0.0
    },
    {
        'kernel' : ['linear', 'rbf'],
        'gamma' : ['scale','auto',1, 0.1, 0.01],
        'C' : [0.1, 1, 10, 100],
    }
]
# param_grid = {
#     'kernel' : ['linear', 'rbf'],
#     'gamma' : ['scale','auto',1, 0.1, 0.01],
#     'C' : [0.1, 1, 10, 100],
# }

DATA_PATH = os.path.join(WORK_PATH, r'indices_caculate\result\ROW_SPEI-12\SPEI-12_52533.txt') # models directory
svr_model = SVRmodel(file_path=DATA_PATH, spei_n=12, train_rate=0.9 )
svr_model.train(search=True, param_grid=param_grid)

# After training, print the best score / parameters / model
print('训练后打印最好的所以 得分 参数 模型: ')
print(svr_model.svr.best_estimator_)
print(svr_model.svr.best_params_)
print(svr_model.svr.best_score_)
print(svr_model.svr.best_index_)

# Grid-search result: the optimal solution
print('网格搜素结果 最优解: ')
print(svr_model.svr.cv_results_)
print('')


print("Best: %f using %s" % (svr_model.svr.best_score_,svr_model.svr.best_params_))
# cv_results_ gives the evaluation for every parameter combination;
# best_params_ is the combination that achieved the best result.
# best_score_ is the best score observed during the optimization.
# cv_results_ is a dict with keys as column headers and values as columns
# (importable into a DataFrame); the 'params' key stores the list of
# parameter settings for all candidates.
means = svr_model.svr.cv_results_['mean_test_score']
params = svr_model.svr.cv_results_['params']
for mean,param in zip(means,params):
    print("%f  with:   %r" % (mean,param))


# 2) Compare the best model across SPEI-1/3/6/9/12 and train ratios 0.7/0.8/0.9

SPEI_n_list = [1, 3, 6, 9, 12]
train_rate_list = [0.7, 0.8, 0.9]

# SVR(**best params) — fill this in from the grid-search result of step 1.
best_model = SVR(kernel='linear', C=10, gamma='scale')

print('spei_n,train_rate,rmse,mae,mape,r2')
for spei in SPEI_n_list:
    # Build the per-SPEI path with os.path.join; the original concatenated a
    # non-raw '\SPEI-' segment (invalid '\S' escape, DeprecationWarning).
    file_path = os.path.join(WORK_PATH,
                             r'indices_caculate\result',
                             'ROW_SPEI-%d' % spei,
                             'SPEI-%d_52533.txt' % spei)  # models directory
    for rate in train_rate_list:
        # BUG FIX: the original passed DATA_PATH (the step-1 SPEI-12 file)
        # here, so every SPEI-n iteration silently reused the same file.
        # Use the per-SPEI file_path computed above instead.
        svr_model = SVRmodel(file_path=file_path, spei_n=spei, train_rate=rate)
        svr_model.train(search=False, model=best_model)
        svr_model.predict()
        svr_model.print()
