# coding: utf-8
import os
import numpy
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import GridSearchCV

from ML_Methods.utils import create_dataset, set_label_dataset, mean_absolute_percentage_error

class ANNmodel:
    def __init__(self, file_path, spei_n, train_rate, dense_num):
        '''
        ANN model wrapper for SPEI time-series prediction.

        :param file_path: path to a two-column CSV file (time, SPEI value)
        :param spei_n: SPEI time scale n (e.g. 1, 3, 6, 9, 12)
        :param train_rate: fraction of samples used for training (e.g. 0.9)
        :param dense_num: number of units in the hidden Dense layer
        :return:
        '''

        self.spei_n = 'SPEI-'+str(spei_n)
        self.train_rate = train_rate
        self.dense_num = dense_num

        # Load the data and use the date column as the index.
        dataframe = read_csv(file_path, header=None, names=('TIME', spei_n ))
        dataframe = dataframe.set_index(['TIME'], drop=True)

        # Split into train / test partitions (e.g. 0.9 train).
        train_dataset, test_dataset = create_dataset(numpy.array(dataframe), train_rate)

        # Scale the data to [0, 1] (MinMaxScaler; MaxAbsScaler would give [-1, 1]).
        # BUGFIX: fit the scaler on the training partition only and reuse that
        # same fitted transform on the test partition. The original called
        # fit_transform() on the test set as well, which leaks test-set
        # statistics into the scaling and makes inverse_transform inconsistent.
        self.scaler = MinMaxScaler(feature_range=(0, 1))
        train_dataset = self.scaler.fit_transform(train_dataset)
        test_dataset = self.scaler.transform(test_dataset)

        # Build supervised (X, y) pairs with a look-back of 1 step.
        self.X_train, self.Y_train = set_label_dataset(train_dataset, 1)
        self.X_test, self.Y_test = set_label_dataset(test_dataset, 1)

    # Model factory, also used by GridSearchCV to find the best parameters.
    def build_classifier(self, optimizer):
        '''
        Build and compile the ANN: one hidden ReLU Dense layer and a
        single-unit linear output layer (a regression network).

        :param optimizer: Keras optimizer name or instance
        :return: compiled Sequential model
        '''
        # Init ANN
        ANN_model = Sequential()
        # Add input and first hidden layers
        ANN_model.add(Dense(self.dense_num, input_dim=self.X_train.shape[1], activation='relu'))
        ANN_model.add(Dense(1))
        ANN_model.summary()
        # Compile ANN.
        # NOTE(review): 'accuracy' is meaningless for a regression loss; it is
        # kept only so training logs stay comparable with earlier runs.
        ANN_model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])
        return ANN_model

    def train(self, search:bool=False, param_grid=None, optimizer=None, epochs=None, batch_size=None ):
        '''
        Train the model, either directly or via grid search.

        :param search: if True, run GridSearchCV over ``param_grid``;
                       otherwise fit a single model directly
        :param param_grid: parameter grid for the search (required when search=True)
        :param optimizer: optimizer for the direct fit (ignored when search=True)
        :param epochs: epochs for the direct fit (ignored when search=True)
        :param batch_size: batch size for the direct fit (ignored when search=True)
        '''
        if search == False:
            # No search: fit one model with the supplied hyper-parameters.
            # Reuse build_classifier() instead of duplicating the identical
            # model-construction code inline (behavior unchanged).
            self.model = self.build_classifier(optimizer)
            # shuffle=False keeps the time-series ordering intact.
            self.model.fit(self.X_train, self.Y_train.ravel(), epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False)

            return
        else:
            # Grid search: caller must supply param_grid.
            if param_grid is not None:
                # GridSearchCV scoring uses sklearn's negated MSE: metrics which
                # measure the distance between model and data (like
                # metrics.mean_squared_error) are exposed as
                # 'neg_mean_squared_error', returning the negated metric value.
                # NOTE(review): KerasClassifier is applied to a continuous
                # regression target; KerasRegressor would be the natural
                # wrapper here -- kept as-is to preserve observed behavior,
                # TODO confirm.
                ANN_classifier = KerasClassifier(build_fn=self.build_classifier)

                self.model = GridSearchCV(estimator=ANN_classifier, param_grid=param_grid, verbose=2, n_jobs=-1, refit=True,return_train_score=True,
                                           scoring="neg_mean_squared_error")
                # The search itself runs during fit().
                temp_Y = self.Y_train
                self.model.fit(self.X_train, temp_Y.ravel())
                return
            else:
                # BUGFIX: the original message read 'param_grid not None',
                # stating the opposite of the actual requirement.
                print('In train() parameter `param_grid` must not be None' )
                return

    def predict(self):
        '''Predict on the test set and invert the [0, 1] scaling.'''
        y_test_pred_temp = self.model.predict(self.X_test)
        y_test_pred_temp = y_test_pred_temp.reshape(-1, 1)  # transform from 1D to 2D

        # Inverse-transform back to the original SPEI scale.
        # ROBUSTNESS: reshape the labels as well, so a 1-D label vector is
        # handled the same way as the prediction (no-op for (n, 1) arrays).
        self.y_test_original = self.scaler.inverse_transform(self.Y_test.reshape(-1, 1))
        self.y_test_pred = self.scaler.inverse_transform(y_test_pred_temp)

        return

    def print(self):
        '''
        Compute RMSE / MAE / MAPE / R2 on the test predictions, print one
        CSV row and return it as a string:
        spei_n,train_rate,dense_number,rmse,mae,mape,r2
        :return: the CSV row string
        '''

        predict_rmse = numpy.sqrt(mean_squared_error(self.y_test_original, self.y_test_pred))
        predict_mae = mean_absolute_error(self.y_test_original, self.y_test_pred)
        predict_mape = mean_absolute_percentage_error(self.y_test_original, self.y_test_pred)
        predict_r2 = r2_score(self.y_test_original, self.y_test_pred)
        # Build the row once; print it and return the same string.
        result = "%s,%s,%s,%s,%s,%s,%s" %(self.spei_n, self.train_rate, self.dense_num,predict_rmse, predict_mae, predict_mape, predict_r2)
        print(result)
        return result



WORK_PATH = os.getcwd()  # current working directory of the project


# 1) Grid-search the best hyper-parameters on SPEI-12, 90% train, 12 dense units.

# First experiment:
# param_grid = {
#     "batch_size": [1, 100, 500],
#     "epochs": [500, 1000, 2000],
#     'optimizer': ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
# }
# Result: Best: -0.176473 using {'batch_size': 1, 'epochs': 2000, 'optimizer': 'Adadelta'}
# -> epochs could still be increased.

# Second experiment: based on the first result, only tune epochs.
param_grid = {
    "batch_size": [1],
    "epochs": [3000, 5000, 6000],
    'optimizer': ['Adadelta']
}
# Result: Best: -0.176473 using {'batch_size': 1, 'epochs': 3000, 'optimizer': 'Adadelta'}


# FIX: build the data path from components so it works on any OS --
# the original hard-coded Windows back-slash separators in a raw string.
DATA_PATH = os.path.join(WORK_PATH, 'indices_caculate', 'result', 'ROW_SPEI-12', 'SPEI-12_52533.txt')
ann_model = ANNmodel(file_path=DATA_PATH, spei_n=12, train_rate=0.9, dense_num=12 )
ann_model.train(search=True, param_grid=param_grid)


# After training, report the best estimator / params / score / index found.
print('训练后打印最好的所以 得分 参数 模型: ')
print(ann_model.model.best_estimator_)
print(ann_model.model.best_params_)
print(ann_model.model.best_score_)
print(ann_model.model.best_index_)

# Full grid-search results.
print('网格搜素结果 最优解: ')
print(ann_model.model.cv_results_)
print('')


print("Best: %f using %s" % (ann_model.model.best_score_,ann_model.model.best_params_))
# cv_results_ is a dict of column-header keys to column values (importable
# into a DataFrame); the 'params' key stores the list of candidate parameter
# settings. best_params_ is the winning combination, best_score_ the best
# score observed during the search.
means = ann_model.model.cv_results_['mean_test_score']
params = ann_model.model.cv_results_['params']
for mean,param in zip(means,params):
    print("%f  with:   %r" % (mean,param))


# 2) Compare the best model across SPEI-1..12 and train ratios 0.7 / 0.8 / 0.9.
# Step-1 result: Best: -0.176473 using {'batch_size': 1, 'epochs': 3000, 'optimizer': 'Adadelta'}

SPEI_n_list = [1,3,6,9,12]
train_rate_list = [0.7, 0.8, 0.9]
dense_num_list = [10, 15, 20]


# The fixed hyper-parameters below come from the step-1 grid-search result.
result_printer = ''
head = 'spei_n,train_rate,dense_number,rmse,mae,mape,r2\n'
result_printer+=head
print(head)
for spei in SPEI_n_list:
    # FIX: build the path from components. The original concatenated
    # '\SPEI-' inside a non-raw string, relying on the invalid escape '\S'
    # surviving (a DeprecationWarning, and Windows-only separators).
    file_path = os.path.join(WORK_PATH, 'indices_caculate', 'result',
                             'ROW_SPEI-' + str(spei), 'SPEI-' + str(spei) + '_52533.txt')
    for rate in train_rate_list:
        for dense_num in dense_num_list:
            ann_model = ANNmodel(file_path=file_path, spei_n=spei, train_rate=rate, dense_num=dense_num)
            ann_model.train(search=False, optimizer='Adadelta', epochs=3000, batch_size=1 )
            ann_model.predict()
            result_str = ann_model.print()
            result_printer += (result_str+'\n')
# Print all collected result rows in one go:
print(result_printer)

