# -*- coding: utf-8 -*-
# @Time : 2021/5/17 19:12
# @Author : ShaneGao
# @File : main.py
# @Software: PyCharm
# import tensorflow as tf
# coding=utf-8


import pandas
from pandas import Series
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
import os
# project helper: formatted loading of the time-series (runoff) data
from endcourse.load import load_runoff

# project helper: frame the (differenced) series as supervised-learning samples
from endcourse.tosupervised import timeseries_to_supervised


def difference(dataset, interval=1):
    '''
    Difference a 1-D sequence to make it stationary.

    :param dataset: original 1-D sequence of values
    :param interval: differencing lag, defaults to 1
    :return: pandas Series of dataset[i] - dataset[i - interval]
    '''
    deltas = [dataset[idx] - dataset[idx - interval]
              for idx in range(interval, len(dataset))]
    return Series(deltas)


# Invert a single differencing step
def inverse_difference(history, yhat, interval=1):
    '''
    Map a differenced prediction back onto the original scale.

    :param history: original (undifferenced) sequence
    :param yhat: predicted difference
    :param interval: differencing lag used when the series was differenced
    :return: yhat added to the reference value history[-interval]
    '''
    reference = history[-interval]
    return reference + yhat


# Min-max scaling of train/test data
def scale(train, test):
    '''
    Scale the supervised data into the [0, 1] range.

    The scaler is fitted on the training set only and then applied to both
    sets, so no information from the test set leaks into the transform.

    :param train: 2-D training array (lag columns + target column)
    :param test: 2-D test array with the same column layout
    :return: (fitted scaler, scaled train array, scaled test array)
    '''
    # fit the scaler on the training data only
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaler = scaler.fit(train)
    # NOTE(review): the original reshape(x.shape[0], x.shape[1]) calls before
    # each transform were identity no-ops on 2-D arrays (the original author's
    # own comment questioned their purpose) and have been removed.
    train_scaled = scaler.transform(train)
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled


# Invert the min-max scaling for a single prediction
def invert_scale(scaler, X, value):
    '''
    Undo the [0, 1] scaling of one predicted value.

    :param scaler: the fitted scaler returned by scale()
    :param X: the (scaled) input lag vector belonging to this prediction
    :param value: the scaled prediction to invert
    :return: the prediction mapped back to the original scale
    '''
    # rebuild a full row (inputs + prediction) so the scaler sees the same
    # number of columns it was fitted on
    row = list(X)
    row.append(value)
    row = numpy.array(row).reshape(1, -1)
    restored = scaler.inverse_transform(row)
    # the prediction occupies the last column of the restored row
    return restored[0, -1]


# Train a stateful LSTM on the supervised training matrix
def fit_lstm(train, batch_size, nb_epoch, neurons):
    '''
    Build and train a single-layer stateful LSTM regressor.

    :param train: 2-D array, lag columns followed by the target column
    :param batch_size: samples per gradient update (fixed — the layer is stateful)
    :param nb_epoch: number of training epochs
    :param neurons: number of LSTM units
    :return: the fitted Keras model
    '''
    features, target = train[:, 0:-1], train[:, -1]
    # the LSTM layer expects input shaped (samples, timesteps, features)
    features = features.reshape(features.shape[0], 1, features.shape[1])

    network = Sequential()
    network.add(LSTM(neurons,
                     batch_input_shape=(batch_size, features.shape[1], features.shape[2]),
                     stateful=True))
    network.add(Dense(1))  # single output node (one-step regression)
    # mean-squared-error loss with the Adam optimizer
    network.compile(loss='mean_squared_error', optimizer='adam')

    # manual epoch loop so the internal LSTM state can be reset between epochs
    epoch = 0
    while epoch < nb_epoch:
        network.fit(features, target, epochs=1, batch_size=batch_size,
                    verbose=0, shuffle=False)
        network.reset_states()
        print("当前计算次数："+str(epoch))
        epoch += 1
    return network


# One-step-ahead forecast
def forcast_lstm(model, batch_size, X):
    '''
    Predict the next value from a single lag vector.

    :param model: fitted Keras model
    :param batch_size: batch size to use for prediction
    :param X: 1-D array of lag inputs
    :return: the scalar prediction
    '''
    # shape the lag vector as a single (sample, timestep, feature) batch
    batch = X.reshape(1, 1, len(X))
    prediction = model.predict(batch, batch_size=batch_size)
    # unwrap the (1, 1) output array into a plain scalar
    return prediction[0, 0]

if __name__=='__main__':
    # ----- configuration -----
    n_steps = 3 # lag window: number of past timesteps fed to the model
    epoches=500  # number of training epochs
    cells=20  # number of LSTM units
    train_perc=0.85  # fraction of the supervised samples used for training
    root_path=r'./output/step_'+str(n_steps)+'/'
    if not os.path.exists(root_path):
        os.makedirs(root_path)
    logfile='log.txt'
    # load the raw runoff data (project loader; returns a pandas Series)
    series=load_runoff() # Series format
    # underlying numpy array of the series
    raw_values = series.values
    # difference the series (interval=1) to make it stationary
    diff_values = difference(raw_values, 1) # converted to differenced data
    # frame the differenced series as a supervised-learning problem
    # (n_steps lag columns as inputs, current value as the target)

    supervised = timeseries_to_supervised(diff_values, n_steps)
    supervised_values = supervised.values

    # train/test split: the first train_perc of the rows train, the rest test
    train_len=int(len(supervised_values)*train_perc)
    print('训练集长度:',train_len,'/',len(supervised_values))
    train, test = supervised_values[0:train_len], supervised_values[train_len:]

    # scale both sets into [0, 1] with a scaler fitted on the training set only
    scaler, train_scaled, test_scaled = scale(train, test)


    # fit the model (batch_size=1 because the LSTM layer is stateful)
    lstm_model = fit_lstm(train_scaled, 1, epoches, cells)  # train data, batch_size, epochs, neurons
    # prime the LSTM internal state by running one pass over the training inputs
    train_reshaped = train_scaled[:,:-1].reshape(len(train_scaled), 1, n_steps) # reshape inputs to (samples, 1, n_steps)
    lstm_model.predict(train_reshaped, batch_size=1) # warm-up prediction over the training matrix
    # Walk-forward validation over the test set. NOTE (from the original
    # author): with too few epochs the model degenerates to persistence
    # (predicting yesterday's value); only with enough training does it
    # learn real structure.
    predictions = list()
    with open(root_path+logfile, mode='w') as file_handle:
        for i in range(len(test_scaled)): # one-step-ahead prediction per test row
            # 1-step forecast on the scaled, differenced inputs
            X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
            yhat = forcast_lstm(lstm_model, 1, X)
            # invert the min-max scaling
            yhat = invert_scale(scaler, X, yhat)
            # invert the differencing; the interval walks backwards from the
            # end of raw_values toward the current test position
            yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
            predictions.append(yhat)
            expected = raw_values[len(train) + i]
            text='Time:%s, Predicted=%f, Expected=%f\n' % (i + 1, yhat, expected)
            # print(text)
            file_handle.write(text)

    pandas.DataFrame(predictions).to_csv(root_path+'predictions.csv')
    # ----- performance report -----
    res_dic={}

    # NOTE(review): y_true is sliced with [n_steps+train_len:-1], which drops
    # the final raw value, while `expected` above uses raw_values[len(train)+i]
    # — confirm both offsets align predictions with the same observations.
    y_true=numpy.array(raw_values[n_steps+train_len:-1])
    y_hat=numpy.array(predictions)
    # RMSE: root mean squared error
    rmse = sqrt(mean_squared_error(y_true, y_hat))
    res_dic['RMSE']=rmse
    # MRE: mean relative error in percent (assumes y_true contains no zeros)
    mre=1/len(y_hat)*numpy.sum(abs(y_hat-y_true)/y_true)*100
    res_dic['MRE']=mre
    # R: Pearson correlation coefficient between observed and predicted
    r=numpy.sum((y_true-y_true.mean())*(y_hat-y_hat.mean()))/(sqrt(numpy.sum((y_true-y_true.mean())**2))*sqrt(numpy.sum((y_hat-y_hat.mean())**2)))
    res_dic['R']=r
    # NSE: Nash-Sutcliffe efficiency coefficient
    nse=1-numpy.sum((y_true-y_hat)**2)/numpy.sum((y_true-y_true.mean())**2)
    res_dic['NSE']=nse
    print('Test MRE:%.3f' %mre )
    print('Test R:%.3f' % r)
    print('Test NSE:%.3f' % nse)

    pandas.DataFrame(res_dic,index = [0]).to_csv(root_path+'statistic.csv')
    # ----- plot observed vs. predicted -----
    pyplot.plot(raw_values[n_steps+train_len:-1])
    pyplot.plot(predictions)
    pyplot.savefig(root_path+'results.jpg')
    pyplot.show()
