"""
单变量的LSTM模型应用，主要是用来寻找模型的参数
"""
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from math import sqrt
import numpy
import numpy as np
from keras.callbacks import EarlyStopping
from keras.constraints import maxnorm
from keras.regularizers import l1_l2,l1,l2
from keras.layers import Bidirectional
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score


# date-time parsing function for loading the dataset
def parser(x):
    """Parse a 'Y-m' fragment from the dataset into a datetime in the 1900s.

    Prepends '190' to *x*, so e.g. '3-12' becomes datetime(1903, 12, 1).

    Fix: `from pandas import datetime` was deprecated and removed in
    modern pandas; use the standard-library datetime instead.  Imported
    locally so the module-level (legacy) import is not shadowed.
    """
    from datetime import datetime as _datetime
    return _datetime.strptime('190' + x, '%Y-%m')


def create_dataset(dataset, scale):
    """Split *dataset* into train and test partitions.

    *scale* is the fraction of samples assigned to the training set;
    everything after that cut point becomes the test set.
    """
    split_at = int(len(dataset) * scale)
    return dataset[:split_at], dataset[split_at:]


# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    """Frame a sequence as a supervised learning problem.

    Builds columns [t-1, t-2, ..., t-lag, t] by shifting the series,
    then removes the leading rows made incomplete by the shifts.

    Fix: the original `df.drop(0)` removed only the row labelled 0,
    which left NaN rows in the frame for any lag > 1.  `dropna()`
    removes every incomplete row and is identical for lag == 1.
    """
    df = DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    framed = concat(columns, axis=1)
    return framed.dropna()


# difference dataset
def difference(data, interval):
    """Return the differenced series: data[i] - data[i - interval]."""
    diffs = []
    for idx in range(interval, len(data)):
        diffs.append(data[idx] - data[idx - interval])
    return diffs


# invert difference
def invert_difference(orig_data, diff_data, interval):
    """Undo `difference`: add each diff back onto its original baseline."""
    restored = []
    for idx in range(interval, len(orig_data)):
        restored.append(diff_data[idx - interval] + orig_data[idx - interval])
    return restored


# scale train and test data to [-1, 1]
def scale(train, test):
    """Scale train and test data to [-1, 1].

    The MinMaxScaler is fitted ONLY on *train* so no information from
    the test split leaks into the scaling.  Returns
    (scaler, train_scaled, test_scaled).
    """
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    # reshape to explicit 2-D (a no-op when the arrays are already 2-D)
    train_2d = train.reshape(train.shape[0], train.shape[1])
    test_2d = test.reshape(test.shape[0], test.shape[1])
    return scaler, scaler.transform(train_2d), scaler.transform(test_2d)


# transform series into train and test sets for supervised learning
def prepare_data(series, test_percent, n_lag):
    """Transform *series* into scaled, supervised train and test arrays.

    Returns (scaler, train, test) where train/test are 2-D arrays of
    [lagged input(s), target] rows produced by `timeseries_to_supervised`.
    """
    raw_values = series.values

    # NOTE(review): the stationarity (differencing) step is disabled
    # here; the raw values are used directly.

    # split first, then scale to [-1, 1] with a scaler fitted on train only
    raw_train, raw_test = create_dataset(raw_values, test_percent)
    scaler, train_scaled, test_scaled = scale(raw_train, raw_test)

    # frame each split as a supervised X, y problem
    supervised_train = timeseries_to_supervised(train_scaled, n_lag).values
    supervised_test = timeseries_to_supervised(test_scaled, n_lag).values

    return scaler, supervised_train, supervised_test


# inverse scaling for a forecasted value
def invert_scale(scaler, yhat):
    """Map scaled values back into the original data range."""
    restored = scaler.inverse_transform(yhat)
    return restored


# fit an LSTM network to training data
def fit_lstm(train, n_lag, batch_size, nb_epoch, neurons):
    """Build and train a two-layer (stacked) LSTM on supervised rows.

    *train* rows are [lagged inputs..., target]: the first *n_lag*
    columns are the features, the remaining column(s) the target.
    Returns the fitted keras model.
    """
    features, targets = train[:, 0:n_lag], train[:, n_lag:]
    # reshape to the (samples, timesteps, features) layout keras expects
    features = features.reshape(features.shape[0], 1, features.shape[1])

    model = Sequential()
    # stacked LSTM: the first layer emits the full sequence for the second
    model.add(LSTM(neurons, activation='relu', return_sequences=True,
                   input_shape=(features.shape[1], features.shape[2])))
    model.add(LSTM(neurons, activation='relu'))
    model.add(Dense(1))
    model.summary()
    model.compile(loss='mean_squared_error', optimizer='adam')
    # stop once the training loss has stalled for 5 epochs
    early_stop = EarlyStopping(monitor='loss', patience=5, verbose=1)
    model.fit(features, targets, epochs=nb_epoch, batch_size=batch_size,
              verbose=1, shuffle=False, callbacks=[early_stop])
    return model


def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    NOTE(review): divides by y_true, so any zero in y_true produces
    inf/nan — unchanged from the original behavior; confirm the SPEI
    series cannot contain exact zeros before relying on this metric.
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100


# evaluate the RMSE for each forecast time step
def evaluate_forecasts(data, forecasts):
    """Print and return (rmse, mae, mape, r2) for predictions vs truth."""
    rmse = sqrt(mean_squared_error(data, forecasts))
    mae = mean_absolute_error(data, forecasts)
    mape = mean_absolute_percentage_error(data, forecasts)
    r2 = r2_score(data, forecasts)
    # report each metric on its own line, matching the original format
    for label, value in (('RMSE', rmse), ('MAE', mae),
                         ('MAPE', mape), ('R2', r2)):
        print(' %s: %f' % (label, value))
    return rmse, mae, mape, r2


# run a repeated experiment
def experiment(series, epochs, batch_size, n_neurons, test_percent, n_lag):
    """Train a stacked LSTM on *series*, report metrics, and plot the test fit."""
    scaler, train, test = prepare_data(series, test_percent, n_lag)

    model = fit_lstm(train, n_lag, batch_size, epochs, n_neurons)

    # NOTE(review): these slices/reshapes hard-code n_lag == 1 — the
    # (len, 1, 1) shape and the 1:2 target column both assume one lag.
    x_train = train[:, 0:-1].reshape(len(train), 1, 1)
    x_test = test[:, 0:-1].reshape(len(test), 1, 1)
    y_train = train[:, 1:2]
    y_test = test[:, 1:2]

    train_pred = model.predict(x_train)
    test_pred = model.predict(x_test)

    # map everything back to the original data range before reporting
    y_train_true_lstm = invert_scale(scaler, y_train)
    y_test_true_lstm = invert_scale(scaler, y_test)
    y_train_pred_lstm = invert_scale(scaler, train_pred)
    y_test_pred_lstm = invert_scale(scaler, test_pred)

    print("训练模型的评估：")
    evaluate_forecasts(y_train_true_lstm, y_train_pred_lstm)
    print("模型测试的评估：")
    evaluate_forecasts(y_test_true_lstm, y_test_pred_lstm)

    # visualise the out-of-sample fit
    plt.figure()
    plt.plot(y_test_true_lstm, label='True', c="black")
    plt.plot(y_test_pred_lstm, label='LSTM', c="red")
    plt.title("Stacked LSTM's Prediction")
    plt.xlabel('Month')
    plt.ylabel('SPEI')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # Load the SPEI series; the TIME column becomes the index.
    # Fix: the original path literal contained invalid escape sequences
    # ("\l", "\c", "\R", ...) that only worked by accident and are
    # deprecated (a future SyntaxError).  The raw string below produces
    # the exact same path value ("\\result" collapsed to "\result").
    data_path = r'E:\lyf_ML_Drought\coding\ML_Drought_Prediction\indices_caculate\result\ROW_SPEI\ROW_SPEI-12\SPEI-12_52533.txt'
    dataframe = read_csv(data_path, header=None, names=('TIME','SPEI-1'))
    dataframe = dataframe.set_index(['TIME'], drop=True)  # use the date as the index

    # experiment hyper-parameters
    n_lag = 1            # lag window (experiment() currently assumes 1)
    test_percent = 0.9   # NOTE(review): this is actually the TRAIN fraction passed to create_dataset
    n_batch = 50
    n_neurons = 50
    epochs = 3000
    experiment(dataframe, epochs, n_batch, n_neurons, test_percent, n_lag)
