"""
多变量lstm预测，还没有修改，不可用
"""
from math import sqrt
from numpy import concatenate
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from pandas import Series
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from pandas import datetime
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from keras.layers import Bidirectional
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score


def parse(x):
    """Parse a 'YYYY MM' date string (e.g. '1990 01') into a datetime.

    Uses the standard-library datetime: the `from pandas import datetime`
    alias used elsewhere in this file was deprecated and removed in
    pandas >= 2.0, so import the real class locally here.
    """
    from datetime import datetime as _datetime
    return _datetime.strptime(x, '%Y %m')


def create_dataset(dataset, scale):
    """Split `dataset` into two consecutive partitions.

    `scale` is the fraction of samples assigned to the first (train)
    partition; the remainder forms the second (test) partition.
    """
    split_at = int(len(dataset) * scale)
    return dataset[:split_at], dataset[split_at:]


# convert series to supervised learning
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Each output row pairs the lagged observations t-n_in .. t-1 with the
    forecast observations t .. t+n_out-1.  Columns are labelled
    'var%d(t-%d)', 'var%d(t)' and 'var%d(t+%d)'.  Rows containing NaN
    (introduced by the shifting) are dropped when `dropnan` is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, headers = [], []
    # lag columns: t-n_in ... t-1
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        headers.extend('var%d(t-%d)' % (v + 1, lag) for v in range(n_vars))
    # lead columns: t ... t+(n_out-1)
    for lead in range(n_out):
        shifted.append(frame.shift(-lead))
        if lead:
            headers.extend('var%d(t+%d)' % (v + 1, lead) for v in range(n_vars))
        else:
            headers.extend('var%d(t)' % (v + 1) for v in range(n_vars))
    # assemble and label the supervised table
    table = concat(shifted, axis=1)
    table.columns = headers
    if dropnan:
        table.dropna(inplace=True)
    return table


def scale(train, test):
    """Fit a [-1, 1] MinMaxScaler on `train` and apply it to both splits.

    Returns (scaler, scaled_train, scaled_test); the fitted scaler is
    returned so predictions can later be inverse-transformed.
    """
    normaliser = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    # the reshape is a no-op for 2-D arrays but kept for parity with the
    # original code (it would fail fast on unexpectedly-shaped input)
    train_2d = train.reshape(train.shape[0], train.shape[1])
    test_2d = test.reshape(test.shape[0], test.shape[1])
    return normaliser, normaliser.transform(train_2d), normaliser.transform(test_2d)


# inverse scaling for a forecasted value
def invert_scale(scaler, yhat):
    """Map scaled values back to original units via the fitted scaler."""
    restored = scaler.inverse_transform(yhat)
    return restored


# transform series into train and test sets for supervised learning
# transform series into train and test sets for supervised learning
def prepare_data(series, test_percent):
    """Turn a raw feature series into scaled supervised train/test arrays.

    Returns (scaler, train, test); the scaler can later undo the [-1, 1]
    scaling on predictions.
    """
    # ensure float input for scaling / the LSTM
    values = series.values.astype('float32')

    # frame as supervised learning, then drop the forecast columns that
    # are not predicted (NOTE: the indices [5, 6, 7] are hard-coded to
    # the current feature set and must be updated whenever it changes)
    supervised = series_to_supervised(values, 1, 1)
    supervised.drop(supervised.columns[[5, 6, 7]], axis=1, inplace=True)

    # split into train and test sets, then scale both to [-1, 1]
    train, test = create_dataset(supervised.values, test_percent)
    return scale(train, test)

# fit an LSTM network to training data
def fit_lstm(X, y, test_X, test_y, batch_size, nb_epoch, neurons):
    """Build, compile and train a single-layer (vanilla) LSTM regressor.

    X / test_X are 3-D arrays [samples, timesteps, features]; y / test_y
    are the corresponding targets.  Training stops early once the
    training loss has not improved for 10 epochs.  Returns the fitted
    Keras model.
    """
    model = Sequential()
    # single (vanilla) LSTM layer; stateful=False resets state each batch
    model.add(LSTM(neurons, activation='relu', input_shape=(X.shape[1], X.shape[2]), stateful=False))
    model.add(Dense(1))  # one-step-ahead regression output
    model.summary()
    model.compile(loss='mae', optimizer='adam')
    early_stop = EarlyStopping(monitor='loss', patience=10, verbose=1)
    # validation_data must be a tuple (passing a list is rejected by
    # recent Keras); shuffle=False keeps the temporal ordering intact
    model.fit(X, y, epochs=nb_epoch, batch_size=batch_size, verbose=1,
              validation_data=(test_X, test_y), callbacks=[early_stop],
              shuffle=False)

    return model


def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    Note: undefined (inf/nan) when `y_true` contains zeros.
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100


# print aggregate error metrics for a set of forecasts
def evaluate_forecasts(test, forecasts):
    """Print RMSE / MAE / MAPE / R2 for `forecasts` vs `test`; return R2."""
    metrics = {
        'RMSE': sqrt(mean_squared_error(test, forecasts)),
        'MAE': mean_absolute_error(test, forecasts),
        'MAPE': mean_absolute_percentage_error(test, forecasts),
        'R2': r2_score(test, forecasts),
    }
    for label, value in metrics.items():
        print(' %s: %f' % (label, value))

    return metrics['R2']

def experiment(series, epochs, batch_size, n_neurons, test_percent):
    """Train a vanilla LSTM on `series`, report metrics and plot the test fit.

    series: DataFrame of features; the last supervised column is the target.
    test_percent: split fraction forwarded to prepare_data.
        NOTE(review): despite its name, create_dataset uses this value as
        the TRAIN fraction — confirm the intended split.
    """
    scaler, train, test = prepare_data(series, test_percent)

    # split into input columns and the target (last column)
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]

    # reshape input to be 3D [samples, timesteps, features]
    train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))

    model = fit_lstm(train_X, train_y, test_X, test_y, batch_size, epochs, n_neurons)

    # make a prediction
    train_pred = model.predict(train_X)
    test_pred = model.predict(test_X)
    # flatten back to 2D so predictions can be stacked next to the inputs
    train_X = train_X.reshape((train_X.shape[0], train_X.shape[2]))
    test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))

    # invert scaling for forecast: the scaler was fitted on full rows
    # (inputs + target), so append the prediction as the last column,
    # inverse-transform the whole row, then take that column back out
    train_pred_invert = concatenate((train_X,train_pred), axis=1)
    train_pred_invert = invert_scale(scaler, train_pred_invert)
    train_pred_invert = train_pred_invert[:, -1]

    test_pred_invert = concatenate((test_X, test_pred), axis=1)
    test_pred_invert = invert_scale(scaler, test_pred_invert)
    test_pred_invert = test_pred_invert[:, -1]

    # invert scaling for the actual targets, same last-column trick
    train_true = train_y.reshape((len(train_y), 1))
    train_true_invert = concatenate((train_X, train_true), axis=1)
    train_true_invert = invert_scale(scaler, train_true_invert)
    train_true_invert = train_true_invert[:, -1]

    test_true = test_y.reshape((len(test_y), 1))
    test_true_invert = concatenate((test_X, test_true), axis=1)
    test_true_invert = invert_scale(scaler, test_true_invert)
    test_true_invert = test_true_invert[:, -1]

    print("训练模型的评估：")  # "Evaluation on the training set:"
    evaluate_forecasts(train_true_invert, train_pred_invert)
    print("模型测试的评估：")  # "Evaluation on the test set:"
    evaluate_forecasts(test_true_invert, test_pred_invert)

    # plot true (black) vs predicted (red) SPEI over the test period
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(range(len(test_true_invert)), test_true_invert, c="black")
    ax.plot(range(len(test_pred_invert)), test_pred_invert, c="red")
    plt.title("Vanilla LSTM's Prediction")
    ax.set_xlabel("Month")
    ax.set_ylabel("SPEI")
    ax.legend(['true', 'pred'])
    plt.show()


if __name__ == '__main__':

    # Load the dataset, combining the 'year' and 'month' columns into a
    # single datetime index via parse().  The path is a raw string: the
    # original mixed '\\result' with bare '\l', '\m', '\S', which only
    # worked because those happen not to be escape sequences.
    dataset = read_csv(
        r'E:\lyf_ML_Drought\coding\ML_Drought_Prediction\indices_caculate\result\multi_spei_csv\SPEI-12\Multi_SPEI-12_52533.txt',
        header=0, parse_dates=[['year', 'month']], index_col=0, date_parser=parse)
    dataset.index.name = 'time'
    # Drop features not used by this experiment.
    dataset.drop(columns=['average_air_pressure', 'average_water_air_pressure', 'low_temp', 'high_temp'], inplace=True)

    # Hyperparameters.
    # NOTE(review): test_percent is consumed by create_dataset as the
    # TRAIN fraction (0.9 -> 90% train / 10% test) — confirm intent.
    test_percent = 0.9
    n_batch = 50
    n_neurons = 40
    epochs = 3000
    experiment(dataset, epochs, n_batch, n_neurons, test_percent)