from math import sqrt
import os
from numpy import concatenate
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from pandas import Series
from numpy import array
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from pandas import datetime
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from keras.layers import Bidirectional
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from pandas import read_csv
from pandas import read_csv
from matplotlib import pyplot


def parse(x):
    """Parse a 'YYYY MM' string (e.g. '2020 7') into a datetime.

    Used as the ``date_parser`` for ``read_csv`` so the year/month
    columns become a monthly DatetimeIndex.
    """
    # Local import: modern pandas no longer re-exports ``datetime``
    # (``from pandas import datetime`` at the top of this file is the
    # removed legacy alias), so bind the stdlib class here instead.
    from datetime import datetime
    return datetime.strptime(x, '%Y %m')


def create_dataset(dataset, scale):
    """Split *dataset* into a leading train part and a trailing test part.

    ``scale`` is the fraction (0..1) of samples assigned to training;
    the remainder becomes the test set. Works on anything sliceable.
    """
    cutoff = int(len(dataset) * scale)
    return dataset[:cutoff], dataset[cutoff:]

# Reframe a time series as a supervised-learning table.
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (possibly multivariate) series as supervised learning data.

    Each row pairs ``n_in`` lagged observations (t-n_in .. t-1) with
    ``n_out`` forecast observations (t .. t+n_out-1). Columns are named
    ``var<j>(t-i)`` / ``var<j>(t)`` / ``var<j>(t+i)``.

    ``dropnan=True`` removes the edge rows that the shifting fills
    with NaN.
    """
    num_features = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, headers = [], []
    # Lagged input columns: t-n_in, ..., t-1.
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        headers.extend('var%d(t-%d)' % (col + 1, lag) for col in range(num_features))
    # Forecast columns: t, t+1, ..., t+n_out-1.
    for step in range(n_out):
        shifted.append(frame.shift(-step))
        if step == 0:
            headers.extend('var%d(t)' % (col + 1) for col in range(num_features))
        else:
            headers.extend('var%d(t+%d)' % (col + 1, step) for col in range(num_features))
    # Assemble the full table and label its columns.
    table = concat(shifted, axis=1)
    table.columns = headers
    # Drop the NaN rows introduced at the series edges by the shifts.
    if dropnan:
        table.dropna(inplace=True)
    return table

# Cut a 1-D sequence into input windows (X) and next-step targets (y).
def split_sequence(sequence, n_steps):
    """Return (X, y) arrays where each X row is ``n_steps`` consecutive
    values and the matching y entry is the value that follows them.
    """
    windows, targets = [], []
    # Last valid window start leaves one element after it for the target.
    for start in range(len(sequence) - n_steps):
        stop = start + n_steps
        windows.append(sequence[start:stop])
        targets.append(sequence[stop])
    return array(windows), array(targets)

# Normalisation helper.
def scale(train, test):
    """Fit a MinMaxScaler on *train* and transform both splits to [-1, 1].

    Returns (scaler, train_scaled, test_scaled); the scaler is kept so
    predictions can be inverse-transformed later.
    """
    normaliser = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    # Ensure both arrays are 2-D (samples, features) before transforming.
    train_2d = train.reshape(train.shape[0], train.shape[1])
    test_2d = test.reshape(test.shape[0], test.shape[1])
    return normaliser, normaliser.transform(train_2d), normaliser.transform(test_2d)

# Undo the normalisation.
def invert_scale(scaler, yhat):
    """Map scaled values back to the original data range via the scaler."""
    restored = scaler.inverse_transform(yhat)
    return restored

# Data preparation pipeline.
def prepare_data_one(series, test_percent):
    """Turn a raw pandas series/frame into scaled supervised splits.

    Returns (scaler, train, test) where both splits are scaled to
    [-1, 1] and framed as 1-step-in / 1-step-out supervised rows.
    """
    # Work in float32 for the network.
    values = series.values.astype('float32')

    # Frame as supervised data, then drop the forecast-time columns we
    # do not predict. NOTE: these positional indices must be updated by
    # hand whenever the input columns change (as the original author warned).
    supervised = series_to_supervised(values, 1, 1)
    supervised.drop(supervised.columns[[5, 6, 7]], axis=1, inplace=True)

    # Chronological train/test split, then scale both to [-1, 1].
    train_raw, test_raw = create_dataset(supervised.values, test_percent)
    scaler, train_scaled, test_scaled = scale(train_raw, test_raw)

    return scaler, train_scaled, test_scaled

# fit an LSTM network to training data
def fit_lstm(X, y,test_X, test_y, batch_size, nb_epoch, neurons):
    """Build and train a bidirectional LSTM regressor on (X, y).

    X is expected to be 3-D (samples, timesteps, features) and y the
    matching 1-D targets. test_X/test_y are only consumed by the
    commented-out validation variant of ``fit`` below; the active call
    trains without validation data. Returns the trained Keras model.
    """
    model = Sequential()
    # Bidirectional LSTM over the (timesteps, features) input, single dense output.
    model.add(Bidirectional(LSTM(neurons, activation='relu'), input_shape=(X.shape[1], X.shape[2])))
    model.add(Dense(1))
    model.summary()
    model.compile(loss='mae', optimizer='adam')
    # Early stopping watches the *training* loss (no validation split here).
    early_stop = EarlyStopping(monitor='loss', patience=10, verbose=1)
    # Variant: no validation data, no early stopping
    # model.fit(X, y, epochs=nb_epoch, batch_size=batch_size, verbose=1, shuffle=False)
    # Active variant: early stopping, no validation data
    model.fit(X, y, epochs=nb_epoch, batch_size=batch_size, verbose=1, shuffle=False, callbacks=[early_stop])
    # Variant: both validation data and early stopping
    # model.fit(X, y, epochs=nb_epoch, batch_size=batch_size, verbose=1, shuffle=False, validation_data=[test_X, test_y], callbacks=[early_stop])

    return model

def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    NOTE(review): divides by ``y_true`` element-wise, so any zero in the
    true values produces inf/nan in the result.
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100


# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, out=None):
    """Print and log RMSE, MAE, MAPE and R2 between truth and forecasts.

    Parameters
    ----------
    test, forecasts : array-likes of equal length (truth vs predictions).
    out : writable file-like object for the log lines. Defaults to the
        module-level ``fw`` handle opened in ``__main__`` — this keeps the
        original behaviour (which read the undeclared global ``fw``
        directly) while letting callers supply an explicit sink.

    Returns
    -------
    float
        The R^2 score.
    """
    if out is None:
        out = fw  # fall back to the log file opened in __main__
    rmse = sqrt(mean_squared_error(test, forecasts))
    mae = mean_absolute_error(test, forecasts)
    mape = mean_absolute_percentage_error(test, forecasts)
    r2 = r2_score(test, forecasts)
    print(' RMSE: %f' % rmse)
    print(' MAE: %f' % mae)
    print(' MAPE: %f' % mape)
    print(' R2: %f' % r2)
    out.write(' RMSE: %f' % rmse + '\n')
    out.write(' MAE: %f' % mae + '\n')
    out.write(' MAPE: %f' % mape + '\n')
    out.write(' R2: %f' % r2 + '\n')

    return r2


def experiment_one(series, epochs, batch_size, n_neurons, test_percent):
    """Train a BiLSTM on supervised, scaled data and plot test predictions.

    Prepares the data via ``prepare_data_one``, fits the model, inverts
    the scaling on both predictions and ground truth, prints evaluation
    metrics for train and test, and shows a truth-vs-prediction plot.
    """
    scaler, train, test = prepare_data_one(series, test_percent)

    # split into input and outputs (last column is the target)
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]

    # reshape input to be 3D [samples, timesteps, features]
    train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))

    model = fit_lstm(train_X, train_y,test_X,test_y, batch_size, epochs, n_neurons)

    # make a prediction, then flatten X back to 2D for the inverse transform
    train_pred = model.predict(train_X)
    test_pred = model.predict(test_X)
    train_X = train_X.reshape((train_X.shape[0], train_X.shape[2]))
    test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))

    # invert scaling for forecast: the scaler was fit on full rows, so the
    # prediction is appended as the last column before inverse-transforming
    train_pred_invert = concatenate((train_X, train_pred), axis=1)
    train_pred_invert = invert_scale(scaler, train_pred_invert)
    train_pred_invert = train_pred_invert[:, -1]

    test_pred_invert = concatenate((test_X, test_pred), axis=1)
    test_pred_invert = invert_scale(scaler, test_pred_invert)
    test_pred_invert = test_pred_invert[:, -1]

    # invert scaling for actual values, same column trick as above
    train_true = train_y.reshape((len(train_y), 1))
    train_true_invert = concatenate((train_X, train_true), axis=1)
    train_true_invert = invert_scale(scaler, train_true_invert)
    train_true_invert = train_true_invert[:, -1]

    test_true = test_y.reshape((len(test_y), 1))
    test_true_invert = concatenate((test_X, test_true), axis=1)
    test_true_invert = invert_scale(scaler, test_true_invert)
    test_true_invert = test_true_invert[:, -1]

    # metrics on the original (un-scaled) values
    print("训练模型的评估：")
    evaluate_forecasts(train_true_invert, train_pred_invert)
    print("测试模型的评估：")
    evaluate_forecasts(test_true_invert, test_pred_invert)

    # truth (black) vs prediction (red) over the test window
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(range(len(test_true_invert)), test_true_invert, c="black")
    ax.plot(range(len(test_pred_invert)), test_pred_invert, c="red")
    plt.title("Bidirectional LSTM's Prediction")
    ax.set_xlabel("Month")
    ax.set_ylabel("SPEI")
    ax.legend(['true', 'pred'])
    plt.show()

def experiment_two(series, epochs, batch_size, n_neurons, test_percent):
    """Window-based experiment variant — currently only prepares the data.

    NOTE(review): everything past the train/test split is commented out,
    so ``epochs``, ``batch_size``, ``n_neurons`` and ``test_percent`` are
    accepted but unused; calling this function has no observable effect.
    The commented block appears to be ``experiment_one``'s pipeline kept
    as a template — confirm intent before reviving it (it references a
    ``scaler`` that is never created here).
    """
    # flatten the single-column frame into a plain Python list
    series_value = series.values.tolist()
    series_list =[]
    for value in series_value:
        series_list.append(value[0])

    # data preparation (no normalisation): 90%-length sliding windows,
    # with the last 12 samples held out for testing
    steps = int(len(series_list)*0.9)
    X, y = split_sequence(series_list, steps)
    X_train, X_test = X[:-12], X[-12:]
    y_train, y_test = y[:-12], y[-12:]

    # # split into input and outputs
    # train_X, train_y = train[:, :-1], train[:, -1]
    # test_X, test_y = test[:, :-1], test[:, -1]
    #
    # # reshape input to be 3D [samples, timesteps, features]
    # train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    # test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
    #
    # model = fit_lstm(train_X, train_y,test_X,test_y, batch_size, epochs, n_neurons)
    #
    # # make a prediction
    # train_pred = model.predict(train_X)
    # test_pred = model.predict(test_X)
    # train_X = train_X.reshape((train_X.shape[0], train_X.shape[2]))
    # test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
    #
    # # invert scaling for forecast
    # train_pred_invert = concatenate((train_X, train_pred), axis=1)
    # train_pred_invert = invert_scale(scaler, train_pred_invert)
    # train_pred_invert = train_pred_invert[:, -1]
    #
    # test_pred_invert = concatenate((test_X, test_pred), axis=1)
    # test_pred_invert = invert_scale(scaler, test_pred_invert)
    # test_pred_invert = test_pred_invert[:, -1]
    #
    # # invert scaling for actual
    # train_true = train_y.reshape((len(train_y), 1))
    # train_true_invert = concatenate((train_X, train_true), axis=1)
    # train_true_invert = invert_scale(scaler, train_true_invert)
    # train_true_invert = train_true_invert[:, -1]
    #
    # test_true = test_y.reshape((len(test_y), 1))
    # test_true_invert = concatenate((test_X, test_true), axis=1)
    # test_true_invert = invert_scale(scaler, test_true_invert)
    # test_true_invert = test_true_invert[:, -1]
    #
    # print("训练模型的评估：")
    # evaluate_forecasts(train_true_invert, train_pred_invert)
    # print("测试模型的评估：")
    # evaluate_forecasts(test_true_invert, test_pred_invert)
    #
    # fig = plt.figure()
    # ax = fig.add_subplot(1, 1, 1)
    # ax.plot(range(len(test_true_invert)), test_true_invert, c="black")
    # ax.plot(range(len(test_pred_invert)), test_pred_invert, c="red")
    # plt.title("Bidirectional LSTM's Prediction")
    # ax.set_xlabel("Month")
    # ax.set_ylabel("SPEI")
    # ax.legend(['true', 'pred'])
    # plt.show()

if __name__ == '__main__':
    # Load the station data; the year and month columns are merged into a
    # single datetime index via ``parse``. Raw string avoids the invalid
    # escape sequences the original non-raw path literal produced.
    dataset = read_csv(
        r'E:\lyf_ML_Drought\coding\ML_Drought_Prediction\indices_caculate\result\multi_spei_csv\SPEI-12\Multi_SPEI-12_56080.txt',
        header=0, parse_dates=[['year', 'month']], index_col=0, date_parser=parse)
    dataset.index.name = 'time'
    # Drop the raw meteorological input columns, keeping only the target.
    dataset.drop(columns=['average_air_pressure',
                          'average_water_air_pressure',
                          'low_temp', 'high_temp',
                          'precipitation',
                          'temperature',
                          'humidity'], inplace=True)
    # Basic statistics as a sanity check.
    print(dataset.describe())

    # Exploratory plots: line plot, histogram, kernel density estimate.
    dataset.plot()
    dataset.hist()
    dataset.plot(kind='kde')
    pyplot.show()

    # Hyper-parameter grid for the search below.
    test_percent = 0.9
    n_batch = [10, 20, 50, 100]
    n_neurons = range(20, 51)
    epochs = [3000, 5000, 8000]

    root = os.path.abspath('result')  # results directory
    fp = os.path.join(root, '2020-07-18-' + '站点56080' + 'BiLSTM' + '.txt')
    fw = open(fp, 'a+')
    try:
        times = 1
        for epoch in epochs:
            for batch in n_batch:
                for neurons in n_neurons:
                    fw.write(' test' + str(times) + ' : ' + '\n')
                    fw.write(' epochs :' + str(epoch) + '\n')
                    fw.write(' batch :' + str(batch) + '\n')
                    fw.write(' neuron :' + str(neurons) + '\n')
                    # BUG FIX: the original passed the whole candidate lists
                    # (epochs, n_batch, n_neurons) instead of the scalar loop
                    # values, so every grid cell ran an identical experiment.
                    experiment_two(dataset, epoch, batch, neurons, test_percent)
                    times = times + 1
    finally:
        # Ensure the grid-search log is flushed even if a run fails.
        fw.close()



