from math import sqrt
import os
from numpy import concatenate
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from pandas import Series
from numpy import array
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from pandas import datetime
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from keras.layers import Bidirectional
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from pandas import read_csv
from pandas import read_csv
from matplotlib import pyplot


def parse(x):
    """Parse a 'YYYY MM' string (e.g. '2020 07') into a datetime.

    Imports the stdlib ``datetime`` class locally: the module-level
    ``from pandas import datetime`` (a re-export of ``datetime.datetime``)
    was deprecated and removed in pandas 1.0+, so relying on it breaks on
    modern pandas. The parsed day defaults to 1.
    """
    from datetime import datetime as _datetime
    return _datetime.strptime(x, '%Y %m')


def create_dataset(dataset, scale):
    """Split *dataset* into a leading train part and a trailing test part.

    ``scale`` is the fraction (0..1) of rows assigned to the train split;
    the remainder becomes the test split.
    """
    cut = int(len(dataset) * scale)
    return dataset[:cut], dataset[cut:]

def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning DataFrame.

    Columns are named ``var<k>(t-i)`` for the ``n_in`` lag steps and
    ``var<k>(t)`` / ``var<k>(t+i)`` for the ``n_out`` forecast steps.
    Rows containing NaN introduced by the shifting are dropped when
    ``dropnan`` is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, headers = [], []
    # Input sequence: columns for t-n_in .. t-1.
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        headers.extend('var%d(t-%d)' % (v + 1, lag) for v in range(n_vars))
    # Forecast sequence: columns for t .. t+n_out-1.
    for step in range(n_out):
        shifted.append(frame.shift(-step))
        suffix = '(t)' if step == 0 else '(t+%d)' % step
        headers.extend('var%d%s' % (v + 1, suffix) for v in range(n_vars))
    combined = concat(shifted, axis=1)
    combined.columns = headers
    if dropnan:
        combined.dropna(inplace=True)
    return combined

def split_sequence(sequence, n_steps):
    """Slice a sequence into sliding input windows and next-step targets.

    Each X sample is ``n_steps`` consecutive values and each y sample is
    the value immediately following that window. Returns numpy arrays
    ``(X, y)``; both are empty when the sequence is shorter than
    ``n_steps + 1``.
    """
    last_start = len(sequence) - n_steps
    windows = [sequence[start:start + n_steps] for start in range(last_start)]
    targets = [sequence[start + n_steps] for start in range(last_start)]
    return array(windows), array(targets)

def scale(train, test):
    """Fit a MinMaxScaler on the train split and scale both splits to [-1, 1].

    The scaler is fit on the training data only, so no statistics from the
    test split leak into the transform. Both inputs must already be 2-D
    ``[samples, features]`` arrays (the original code's
    ``reshape(shape[0], shape[1])`` calls were no-ops and were removed).

    Returns ``(scaler, train_scaled, test_scaled)`` so the scaler can be
    used later to invert the transform.
    """
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    train_scaled = scaler.transform(train)
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled

def invert_scale(scaler, yhat):
    """Map scaled predictions back to the original data range."""
    restored = scaler.inverse_transform(yhat)
    return restored

def prepare_data_one(series, test_percent):
    """Turn a raw series into scaled supervised train/test arrays.

    Pipeline: cast to float32, frame as one-step supervised learning,
    drop the unused forecast columns, split into train/test, then scale
    both splits to [-1, 1] with a scaler fit on the train split only.

    Returns ``(scaler, train, test)``.
    """
    # Ensure all values are float32 before framing.
    values = series.values.astype('float32')

    # Frame as supervised data, then drop columns that are not predicted.
    # NOTE: these hard-coded column indices must be updated by hand
    # whenever the input feature set changes.
    supervised = series_to_supervised(values, 1, 1)
    supervised.drop(supervised.columns[[5, 6, 7]], axis=1, inplace=True)

    # Split into train and test sets.
    train, test = create_dataset(supervised.values, test_percent)

    # Scale train and test data to [-1, 1].
    scaler, train, test = scale(train, test)

    return scaler, train, test

# fit an LSTM network to training data
# def fit_lstm(X, y,test_X, test_y, batch_size, nb_epoch, neurons):
#     # Model definition (plain, stateless LSTM)
#     model = Sequential()
#     model.add(LSTM(n_neurons, activation='relu', input_shape=(X.shape[1], X.shape[2]), stateful=False))
#     model.add(Dense(1))
#     model.summary()
#     model.compile(optimizer='adam', loss='mae')
#     # Fit the model with early stopping
#     early_stop = EarlyStopping(monitor='loss', patience=10, verbose=1)
#     model.fit(X, y, epochs=epochs, batch_size=batch_size, verbose=0, shuffle=False, callbacks=[early_stop])
#     return model

def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Fit a stateful LSTM on a supervised-learning matrix.

    Parameters
    ----------
    train : 2-D array whose last column is the target y and whose preceding
        columns are the input features X.
    batch_size : batch size; with ``stateful=True`` it is baked into the
        network's input shape, so prediction must use the same batch size.
    nb_epoch : number of manual training epochs (state is reset after each).
    neurons : number of LSTM units.

    Returns the trained Keras model.
    """
    X, y = train[:, 0:-1], train[:, -1]
    # Reshape to the 3-D input LSTMs expect: [samples, timesteps, features].
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True)) # stateful=True keeps state across batches; typically used when the sequences are related
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch): # reset state manually at the end of each epoch; otherwise Keras resets it after every batch
        # Batch: weights are updated after each subset of training samples;
        # one epoch consists of one or more batches.
        # Common batch_size settings:
        # batch_size=1: weights updated after every sample (stochastic
        #   gradient descent).
        # batch_size=32: typical values are 32, 64 and 128, tuned for the
        #   desired efficiency and update rate; if the batch size does not
        #   divide the sample count, a smaller final batch runs at epoch end.
        # batch_size=n (n = number of training samples): weights updated
        #   once per epoch (batch gradient descent).
        # A mini-batch size of 32 is a common configuration for LSTMs.
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False) # shuffle=False preserves sample order
        model.reset_states()
    return model

def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    NOTE(review): divides by ``y_true`` element-wise, so any zero in
    ``y_true`` yields inf/NaN in the result.
    """
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100


# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts):
    """Print and log RMSE, MAE, MAPE and R2 between `test` and `forecasts`.

    Relies on the module-level file handle ``fw`` (opened in __main__) for
    the log output; calling this before ``fw`` exists raises NameError.

    Returns the R2 score.
    """
    rmse = sqrt(mean_squared_error(test, forecasts))
    mae = mean_absolute_error(test, forecasts)
    mape = mean_absolute_percentage_error(test, forecasts)
    r2 = r2_score(test, forecasts)
    print(' RMSE: %f' % rmse)
    print(' MAE: %f' % mae)
    print(' MAPE: %f' % mape)
    print(' R2: %f' % r2)
    # Mirror the metrics into the experiment log file.
    fw.write(' RMSE:' + str(rmse) + '\n')
    fw.write(' MAE: ' + str(mae) + '\n')
    fw.write(' MAPE:' + str(mape) + '\n')
    fw.write(' R2: ' + str(r2) + '\n')

    return r2


# Step size is not fixed — the training window grows as new data arrive
# (effectively training a fresh model each time; hard to pull off).
def experiment_three(series, epochs, batch_size, test_percent):
    """Walk-forward experiment with a growing training window.

    NOTE(review): this function is broken as written — see the inline
    NOTE(review) comments for the specific defects.
    """
    # Flatten the single-column values into a plain Python list.
    series_value = series.values.tolist()
    series_list =[]
    for value in series_value:
        series_list.append(value[0])

    # Data preparation (no normalisation applied here).
    n_steps = int(test_percent*len(series_list))
    X, y = series_list[0:n_steps], series_list[n_steps:]
    # Additionally hold out the final 12 months as the test set.
    y_train, y_test = y[:-12], y[-12:]

    for i in range(len(y_train)):
        X_train = X
        # Add one more observation to the training window each iteration.
        # NOTE(review): list.append returns None, so this assignment sets
        # X_train to None after the first append — the growing-window loop
        # cannot work as written.
        for t in range(0,i):
            X_train = X_train.append(y_train[t])
        n_neurons = len(X_train) # one LSTM unit per input observation
        # Reshape [samples, timesteps] => [samples, timesteps, features]
        # n_features = 1
        # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], n_features))
        # NOTE(review): lstm_model is overwritten every iteration and never
        # used below; also fit_lstm expects a 2-D array, not a list.
        lstm_model = fit_lstm(X_train, 1, epochs, n_neurons) # batch_size must be 1 (reason unknown)


    # Prediction.
    # NOTE(review): X_test, model and n_features are undefined here — this
    # block raises NameError as written; presumably lstm_model and a test
    # split were intended.
    y_prediction = list()
    for i in range(len(X_test)):
        x_input = X_test[i].reshape((1, n_steps, n_features))
        yhat = model.predict(x_input, verbose=0)[0]
        y_prediction.append(yhat)
        y_ture = y_test[i]
        print('>Predicted=%.5f, Expected=%.5f' % (yhat, y_ture))
    print("测试模型的评估：")
    evaluate_forecasts(y_test, y_prediction)

    # Plot true vs. predicted values for the held-out 12 months.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(range(len(y_test)), y_test, c="black")
    ax.plot(range(len(y_prediction)), y_prediction, c="red")
    plt.title("LSTM's Prediction")
    ax.set_xlabel("Month")
    ax.set_ylabel("SPEI")
    ax.legend(['true', 'pred'])
    plt.show()


if __name__ == '__main__':
    # Load the station observations; the 'year' and 'month' columns are
    # merged and parsed into the datetime index by parse().
    # NOTE(review): prefer a raw string (r'...') for this Windows path to
    # avoid accidental backslash escape sequences on future edits.
    dataset = read_csv(
        'E:\lyf_ML_Drought\coding\ML_Drought_Prediction\indices_caculate\\result\multi_spei_csv\SPEI-12\Multi_SPEI-12_56080.txt',
        header=0, parse_dates=[['year', 'month']], index_col=0, date_parser=parse)
    dataset.index.name = 'time'
    # Drop the meteorological columns that are not used as model inputs.
    dataset.drop(columns=['average_air_pressure',
                          'average_water_air_pressure',
                          'low_temp', 'high_temp',
                          'precipitation',
                          'temperature',
                          'humidity'], inplace=True)
    # Show basic statistics of the remaining data.
    print(dataset.describe())

    # Line plot
    dataset.plot()
    # Histogram
    dataset.hist()
    # Kernel-density plot
    dataset.plot(kind='kde')
    pyplot.show()

    # Hyper-parameter grid for the search below.
    test_percent = 0.9
    n_batch = [10,20,50,100]
    n_neurons =range(20,51)
    epochs = [3000,5000]

    root = os.path.abspath('result')  # results directory
    fp = os.path.join(root, '2020-07-20-' + '站点56080-'+ 'LSTM（1）' + '.txt')
    # NOTE(review): fw is read as a module global by evaluate_forecasts();
    # it is never closed — consider a with-statement.
    fw = open(fp, 'a+')
    times = 1
    for epoch in epochs:
        for batch in n_batch:
            for neurons in n_neurons:
                fw.write(' test' + str(times) + ' : ' + '\n')
                fw.write(' epochs :' + str(epoch) + '\n')
                fw.write(' batch :' + str(batch) + '\n')
                fw.write(' neuron :' + str(neurons) + '\n')
                # NOTE(review): experiment_two is not defined anywhere in
                # this file (only experiment_three exists, and it takes
                # different arguments) — this call raises NameError as
                # written; confirm which experiment function was intended.
                experiment_two(dataset, epoch, batch, neurons, test_percent)
                times = times + 1



