from keras.layers import LSTM, Dense, Dropout
from numpy import concatenate
from pandas import read_csv, DataFrame
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.models import Sequential
import numpy as np
from tensorflow.python.keras.utils.np_utils import to_categorical
import keras

import util
from util import TIME_STEP, series_to_supervised, draw_data

def no_attention():
    """Train a plain (no-attention) Bi-LSTM passenger-flow model and evaluate it.

    Reads the multi-source airport dataset, frames it as a supervised
    learning problem using TIME_STEP lagged observations, trains a stacked
    bidirectional LSTM, predicts the held-out final day, and returns the
    metrics computed after inverting the MinMax scaling.

    Returns:
        tuple: (mse, mae, r2) of the predictions on the test split.

    Side effects:
        Saves the de-scaled actuals to "test.npy" and the predictions to
        "lstm.npy" in the working directory.
    """
    # To switch between the 2019 and 2020 data, change only the two lines below.
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days contained in the data
    # multi_dataset = read_csv('./data/多源数据总表.csv', header=0, index_col=None)
    # day_num = 31  # number of days contained in the data

    dataset = DataFrame()
    # Features: outbound flow, scheduled arrival/departure passenger counts,
    # time slot (hour), weather and workday flag. Target: out_flow.
    dataset['out_flow'] = multi_dataset['out_flow']
    dataset['arr_SIBT_passenger'] = multi_dataset['arr_SIBT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']
    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    # NOTE(review): the original computed to_categorical(dataset['hour']) here
    # but never used the result; the dead computation has been removed.

    # Replace NaN with 0.
    dataset.fillna(0, inplace=True)

    PARAMETER_NUM = dataset.shape[1]  # number of feature columns used
    # Ensure all data is float.
    values = dataset.values.astype('float32')

    # Normalize the features to [0, 1].
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # Frame as a supervised learning problem: TIME_STEP past steps -> next step.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the time-t columns we do not want to predict, keeping only the
    # first (out_flow) column of time t as the target.
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6]], axis=1, inplace=True)

    # Split into training set (first day_num - 1 days) and test set (last day);
    # each day contributes 100 time slices.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Split into inputs and output (the last column is the target).
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape into 3D [samples, time steps, features].
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, train_X.shape[1] // TIME_STEP))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, test_X.shape[1] // TIME_STEP))

    # Design the network: two stacked bidirectional LSTM layers.
    model = Sequential()
    model.add(keras.layers.Bidirectional(
        keras.layers.LSTM(units=util.UNITS_NUM,
                          input_shape=(train_X.shape[1], train_X.shape[2]),
                          activation='sigmoid', return_sequences=True)))
    model.add(keras.layers.Bidirectional(
        keras.layers.LSTM(units=util.UNITS_NUM,
                          input_shape=(train_X.shape[1], train_X.shape[2]),
                          activation='sigmoid')))
    model.add(Dropout(0.1))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')

    # Fit the network. shuffle=False keeps the temporal ordering of samples.
    # (The original built an unused EarlyStopping callback; it never reached
    # fit(), so it has been removed.)
    model.fit(train_X, train_y, epochs=300, batch_size=128, verbose=2, shuffle=False)

    # Make a prediction. The original called model.predict(test_X) twice and
    # discarded the first result; a single call is sufficient.
    yhat = model.predict(test_X, verbose=1)
    yhat = np.array(yhat).reshape(yhat.shape[0], -1)

    # Invert the scaling of the forecast: rebuild a full-width feature row so
    # the fitted scaler can be applied, then keep only the target column.
    test_X = test_X.reshape((test_X.shape[0], -1))
    inv_yhat = concatenate((yhat, test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)[:, 0]
    # Invert the scaling of the actual values the same way.
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)[:, 0]
    np.save("test", inv_y)
    np.save("lstm", inv_yhat)

    # Compute the evaluation metrics on the de-scaled series.
    mse = mean_squared_error(inv_y, inv_yhat)
    mae = mean_absolute_error(inv_y, inv_yhat)
    r2 = r2_score(inv_y, inv_yhat)
    return mse, mae, r2


if __name__ == '__main__':
    # Run the model N times and print the averaged MSE, MAE and R^2.
    N = 1
    totals = [0.0, 0.0, 0.0]  # accumulated mse, mae, r2
    for _ in range(N):
        metrics = no_attention()
        totals = [acc + m for acc, m in zip(totals, metrics)]
    print(totals[0] / N, totals[1] / N, totals[2] / N)
