from numpy import concatenate
from pandas import read_csv, DataFrame
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras import Input
import keras
from keras import backend as K
import tensorflow as tf
from tensorflow.python.keras.callbacks import EarlyStopping
import numpy as np
from tensorflow.python.keras.models import load_model

from my_self_attention import My_Self_Attention
from keras_self_attention import SeqSelfAttention
from keras_self_attention import SeqWeightedAttention

from util import series_to_supervised, TIME_STEP, draw_data, UNITS_NUM
from keras.layers import LSTM, RepeatVector, Dense, \
    Activation, Add, Reshape, Input, Lambda, Multiply, Concatenate, Dot

def one_encoder_attention_step(X, t):
    """Run the t-th time slice's dedicated self-attention model over X and
    return only the attended slice at position t.

    X: input sequence tensor of shape (batch, T, features).
    t: index of the time slice to extract.
    Returns a tensor of shape (batch, 1, features).

    Uses the module-level SA_array (one SeqSelfAttention per time slice,
    created in __main__).
    """
    attended = SA_array[t](X)
    # Keep the slice dimension (t:t+1) so downstream concat works on axis 1.
    return attended[:, t:t + 1, :]


def encoder_attention(T, X):
    """Apply the per-time-slice self-attention models across all T slices.

    T: number of time slices in the sequence.
    X: input sequence tensor of shape (batch, T, features).
    Returns the attended sequence, shape (batch, T, features), built by
    concatenating each slice's attention output along the time axis.
    """
    attended_slices = [one_encoder_attention_step(X, step) for step in range(T)]
    return tf.concat(attended_slices, axis=1)

def get_model(PARAMETER_NUM):
    """Build the LSTM + per-time-slice self-attention forecasting model.

    PARAMETER_NUM: number of input features per time step.
    Returns an uncompiled Keras Model mapping an input of shape
    (T, PARAMETER_NUM) to a single scalar prediction. Relies on the
    module-level globals T and UNITS_NUM.
    """
    seq_input = Input(shape=(T, PARAMETER_NUM))
    # Sigmoid-activated LSTM returning the full hidden sequence so that
    # attention can be applied to every time slice.
    hidden_seq = LSTM(UNITS_NUM, return_sequences=True, activation='sigmoid')(seq_input)
    attended = encoder_attention(T, hidden_seq)

    # Per-step projection to one value, flatten, then a final dense head.
    per_step = Dense(1)(attended)
    flat = Reshape((-1,))(per_step)
    prediction = Dense(1)(flat)

    return keras.models.Model(inputs=seq_input, outputs=prediction)


def get_mse_with_attention_without_diff():
    """Train the LSTM + per-time-slice self-attention model (no residual
    correction stage) and evaluate it on the held-out test tail.

    Returns:
        (mse, mae, r2, inv_yhat) where inv_yhat is the inverse-scaled
        prediction series for the test set.

    Relies on module-level globals set in __main__: epochs, batch_size,
    SA_array (via get_model/encoder_attention) and T.
    """
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days covered by the dataset

    dataset = DataFrame()
    # Features: inbound airport flow (target, first column), actual
    # arrival/departure passenger loads, hour, weather, workday flag.
    dataset['in_flow'] = multi_dataset['in_flow']
    dataset['arr_ALDT_passenger'] = multi_dataset['arr_ALDT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']
    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    dataset.fillna(0, inplace=True)

    PARAMETER_NUM = dataset.shape[1]  # number of feature columns
    # Ensure all data is float.
    values = dataset.values.astype('float32')

    # Scale every feature to [0, 1]; the same scaler is inverted later.
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # Frame as supervised learning: TIME_STEP past steps predict the next one.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the forecast columns we do not want to predict (keep the target).
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6]], axis=1, inplace=True)
    print(reframed.head())

    # Split into training and test sets by time slice.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    # Fix: the train slice was hard-coded to values[:600, :], which ignored
    # most of the available training range; use the computed boundary so the
    # split is consistent with get_mse_with_attention_with_diff().
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Split into inputs and output (last column is the target).
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape to 3D [samples, time steps, features].
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, int(train_X.shape[1] / TIME_STEP)))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, int(test_X.shape[1] / TIME_STEP)))

    model = get_model(PARAMETER_NUM)
    model.compile(loss='mae', optimizer='adam')
    model.summary()

    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2,
                        shuffle=False)

    yhat = model.predict(test_X, verbose=1)
    yhat = np.array(yhat).reshape(yhat.shape[0], -1)
    # Invert the scaling on the predictions: rebuild a full feature row by
    # appending the (still scaled) remaining features, inverse-transform,
    # then keep only the target column.
    temp_test_X = test_X.reshape((test_X.shape[0], -1))
    inv_yhat = concatenate((yhat, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:, 0]
    # Invert the scaling on the ground truth the same way.
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:, 0]

    np.save("./result/out/test", inv_y)

    mse = mean_squared_error(inv_y, inv_yhat)
    mae = mean_absolute_error(inv_y, inv_yhat)
    r2 = r2_score(inv_y, inv_yhat)
    print('Test MSE: %.3f' % mse)
    print('Test MAE: %.3f' % mae)
    print('Test R2: %.3f' % r2)
    pic_path = "./picture/in/time-feature-attention-without-diff.png"
    pic_title = "time-feature-attention-without-diff\n MSE=%.3f" % mse
    draw_data(inv_yhat, inv_y, pic_path, pic_title)

    return mse, mae, r2, inv_yhat




def get_mse_with_attention_with_diff():
    """Train a BiLSTM + self-attention base model, then train a second
    network of the same shape to predict the base model's residuals, and
    evaluate the combined prediction (base + predicted residual) on the
    held-out test tail.

    Returns:
        (mse, mae, r2, inv_yhat) where inv_yhat is the inverse-scaled
        combined prediction for the test set.

    Relies on module-level globals set in __main__: T, epochs, batch_size.
    """

    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days covered by the dataset
    # multi_dataset = read_csv('./data/多源数据总表.csv', header=0, index_col=None)
    # day_num = 31  # number of days covered by that alternative dataset

    dataset = DataFrame()
    # Features: outbound flow (target, first column), scheduled
    # arrival/departure passenger loads, hour, weather, workday flag.
    # dataset['in_flow'] = multi_dataset['in_flow']
    dataset['out_flow'] = multi_dataset['out_flow']
    # dataset['arr_ALDT_passenger'] = multi_dataset['arr_ALDT_passenger']
    dataset['arr_SIBT_passenger'] = multi_dataset['arr_SIBT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']

    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    dataset.fillna(0, inplace=True)

    PARAMETER_NUM = dataset.shape[1]  # number of feature columns
    values = dataset.values
    # ensure all data is float
    values = values.astype('float32')

    # Scale every feature to [0, 1]; the same scaler is inverted later.
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # mean = values.mean(axis=0)
    # values -= mean
    # std = values.std(axis=0)
    # values /= std

    # Frame as supervised learning: TIME_STEP past steps predict the next one.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the forecast columns we do not want to predict (keep the target).
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6]], axis=1, inplace=True)
    print(reframed.head())

    # Split into training and test sets by time slice.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Split into inputs and output (last column is the target).
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape to 3D [samples, time steps, features].
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, int(train_X.shape[1] / TIME_STEP)))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, int(test_X.shape[1] / TIME_STEP)))


    # Dead experiments kept from the original (bare string literals):
    """
    model = get_model()
    model.compile(loss='mse', optimizer='adam')
    model.summary()
    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2,
                        validation_data=(test_X, test_y), callbacks=[early_stopping], shuffle=False)
    """

    """
    model = keras.models.Sequential()
    model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=50,activation='relu', input_shape=(7, 1),
                                                           return_sequences=True)))
    model.add(SeqSelfAttention(attention_activation='sigmoid'))
    model.add(keras.layers.Dense(units=5))
    model.compile(loss='mse', optimizer='adam')
    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2,
                        validation_data=(test_X, test_y), callbacks=[early_stopping], shuffle=False)
    model.summary()
    """
    # Base model: BiLSTM followed by multiplicative self-attention.
    inputs = Input(shape=(T, PARAMETER_NUM))
    lstm = keras.layers.Bidirectional(keras.layers.LSTM(units=20,
                                                        return_sequences=True))(inputs)

    att = SeqSelfAttention(attention_width=T,
                           attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
                           kernel_regularizer=keras.regularizers.l2(1e-6),
                           bias_regularizer=keras.regularizers.l1(1e-6),
                           attention_regularizer_weight=1e-6,
                           history_only=True,
                           name='Attention')(lstm)

    # att = att+lstm
    # att = keras.layers.LayerNormalization()(att)
    # att = SeqSelfAttention(attention_width=T,
    #                        attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
    #                        kernel_regularizer=keras.regularizers.l2(1e-6),
    #                        bias_regularizer=keras.regularizers.l1(1e-6),
    #                        attention_regularizer_weight=1e-6,
    #                        history_only=True,
    #                        name='Attention')(att)
    # att = att+lstm
    # att = keras.layers.LayerNormalization()(att)
    # att = SeqSelfAttention(attention_width=T,
    #                        attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
    #                        kernel_regularizer=keras.regularizers.l2(1e-6),
    #                        bias_regularizer=keras.regularizers.l1(1e-6),
    #                        attention_regularizer_weight=1e-6,
    #                        history_only=True,
    #                        name='Attention')(lstm)

    dense = keras.layers.Dense(units=1, name='Dense')(att)
    output = Reshape((-1,))(dense)
    output = Dense(1)(output)  # final prediction head
    model = keras.models.Model(inputs=inputs, outputs=[output])
    model.compile(loss='mae', optimizer='adam')
    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2,
                        validation_data=(test_X, test_y), callbacks=[early_stopping], shuffle=False)

    # Base-model predictions on the test set (still in scaled space).
    yhat = model.predict(test_X, verbose=1)
    yhat = np.array(yhat).reshape(yhat.shape[0],-1)
    # The commented block below was used to obtain lstm+attention predictions
    # for the training and test sets separately.
    # train_hat = model.predict(train_X, verbose=1)
    # train_hat = np.array(train_hat).reshape(train_hat.shape[0],-1)
    #
    # temp_test_X = test_X.reshape((test_X.shape[0], -1))
    # inv_yhat = concatenate((yhat, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    # inv_yhat = scaler.inverse_transform(inv_yhat)
    # inv_yhat = inv_yhat[:, 0]
    #
    # temp_train_X = train_X.reshape((train_X.shape[0], -1))
    # inv_train_hat = concatenate((train_hat, temp_train_X[:, 1:PARAMETER_NUM]), axis=1)
    # inv_train_hat = scaler.inverse_transform(inv_train_hat)
    # inv_train_hat = inv_train_hat[:, 0]
    # lss = 1

    # Inverse-scaling of the base predictions was moved below: the predicted
    # residual is added first, then everything is inverse-scaled together.
    # temp_test_X = test_X.reshape((test_X.shape[0], -1))
    # inv_yhat = concatenate((yhat, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    # inv_yhat = scaler.inverse_transform(inv_yhat)
    # inv_yhat = inv_yhat[:, 0]
    # # inverse-scale the ground truth
    # test_y = test_y.reshape((len(test_y), 1))
    # inv_y = concatenate((test_y, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    # inv_y = scaler.inverse_transform(inv_y)
    # inv_y = inv_y[:, 0]


    # Residual prediction network ******************************************
    # Same architecture family as the base model, trained on the base
    # model's residuals instead of the raw target.

    inputs = Input(shape=(T, PARAMETER_NUM))
    lstm = keras.layers.Bidirectional(keras.layers.LSTM(units=50,
                                                        return_sequences=True))(inputs)

    att = SeqSelfAttention(attention_width=T,
                           attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
                           kernel_regularizer=keras.regularizers.l2(1e-4),
                           bias_regularizer=keras.regularizers.l1(1e-4),
                           attention_regularizer_weight=1e-4,
                           history_only=True,
                           name='Attention2')(lstm)
    # att = att+lstm
    # att = keras.layers.LayerNormalization()(att)
    # att = SeqSelfAttention(attention_width=T,
    #                        attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
    #                        kernel_regularizer=keras.regularizers.l2(1e-6),
    #                        bias_regularizer=keras.regularizers.l1(1e-6),
    #                        attention_regularizer_weight=1e-6,
    #                        history_only=True,
    #                        name='Attention')(att)
    # att = att+lstm
    # att = keras.layers.LayerNormalization()(att)
    # att = SeqSelfAttention(attention_width=T,
    #                        attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
    #                        kernel_regularizer=keras.regularizers.l2(1e-6),
    #                        bias_regularizer=keras.regularizers.l1(1e-6),
    #                        attention_regularizer_weight=1e-6,
    #                        history_only=True,
    #                        name='Attention')(lstm)

    dense = keras.layers.Dense(units=1, name='Dense')(att)
    output = Reshape((-1,))(dense)
    output = Dense(1)(output)  # final residual prediction head
    model_diff = keras.models.Model(inputs=inputs, outputs=[output])
    model_diff.compile(loss='mae', optimizer='adam')


    # Compute training-set residuals; note this intentionally uses the base
    # `model`, not `model_diff`.
    yhat_train = model.predict(train_X, verbose=1)
    yhat_train = np.array(yhat_train).reshape(yhat_train.shape[0],)
    train_diff = train_y - yhat_train
    # Compute test-set residuals (used only as validation targets below).
    yhat_test = model.predict(test_X, verbose=1)
    yhat_test = np.array(yhat_test).reshape(yhat_test.shape[0],)
    test_diff = test_y - yhat_test

    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
    history = model_diff.fit(train_X, train_diff, epochs=epochs, batch_size=batch_size, verbose=2,
                        validation_data=(test_X, test_diff), callbacks=[early_stopping], shuffle=False)

    # model_diff = load_model("./attModel.h5")
    # Predict the residuals with model_diff.
    diff_hat = model_diff.predict(test_X, verbose=1)
    diff_hat = np.array(diff_hat).reshape(diff_hat.shape[0],-1)

    # yhat + diff_hat: add the predicted residual to the base prediction to
    # form the final (scaled) result, then inverse-scale it by rebuilding a
    # full feature row and inverting the MinMax scaler.
    temp_test_X = test_X.reshape((test_X.shape[0], -1))
    inv_yhat = concatenate((yhat + diff_hat, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:, 0]
    # Inverse-scale the ground truth the same way.
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:, 0]
    np.save("./result/out/test",inv_y)

    # Lag check (disabled):
    # shift_yhat = inv_yhat[1:]
    # shift_yhat = np.append(shift_yhat, 0)
    mse = mean_squared_error(inv_y, inv_yhat)
    mae = mean_absolute_error(inv_y, inv_yhat)
    r2 = r2_score(inv_y, inv_yhat)

    return mse, mae, r2,inv_yhat


if __name__ == '__main__':
    T = TIME_STEP  # sequence length: T time slices predict the next one
    m = n_h = n_s = 20  # length of hidden state m
    p = n_hde0 = n_sde0 = 30  # p
    batch_size = 16
    epochs = 100
    test_split = 0.2

    def _build_slice_attention():
        """Create one multiplicative self-attention layer for a time slice."""
        return SeqSelfAttention(
            attention_activation='sigmoid',
            attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
            kernel_regularizer=keras.regularizers.l2(1e-4),
            bias_regularizer=keras.regularizers.l1(1e-4),
            attention_regularizer_weight=1e-4,
        )

    # One independent self-attention model per time slice, indexed by slice;
    # read globally by one_encoder_attention_step.
    SA_array = [_build_slice_attention() for _ in range(6)]

    # Run the residual-corrected experiment N times and average the metrics.
    mse = 0
    mae = 0
    r2 = 0
    N = 1
    temp = np.zeros(94)  # accumulator for the 94-point prediction series
    for _run in range(N):
        a, b, c, inv_yhat = get_mse_with_attention_with_diff()
        mse = mse + a
        mae = mae + b
        r2 = r2 + c
        temp = temp + np.array(inv_yhat)

    mse = mse / N
    mae = mae / N
    r2 = r2 / N
    temp = temp / N
    if mse < 117:
        # Persists the final run's predictions (inv_yhat), not the average.
        np.save("./result/out/att", inv_yhat)
    print("mse:%.3f, mae: %.3f, r2: %.3f" % (mse, mae, r2))