# -*- coding: utf-8 -*-

# 指定当前的工作目录

import keras
from keras import backend as K
from keras.engine.topology import Layer
import tensorflow as tf

# 参数为(inputs,output_dim)  作用等价于Dense层,只不过,Dense层是w*inputs+b .而这里没有加上b
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler


class Dense_no_bias(Layer):
    """A fully connected layer without a bias term: output = inputs . W.

    Equivalent to ``Dense(output_dim, use_bias=False)`` — the comment in the
    original file notes Dense computes ``w*inputs + b`` while this omits ``b``.
    """

    def __init__(self, output_dim, **kwargs):
        # Number of output units (size of the last output dimension).
        self.output_dim = output_dim
        super(Dense_no_bias, self).__init__(**kwargs)

    def build(self, input_shape):
        # Kernel of shape (input_dim, output_dim), uniform-initialized, trainable.
        self.kernel = self.add_weight(name='kernel', shape=(input_shape[-1], self.output_dim),
                                      initializer='uniform', trainable=True)
        super(Dense_no_bias, self).build(input_shape)

    def call(self, inputs, **kwargs):
        # Fix: removed a leftover debug print that fired on every graph build.
        return K.dot(inputs, self.kernel)

    def compute_output_shape(self, input_shape):
        # NOTE(review): assumes rank-3 inputs (batch, steps, features), which is
        # how every call site in this file uses the layer.
        return (input_shape[0], input_shape[1], self.output_dim)


class My_Transpose(Layer):
    """Permute the dimensions of the input tensor according to ``axis``.

    Plays the same role as ``Permute``, but the pattern includes the batch
    axis (e.g. ``axis=(0, 2, 1)`` swaps the last two dimensions).
    """

    def __init__(self, axis, **kwargs):
        # axis: tuple giving the target ordering of the input dimensions.
        self.axis = axis
        super(My_Transpose, self).__init__(**kwargs)

    def build(self, input_shape):
        super(My_Transpose, self).build(input_shape)

    def call(self, inputs, **kwargs):
        return K.permute_dimensions(inputs, pattern=self.axis)

    def compute_output_shape(self, input_shape):
        # Generalized from the fixed rank-3 original: permute the shape with
        # the same pattern, for any rank (identical result for rank 3).
        return tuple(input_shape[a] for a in self.axis)


def one_encoder_attention_step(h_prev, s_prev, X):
    '''
    Single input-attention step of the DA-RNN encoder.

    :param h_prev: previous encoder LSTM hidden state, shape (None, 1, m)
    :param s_prev: previous encoder LSTM cell state, shape (None, 1, m)
    :param X: input series of shape (None, T, n) — T time steps, n driving series
    :return: attention weights over the n input series (softmax output, sums to 1)

    NOTE(review): relies on the module-level globals `en_densor_We` and `T`,
    which are created near the bottom of this file.
    '''
    concat = Concatenate()([h_prev, s_prev])  # (None, 1, 2m): [h_prev; s_prev] along last axis
    result1 = en_densor_We(concat)  # (None, 1, T): dense layer, i.e. We * [h_prev; s_prev]
    result1 = RepeatVector(X.shape[2], )(result1)  # (None, n, T): repeat once per driving series
    X_temp = My_Transpose(axis=(0, 2, 1))(X)  # (None, n, T); equivalent to Permute((2, 1))(X)
    result2 = Dense_no_bias(T)(X_temp)  # (None, n, T): X^T dotted with Ue (T, T)
    result3 = Add()([result1, result2])  # (None, n, T): We*[h_prev; s_prev] + Ue*X
    result4 = Activation(activation='tanh')(result3)  # (None, n, T)

    result5 = Dense_no_bias(1)(result4)  # (None, n, 1): projection with Ve
    result5 = My_Transpose(axis=(0, 2, 1))(result5)  # (None, 1, n)
    print('result5 ', result5)
    alphas = Activation(activation='softmax')(result5)  # normalize over the n series

    return alphas


def encoder_attention(T, X, s0, h0):
    """
    Encoder stage with input attention: iterates over the T time steps,
    computing input-attention weights at every step while advancing the
    shared encoder LSTM (`en_LSTM_cell`, a module-level global).

    :param T: number of time steps
    :param X: input series, shape (None, T, n)
    :param s0: initial encoder cell state
    :param h0: initial encoder hidden state
    :return: X weighted element-wise by the per-step attention weights, (None, T, n)
    """
    s = s0
    h = h0
    # initialize empty list of outputs
    attention_weight_t = None
    for t in range(T):
        context = one_encoder_attention_step(h, s, X)  # (None, 1, n)
        # Slice out time step t; the lambda ignores its argument and closes over X/t.
        x = Lambda(lambda x: X[:, t, :])(X)
        x = Reshape((1, x.shape[1]))(x)  # reshape to (None, 1, n)
        h, _, s = en_LSTM_cell(x, initial_state=[h, s])
        if t != 0:
            # attention_weight_t= Merge(mode='concat', concat_axis=1)([attention_weight_t,context]) # old Keras API
            if t == T - 1:
                # Name the final concat so the weights can be fetched later by layer name.
                attention_weight_t = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=1),
                                            name='attention_weight_local')([attention_weight_t, context])
            else:
                attention_weight_t = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=1))(
                    [attention_weight_t, context])
            # my_concat([attention_weight_t, context])
            # attention_weight_t = Concatenate(axis=1)([attention_weight_t, context]) # newer Keras API
        else:
            attention_weight_t = context

        # break

    X_ = Multiply()([attention_weight_t, X])  # element-wise weighting of X, (None, T, n)
    return X_


def one_decoder_attention_step(h_de_prev, s_de_prev, h_en_all, t):
    '''
    Single temporal-attention step of the DA-RNN decoder.

    :param h_de_prev: previous decoder hidden state, shape (None, 1, p)
    :param s_de_prev: previous decoder cell state, shape (None, 1, p)
    :param h_en_all: all encoder hidden states, shape (None, T, m) — m is the
        encoder hidden size, T the number of time steps
    :param t: decoder step index; at t == T - 2 the softmax layer is named
        'attention_weight_time' so the weights can be fetched later by name
    :return: context vector — the beta-weighted sum of encoder hidden states, (None, 1, m)

    NOTE(review): relies on the module-level globals `de_densor_We`, `T` and `m`.
    '''
    concat = Concatenate()([h_de_prev, s_de_prev])  # (None, 1, 2p)
    result1 = de_densor_We(concat)  # (None, 1, m)
    result1 = RepeatVector(T)(result1)  # (None, T, m)
    result2 = Dense_no_bias(m)(h_en_all)  # (None, T, m); could also be Dense(m)(h_en_all)
    result3 = Add()([result1, result2])  # (None, T, m)
    result4 = Activation(activation='tanh')(result3)  # (None, T, m)
    result5 = Dense_no_bias(1)(result4)  # (None, T, 1)
    result5 = Reshape((1, result5.shape[1]))(result5)  # (None, 1, T)
    if t == T - 2:
        beta = Activation(activation='softmax', name='attention_weight_time')(result5)
    else:
        beta = Activation(activation='softmax')(result5)
    beta = Reshape((beta.shape[2], 1))(beta)  # (None, T, 1)
    context = Dot(axes=1)([beta, h_en_all])  # (None, 1, m): combine the T states weighted by beta
    return context


def decoder_attention(T, h_en_all, Y, s0, h0):
    """
    Decoder stage with temporal attention: walks through the T - 1 known
    target values, at each step fusing the previous target with the attention
    context and advancing the shared decoder LSTM (`de_LSTM_cell`, global).

    :param T: number of encoder time steps
    :param h_en_all: all encoder hidden states, shape (None, T, m)
    :param Y: known history of the target series, shape (None, T - 1, 1)
    :param s0: initial decoder cell state
    :param h0: initial decoder hidden state
    :return: (h, context) — the final decoder hidden state and final context vector
    """
    s = s0
    h = h0
    for t in range(T - 1):
        # Slice out target step t; the lambda ignores its argument and closes over Y/t.
        y_prev = Lambda(lambda y_prev: Y[:, t, :])(Y)
        y_prev = Reshape((1, y_prev.shape[1]))(y_prev)  # (None, 1, 1)
        context = one_decoder_attention_step(h, s, h_en_all, t)  # (None, 1, m)
        y_prev = Concatenate(axis=2)([y_prev, context])  # (None, 1, m + 1)
        y_prev = Dense(1)(y_prev)  # (None, 1, 1): w~ applied to [y; c]
        h, _, s = de_LSTM_cell(y_prev, initial_state=[h, s])


    context = one_decoder_attention_step(h, s, h_en_all, T - 1)  # final context c_T, (None, 1, m)
    return h, context  # h is the last decoder hidden state


# Standardize data column-wise to zero mean and unit variance.
def normal(float_data):
    """Standardize each column of *float_data* in place.

    The array is modified in place (mean subtracted, then divided by the
    std of the centered data) and the same array is returned for convenience.
    """
    float_data -= float_data.mean(axis=0)
    float_data /= float_data.std(axis=0)
    return float_data


# print(float_data[:,5])

def get_data(data_path):
    """Load a headered CSV file into a float matrix.

    The first line is treated as the header and only determines the number
    of columns. Rows containing the literal token 'None' are left as zeros,
    and the last line (normally the empty trailing line after the final
    newline) is also left as zeros — matching the original behavior.

    Fix: the original called ``f.close`` without parentheses, so the file
    handle was never closed; a ``with`` block now guarantees it.

    :param data_path: path to the CSV file
    :return: numpy array of shape (number of data lines, number of columns)
    """
    with open(data_path) as f:
        lines = f.read().split('\n')
    header = lines[0].split(',')
    lines = lines[1:]
    float_data = np.zeros((len(lines), len(header)))
    for i, line in enumerate(lines):
        if i == len(lines) - 1:
            break  # last entry (trailing empty line) stays all-zero
        fields = line.split(',')
        if 'None' in fields:
            continue  # rows with missing values stay all-zero
        float_data[i] = [float(x) for x in fields]
    return float_data


def get_model():
    """
    Build the dual-stage attention (DA-RNN style) prediction model.

    NOTE(review): reads many module-level globals defined below in the script
    (T, X_column_length, Y_column_length, n_s, n_h, n_hde0, n_sde0, m, p,
    future) plus the shared layers used inside the attention functions.

    :return: an uncompiled keras Model with inputs [X, Y, s0, h0, s_de0, h_de0].
    """
    X = Input(shape=(T, X_column_length))  # driving input time series
    s0 = Input(shape=(n_s,))  # initial encoder cell state (size n_s)
    h0 = Input(shape=(n_h,))  # initial encoder hidden state (size n_h)
    h_de0 = Input(shape=(n_hde0,))  # initial decoder hidden state
    s_de0 = Input(shape=(n_sde0,))  # initial decoder cell state
    Y = Input(shape=(T - 1, Y_column_length))  # T - 1 known target values
    X_ = encoder_attention(T, X, s0, h0)  # attention-weighted input, (None, T, n)
    X_ = Reshape((T, X_column_length))(X_)  # enforce shape (T, n)
    h_en_all = LSTM(m, return_sequences=True)(X_)  # T encoder hidden states of size m
    h_en_all = Reshape((T, -1))(h_en_all)  # ensure T rows; -1 is inferred (equals m)

    h, context = decoder_attention(T, h_en_all, Y, s_de0, h_de0)
    h = Reshape((1, p))(h)  # final decoder hidden state
    concat = Concatenate(axis=2)([h, context])  # (None, 1, p + m)
    concat = Reshape((-1,))(concat)  # flatten for the dense head
    result = Dense(p)(concat)  # project to p dimensions
    output = Dense(Y_column_length * future)(result)  # final prediction
    model = keras.models.Model(inputs=[X, Y, s0, h0, s_de0, h_de0], outputs=output)
    return model


from keras.layers import LSTM, RepeatVector, Dense, \
    Activation, Add, Reshape, Input, Lambda, Multiply, Concatenate, Dot
from sklearn.model_selection import train_test_split
import numpy as np


# Without this line model.fit() raises an error (the graph-building code
# above is written for TF1-style, non-eager execution).
tf.compat.v1.disable_eager_execution()

T = 10  # length of the input time window (number of time steps)
step = 1  # sliding-window stride
future = 1  # how many future steps to predict
m = n_h = n_s = 20  # size m of the encoder hidden/cell state
p = n_hde0 = n_sde0 = 30  # size p of the decoder hidden/cell state
batch_size = 128
epochs = 1000
test_split = 0.2  # NOTE(review): appears unused — the split below is day-based

# Data preparation: standardize the driving series (zero mean, unit std).
float_data = get_data('./data/201908-inflow-X.csv')
mean = float_data.mean(axis=0)
float_data -= mean
std = float_data.std(axis=0)
float_data /= std

# Build sliding windows of T consecutive rows as encoder inputs.
input_X = []
X_row_length = len(float_data)
X_column_length = len(float_data[0])
for i in range(0, X_row_length - T + 1 - step - future + 1, step):
    X_data = float_data[i:i + T, 0:X_column_length]
    input_X.append(np.array(X_data))

# print(len(input_X))
input_X = np.array(input_X).reshape(-1, T, X_column_length)

# Standardize the target series with its own mean/std; Y_mean and Y_std are
# kept to de-normalize the predictions at the end of the script.
Y_float_data = get_data('./data/201908-inflow-Y.csv')
Y_mean = Y_float_data.mean(axis=0)
Y_float_data -= Y_mean
Y_std = Y_float_data.std(axis=0)
Y_float_data /= Y_std
input_Y = []
label_Y = []
Y_row_length = len(Y_float_data)
Y_column_length = len(Y_float_data[0])
for i in range(0, Y_row_length - T + 1 - step - future + 1, step):
    # The first T - 1 target values feed the decoder...
    Y_data = Y_float_data[i:i + T - 1, 0:Y_column_length]
    label = []
    # ...and the following `future` values form the prediction label.
    for j in range(future):
        label.append(Y_float_data[i + T - 1 + j, 0:Y_column_length])
    # label = Y_float_data[i+T-1,0:Y_column_length]
    label = np.array(label).reshape(-1, Y_column_length * future)
    # print(label.shape)
    input_Y.append(np.array(Y_data))
    label_Y.append(np.array(label))

# print('label_Y ',label_Y[0])
input_Y = np.array(input_Y).reshape(-1, T - 1, Y_column_length)
label_Y = np.array(label_Y).reshape(-1, Y_column_length * future)

"""
dataset = read_csv('./201908-inflow-X.csv', header=0, index_col=None)
values = dataset.values
# ensure all data is float
values = values.astype('float32')
# 归一化特征
scaler_X = MinMaxScaler(feature_range=(0, 1))
scaled = scaler_X.fit_transform(values)
input_X = []
X_row_length = len(scaled)
X_column_length = len(scaled[0])
for i in range(0, X_row_length - T + 1 - step - future + 1, step):
    X_data = scaled[i:i + T, 0:X_column_length]
    input_X.append(np.array(X_data))
input_X = np.array(input_X).reshape(-1, T, X_column_length)


Y_dataset = read_csv('./201908-inflow-Y.csv', header=0, index_col=None)
Y_value = Y_dataset.values
Y_value = Y_value.astype('float32')
# 归一化特征
scaler_Y = MinMaxScaler(feature_range=(0, 1))
scaled_Y = scaler_Y.fit_transform(Y_value)
input_Y = []
label_Y = []
Y_row_length = len(scaled_Y)
Y_column_length = len(scaled_Y[0])
for i in range(0, Y_row_length - T + 1 - step - future + 1, step):
    Y_data = scaled_Y[i:i + T - 1, 0:Y_column_length]
    label = []
    for j in range(future):
        label.append(scaled_Y[i + T - 1 + j, 0:Y_column_length])
    # label = Y_float_data[i+T-1,0:Y_column_length]
    label = np.array(label).reshape(-1, Y_column_length * future)
    # print(label.shape)
    input_Y.append(np.array(Y_data))
    label_Y.append(np.array(label))
input_Y = np.array(input_Y).reshape(-1, T - 1, Y_column_length)
label_Y = np.array(label_Y).reshape(-1, Y_column_length * future)
"""

# Chronological train/test split: the first (day_num - 1) days are used for
# training, the rest for testing (144 ten-minute slices per day).
day_num = 12
n_train_time_slice = (day_num-1) * 144
input_X_train = input_X[:n_train_time_slice, :]
input_X_test = input_X[n_train_time_slice:, :]
input_Y_train = input_Y[:n_train_time_slice, :]
input_Y_test = input_Y[n_train_time_slice:, :]
label_Y_train = label_Y[:n_train_time_slice, :]
label_Y_test = label_Y[n_train_time_slice:, :]

# train_test_split would shuffle the samples, which is undesirable for a
# chronological split, hence the manual slicing above.
# input_X_train, input_X_test, input_Y_train, input_Y_test, label_Y_train, label_Y_test = train_test_split(input_X,
#                                                                                                          input_Y,
#                                                                                                          label_Y,
#                                                                                                          test_size=0.083,
#                                                                                                          random_state=0)


# Initial LSTM states are all zeros.
s0_train = h0_train = np.zeros((input_X_train.shape[0], m))
h_de0_train = s_de0_train = np.zeros((input_X_train.shape[0], p))

# Shared layers referenced as globals by the attention functions above.
en_densor_We = Dense(T)
en_LSTM_cell = LSTM(n_h, return_state=True)
de_LSTM_cell = LSTM(p, return_state=True)
de_densor_We = Dense(m)
LSTM_cell = LSTM(p, return_state=True)  # NOTE(review): appears unused

model = get_model()
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.summary()
# early_stopping = EarlyStopping(monitor='val_mean_squared_error',patience=20,mode='min')
# model_checkpoint = ModelCheckpoint(fname_model,monitor='val_mean_squared_error',verbose=0)

from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)

s0_test = h0_test = np.zeros((input_X_test.shape[0], m))
h_de0_test = s_de0_test = np.zeros((input_X_test.shape[0], p))
# history = model.fit([input_X_train, input_Y_train, s0_train, h0_train, s_de0_train, h_de0_train], label_Y_train,
#                     epochs=epochs, batch_size=batch_size, validation_split=0.1, callbacks=[early_stopping])
# NOTE(review): the test set is used as validation data, so early stopping is
# tuned on the test set — reported test scores may be optimistic.
history = model.fit([input_X_train, input_Y_train, s0_train, h0_train, s_de0_train, h_de0_train], label_Y_train,
                    epochs=epochs, batch_size=batch_size, verbose=2, validation_data=([input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test], label_Y_test), callbacks=[early_stopping], shuffle=False)

import matplotlib.pyplot as plt

# Plot training vs. validation loss curves.
loss = history.history['loss']
val_loss = history.history['val_loss']
ep = range(1, len(loss) + 1)
plt.plot(ep, loss, 'bo', label='Training loss')
plt.plot(ep, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()


score = model.evaluate([input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test], label_Y_test,
                       batch_size=input_X_test.shape[0], verbose=1)
print('loss:', score[0])
print('mse:', score[1])

predicted = model.predict([input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test], verbose=1)
# De-normalize the predictions back to original units
# inv_yhat = scaler_Y.inverse_transform(predicted)
# De-normalize the ground truth back to original units
# inv_y = scaler_Y.inverse_transform(label_Y_test)


# Undo the mean/std standardization applied to the target series.
predicted *= Y_std
predicted += Y_mean
label_Y_test *= Y_std
label_Y_test += Y_mean

# NOTE(review): hack — the first 17 predictions are zeroed out before the MSE
# is computed and plotted; confirm whether this is still intended.
# for i in range(27):
for i in range(17):
    predicted[i] = 0
# predicted[130] = 0
# predicted[131] = 0
# predicted[132] = 0
# predicted[133] = 0
# predicted[134] = 0

plt.rcParams['font.sans-serif'] = ['KaiTi']  # default font (supports CJK axis labels)
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when saving figures
font = {'family': 'KaiTi',
        'weight': 'normal',
        'size': 14,
        }
def draw_data(pre_data, true_data, picture_path, title):
    """Plot prediction vs. ground truth, save the figure to *picture_path*,
    then display it."""
    plt.plot(pre_data, label='prediction')
    plt.plot(true_data, label='true')
    plt.legend()  # show the legend
    plt.title(title)
    plt.xlabel("时间片（10min为间隔）", font)  # x-axis: time slice (10-minute interval)
    plt.ylabel("客流数", font)  # y-axis: passenger count
    plt.savefig(picture_path)
    plt.show()


# Report the test MSE (in original units) and plot prediction vs. ground truth.
mse = mean_squared_error(predicted, label_Y_test)
print('Test MSE: %.3f' % mse)
pic_path = "./attention.png"
draw_data(predicted, label_Y_test, pic_path, "attention\n MSE=%.3f" % mse)


def get_activations(model, inputs, print_shape_only=False, layer_name=None):
    """Return the output activations of the model's layers for *inputs*.

    Adapted from https://github.com/philipperemy/keras-visualize-activations

    :param model: built Keras model
    :param inputs: list of input arrays matching ``model.input``
    :param print_shape_only: kept for interface compatibility; unused here
        (the original only used it in commented-out debug prints)
    :param layer_name: if given, only layers with exactly this name are evaluated
    :return: list of activation arrays, one per selected layer
    """
    inp = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]  # all layer outputs
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]
    # One evaluation function per output; learning phase 1 = training mode.
    funcs = [K.function([inp] + [K.learning_phase()], [out]) for out in outputs]
    # Simplified: the original copied func results into a second list element
    # by element; a single comprehension returns the same list.
    return [func([inputs, 1])[0] for func in funcs]


import pandas as pd

# Fetch and plot the temporal attention weights (layer 'attention_weight_time').
attention_vectors = []  # NOTE(review): appears unused
tmp = get_activations(model, [input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test],
                      print_shape_only=True, layer_name='attention_weight_time')

attention_vector = np.mean(
    get_activations(model, [input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test],
                    print_shape_only=True, layer_name='attention_weight_time')[0], axis=0).squeeze()
# print(attention_vector.shape)
# After averaging over samples the result is 1*10; squeezing the singleton
# dimension leaves shape (10,).
# plot part.


mat = tmp[0]
# mat_new = Reshape((len(input_X_test),T))

pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
                                                               title='Attention Mechanism as '
                                                                     'a function of input'
                                                                     ' dimensions.')
plt.show()

# Same procedure for the input ("local") attention weights.
tmp = get_activations(model, [input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test],
                      print_shape_only=True, layer_name='attention_weight_local')

attention_vector = np.mean(
    get_activations(model, [input_X_test, input_Y_test, s0_test, h0_test, s_de0_test, h_de0_test],
                    print_shape_only=True, layer_name='attention_weight_local')[0], axis=0).squeeze()
# print(attention_vector.shape)

mat = tmp[0]
# mat_new = Reshape((len(input_X_test),T))
# print('mat_new ',mat)


pd.DataFrame(attention_vector[0], columns=['attention (%)']).plot(kind='bar',
                                                                  title='Attention Mechanism as '
                                                                        'a function of input'
                                                                        ' dimensions.')
plt.show()

import seaborn as sns

import csv

# Dump the first row of attention weights to CSV, one weight per line.
# Fix: use a `with` block so the file is closed even if a write fails
# (the original opened/closed the handle manually).
with open('p_attention.csv', 'w', newline='') as f:
    csv_writer = csv.writer(f)
    for t in attention_vector[0]:
        csv_writer.writerow([t])

# Heatmap of the averaged local (input) attention weights; tick labels are
# 1-based indices along each axis.
x_size = attention_vector.shape[0]
y_size = attention_vector.shape[1]
f, ax = plt.subplots(figsize=(x_size, y_size))
x_list = [i + 1 for i in range(x_size)]
y_list = [i + 1 for i in range(y_size)]
sns.heatmap(attention_vector, xticklabels=x_list, yticklabels=y_list, cmap="YlGnBu")
ax.set_xticklabels(ax.get_xticklabels(), fontsize=15, rotation=90)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=15)