
# Create and build seq2seq.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU
from tensorflow.keras import Input
from tensorflow.keras.layers import LSTM, Bidirectional, Dense, Attention
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
import tensorflow as tf

# Number of features per timestep in the encoder input sequence.
in_feature = 5
# Number of features per timestep in the decoder input / model output.
out_feature = 5

def r_square(y_true, y_pred):
    """Coefficient of determination (R^2) metric.

    R^2 = 1 - SS_res / SS_tot, where SS_res is the residual sum of squares
    between predictions and targets and SS_tot is the total sum of squares
    of the targets around their mean.

    The previous implementation returned SS_reg / SS_tot (the explained
    variance ratio), which equals R^2 only for an OLS fit evaluated on its
    own training data, and it divided by zero for a constant y_true.

    Args:
        y_true: ground-truth tensor.
        y_pred: prediction tensor of the same shape.

    Returns:
        Tensor of R^2 values reduced over the last axis.
    """
    ss_res = K.mean(K.square(y_true - y_pred), axis=-1)
    ss_tot = K.mean(K.square(y_true - K.mean(y_true)), axis=-1)
    # K.epsilon() guards against division by zero when y_true is constant.
    return 1 - ss_res / (ss_tot + K.epsilon())

def mse(y_true, y_pred):
    """Mean squared error, reduced over the last (feature) axis."""
    residual = y_pred - y_true
    return K.mean(K.square(residual), axis=-1)

# def create_seq2seq(encoder_length, n_lstm, decoder_length):
#     # 输入
#     encoder_inputs = Input(shape=(encoder_length, 5))

#     # 编码器 encoder
#     # encoder = Bidirectional(LSTM(n_lstm, activation='relu', return_sequences=True, return_state=True), name='encoder')
#     encoder = LSTM(n_lstm, activation='relu', return_sequences=True, return_state=True, name='encoder', dropout=0.1)
#     encoder_outputs, state_h, state_c = encoder(encoder_inputs)
#     encoder_state = [state_h, state_c]

#     # 解码器
#     decoder_inputs = Input(shape=(decoder_length, 5))
#     decoder = LSTM(n_lstm, activation='relu', return_sequences=True, return_state=True, name='decoder', dropout=0.1)
#     decoder_outputs, _, _ = decoder(decoder_inputs, initial_state=encoder_state)

#     # 添加全连接层
#     decoder_dense = Dense(5, activation='softmax')
#     outputs = decoder_dense(decoder_outputs)

#     model = Model([encoder_inputs, decoder_inputs], outputs)
#     # model = Model([encoder_inputs], outputs)

#     return model

# All-relu: converges on the test set, but loss starts rising around epoch 35
# tanh, tanh, relu
def create_lstm2lstm(encoder_length, n_lstm, decoder_length):
    """Build an LSTM encoder-decoder (seq2seq) model.

    Args:
        encoder_length: number of timesteps in the encoder input.
        n_lstm: number of LSTM units in both encoder and decoder.
        decoder_length: number of timesteps in the decoder input.

    Returns:
        A Keras Model mapping [encoder_inputs, decoder_inputs] to a
        (decoder_length, out_feature) output sequence.
    """
    # Encoder: consume the input sequence and expose its final (h, c) state.
    encoder_inputs = Input(shape=(encoder_length, in_feature))
    encoder_outputs, enc_h, enc_c = LSTM(
        n_lstm, activation='relu', return_sequences=True, return_state=True,
        name='encoder', dropout=0.1)(encoder_inputs)

    # Decoder: initialised from the encoder's final state.
    decoder_inputs = Input(shape=(decoder_length, out_feature))
    decoder_outputs, _, _ = LSTM(
        n_lstm, activation='relu', return_sequences=True, return_state=True,
        name='decoder', dropout=0.1)(decoder_inputs, initial_state=[enc_h, enc_c])

    # Project each decoder timestep back to the output feature space.
    outputs = Dense(out_feature, activation='relu')(decoder_outputs)

    return Model([encoder_inputs, decoder_inputs], outputs)

def create_gru2gru(encoder_length, n_lstm, decoder_length):
    """Build a GRU encoder-decoder (seq2seq) model.

    Mirrors create_lstm2lstm, but a GRU carries a single hidden state
    instead of the LSTM's (h, c) pair.

    Args:
        encoder_length: number of timesteps in the encoder input.
        n_lstm: number of GRU units in both encoder and decoder.
        decoder_length: number of timesteps in the decoder input.

    Returns:
        A Keras Model mapping [encoder_inputs, decoder_inputs] to a
        (decoder_length, out_feature) output sequence.
    """
    encoder_inputs = Input(shape=(encoder_length, in_feature))

    # Encoder: a GRU returns its outputs plus a single state tensor.
    encoder = GRU(n_lstm, activation='relu', return_sequences=True,
                  return_state=True, name='encoder', dropout=0.1)
    encoder_outputs, encoder_state = encoder(encoder_inputs)

    # Decoder, initialised from the encoder's final hidden state.
    # NOTE: was `in_feature`; changed to `out_feature` for consistency with
    # create_lstm2lstm (both constants are 5, so behavior is unchanged).
    decoder_inputs = Input(shape=(decoder_length, out_feature))
    decoder = GRU(n_lstm, activation='relu', return_sequences=True,
                  return_state=True, name='decoder', dropout=0.1)
    decoder_outputs, _ = decoder(decoder_inputs, initial_state=encoder_state)

    # Project each decoder timestep back to the output feature space.
    decoder_dense = Dense(out_feature, activation='relu')
    outputs = decoder_dense(decoder_outputs)
    model = Model([encoder_inputs, decoder_inputs], outputs)

    return model


###################################
###################################

class Attention(tf.keras.Model):
    """Luong-style attention layer for the trajectory seq2seq model.

    Only the 'general' score function is active:
        score = decoder_output @ (Wa @ encoder_output)^T
    The 'dot' and 'concat' variants were removed; `attention_func` is kept
    for interface compatibility but is not consulted.

    NOTE(review): this class shadows tensorflow.keras.layers.Attention,
    which is imported at the top of the file.
    """

    def __init__(self, n_lstm, attention_func):
        super().__init__()
        self.attention_func = attention_func
        # Projection used by the 'general' score function.
        self.wa = tf.keras.layers.Dense(n_lstm)

    @tf.function
    def call(self, decoder_output, encoder_output):
        # decoder_output: (batch, 1, n_lstm);
        # encoder_output: (batch, encoder_length, n_lstm)
        # score: (batch, 1, encoder_length)
        score = tf.matmul(decoder_output, self.wa(encoder_output),
                          transpose_b=True)
        # Softmax over encoder timesteps gives the attention weights.
        weights = tf.nn.softmax(score, axis=2)
        # Context vector c_t: attention-weighted sum of encoder outputs.
        context = tf.matmul(weights, encoder_output)
        return context, weights


# class DecoderAttention(tf.keras.Model):
#     """Decoder with Attention mechanism of seq2seq model for trajectory prediction.
#     The decoder can only process one point, so you need call it in a loop when traning and predicting.
#     Build RNN layer with keras.layers.LSTM.
#     """
#     def __init__(self, n_lstm, attention_func):
#         super().__init__()
#         # 使用自定义的attention
#         self.attention = Attention(n_lstm, attention_func)
#         # 使用keras的attention
#         # self.attention = Attention()

#         self.n_lstm = n_lstm
#         self.lstm = tf.keras.layers.LSTM(self.n_lstm, return_sequences=True, return_state=True, activation=tf.nn.relu, dropout=0.1)
#         self.wc = tf.keras.layers.Dense(n_lstm, activation='tanh', name="wc_layer")
#         self.out = tf.keras.layers.Dense(units = 5, name="output_layer")
#     @tf.function
#     def call(self, seq_in, state, encoder_outputs):
#         # lstm_out.shape: (batch_size, 1, n_lstm)
# lstm_out, state_h, state_c = self.lstm(seq_in, initial_state=state)
# # context.shape: (batch_size, 1, n_lstm)
# # alignment.shape: (batch_size, 1, seq_length)
# context, alignment = self.attention(decoder_output=lstm_out, encoder_output=encoder_outputs)
# # context = self.attention([lstm_out, encoder_outputs])

# # Combine the context vector and the LSTM output
# # lstm_out = tf.concat([tf.squeeze(context, 1), tf.squeeze(lstm_out, 1)], 1)
# lstm_out = tf.concat([context, lstm_out], 2)
# # lstm_out now has shape (batch_size, rnn_size)
# lstm_out = self.wc(lstm_out)
# # Finally, it is converted back to trajectory space: (batch_size, 5)
# logits = self.out(lstm_out)

# return logits, lstm_out, state_h, state_c


class DecoderAttention(tf.keras.Model):
    """Single-step decoder with (general) Luong attention for trajectory
    prediction.

    The decoder processes one point per call, so callers invoke it in a
    loop during training and prediction.

    NOTE(review): `attention_func` is accepted for interface compatibility
    but ignored; only the 'general' score function is implemented here.
    """

    def __init__(self, n_lstm, attention_func):
        super().__init__()
        # Projection for the 'general' attention score.
        self.wa = tf.keras.layers.Dense(n_lstm)
        self.n_lstm = n_lstm
        self.lstm = tf.keras.layers.LSTM(
            self.n_lstm, return_sequences=True, return_state=True,
            activation=tf.nn.relu, dropout=0.1)
        # Mixes the context vector with the LSTM output.
        self.wc = tf.keras.layers.Dense(n_lstm, activation='tanh',
                                        name="wc_layer")
        # Final projection back to trajectory space.
        self.out = tf.keras.layers.Dense(units=out_feature,
                                         name="output_layer")

    def call(self, seq_in, state, encoder_outputs):
        # Run one decoder step; rnn_out: (batch, 1, n_lstm).
        rnn_out, state_h, state_c = self.lstm(seq_in, initial_state=state)
        # General score against every encoder timestep:
        # (batch, 1, encoder_length).
        score = tf.matmul(rnn_out, self.wa(encoder_outputs), transpose_b=True)
        weights = tf.nn.softmax(score, axis=2)
        # Context vector: attention-weighted sum of encoder outputs.
        context = tf.matmul(weights, encoder_outputs)
        # Fuse context with the decoder output, then project to outputs.
        fused = self.wc(tf.concat([context, rnn_out], 2))
        logits = self.out(fused)
        return logits, fused, state_h, state_c


# # Attention, Attention 模块单独创建
# def create_Attention_seq2seq(encoder_length, n_lstm, decoder_length):
#     # 输入
#     encoder_inputs = Input(shape=(encoder_length, 5))

#     # 编码器 encoder
#     # encoder = Bidirectional(LSTM(n_lstm, activation='relu', return_sequences=True, return_state=True), name='encoder')
#     encoder = LSTM(n_lstm, activation='relu', return_sequences=True, return_state=True, name='encoder', dropout=0.1)
#     encoder_outputs, state_h, state_c = encoder(encoder_inputs)
#     encoder_state = [state_h, state_c]

#     # 解码器
#     decoder_inputs = Input(shape=(decoder_length, 5))
#     decoder = DecoderAttention(n_lstm=n_lstm, attention_func='general')
#     outputs, _, _, _ = decoder(seq_in=decoder_inputs, state=encoder_state, encoder_outputs=encoder_outputs)

#     model = Model([encoder_inputs, decoder_inputs], outputs)

#     return model


# Attention built inline (not as a separate module)
def create_Attention_seq2seq(encoder_length, n_lstm, decoder_length):
    """Build an LSTM seq2seq model with (general) Luong attention,
    constructed inline rather than via the DecoderAttention class.

    Args:
        encoder_length: number of timesteps in the encoder input.
        n_lstm: number of LSTM units in both encoder and decoder.
        decoder_length: number of timesteps in the decoder input.

    Returns:
        A Keras Model mapping [encoder_inputs, decoder_inputs] to a
        (decoder_length, out_feature) output sequence.
    """
    # Encoder: consume the input sequence and expose its final (h, c) state.
    encoder_inputs = Input(shape=(encoder_length, in_feature))
    encoder = LSTM(n_lstm, activation='tanh', return_sequences=True,
                   return_state=True, name='encoder', dropout=0.1)
    encoder_outputs, enc_h, enc_c = encoder(encoder_inputs)

    # Decoder input and the layers used by the attention decoder.
    decoder_inputs = Input(shape=(decoder_length, out_feature))
    wa = tf.keras.layers.Dense(n_lstm)
    lstm = tf.keras.layers.LSTM(n_lstm, return_sequences=True,
                                return_state=True, activation='tanh',
                                dropout=0.1)
    wc = tf.keras.layers.Dense(n_lstm, activation='tanh', name="wc_layer")
    out = tf.keras.layers.Dense(units=out_feature, name="output_layer")

    # Decode the full sequence from the encoder's final state.
    rnn_out, _, _ = lstm(decoder_inputs, initial_state=[enc_h, enc_c])
    # General attention score against every encoder timestep.
    score = tf.matmul(rnn_out, wa(encoder_outputs), transpose_b=True)
    weights = tf.nn.softmax(score, axis=2)
    # Context vector: attention-weighted sum of encoder outputs.
    context = tf.matmul(weights, encoder_outputs)
    # Fuse context with the decoder output, then project to outputs.
    fused = wc(tf.concat([context, rnn_out], 2))
    outputs = out(fused)

    return Model([encoder_inputs, decoder_inputs], outputs)