from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Embedding,GRU,Dense,Input,Layer
import tensorflow as tf
import numpy as np
import os
import time

from utils.layers import BahdanauAttentionLayer

from utils.dataloader import UbuntuCornellDataLoader
from utils.datagenerator import Seq2seqDataGenerator
from utils.word_vocab import create_vocab

class Encoder(Layer):
    """GRU-based encoder.

    Embeds a batch of token-id sequences and runs them through a GRU,
    returning both the per-timestep outputs (consumed by the attention
    layer) and the final hidden state (used to seed the decoder).
    """

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz, embedding_layer=None):
        """
        Args:
            vocab_size: size of the token vocabulary.
            embedding_dim: dimensionality of the embedding vectors.
            enc_units: number of GRU units.
            batch_sz: batch size; only used by ``initialize_hidden_state``.
            embedding_layer: optional pre-built ``Embedding`` to share with
                the decoder; a fresh one is created when ``None``.
        """
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        # `is None` (identity test) rather than `== None`: comparing a Keras
        # layer with `==` can invoke tensor equality overloads.
        if embedding_layer is None:
            self.embedding = Embedding(vocab_size, embedding_dim)
        else:
            self.embedding = embedding_layer
        self.gru = GRU(self.enc_units,
                       return_sequences=True,
                       return_state=True,
                       recurrent_initializer='glorot_uniform')

    def call(self, x):
        """Encode a batch of token-id sequences.

        Args:
            x: int tensor of shape (batch, seq_len).
        Returns:
            output: per-timestep states, shape (batch, seq_len, enc_units).
            state: final hidden state, shape (batch, enc_units).
        """
        x = self.embedding(x)
        output, state = self.gru(x)
        return output, state

    def initialize_hidden_state(self):
        """Return an all-zeros initial state of shape (batch_sz, enc_units)."""
        return tf.zeros((self.batch_sz, self.enc_units))

class Decoder(Layer):
    """GRU decoder with Bahdanau attention.

    At each call it decodes a single timestep: attends over the encoder
    outputs, concatenates the context vector with the embedded input token,
    runs one GRU step and projects to vocabulary logits.
    """

    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz, embedding_layer=None):
        """
        Args:
            vocab_size: size of the token vocabulary (also the logit width).
            embedding_dim: dimensionality of the embedding vectors.
            dec_units: number of GRU units (must match the encoder's so the
                encoder state can seed the decoder).
            batch_sz: batch size (stored; not used in ``call``).
            embedding_layer: optional pre-built ``Embedding`` to share with
                the encoder; a fresh one is created when ``None``.
        """
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        # `is None` rather than `== None` — idiomatic identity check.
        if embedding_layer is None:
            self.embedding = Embedding(vocab_size, embedding_dim)
        else:
            self.embedding = embedding_layer
        self.gru = GRU(self.dec_units,
                       return_state=True,
                       return_sequences=True,
                       recurrent_initializer='glorot_uniform')
        # No softmax here: logits feed SparseCategoricalCrossentropy(from_logits=True).
        self.fc = Dense(vocab_size)
        self.attention = BahdanauAttentionLayer(self.dec_units)

    def call(self, x, hidden, enc_output):
        """Decode one timestep.

        Args:
            x: current input token ids, shape (batch, 1).
            hidden: previous decoder hidden state, shape (batch, dec_units).
            enc_output: encoder outputs, shape (batch, max_len, hidden_size).
        Returns:
            x: vocabulary logits, shape (batch, vocab_size).
            state: new hidden state, shape (batch, dec_units).
            attention_weights: attention distribution over encoder steps.
        """
        # Context vector from attending over the encoder outputs.
        context_vector, attention_weights = self.attention(hidden, enc_output)

        # After embedding: (batch, 1, embedding_dim).
        x = self.embedding(x)

        # After concatenation: (batch, 1, embedding_dim + hidden_size).
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)

        # Single GRU step seeded with the previous hidden state.
        output, state = self.gru(x, initial_state=hidden)

        # Collapse the length-1 time axis: (batch * 1, hidden_size).
        output = tf.reshape(output, (-1, output.shape[2]))

        # Project to logits: (batch, vocab_size).
        x = self.fc(output)

        return x, state, attention_weights


# Adam with default hyper-parameters; shared by train_step in __main__.
optimizer = tf.keras.optimizers.Adam()
# from_logits=True because the decoder's final Dense has no softmax;
# reduction='none' keeps per-token losses so loss_function can apply the
# padding mask before averaging.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True,
    reduction='none'
)

def loss_function(real, pred):
    """Padding-masked sparse-categorical cross-entropy.

    Positions where ``real`` == 0 (the padding id) are excluded from both
    the summed loss and the normalizer. The original ``tf.reduce_mean``
    divided by the full element count including padding, which diluted the
    loss on short sequences.

    Args:
        real: int tensor of target token ids; 0 marks padding.
        pred: logits with trailing dimension vocab_size.
    Returns:
        Scalar mean loss over non-padding positions (0.0 if all padding).
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask

    # Average over real tokens only; divide_no_nan guards all-padding batches.
    return tf.math.divide_no_nan(tf.reduce_sum(loss_), tf.reduce_sum(mask))



if __name__ == '__main__':
    #### Hyper-parameters (toy sizes — presumably placeholders for real runs).
    max_len = 2
    start_id = 1
    embedding_dim = 1
    attention_dim = 1
    units = 1
    BATCH_SIZE = 1


    ######### Data loading
    ubuntu_data_object = UbuntuCornellDataLoader()
    train_data = ubuntu_data_object.get_train_data()
    vocab = create_vocab(train_data)
    vocab_size = len(vocab)  # len(vocab.items()) == len(vocab)
    ubuntu_data_generator = Seq2seqDataGenerator(train_data, vocab, max_len)


    ##### Model definition
    enc_input = Input(shape=(max_len,))
    dec_input = Input(shape=(1,))
    # NOTE(review): dec_hidden_state is declared as a model input but never
    # connected to any layer; kept to preserve the model's input signature.
    dec_hidden_state = Input(shape=(units,))
    # One embedding table shared by encoder and decoder.
    embedding_layer = Embedding(vocab_size, embedding_dim)

    # Encoder
    encoder_layer = Encoder(vocab_size, embedding_dim, units, BATCH_SIZE, embedding_layer)

    enc_output, enc_last_hidden = encoder_layer(enc_input)
    print('Encoder output shape: (batch size, sequence length, units) {}'.format(enc_output.shape))
    print('Encoder Hidden state shape: (batch size, units) {}'.format(enc_last_hidden.shape))

    decoder_layer = Decoder(vocab_size, embedding_dim, units, BATCH_SIZE, embedding_layer)
    decoder_output, decoder_hidden_state, _ = decoder_layer(dec_input,
                                                            enc_last_hidden, enc_output)

    print('Decoder output shape: (batch_size, vocab size) {}'.format(decoder_output.shape))


    seq2seq_model = Model([enc_input, dec_input, dec_hidden_state],
                          [decoder_output, decoder_hidden_state, enc_last_hidden])
    seq2seq_model.summary()

    @tf.function
    def train_step(inp, targ, final_targ):
        """Run one teacher-forced training step.

        Args:
            inp: encoder input batch, shape (batch, max_len).
            targ: decoder input tokens fed one step at a time.
            final_targ: target tokens the predictions are scored against.
        Returns:
            Scalar mean loss per decoding timestep for this batch.
        """
        loss = 0
        with tf.GradientTape() as tape:
            # Call the layer objects directly. The original indexed
            # seq2seq_model.layers[2]/[4], but with three Input layers those
            # positional indices do not point at the encoder/decoder.
            enc_output_train, enc_hidden = encoder_layer(inp)
            dec_hidden = enc_hidden

            for t in range(targ.shape[1]):
                # Teacher forcing — feed the ground-truth token at step t.
                dec_input_train = tf.expand_dims(targ[:, t], 1)
                # Decode one step against the full encoder output.
                predictions, dec_hidden, _ = decoder_layer(dec_input_train, dec_hidden, enc_output_train)

                loss += loss_function(final_targ[:, t], predictions)

        batch_loss = loss / int(targ.shape[1])
        variables = seq2seq_model.trainable_variables
        gradients = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(gradients, variables))
        return batch_loss

    input_tensor_train, target_tensor_train, target_final_tensor_train = ubuntu_data_generator.get_all_data()
    steps_per_epoch = len(input_tensor_train) // BATCH_SIZE

    dataset = tf.data.Dataset.from_tensor_slices(
        (input_tensor_train, target_tensor_train, target_final_tensor_train)).shuffle(1000)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

    EPOCHS = 10
    for epoch in range(EPOCHS):
        start = time.time()
        total_loss = 0
        # Iterate the whole epoch. The original did dataset.take(BATCH_SIZE)
        # and then broke unconditionally after the first batch (debug
        # leftover), so each "epoch" trained on a single batch.
        for (batch, (inp, targ, final_targ)) in enumerate(dataset.take(steps_per_epoch)):
            batch_loss = train_step(inp, targ, final_targ)
            total_loss += batch_loss

            # Log periodically rather than on every batch.
            if batch % 100 == 0:
                print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
                                                             batch,
                                                             batch_loss.numpy()))
        if (epoch + 1) % 2 == 0:
            seq2seq_model.save('./best_model')

        # Normalize by the actual batch count, not a hard-coded 100.
        print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                            total_loss / max(steps_per_epoch, 1)))
        print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
