"""
    Transformer
"""
import tensorflow as tf
import time
import numpy as np
import more_itertools

from .common_function import loss_function, create_masks
from .encoder import Encoder
from .decoder import Decoder
import config
from .utils import time_since

# Optimizer learning-rate schedule (from "Attention Is All You Need")
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Transformer learning-rate schedule (Vaswani et al., 2017).

    lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)

    The rate grows linearly for the first `warmup_steps` steps, then
    decays proportionally to the inverse square root of the step number.
    """

    def __init__(self, warmup_steps=4000):
        super(CustomSchedule, self).__init__()

        # Model width comes from the project config; cast once so the
        # rsqrt below operates on a float tensor.
        self.d_model = config.d_model
        self.d_model = tf.cast(self.d_model, tf.float32)

        self.warmup_steps = warmup_steps

    def __call__(self, step):
        # Keras optimizers may pass `step` as an integer tensor;
        # tf.math.rsqrt requires a floating dtype, so cast defensively.
        step = tf.cast(step, tf.float32)
        arg1 = tf.math.rsqrt(step)
        arg2 = step * (self.warmup_steps ** -1.5)

        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)


class Transformer(tf.keras.Model):
    """Encoder-decoder Transformer with a final vocabulary projection,
    plus helpers for training, evaluation, greedy decoding and
    checkpoint management."""

    def __init__(self, input_vocab_size, target_vocab_size,
                 pe_input, pe_target,
                 learning_rate, optimizer, loss_object,
                 input_tokenizer, target_tokenizer,
                 checkpoint_path=None):
        """
        Args:
            input_vocab_size: source-side vocabulary size.
            target_vocab_size: target-side vocabulary size.
            pe_input: maximum positional-encoding length for the encoder.
            pe_target: maximum positional-encoding length for the decoder.
            learning_rate: learning rate (or schedule) for the default optimizer.
            optimizer: optimizer instance; when falsy, Adam with the
                hyper-parameters from "Attention Is All You Need" is used.
            loss_object: loss passed through to `loss_function`.
            input_tokenizer: subword tokenizer for the source language.
            target_tokenizer: subword tokenizer for the target language.
            checkpoint_path: checkpoint directory
                (defaults to "./checkpoint/train").
        """
        super(Transformer, self).__init__()
        # Encoder stack.
        self.encoder = Encoder(input_vocab_size, pe_input)
        # Decoder stack.
        self.decoder = Decoder(target_vocab_size, pe_target)
        # Final projection onto the target vocabulary (produces logits).
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

        # Where checkpoints are written/restored.
        self.checkpoint_path = checkpoint_path or "./checkpoint/train"

        self.learning_rate = learning_rate

        # Default optimizer: Adam with the beta/epsilon values from the
        # original Transformer paper.
        self.optimizer = optimizer or tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                                               epsilon=1e-9)
        # Loss object consumed by `loss_function`.
        self.loss_object = loss_object

        # Checkpoint both the model weights and the optimizer state.
        self.ckpt = tf.train.Checkpoint(transformer=self,
                                        optimizer=self.optimizer)
        self.ckpt_manager = tf.train.CheckpointManager(self.ckpt, self.checkpoint_path, max_to_keep=3)

        self.input_tokenizer = input_tokenizer
        self.target_tokenizer = target_tokenizer

        # Running metrics; reset at the start of every epoch.
        # NOTE: the same two metrics are reused by `eval`/`test_step`.
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name='train_accuracy')

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        """Forward pass: encode `inp`, decode `tar`, project to logits.

        Returns:
            Tuple of (final_output, attention_weights), where
            final_output has shape (batch_size, tar_seq_len,
            target_vocab_size).
        """
        # Encode the source sequence.
        enc_output = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        # Decode conditioned on the encoder output.
        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask)
        # Project decoder states onto the target vocabulary.
        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)
        return final_output, attention_weights

    def train(self, train_dataset, epochs):
        """Run the training loop for `epochs` passes over `train_dataset`,
        logging progress every 100 batches and saving a checkpoint after
        each epoch."""
        # Count batches once so progress lines can show batch/total.
        total_batch = more_itertools.ilen(train_dataset)

        for epoch in range(epochs):
            start = time.time()
            # Reset running metrics for this epoch.
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()

            for (batch, (inp, tar)) in enumerate(train_dataset):
                self.train_step(inp, tar)

                if batch % 100 == 0:
                    print(f'| epoch:{epoch+1} '
                          f'| batches:{batch}/{total_batch} '
                          f'| ms/batch:{time_since(start)} '
                          f'| loss:{self.train_loss.result():.4f} '
                          f'| accuracy:{self.train_accuracy.result():.4f}'
                         )

            # Persist weights + optimizer state at every epoch boundary.
            ckpt_save_path = self.ckpt_manager.save()
            print('-' * 89)

            print(f'| end of 1 epoch '
                  f'| time:{time_since(start)} '
                  f'| loss:{self.train_loss.result():.4f} '
                  f'|accuracy:{self.train_accuracy.result():.4f}')
            print('-'* 89)

    # Generic (None, None) shapes avoid retracing the tf.function for
    # every distinct sequence length / batch size.
    train_step_signature = [
        tf.TensorSpec(shape=(None, None), dtype=tf.int64),
        tf.TensorSpec(shape=(None, None), dtype=tf.int64),
    ]

    @tf.function(input_signature=train_step_signature)
    def train_step(self, inp, tar):
        """One optimization step on a single (inp, tar) batch.

        Compiled with @tf.function; the input_signature above keeps a
        single trace despite variable sequence lengths and a smaller
        final batch.
        """
        # Teacher forcing: decoder input is the target shifted right,
        # the prediction target is the target shifted left.
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]

        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)

        with tf.GradientTape() as tape:
            predictions, _ = self.call(inp, tar_inp,
                                       True,
                                       enc_padding_mask,
                                       combined_mask,
                                       dec_padding_mask)
            loss = loss_function(self.loss_object, tar_real, predictions)

        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))

        self.train_loss(loss)
        self.train_accuracy(tar_real, predictions)

    @tf.function(input_signature=train_step_signature)
    def test_step(self, inp, tar):
        """One evaluation step on a single (inp, tar) batch: forward
        pass only, accumulating the shared loss/accuracy metrics.

        Compiled with @tf.function using the same generic signature as
        `train_step` to avoid retracing.
        """
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]

        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)

        # Fixes vs. the previous version: no GradientTape (no gradients
        # are taken during evaluation) and training=False so dropout is
        # disabled for inference-mode metrics.
        predictions, _ = self.call(inp, tar_inp,
                                   False,
                                   enc_padding_mask,
                                   combined_mask,
                                   dec_padding_mask)
        loss = loss_function(self.loss_object, tar_real, predictions)

        self.train_loss(loss)
        self.train_accuracy(tar_real, predictions)

    def eval(self, test_dataset, epochs=2):
        """Evaluate the model on `test_dataset` for `epochs` passes and
        print the mean loss/accuracy across passes.

        Args:
            test_dataset: iterable of (inp, tar) batches.
            epochs: number of evaluation passes (default 2).
        """
        total_loss = []
        total_accuracy = []
        for epoch in range(epochs):
            start = time.time()
            # Reset the shared metrics before each pass.
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()

            for (batch, (inp, tar)) in enumerate(test_dataset):
                self.test_step(inp, tar)
            print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
                                                                self.train_loss.result(),
                                                                self.train_accuracy.result()))
            total_loss.append(self.train_loss.result())
            total_accuracy.append(self.train_accuracy.result())

        print('=' * 89)
        print(f'| End of training | test loss {np.mean(total_loss):.4f} | test accuracy {np.mean(total_accuracy):.4f}')
        print('=' * 89)

    def predict(self, inp_sentence):
        """Greedy-decode a translation of `inp_sentence`.

        Args:
            inp_sentence: raw source-language string.
        Returns:
            Tuple of (token-id tensor including the start token,
            attention weights from the last decoder step).
        """
        # By convention vocab_size is the start token id and
        # vocab_size + 1 is the end token id.
        start_token = [self.input_tokenizer.vocab_size]
        end_token = [self.input_tokenizer.vocab_size + 1]

        # Wrap the encoded sentence with start/end markers and add a
        # batch dimension.
        inp_sentence = start_token + self.input_tokenizer.encode(inp_sentence) + end_token
        encoder_input = tf.expand_dims(inp_sentence, 0)

        # Decoder starts from the target-side start token.
        decoder_input = [self.target_tokenizer.vocab_size]
        output = tf.expand_dims(decoder_input, 0)

        for i in range(config.MAX_LENGTH):
            enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
                encoder_input, output)

            # predictions.shape == (batch_size, seq_len, vocab_size)
            predictions, attention_weights = self.call(encoder_input,
                                                       output,
                                                       False,
                                                       enc_padding_mask,
                                                       combined_mask,
                                                       dec_padding_mask)

            # Keep only the logits for the last generated position.
            predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)

            predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)

            # Stop as soon as the end token is produced.
            if predicted_id == self.target_tokenizer.vocab_size + 1:
                return tf.squeeze(output, axis=0), attention_weights

            # Append the prediction and feed it back as decoder input.
            output = tf.concat([output, predicted_id], axis=-1)

        return tf.squeeze(output, axis=0), attention_weights

    def translate(self, sentence):
        """Translate `sentence` and return (source, translation) as text.

        Start/end/padding ids (>= vocab_size) are stripped before
        decoding back to a string.
        """
        result, attention_weights = self.predict(sentence)
        predicted_sentence = self.target_tokenizer.decode([i for i in result
                                                           if i < self.target_tokenizer.vocab_size])

        return sentence, predicted_sentence

    def load_model(self):
        """Restore the latest checkpoint, if one exists."""
        if self.ckpt_manager.latest_checkpoint:
            self.ckpt.restore(self.ckpt_manager.latest_checkpoint)
            print('Latest checkpoint restored!!')
