from abc import ABC
import tensorflow as tf
import time
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.layers import einsum_dense
from tensorflow.python.ops import special_math_ops

# Log the TensorFlow version up front so environment mismatches are easy to spot.
print(tf.__version__)


# process function
def process_en(s):
    """Normalize an English sentence and wrap it in <start>/<end> markers.

    Lowercases, pads sentence punctuation with spaces, collapses runs of
    quotes/spaces, and replaces any character outside the allowed alphabet
    with a space.
    """
    rules = [
        (r"([?.!])", r" \1 "),          # isolate punctuation as its own token
        (r'[" "]+', " "),               # collapse runs of quotes/spaces
        (r"[^a-zA-Z?.!,¿]+", " "),      # anything else becomes a space
    ]
    result = s.lower()
    for pattern, replacement in rules:
        result = re.sub(pattern, replacement, result)
    return f"<start> {result} <end>"


def process_cn(s):
    """Normalize a Chinese sentence and wrap it in <start>/<end> markers.

    Pads full-width punctuation, inserts a space after every CJK character
    so the text splits into single characters, and drops anything outside
    the CJK/punctuation set.
    """
    rules = [
        (r"([？。，！])", r" \1 "),                 # isolate full-width punctuation
        (r"([\u2E80-\u9FFF？。，！])", r"\1 "),     # space after each character → char-level tokens
        (r'[" "]+', " "),                           # collapse runs of quotes/spaces
        (r"[^\u2E80-\u9FFF？。，！]+", " "),        # anything non-CJK becomes a space
    ]
    result = s
    for pattern, replacement in rules:
        result = re.sub(pattern, replacement, result)
    return f"<start> {result} <end>"


def process_dataset(text_path):
    """Load the tab-separated corpus and normalize both language columns.

    Column 0 is treated as English, column 1 as Chinese; each is run through
    the corresponding process_* normalizer.
    """
    frame = pd.read_table(text_path, header=None, sep=r"\t", engine='python')[[0, 1]]
    frame[0] = frame[0].map(process_en)
    frame[1] = frame[1].map(process_cn)
    return frame


def tokenize(lang):
    """Fit a word-index tokenizer on `lang` and return padded id sequences.

    :param lang: iterable of already-normalized sentences
    :return: (post-padded 2-D array of token ids, fitted tokenizer)
    """
    # filters='' keeps the <start>/<end> markers and punctuation tokens intact
    tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
    tokenizer.fit_on_texts(lang)
    sequences = tokenizer.texts_to_sequences(lang)
    # pad at the end so index 0 stays the padding id
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post')
    return padded, tokenizer


def load_dataset(text_path, num_examples=None):
    """Read the corpus and tokenize both languages.

    :param text_path: path to the tab-separated corpus file
    :param num_examples: if given, only the first num_examples rows are used
        (bug fix: this parameter was previously accepted but ignored)
    :return: (input ids, target ids, input tokenizer, target tokenizer)
    """
    df_texts = process_dataset(text_path)
    if num_examples is not None:
        df_texts = df_texts.iloc[:num_examples]
    input_tensor, inp_lang_tokenizer = tokenize(df_texts[0])
    target_tensor, targ_lang_tokenizer = tokenize(df_texts[1])
    return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer


def init_dataset_setting(BUFFER_SIZE, BATCH_SIZE):
    """Return a dataset factory bound to the given shuffle buffer and batch size."""

    def dataset_generator(inp_lang, targ_lang, input_tensor_train, target_tensor_train):
        # inp_lang/targ_lang are kept for interface compatibility; the
        # vocabulary-size locals previously computed here were never used.
        dataset = tf.data.Dataset.from_tensor_slices(
            (input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
        # drop_remainder keeps every batch the same shape for tf.function tracing
        dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
        return dataset

    return dataset_generator


def data_pipeline(text_path, num_examples=None, test_size=0.2):
    """Build shuffled, batched train/test tf.data pipelines from the corpus.

    Uses the module-level BUFFER_SIZE and BATCH_SIZE settings.
    :return: (train dataset, test dataset, English tokenizer, Chinese tokenizer)
    """
    global BUFFER_SIZE, BATCH_SIZE
    input_ids, target_ids, en_lang, cn_lang = load_dataset(text_path, num_examples)
    input_ids = input_ids.astype('int64')
    target_ids = target_ids.astype('int64')
    train_x, val_x, train_y, val_y = train_test_split(input_ids, target_ids, test_size=test_size)
    make_dataset = init_dataset_setting(BUFFER_SIZE, BATCH_SIZE)
    train_dataset = make_dataset(en_lang, cn_lang, train_x, train_y)
    test_dataset = make_dataset(en_lang, cn_lang, val_x, val_y)
    return train_dataset, test_dataset, en_lang, cn_lang


def positional_encoding(pos, d_model):
    """Sinusoidal positional encoding of shape (1, pos, d_model), float32.

    angle[p, i] = p / 10000^(2*(i//2)/d_model); even columns take sin,
    odd columns take cos ("Attention Is All You Need", section 3.5).
    """
    i = np.arange(d_model)[np.newaxis, :]
    angle = np.arange(pos)[:, np.newaxis] / np.power(10000, 2 * (i // 2) / d_model)
    angle[:, ::2] = np.sin(angle[:, ::2])
    # bug fix: cos must be applied to the odd columns' own angles;
    # previously it was np.cos(angle[:, ::2]) — cos of the already
    # sin-transformed even columns (and a shape mismatch for odd d_model).
    angle[:, 1::2] = np.cos(angle[:, 1::2])
    pos_encoding = angle[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)


def create_padding_mask(seq):
    """Additive mask marking padding (id 0) positions with -1e9.

    :param seq: (batch_size, seq_len) integer token ids
    :return: (batch_size, 1, 1, seq_len) float mask, broadcastable over heads
    """
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return is_pad[:, tf.newaxis, tf.newaxis, :] * -1e9


def create_look_ahead_mask(len_seq):
    """Additive causal mask: -1e9 above the diagonal so position i cannot
    attend to positions > i; shape (len_seq, len_seq)."""
    lower_triangular = tf.linalg.band_part(tf.ones((len_seq, len_seq)), -1, 0)
    return (1 - lower_triangular) * -1e9


def scaled_dot_product_attention(q, k, v, mask=None):
    """Scaled dot-product attention over head-split tensors.

    q: (batch, L_q, heads, depth); k, v: (batch, L_k, heads, depth).
    Returns (output (batch, L_q, heads, depth), weights (batch, heads, L_q, L_k)).
    """
    # 'aecd,abcd->acbe': a=batch, c=head, b/e are the q/k sequence lengths,
    # giving logits of shape (batch, heads, L_q, L_k)
    logits = tf.einsum('aecd,abcd->acbe', k, q)
    dim_k = tf.dtypes.cast(k.shape[-1], tf.float32)
    logits = logits / tf.math.sqrt(dim_k)
    if mask is not None:
        # mask is additive (-1e9 on blocked positions)
        logits = logits + mask
    # normalize over the key axis
    weights = tf.nn.softmax(logits, axis=-1)
    output = tf.einsum('acbe,aecd->abcd', weights, v)
    return output, weights


def get_output_shape(output_rank, known_last_dims):
    """Build a Keras output-shape spec of the given rank: leading dimensions
    are None (unknown), trailing dimensions come from known_last_dims."""
    trailing = list(known_last_dims)
    leading = [None for _ in range(output_rank - len(trailing))]
    return leading + trailing


class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention built on EinsumDense projections.

    Inputs:
        v, k: (batch_size, len_k, d_model)
        q:    (batch_size, len_q, d_model)
    Outputs:
        output: (batch_size, len_q, d_model)
        attention weights: (batch_size, num_head, len_q, len_k)
    """

    def __init__(self, d_model, num_head):
        """
        :param d_model: embedding dimension
        :param num_head: number of heads; must divide d_model
        """
        super(MultiHeadAttention, self).__init__()
        self.h = num_head
        self.d_model = d_model
        assert d_model % num_head == 0
        self.depth = d_model // num_head
        # each projection maps (batch, len, d_model) -> (batch, len, h, depth)
        self.W_q = einsum_dense.EinsumDense(
            'abc,cde->abde',
            output_shape=get_output_shape(3, [self.h, self.depth]))
        self.W_k = einsum_dense.EinsumDense(
            'abc,cde->abde',
            output_shape=get_output_shape(3, [self.h, self.depth]))
        self.W_v = einsum_dense.EinsumDense(
            'abc,cde->abde',
            output_shape=get_output_shape(3, [self.h, self.depth]))

        # final linear projection back to d_model
        self.dense = tf.keras.layers.Dense(d_model)

    # bug fix: implement Keras' `call` instead of overriding `__call__`,
    # so Layer.__call__ can run its normal build/name-scope machinery.
    def call(self, v, k, q, mask):
        """
        :param v: (batch_size, len_k, d_model)
        :param k: (batch_size, len_k, d_model)
        :param q: (batch_size, len_q, d_model)
        :param mask: additive mask broadcastable to (batch_size, h, len_q, len_k), or None
        :return: (attention output, attention weights)
        """
        batch_size = tf.shape(q)[0]

        q = self.W_q(q)  # (batch_size, len_q, h, depth)
        k = self.W_k(k)  # (batch_size, len_k, h, depth)
        v = self.W_v(v)  # (batch_size, len_k, h, depth)

        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)
        # attention_weights: (batch_size, h, len_q, len_k)

        # bug fix: scaled_dot_product_attention's output equation
        # 'acbe,aecd->abcd' already yields (batch_size, len_q, h, depth),
        # so it can be flattened directly; the previous transpose to
        # (batch, h, len_q, depth) before the reshape scrambled heads
        # across sequence positions.
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))

        output = self.dense(concat_attention)
        return output, attention_weights


def feed_forward(d_model, dff):
    """Position-wise feed-forward network: Dense(dff, relu) -> Dense(d_model)."""
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation='relu'),
        tf.keras.layers.Dense(d_model),
    ])


class EncoderLayer(tf.keras.layers.Layer):
    """Single Transformer encoder block: self-attention then a position-wise
    FFN, each followed by dropout and a residual layer-norm.

    Mirrors https://www.tensorflow.org/tutorials/text/transformer?hl=en
    """

    def __init__(self, d_model=512, num_heads=8, dff=2048, rate=0.1):
        super(EncoderLayer, self).__init__()

        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = feed_forward(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        """x: (batch_size, input_seq_len, d_model) -> same shape."""
        # Self-attention sub-layer with residual connection.
        self_attn, _ = self.mha(x, x, x, mask)
        self_attn = self.dropout1(self_attn, training=training)
        attn_normed = self.layernorm1(x + self_attn)

        # Feed-forward sub-layer with residual connection.
        ff = self.ffn(attn_normed)
        ff = self.dropout2(ff, training=training)
        return self.layernorm2(attn_normed + ff)


class DecoderLayer(tf.keras.layers.Layer):
    """Single Transformer decoder block: masked self-attention, cross-attention
    over the encoder output, then a position-wise FFN — each followed by
    dropout and a residual layer-norm.

    Output: (batch_size, output_len, d_model) plus both attention-weight maps.
    Mirrors https://www.tensorflow.org/tutorials/text/transformer?hl=en
    """

    def __init__(self, d_model=512, num_heads=8, dff=2048, rate=0.1):
        super(DecoderLayer, self).__init__()

        self.mha1 = MultiHeadAttention(d_model, num_heads)
        self.mha2 = MultiHeadAttention(d_model, num_heads)

        self.ffn = feed_forward(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """x: (batch, output_len, d_model); enc_output: (batch, input_len, d_model)."""
        # Masked self-attention over the target sequence.
        masked_attn, weights_block1 = self.mha1(x, x, x, look_ahead_mask)
        masked_attn = self.dropout1(masked_attn, training=training)
        out1 = self.layernorm1(masked_attn + x)

        # Cross-attention: queries from the decoder, keys/values from the encoder.
        cross_attn, weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask)
        cross_attn = self.dropout2(cross_attn, training=training)
        out2 = self.layernorm2(cross_attn + out1)

        # Feed-forward sub-layer.
        ff = self.ffn(out2)
        ff = self.dropout3(ff, training=training)
        out3 = self.layernorm3(ff + out2)

        return out3, weights_block1, weights_block2


class Encoder(tf.keras.layers.Layer):
    """Transformer encoder: embedding + positional encoding + stacked
    EncoderLayers; maps (batch, len_input) ids to (batch, len_input, d_model)."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1):
        # bug fix: super() previously referenced the undefined lowercase name
        # `encoder`, raising NameError on construction.
        super(Encoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = tf.keras.layers.Embedding(input_vocab_size, self.d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding,
                                                self.d_model)

        self.enc_layers = [EncoderLayer(self.d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]

        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        """:param x: (batch_size, len_input) token ids
        :return: (batch_size, len_input, d_model)"""
        seq_len = tf.shape(x)[1]
        x = self.embedding(x)
        # scale embeddings before adding the positional signal (per the paper)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)

        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)
        return x


class Decoder(tf.keras.layers.Layer):
    """Transformer decoder: embedding + positional encoding + stacked
    DecoderLayers; returns (batch, target_seq_len, d_model) and a dict of
    per-layer attention weights."""

    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        # bug fix: super() previously referenced the undefined lowercase name
        # `decoder`, raising NameError on construction.
        super(Decoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)

        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        """:param x: (batch_size, target_seq_len) token ids
        :param enc_output: (batch_size, input_len, d_model)
        :return: (decoder output, {layer -> attention weights})"""
        seq_len = tf.shape(x)[1]
        attention_weights = {}

        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        # scale embeddings before adding the positional signal (per the paper)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x, training=training)

        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                   look_ahead_mask, padding_mask)

            attention_weights[f'decoder_layer{i + 1}_block1'] = block1
            attention_weights[f'decoder_layer{i + 1}_block2'] = block2

        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights


class Transformer(tf.keras.Model, ABC):
    """Full encoder-decoder Transformer producing per-token vocabulary logits."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, maximum_input_pos_encoding, maximum_output_pos_encoding, rate=0.1):
        """
        :param num_layers: number of layers in encoder and decoder
        :param dff: inner layer size of FFN
        """
        super(Transformer, self).__init__()

        # bug fix: the classes are named Encoder/Decoder; the lowercase names
        # used before were undefined (NameError at construction).
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, maximum_input_pos_encoding, rate)
        # (batch_size, length of input sequence, d_model)

        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, maximum_output_pos_encoding, rate)
        # (batch_size, length of target sequence, d_model)

        # bug fix: emit raw logits — the training loss is constructed with
        # from_logits=True, so a softmax here would double-normalize and
        # break the loss; greedy decoding via argmax is unaffected.
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)
        # (batch_size, length of output sequence, size of word index)

    def __call__(self, x, y, training, enc_padding_mask,
                 look_ahead_mask, dec_padding_mask):
        """Run encoder then decoder; returns (logits, attention-weight dict)."""
        # x: (batch_size, len_input)
        enc_output = self.encoder(x, training, enc_padding_mask)
        # enc_output: (batch_size, len_input, d_model)

        dec_output, attention_weights = self.decoder(
            y, enc_output, training, look_ahead_mask, dec_padding_mask)
        # dec_output: (batch_size, tar_seq_len, d_model)

        final_output = self.final_layer(dec_output)
        # (batch_size, tar_seq_len, target_vocab_size)

        return final_output, attention_weights


# Optimizer
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Warmup learning-rate schedule from "Attention Is All You Need":
    lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)."""

    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        # bug fix: the optimizer passes its integer step counter; negative
        # powers are invalid on integer tensors, so cast to float first.
        step = tf.cast(step, tf.float32)
        arg1 = tf.math.rsqrt(step)
        arg2 = step * (self.warmup_steps ** -1.5)

        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)


def loss_function(real, pred):
    """Sparse-categorical cross-entropy averaged over non-padding tokens.

    Uses the module-level `loss_object` (reduction='none'); padding positions
    (id 0) are excluded from both the sum and the denominator.
    """
    per_token_loss = loss_object(real, pred)
    non_pad = tf.math.logical_not(tf.math.equal(real, 0))
    non_pad = tf.cast(non_pad, dtype=per_token_loss.dtype)
    masked_loss = per_token_loss * non_pad
    return tf.reduce_sum(masked_loss) / tf.reduce_sum(non_pad)


def accuracy_function(real, pred):
    """Fraction of non-padding positions where the argmax prediction matches
    the target token."""
    predicted_ids = tf.argmax(pred, axis=2)
    non_pad = tf.math.logical_not(tf.math.equal(real, 0))
    correct = tf.math.logical_and(non_pad, tf.equal(real, predicted_ids))

    correct = tf.cast(correct, dtype=tf.float32)
    non_pad = tf.cast(non_pad, dtype=tf.float32)
    return tf.reduce_sum(correct) / tf.reduce_sum(non_pad)


def create_masks(inp, tar):
    """Build the three attention masks for one (input, target) batch.

    :return: (encoder padding mask,
              combined decoder mask — element-wise max of look-ahead and
              target-padding masks, used in decoder block 1,
              decoder padding mask over encoder outputs, used in block 2)
    """
    # Padding mask over the source sequence, used by the encoder ...
    enc_padding_mask = create_padding_mask(inp)
    # ... and by the decoder's cross-attention over the encoder outputs.
    dec_padding_mask = create_padding_mask(inp)

    # For decoder self-attention: block future tokens and padded targets.
    look_ahead = create_look_ahead_mask(tf.shape(tar)[1])
    tar_padding = create_padding_mask(tar)
    combined_mask = tf.maximum(tar_padding, look_ahead)

    return enc_padding_mask, combined_mask, dec_padding_mask


# Fixed (batch, seq_len) int64 specs so tf.function traces train_step once
# for variable sequence lengths instead of retracing for every new shape.
train_step_signature = [
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]


@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
    """One optimization step: teacher-forced forward pass, masked loss and
    accuracy, then a gradient update on the module-level `transformer`."""
    decoder_input = tar[:, :-1]   # target shifted right, fed to the decoder
    decoder_target = tar[:, 1:]   # tokens the decoder should predict

    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, decoder_input)

    with tf.GradientTape() as tape:
        predictions, _ = transformer(inp, decoder_input,
                                     True,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)
        loss = loss_function(decoder_target, predictions)

    grads = tape.gradient(loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(grads, transformer.trainable_variables))

    train_loss(loss)
    train_accuracy(accuracy_function(decoder_target, predictions))


def evaluate(sentence, en_lang, cn_lang, input_length=37, max_length=40):
    """Greedy-decode a Chinese translation for a raw English sentence.

    :param sentence: raw English input text
    :param en_lang: fitted English tokenizer
    :param cn_lang: fitted Chinese tokenizer
    :param input_length: pad/truncate length for the encoder input
    :param max_length: maximum number of decoding steps
    :return: (decoded text list, tokens placeholder (always 0),
              attention weights from the final decoding step)
    """
    sentence = process_en(sentence)
    sentence = en_lang.texts_to_sequences([sentence])
    # bug fix: honor the input_length parameter (maxlen was hard-coded to 37)
    sentence = tf.keras.preprocessing.sequence.pad_sequences(
        sentence, padding='post', maxlen=input_length)
    sentence = tf.cast(sentence, tf.int32)

    encoder_input = sentence

    # Normalizing an empty string yields only the marker tokens, so this
    # recovers the ids of '<start>' and '<end>'.
    start, end = en_lang.texts_to_sequences([process_en(' ')])[0]
    output = tf.convert_to_tensor([start])
    output = tf.cast(output, tf.int32)
    output = tf.expand_dims(output, 0)  # (1, 1) decoder input

    for i in range(max_length):
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
            encoder_input, output)

        # predictions: (batch_size, seq_len, vocab_size)
        predictions, attention_weights = transformer(encoder_input,
                                                     output,
                                                     False,
                                                     enc_padding_mask,
                                                     combined_mask,
                                                     dec_padding_mask,
                                                     )

        # keep only the distribution for the newest position
        predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)

        # bug fix: argmax defaults to int64, which cannot be concatenated
        # with the int32 `output` tensor
        predicted_id = tf.argmax(predictions, axis=-1, output_type=tf.int32)

        # greedily append the predicted token and feed it back to the decoder
        output = tf.concat([output, predicted_id], axis=-1)

        # stop as soon as the end token is produced
        if predicted_id == end:
            break

    # output.shape (1, tokens)
    text = cn_lang.sequences_to_texts(output.numpy())

    tokens = 0  # placeholder kept for interface compatibility

    return text, tokens, attention_weights


if __name__ == '__main__':
    print(tf.config.experimental.list_physical_devices('GPU'))

    # Data and model hyper-parameters.
    text_path = 'data/cmn.txt'
    BUFFER_SIZE = 300000
    BATCH_SIZE = 64
    test_size = 0.1
    num_layers = 2
    d_model = 128
    dff = 512
    num_heads = 8
    dropout_rate = 0.1
    EPOCHS = 2

    train_dataset, test_dataset, en_lang, cn_lang = data_pipeline(text_path, test_size=test_size)

    # Warmup learning-rate schedule from "Attention Is All You Need".
    learning_rate = CustomSchedule(d_model)
    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                         epsilon=1e-9)
    # fix: removed a second CustomSchedule instance that was never used
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
    # vocab sizes +2: room for padding index 0 plus one spare id
    transformer = Transformer(
        num_layers=num_layers,
        d_model=d_model,
        num_heads=num_heads,
        dff=dff,
        input_vocab_size=len(en_lang.word_index) + 2,
        target_vocab_size=len(cn_lang.word_index) + 2,
        maximum_input_pos_encoding=2000,
        maximum_output_pos_encoding=2000,
        rate=dropout_rate)
    checkpoint_path = "./checkpoints/train"

    ckpt = tf.train.Checkpoint(transformer=transformer,
                               optimizer=optimizer)

    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

    # If a checkpoint exists, restore the latest one.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')

    for epoch in range(EPOCHS):
        start = time.time()

        train_loss.reset_states()
        train_accuracy.reset_states()

        # inp -> English, tar -> Chinese
        for (batch, (inp, tar)) in enumerate(train_dataset):
            train_step(inp, tar)

            if batch % 50 == 0:
                print(
                    f'Epoch {epoch + 1} Batch {batch} Loss {train_loss.result():.4f} Accuracy {train_accuracy.result():.4f}')

        # NOTE(review): with EPOCHS=2 this never fires — raise EPOCHS or lower
        # the interval if checkpoints are actually wanted.
        if (epoch + 1) % 5 == 0:
            ckpt_save_path = ckpt_manager.save()
            print(f'Saving checkpoint for epoch {epoch + 1} at {ckpt_save_path}')

        print(f'Epoch {epoch + 1} Loss {train_loss.result():.4f} Accuracy {train_accuracy.result():.4f}')

        print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\n')