# Import the required libraries
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Transformer hyperparameters
num_layers = 6  # number of encoder/decoder layers
d_model = 512  # embedding / model dimension
num_heads = 8  # number of attention heads
dff = 2048  # feed-forward network inner dimension
# Constants
MAX_LENGTH = 200  # maximum sequence length
# BUGFIX: vocab_size was used below but never defined (NameError at runtime).
# Set this to the real tokenizer vocabulary size.
vocab_size = 8500

# Input embedding layer: maps token ids to d_model-dimensional vectors.
inputs = layers.Input(shape=(MAX_LENGTH,))
embedding = layers.Embedding(vocab_size, d_model)(inputs)


def positional_encoding(length, depth):
    """Return the standard sinusoidal positional-encoding matrix.

    Shape (length, depth), dtype float32. Even columns carry sin terms and
    odd columns cos terms, following "Attention Is All You Need".
    """
    positions = np.arange(length)[:, np.newaxis]  # (length, 1)
    dims = np.arange(depth)[np.newaxis, :]        # (1, depth)
    angle_rates = 1.0 / np.power(10000.0, (2 * (dims // 2)) / np.float32(depth))
    angle_rads = positions * angle_rates          # (length, depth)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    return angle_rads.astype(np.float32)


# Positional encoding.
# Transformers have no recurrence, so a fixed sinusoidal signal is ADDED to
# the embeddings to give the model a sense of token order.
# BUGFIX: the previous "pos_encoding" was a Dense -> LayerNorm -> ReLU stack,
# which injects no positional information at all; this Lambda keeps the same
# call contract (tensor in, same-shape tensor out) but actually encodes
# position. The (MAX_LENGTH, d_model) matrix broadcasts over the batch axis.
_pos_matrix = positional_encoding(MAX_LENGTH, d_model)
pos_encoding = layers.Lambda(lambda x: x + _pos_matrix)


## Multi-head attention
class MultiHeadSelfAttention(layers.Layer):
    """Multi-head attention layer (usable for self- and cross-attention).

    Linearly projects values, keys and queries to `embed_size`, splits each
    projection into `num_heads` heads of size `head_dim`, runs scaled
    dot-product attention per head, concatenates the heads, and applies a
    final output projection.
    """

    def __init__(self, embed_size, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_size = embed_size
        self.num_heads = num_heads
        self.head_dim = embed_size // num_heads

        assert (
                self.head_dim * num_heads == embed_size
        ), "Embedding size needs to be divisible by number of heads"

        self.wq = layers.Dense(embed_size)  # query projection
        self.wk = layers.Dense(embed_size)  # key projection
        self.wv = layers.Dense(embed_size)  # value projection
        self.dense = layers.Dense(embed_size)  # output projection

    def call(self, v, k, q, mask=None):
        """Apply multi-head attention.

        BUGFIX: the original signature was `call(self, x, mask)` and returned
        only the output tensor, but `DecoderLayer` in this file invokes the
        layer as `mha(v, k, q, mask)` and unpacks
        `(output, attention_weights)`. The signature and return value now
        match that contract (same v/k/q argument order as the TensorFlow
        Transformer tutorial). Pure self-attention is `mha(x, x, x, mask)`.

        Args:
            v: values, (batch_size, seq_len_kv, embed_size).
            k: keys, (batch_size, seq_len_kv, embed_size).
            q: queries, (batch_size, seq_len_q, embed_size).
            mask: optional mask broadcastable to
                (batch_size, num_heads, seq_len_q, seq_len_kv); masked
                positions hold 1 and receive ~-inf attention logits.

        Returns:
            output: (batch_size, seq_len_q, embed_size).
            attention_weights: (batch_size, num_heads, seq_len_q, seq_len_kv).
        """
        batch_size = tf.shape(q)[0]

        query = self.wq(q)  # (batch_size, seq_len_q, embed_size)
        key = self.wk(k)  # (batch_size, seq_len_kv, embed_size)
        value = self.wv(v)  # (batch_size, seq_len_kv, embed_size)

        # Split the embedding into self.num_heads different pieces.
        query = self.split_heads(query, batch_size)  # (batch_size, num_heads, seq_len_q, head_dim)
        key = self.split_heads(key, batch_size)  # (batch_size, num_heads, seq_len_kv, head_dim)
        value = self.split_heads(value, batch_size)  # (batch_size, num_heads, seq_len_kv, head_dim)

        attention, attention_weights = self.scaled_dot_product_attention(query, key, value, mask)
        attention = tf.transpose(attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, head_dim)
        concat_attention = tf.reshape(attention, (batch_size, -1, self.embed_size))  # re-join the heads

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, embed_size)
        return output, attention_weights

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, head_dim) and transpose
        the result to (batch_size, num_heads, seq_len, head_dim)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.head_dim))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def scaled_dot_product_attention(self, query, key, value, mask):
        """Calculate the attention output and weights.

        BUGFIX: now also returns the softmaxed attention weights so callers
        (e.g. `DecoderLayer`) can unpack and inspect them.
        """
        matmul_qk = tf.matmul(query, key, transpose_b=True)  # (batch_size, num_heads, seq_len_q, seq_len_kv)

        # Scale by sqrt(depth) to keep the logits in a numerically stable range.
        depth = tf.cast(tf.shape(key)[-1], tf.float32)
        logits = matmul_qk / tf.math.sqrt(depth)

        # Add the mask to push masked (padding/future) positions toward -inf.
        if mask is not None:
            logits += (mask * -1e9)

        # Softmax is normalized on the last axis (the key positions).
        attention_weights = tf.nn.softmax(logits, axis=-1)  # (batch_size, num_heads, seq_len_q, seq_len_kv)

        output = tf.matmul(attention_weights, value)  # (batch_size, num_heads, seq_len_q, head_dim)

        return output, attention_weights


# Inject position information into the token embeddings.
encodings = pos_encoding(embedding)

# Encoder: num_layers identical blocks of
# (self-attention -> add & norm -> feed-forward -> add & norm).
for _ in range(num_layers):
    # Multi-head self-attention over the current representations.
    self_attention = layers.MultiHeadAttention(
        num_heads=num_heads,
        key_dim=d_model // num_heads,
    )
    attn_out = self_attention(encodings, encodings, encodings)

    # Residual connection followed by layer normalization.
    encodings = layers.LayerNormalization(epsilon=1e-6)(
        layers.Add()([encodings, attn_out])
    )

    # Position-wise feed-forward network: expand to dff, project back to d_model.
    ff_out = layers.Dense(d_model)(
        layers.Dense(dff, activation='relu')(encodings)
    )

    # Residual connection followed by layer normalization.
    encodings = layers.LayerNormalization(epsilon=1e-6)(
        layers.Add()([encodings, ff_out])
    )


# Decoder
class DecoderLayer(layers.Layer):
    """A single Transformer decoder layer.

    Sub-layers: (1) masked self-attention over the target sequence,
    (2) encoder-decoder cross-attention, (3) a position-wise feed-forward
    network. Each sub-layer is followed by dropout, a residual connection,
    and layer normalization (post-norm arrangement).

    NOTE(review): this class invokes its attention layers as
    `mha(v, k, q, mask)` and unpacks two return values
    `(output, attention_weights)` — confirm that `MultiHeadSelfAttention`'s
    `call` actually matches this contract.
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()

        # Masked self-attention (queries/keys/values all from the decoder input).
        self.mha1 = MultiHeadSelfAttention(d_model, num_heads)
        # Cross-attention (keys/values from the encoder output).
        self.mha2 = MultiHeadSelfAttention(d_model, num_heads)

        # Position-wise feed-forward network: expand to dff, project back.
        self.ffn = keras.Sequential(
            [layers.Dense(dff, activation="relu"), layers.Dense(d_model)]
        )

        # One LayerNormalization per sub-layer (post-norm).
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)

        # One Dropout per sub-layer, applied before each residual add.
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)
        self.dropout3 = layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """Run the decoder layer.

        Args:
            x: target-sequence representations, (batch_size, target_seq_len, d_model).
            enc_output: encoder output, (batch_size, input_seq_len, d_model).
            training: bool flag forwarded to the dropout layers.
            look_ahead_mask: mask preventing attention to future target positions.
            padding_mask: mask hiding padding positions in the encoder output.

        Returns:
            (out3, attn_weights_block1, attn_weights_block2): the layer
            output plus the attention weights of both attention sub-layers.
        """
        # enc_output.shape == (batch_size, input_seq_len, d_model)

        # Masked self-attention over the target sequence.
        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)

        # Cross-attention: queries come from out1, keys/values from enc_output.
        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask
        )  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)  # (batch_size, target_seq_len, d_model)

        # Position-wise feed-forward sub-layer.
        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(ffn_output + out2)  # (batch_size, target_seq_len, d_model)

        return out3, attn_weights_block1, attn_weights_block2


## The full Transformer
class Transformer(keras.Model):
    """End-to-end Transformer: encoder, decoder, and a final projection to
    target-vocabulary logits.

    NOTE(review): relies on `Encoder` and `Decoder` classes that are not
    defined in this portion of the file — presumably provided elsewhere;
    verify before running.
    """

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()

        # Encoder maps the source sequence to continuous representations.
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)
        # Decoder attends over its own prefix and the encoder output.
        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target, rate)
        # Projects decoder features to logits over the target vocabulary.
        self.final_layer = layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        """Full forward pass; returns (logits, attention_weights)."""
        # Encode the source: (batch_size, inp_seq_len, d_model).
        enc_output = self.encoder(inp, training, enc_padding_mask)

        # Decode the target against the encoded source:
        # (batch_size, tar_seq_len, d_model).
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask
        )

        # (batch_size, tar_seq_len, target_vocab_size)
        final_output = self.final_layer(dec_output)
        return final_output, attention_weights