"""
    编码器及编码层
"""
import tensorflow as tf
from .multi_head_attention import MultiHeadAttention
from .common_function import point_wise_feed_forward_network, positional_encoding

import config


class EncoderLayer(tf.keras.layers.Layer):
    """A single Transformer encoder layer.

    Structure: multi-head self-attention -> dropout -> residual add &
    layer norm -> point-wise feed-forward network -> dropout ->
    residual add & layer norm.
    """

    def __init__(self, d_model=None, num_heads=None, dff=None, rate=None):
        """Build the sub-layers of one encoder layer.

        Parameters (each falls back to the `config` module when None,
        which preserves the original behavior of `EncoderLayer()`):
            1. d_model   --> token embedding dimension (embedding_size)
            2. num_heads --> number of attention heads
            3. dff       --> hidden units of the feed-forward network
            4. rate      --> dropout rate
        """
        # Initialize the parent Layer.
        super(EncoderLayer, self).__init__()
        # Hyper-parameters: explicit argument wins, otherwise read config.
        self.d_model = config.d_model if d_model is None else d_model
        self.num_heads = config.num_heads if num_heads is None else num_heads
        self.dff = config.dff if dff is None else dff
        self.rate = config.dropout_rate if rate is None else rate
        # Multi-head self-attention sub-layer.
        self.mha = MultiHeadAttention(self.d_model, self.num_heads)
        # Two-layer point-wise feed-forward network.
        self.ffn = point_wise_feed_forward_network(self.d_model, self.dff)
        # Post-sublayer normalization layers.
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        # Dropout applied to each sub-layer output before the residual add.
        self.dropout1 = tf.keras.layers.Dropout(self.rate)
        self.dropout2 = tf.keras.layers.Dropout(self.rate)

    def call(self, x, training, mask):
        """Run one encoder layer.

        Args:
            x: input tensor, (batch_size, input_seq_len, d_model).
            training: bool; enables dropout when True.
            mask: attention mask forwarded to the multi-head attention.

        Returns:
            Tensor of shape (batch_size, input_seq_len, d_model).
        """
        # Self-attention: queries, keys and values are all `x`.
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        # Residual connection followed by layer normalization.
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)

        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)

        return out2


class Encoder(tf.keras.layers.Layer):
    """Transformer encoder: token embedding + positional encoding
    followed by a stack of `num_layers` encoder layers.
    """

    def __init__(self, input_vocab_size,
                 maximum_position_encoding,
                 num_layers=None, rate=None):
        """Build the encoder.

        Parameters:
            1. input_vocab_size          --> size of the input vocabulary
            2. maximum_position_encoding --> maximum sequence length covered
                                             by the positional encoding table
            3. num_layers                --> number of encoder layers;
                                             defaults to config.num_layers
            4. rate                      --> dropout rate;
                                             defaults to config.dropout_rate

        Note: d_model, num_heads and dff are always taken from the
        `config` module so that this class stays consistent with the
        `EncoderLayer` instances it creates.
        """
        # Initialize the parent Layer.
        super(Encoder, self).__init__()
        # Embedding dimension (shared with every EncoderLayer via config).
        self.d_model = config.d_model
        # Number of stacked encoder layers; explicit argument wins.
        self.num_layers = config.num_layers if num_layers is None else num_layers
        self.rate = config.dropout_rate if rate is None else rate
        # Token embedding layer.
        self.embedding = tf.keras.layers.Embedding(input_vocab_size, self.d_model)
        # Precomputed positional-encoding table.
        self.pos_encoding = positional_encoding(maximum_position_encoding,
                                                self.d_model)
        # The stack of encoder layers.
        self.enc_layers = [EncoderLayer()
                           for _ in range(self.num_layers)]
        # Dropout applied to the embedded input.
        self.dropout = tf.keras.layers.Dropout(self.rate)

    def call(self, x, training, mask):
        """Encode a batch of token ids.

        Args:
            x: int tensor of token ids, (batch_size, input_seq_len).
            training: bool; enables dropout when True.
            mask: attention mask passed to every encoder layer.

        Returns:
            Tensor of shape (batch_size, input_seq_len, d_model).
        """
        # Actual sequence length of this batch.
        seq_len = tf.shape(x)[1]

        # Embed the tokens and scale by sqrt(d_model), per the
        # original Transformer paper.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        # Add the positional information for the first seq_len positions.
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x, training=training)
        # Pass the result through every encoder layer in order.
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)

        return x  # (batch_size, input_seq_len, d_model)


if __name__ == "__main__":
    pass