from dd02_encoder import *
import torch.nn as nn


def create_position_y():
    """
    Build the decoder input: a fixed batch of token ids passed through the
    word-embedding layer and then the positional-encoding layer.

    :return: position-encoded embeddings of shape [2, 4, 512]
    """
    # two sample sequences of four token ids each -> [2, 4]
    token_ids = torch.tensor([[100, 2, 421, 508], [491, 998, 1, 221]])

    embedding_layer = Embeddings(vocab_size=1000, d_model=512)
    embedded = embedding_layer(token_ids)  # [2, 4, 512]

    pos_encoder = PositionalEncoding(d_model=512, max_len=60, dropout=0.1)
    return pos_encoder(embedded)


# 解码器层
# Decoder layer
class DecoderLayer(nn.Module):
    """
    One Transformer decoder layer: masked multi-head self-attention,
    encoder-decoder (source) attention, then a feed-forward network,
    each wrapped in its own residual sublayer connection.
    """

    def __init__(self, size, self_attention, src_attention, feed_forward, dropout_p=0.1):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attention = self_attention
        self.src_attention = src_attention
        self.feed_forward = feed_forward

        # three residual sublayer wrappers: self-attn, src-attn, feed-forward
        self.sublayer = clones(SublayerConnection(size, dropout_p), 3)

        # NOTE(review): this norm is created but never used in forward — confirm intent
        self.norm = LayerNorm(features=size)

    def forward(self, y, memory, source_mask, target_mask):
        # 1) masked multi-head self-attention over the target sequence
        out = self.sublayer[0](y, lambda t: self.self_attention(t, t, t, target_mask))

        # 2) multi-head attention over the encoder output (memory)
        out = self.sublayer[1](out, lambda t: self.src_attention(t, memory, memory, source_mask))

        # 3) position-wise feed-forward sublayer
        return self.sublayer[2](out, self.feed_forward)


def dm01_decoder_layer():
    """
    Smoke-test a single DecoderLayer: build the embedded/position-encoded
    input, attention sublayers, masks and encoder memory, run one forward
    pass and print the result and its shape.
    """
    # 1. decoder input: word embedding + positional encoding, [2,4,512]
    position_y = create_position_y()

    # 2. one multi-head attention module ...
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)

    # 3. ... deep-copied so the self-attention and source-attention
    #    sublayers do not share parameters
    self_attention = copy.deepcopy(my_attention)
    src_attention = copy.deepcopy(my_attention)

    # demo masks (all zeros)
    mask = torch.zeros(8, 4, 4)
    source_mask = target_mask = mask

    # encoder output serves as the decoder's memory
    encode_result = dm10_encoder()

    # 4. feed-forward sublayer
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)

    # 5. build the decoder layer and run its forward pass
    #    (fixed: variable was misleadingly named my_encoder_layer)
    my_decoder_layer = DecoderLayer(size=512, self_attention=self_attention,
                                    src_attention=src_attention, feed_forward=my_feedforward, dropout_p=0.1)

    result = my_decoder_layer(y=position_y, memory=encode_result, source_mask=source_mask, target_mask=target_mask)

    print(f'解码器层的输出结果:\n{result}')
    print(f'解码器层的输出结果shape:{result.shape}')


# 解码器
# Decoder
class Decoder(nn.Module):
    """Stack of N identical decoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        # N independent deep copies of the given decoder layer
        self.layers = clones(layer, N)
        self.norm = LayerNorm(features=layer.size)

    def forward(self, y, memory, source_mask, target_mask):
        out = y
        for decoder_layer in self.layers:
            out = decoder_layer(out, memory, source_mask, target_mask)
        return self.norm(out)


def dm02_decoder():
    """
    Smoke-test the full Decoder (N stacked DecoderLayer copies): build the
    input, sublayers, masks and encoder memory, run one forward pass,
    print and return the result.

    :return: decoder output tensor
    """
    # 1. decoder input: word embedding + positional encoding, [2,4,512]
    position_y = create_position_y()

    # 2. one multi-head attention module ...
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)

    # 3. ... deep-copied so the self-attention and source-attention
    #    sublayers do not share parameters
    self_attention = copy.deepcopy(my_attention)
    src_attention = copy.deepcopy(my_attention)

    # demo masks (all zeros)
    mask = torch.zeros(8, 4, 4)
    source_mask = target_mask = mask

    # encoder output serves as the decoder's memory
    encode_result = dm10_encoder()

    # 4. feed-forward sublayer
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)

    # 5. build one decoder layer as the stack's template
    #    (fixed: variable was misleadingly named my_encoder_layer)
    my_decoder_layer = DecoderLayer(size=512, self_attention=self_attention,
                                    src_attention=src_attention, feed_forward=my_feedforward, dropout_p=0.1)

    # 6. build the decoder (6 layers) and run its forward pass
    my_decoder = Decoder(layer=my_decoder_layer, N=6)
    result = my_decoder(y=position_y, memory=encode_result, source_mask=source_mask, target_mask=target_mask)

    print(f'解码器的输出结果:\n{result}')
    print(f'解码器的输出结果shape:{result.shape}')
    return result


# Script entry point: run the full-decoder demo by default.
if __name__ == '__main__':
    # dm01_decoder_layer()
    dm02_decoder()
