from dd01_input import *
from dd04_generator import *


class EncoderDecoder(nn.Module):
    """Generic encoder-decoder (Transformer) wrapper.

    Wires together five sub-modules: the encoder stack, the decoder stack,
    the source/target embedding+positional-encoding pipelines, and the
    final output (generator) layer.
    """

    def __init__(self, encoder, decoder, src_embed_position, target_embed_position, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder                              # encoder stack
        self.decoder = decoder                              # decoder stack
        self.src_embed_position = src_embed_position        # source embedding + positional encoding
        self.target_embed_position = target_embed_position  # target embedding + positional encoding
        self.generator = generator                          # output projection layer

    def forward(self, source, target, source_mask, target_mask):
        """Run a full encode-decode pass.

        :param source: source token batch
        :param target: target token batch
        :param source_mask: mask applied over source positions
        :param target_mask: mask applied over target positions
        :return: output of the generator on the decoder result
        """
        # Embed + position-encode the source, then encode it.
        memory = self.encoder(self.src_embed_position(source), source_mask)
        # Embed + position-encode the target, then decode against the memory.
        decoded = self.decoder(self.target_embed_position(target), memory, source_mask, target_mask)
        # Project the decoder output through the output layer.
        return self.generator(decoded)


def make_model(source_vocab, target_vocab, N=6, d_model=512, d_ff=1024, head=8, dropout_p=0.1):
    """
    Build a Transformer model.

    :param source_vocab: source-language vocabulary size
    :param target_vocab: target-language vocabulary size
    :param N: number of encoder/decoder layers, default 6
    :param d_model: embedding dimension, default 512
    :param d_ff: feed-forward hidden dimension, default 1024
    :param head: number of attention heads, default 8
    :param dropout_p: dropout probability, default 0.1
    :return: an initialized EncoderDecoder model
    """
    c = copy.deepcopy

    # Sub-module templates; they are deep-copied below wherever reuse would
    # otherwise tie parameters between distinct layers.
    # Encoder-side token embedding.
    encoder_embedding = Embeddings(source_vocab, d_model)

    # Positional encoding (deep-copied for encoder and decoder pipelines).
    positional = PositionalEncoding(d_model, dropout_p, max_len=2000)

    # Multi-head attention template.
    attention = MultiHeadAttention(embedding_dim=d_model, head=head, dropout=dropout_p)

    # Position-wise feed-forward template.
    feedforward = FeedForward(d_model=d_model, d_ff=d_ff, dropout_p=dropout_p)

    # Decoder-side token embedding.
    decoder_embedding = Embeddings(target_vocab, d_model)

    # Output projection layer.
    generator = Generator(d_model=d_model, vocab_size=target_vocab)

    # BUG FIX: the feed-forward module is now deep-copied (c(feedforward))
    # for the encoder and decoder layer templates. Previously the same
    # FeedForward instance was shared between them, tying their weights,
    # while the attention modules were (correctly) deep-copied.
    model = EncoderDecoder(encoder=Encoder(EncoderLayer(d_model, c(attention), c(feedforward), dropout_p), N=N),

                           decoder=Decoder(layer=DecoderLayer(size=d_model, self_attention=c(attention),
                                                              src_attention=c(attention), feed_forward=c(feedforward),
                                                              dropout_p=dropout_p), N=N),

                           src_embed_position=nn.Sequential(encoder_embedding, c(positional)),

                           target_embed_position=nn.Sequential(decoder_embedding, c(positional)),

                           generator=generator)

    # Xavier-initialize weight matrices; dim > 1 skips biases and
    # 1-D parameters such as LayerNorm scales.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    return model


if __name__ == '__main__':
    # Build the custom Transformer and run a toy forward pass.
    model = make_model(source_vocab=1000, target_vocab=1000)
    print(model)

    # BUG FIX: print PyTorch's built-in Transformer for comparison WITHOUT
    # rebinding `model`. Previously `model = nn.Transformer()` shadowed the
    # custom model; nn.Transformer expects float (S, N, E) inputs, so the
    # integer token batches below would have crashed the forward call, and
    # the expected output shape [2,4,1000] only applies to the custom model.
    print(nn.Transformer())

    # Two sequences of four token ids each.
    source = torch.tensor([[1, 2, 3, 4], [2, 4, 6, 8]])
    target = torch.tensor([[5, 3, 3, 4], [2, 40, 6, 80]])
    # Zero masks of shape (head, seq, seq) — presumably "no position masked"
    # under the project's mask convention; TODO confirm against the
    # attention implementation.
    source_mask = target_mask = torch.zeros(8, 4, 4)
    result = model(source, target, source_mask, target_mask)
    print(f'transformer的输出结果：{result}')
    print(f'transformer的输出结果.shape：{result.shape}')  # [2,4,1000]
