from Transformer_output import *

# 定义EncoderDecoder类
class EncoderDecoder(nn.Module):
    def __init__(self,encoder,decoder,soure_embed,target_embed,generator):
        super().__init__()
        # encoder：编码器对象
        self.encoder = encoder
        # decoder:解码器的对象
        self.decoder = decoder
        # source_embed：源语言输入部分的对象：wordEmbedding+PositionEncoding
        self.soure_embed = soure_embed
        # target_embed：目标语言输入部分的对象：wordEmbedding+PositionEncoding
        self.target_embed = target_embed
        # generator:输出层对象
        self.generator = generator

    def forward(self,source,target,source_mask1,source_mask2,target_mask):
        # source:源语言的输入，形状--》[batch_size, seq_len]-->[2, 4]
        # target:目标语言的输入，形状--》[batch_size, seq_len]-->[2, 6]
        # source_mask1：padding mask:作用在编码器端多头自注意力机制-->[head, source_seq_len, source_seq_len]-->[8, 4, 4]
        # source_mask2：padding mask:作用在解码器端多头注意力机制-->[head, target_seq_len, source_seq_len]-->[8, 6, 4]
        # target_mask：sentence mask:作用在解码器端多头自注意力机制-->[head, target_seq_len, target_seq_len]-->[8, 6, 6]


        # 1.将原始的source源语言的输入，形状--》[batch_size, seq_len]-->[2, 4]送入编码器输入部分变成--》[2,4,512]
        # encode_word_embed:wordEmbedding+PositionEncoding
        encode_word_embed = self.soure_embed(source)
        # 2. encode_word_embed以及source_mask1送入编码器得到编码之后的结果:encoder_output-->[2, 4, 512]
        encoder_output = self.encoder(encode_word_embed,source_mask1)
        # 3. target:目标语言的输入，形状--》[batch_size, seq_len]-->[2, 6] 送入解码器输入部分变成--》[2,6,512]
        decoder_word_embed = self.target_embed(target)
        # 4. 将decode_word_embed，encoder_output,source_mask2，target_mask送入解码器
        decode_output = self.decoder(decoder_word_embed,encoder_output,target_mask,source_mask2)
        # 5.将decoder_output送入输出层
        output = self.generator(decode_output)

        return output

def test_complete():
    """Smoke-test the hand-built transformer end to end, then run the same
    toy batch through torch's built-in ``nn.Transformer`` for comparison."""
    # Sub-layer prototypes: three attention copies (encoder self-attn,
    # decoder self-attn, decoder cross-attn) and two feed-forward copies.
    attention = clones(MutiHeadAttention(heads, embedding_dim, dropout), 3)
    feedforward = clones(PositionwiseFeedForward(embedding_dim, d_ff), 2)

    # Encoder: one layer prototype stacked 6 times.
    encoder = Encoder(
        EncoderLayer(embedding_dim, attention[0], feedforward[0], dropout), 6
    )

    # Decoder: one layer prototype (self-attn, cross-attn, FFN) stacked 6 times.
    decoder = Decoder(
        DecoderLayer(embedding_dim, attention[2], attention[1], feedforward[1], dropout), 6
    )

    # Source-side input pipeline: word embedding followed by positional encoding.
    source_embed = nn.Sequential(
        Embedding(vocabulary_size, embedding_dim),
        PositionalEncoding(embedding_dim, max_len, dropout),
    )

    # Target-side input pipeline, same structure as the source side.
    target_embed = nn.Sequential(
        Embedding(vocabulary_size, embedding_dim),
        PositionalEncoding(embedding_dim, max_len, dropout),
    )

    # Output projection to vocabulary logits.
    generator = Generator(embedding_dim, vocabulary_size)

    # Assemble the complete model.
    transformer = EncoderDecoder(encoder, decoder, source_embed, target_embed, generator)

    # Toy batch: 2 source sentences of length 4, 2 target sentences of length 6.
    src_tokens = torch.tensor([[1, 2, 3, 4], [7, 8, 9, 10]])
    tgt_tokens = torch.tensor([[10, 20, 30, 40, 50, 60], [70, 80, 90, 100, 110, 120]])
    # All-zero masks, i.e. nothing is actually masked in this smoke test.
    enc_self_mask = torch.zeros(heads, src_tokens.shape[-1], src_tokens.shape[-1])
    cross_mask = torch.zeros(heads, tgt_tokens.shape[-1], src_tokens.shape[-1])
    dec_self_mask = torch.zeros(heads, tgt_tokens.shape[-1], tgt_tokens.shape[-1])

    # Forward pass through the hand-built model.
    result = transformer(src_tokens, tgt_tokens, enc_self_mask, cross_mask, dec_self_mask)
    print(f'最终输出结果{result}')
    print(f'最终输出形状{result.shape}')

    # Convert the token batches with the project helper and run torch's
    # reference implementation on the same data.
    src_in = untest_input(src_tokens)
    tgt_in = untest_input(tgt_tokens)
    print(src_in.shape)
    print(tgt_in.shape)
    nn_transformer = nn.Transformer(batch_first=True)
    nn_result = nn_transformer(src_in, tgt_in)
    print(f'nn_transformer最终输出结果{nn_result}')
    print(f'nn_transformer最终输出形状{nn_result.shape}')

if __name__ == '__main__':
    test_complete()








