import torch
import torch.nn as nn
from torch.autograd import Variable
from learn_decoder import de
from learn_encoder import en
from learn_generator import gen


# EncoderDecoder implements the generic encoder-decoder architecture.
class EncoderDecoder(nn.Module):
    """Generic encoder-decoder wrapper.

    Wires together five collaborators:
        encoder:      the encoder object
        decoder:      the decoder object
        source_embed: embedding function for the source data
        target_embed: embedding function for the target data
        generator:    the output-class generator object
    """

    def __init__(self, encoder, decoder, source_embed, target_embed, generator):
        super(EncoderDecoder, self).__init__()
        # Keep every collaborator on the module under its conventional name.
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = source_embed
        self.tgt_embed = target_embed
        self.generator = generator

    def forward(self, source, target, source_mask, target_mask):
        """Run the full encode-then-decode pipeline.

        source / target are the input sequences; source_mask / target_mask
        are the corresponding mask tensors.
        """
        memory = self.encode(source, source_mask)
        return self.decode(memory, source_mask, target, target_mask)

    def encode(self, source, source_mask):
        """Embed the source sequence, then pass it through the encoder."""
        embedded = self.src_embed(source)
        return self.encoder(embedded, source_mask)

    def decode(self, memory, source_mask, target, target_mask):
        """Embed the target sequence, then decode it against the encoder memory."""
        embedded = self.tgt_embed(target)
        return self.decoder(embedded, memory, source_mask, target_mask)


# --- Demo: instantiate the pieces and run one forward pass. ---
vocab_size = 1000
d_model = 512
encoder = en
decoder = de
source_embed = nn.Embedding(vocab_size, d_model)
target_embed = nn.Embedding(vocab_size, d_model)
generator = gen

# Inputs:
# For this demo the source and target data are identical; in practice they differ.
# NOTE: torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4 —
# plain tensors carry autograd state themselves, so we use them directly.
source = target = torch.LongTensor([[100, 2, 421, 508], [491, 998, 1, 221]])
# Likewise src_mask and tgt_mask are shared here; in practice they differ.
# NOTE(review): the mask shape (8, 4, 4) does not match the batch size of 2 —
# presumably 8 is the number of attention heads; confirm against en/de.
source_mask = target_mask = torch.zeros(8, 4, 4)

# Build the model and run a forward pass.
ed = EncoderDecoder(encoder, decoder, source_embed, target_embed, generator)
ed_result = ed(source, target, source_mask, target_mask)
print("ed_result:", ed_result, ed_result.shape)

# Sample output from one run (values vary with random initialization):
'''
ed_result: tensor([[[-0.0970, -1.0463,  0.6891,  ...,  0.0292, -0.3583, -0.7489],
         [-0.1938,  0.1176,  1.4818,  ..., -0.3689, -0.5409,  0.0905],
         [-0.0820, -0.4752,  1.5456,  ..., -0.6224,  0.2971,  0.1575],
         [ 0.2829, -1.0824,  1.4787,  ..., -0.2452, -0.7749, -0.0177]],

        [[ 0.0274, -0.7548,  0.5057,  ..., -0.8559,  0.1447, -0.4375],
         [-0.1599, -0.1679,  1.7399,  ..., -1.0126,  0.1200, -0.8509],
         [-0.5462, -0.2470,  0.7894,  ..., -0.7403,  0.2848, -0.8639],
         [ 0.3360,  0.1573,  1.5430,  ..., -0.5795, -0.5232, -0.5031]]],
       grad_fn=<AddBackward0>) torch.Size([2, 4, 512])'''