import torch
import torch.nn as nn
from learn_sublayerconnection import SublayerConnection
from learn_multihead_attention import clones, pe_result, MultiHeadedAttention
from learn_feedforward import PositionwiseFeedForward
# Variable: torch's legacy tensor wrapper (deprecated since PyTorch 0.4).
from torch.autograd import Variable

# EncoderLayer implements a single encoder layer of the transformer.
class EncoderLayer(nn.Module):
    """One transformer encoder layer.

    Consists of a multi-headed self-attention sublayer followed by a
    position-wise feed-forward sublayer, each wrapped in a
    SublayerConnection (residual connection + normalization + dropout).
    """

    def __init__(self, size, self_attn, feed_forward, dropout):
        """
        :param size: the embedding dimension, which also serves as the
            size of this encoder layer.
        :param self_attn: an instantiated multi-headed self-attention
            sublayer (queries, keys and values all come from the input).
        :param feed_forward: an instantiated position-wise feed-forward
            sublayer.
        :param dropout: dropout rate used inside each sublayer connection.
        """
        super(EncoderLayer, self).__init__()

        # Keep references to the two sublayer computations.
        self.self_attn = self_attn
        self.feed_forward = feed_forward

        # The encoder layer contains two sublayer connection structures
        # (one around attention, one around feed-forward), so clone the
        # SublayerConnection twice.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        # Record the layer size for users of this module.
        self.size = size

    def forward(self, x, mask):
        """Run the input through both sublayers.

        :param x: output tensor of the previous layer.
        :param mask: attention mask tensor.
        :return: the tensor produced by the feed-forward sublayer.
        """
        # First sublayer connection: multi-headed self-attention, where
        # the same tensor is used as query, key and value.
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        # Second sublayer connection: position-wise feed-forward network.
        return self.sublayer[1](attended, self.feed_forward)

# --- Demo: run one encoder layer on positionally-encoded embeddings. ---

# Embedding dimension (also the encoder layer size).
size = 512
d_model = 512
# Number of attention heads.
head = 8
# Inner-layer dimension of the position-wise feed-forward network.
d_ff = 64

# Input: positionally-encoded embeddings from the previous step.
x = pe_result
dropout = 0.2
# Multi-headed self-attention sublayer instance.
self_attn = MultiHeadedAttention(head, d_model)
# Position-wise feed-forward sublayer instance.
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
# Attention mask tensor (all zeros here, i.e. everything masked out in
# this demo). NOTE: torch.autograd.Variable has been deprecated since
# PyTorch 0.4 — plain tensors carry autograd state, so a bare
# torch.zeros(...) is equivalent and is used instead.
mask = torch.zeros(8, 4, 4)

# Build the encoder layer and push the input through it.
el = EncoderLayer(size, self_attn, ff, dropout)
el_result = el(x, mask)
print(el_result)
print(el_result.shape)

'''
tensor([[[  1.6496,   2.4800, -34.8737,  ...,   8.4425,   9.1057,  -6.4368],
         [  3.3957,  12.7566,  30.5149,  ...,  26.2355, -19.4441,  58.0455],
         [ 55.6355,  19.1354,  14.1771,  ...,   7.4224, -30.4739,   9.9912],
         [-18.8120, -29.6787,  26.2313,  ...,  -9.0908,  41.8800,   7.7106]],

        [[ 11.1011,   5.8502,  15.8748,  ...,  -0.3211, -29.7448,   5.5804],
         [ 26.1652,  -8.2542,  -0.1729,  ...,  25.5072,   4.5789,  -0.9341],
         [-33.1142,   5.8393,  17.6434,  ...,  51.5775, -28.9824,  16.9969],
         [-31.6072,  -0.9791,  -6.9263,  ...,  21.5915,   4.1305,  12.4579]]],
       grad_fn=<AddBackward0>) 
torch.Size([2, 4, 512])
'''