import torch
import torch.nn as nn
from learn_layernorm import LayerNorm
from learn_multihead_attention import pe_result, MultiHeadedAttention
# torch中变量封装函数Variable.
from torch.autograd import Variable

# 使用SublayerConnection来实现子层连接结构的类
# SublayerConnection implements the residual (skip) connection structure
# that wraps each sub-layer of a Transformer layer.
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(norm(x)))``.

    The input is first layer-normalized, passed through the wrapped
    sub-layer (e.g. multi-head attention or feed-forward), dropout is
    applied to the sub-layer's output, and the original input ``x`` is
    added back via the skip connection.
    """

    def __init__(self, size, dropout=0.1):
        """size: the feature (word-embedding) dimension, forwarded to LayerNorm.
        dropout: probability of zeroing each element of the sub-layer output;
                 since a dropped neuron outputs 0, this is equivalent to
                 randomly zeroing entries of the output matrix.
        """
        super().__init__()
        # Normalization applied to the input before the sub-layer (pre-norm).
        self.norm = LayerNorm(size)
        # Dropout applied to the sub-layer's output, to fight overfitting.
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, sublayer):
        """x: output of the previous layer or sub-layer.
        sublayer: callable implementing the wrapped sub-layer function.
        """
        # Normalize first, run the sub-layer, then randomly drop units.
        regularized = self.dropout(sublayer(self.norm(x)))
        # Skip connection: add the untouched input back onto the result.
        return x + regularized


# Demo hyperparameters.
size = 512      # feature (embedding) dimension handed to SublayerConnection
dropout = 0.2
head = 8        # number of attention heads
d_model = 512   # model / embedding dimension for MultiHeadedAttention

# Let x be the output of the positional encoder
# (pe_result comes from learn_multihead_attention).
x = pe_result
print(x)
print(x.shape)
'''
tensor([[[ 34.9898, -14.1086, -33.2382,  ...,  20.0340,  -7.5179, -21.0056],
         [ 31.4033,   9.3267,  31.8292,  ...,   8.8717,  37.0096,  -1.2072],
         [ -3.1895,   5.1212, -26.1549,  ...,  -0.5746, -47.8235,  29.8276],
         [-24.2081,  -0.0000,   0.4665,  ..., -15.1393, -13.7451,   3.9803]],

        [[  5.5436, -13.6394, -25.3869,  ...,  38.7479, -31.0153, -26.0135],
         [  8.8690, -26.1330,   0.0000,  ..., -48.4889,   8.8162,  20.6100],
         [-30.5280, -50.7176,  -0.0000,  ..., -46.4209,   0.0000,  24.3850],
         [  1.4080,   1.7012, -14.0811,  ..., -20.4576,  34.2291, -45.8572]]],
       grad_fn=<MulBackward0>) torch.Size([2, 4, 512])
'''

# All-zero attention mask of shape (8, 4, 4).
# NOTE(review): whether zeros mean "masked" or "visible" depends on
# MultiHeadedAttention's convention — confirm in learn_multihead_attention.
# NOTE(review): torch.autograd.Variable is deprecated; plain tensors suffice
# in modern PyTorch.
mask = Variable(torch.zeros(8, 4, 4))

# Assume the sub-layer is a multi-head attention layer; instantiate it.
self_attn =  MultiHeadedAttention(head, d_model)
# Wrap it in a lambda to get a function-typed sub-layer performing
# multi-head self-attention (query = key = value = x).
sublayer = lambda x:self_attn(x, x, x, mask)

# Build the sublayer connection and run the demo input through it.
sc = SublayerConnection(size, dropout)
sc_result = sc(x, sublayer)
print(sc_result)
print(sc_result.shape)
'''
tensor([[[ 3.5020e+01, -1.3871e+01, -3.3354e+01,  ...,  2.0090e+01,
          -7.6642e+00, -2.0637e+01],
         [ 3.1403e+01,  9.6144e+00,  3.1693e+01,  ...,  8.8717e+00,
           3.7032e+01, -8.6222e-01],
         [-3.1234e+00,  5.1212e+00, -2.6182e+01,  ..., -6.2969e-01,
          -4.7818e+01,  3.0245e+01],
         [-2.4233e+01,  0.0000e+00,  3.7067e-01,  ..., -1.5277e+01,
          -1.3745e+01,  4.3492e+00]],

        [[ 5.3897e+00, -1.3414e+01, -2.5392e+01,  ...,  3.9312e+01,
          -3.1101e+01, -2.5859e+01],
         [ 8.7845e+00, -2.5820e+01, -5.2803e-03,  ..., -4.7979e+01,
           8.7429e+00,  2.0610e+01],
         [-3.0710e+01, -5.0718e+01,  4.5680e-02,  ..., -4.5869e+01,
           0.0000e+00,  2.4634e+01],
         [ 1.3805e+00,  1.9619e+00, -1.4081e+01,  ..., -1.9848e+01,
           3.4221e+01, -4.5610e+01]]], grad_fn=<AddBackward0>) torch.Size([2, 4, 512])
'''