import torch
import copy
import torch.nn as nn
from learn_multihead_attention import clones, pe_result, MultiHeadedAttention
from learn_feedforward import PositionwiseFeedForward
from learn_layernorm import LayerNorm
from learn_encoderlayer import EncoderLayer
# torch中变量封装函数Variable.
from torch.autograd import Variable

# 使用Encoder类来实现编码器
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm.

    Each layer transforms the input in sequence; the result of the last
    layer is normalized before being returned.
    """

    def __init__(self, layer, N):
        """
        :param layer: a single encoder-layer instance; it is cloned N times
                      (the clones are deep copies, so weights are not shared).
        :param N: number of stacked encoder layers.
        """
        super(Encoder, self).__init__()
        # N independent copies of the given encoder layer.
        self.layers = clones(layer, N)
        # Final normalization applied after the whole stack.
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Run x through every encoder layer in order, then normalize.

        :param x: output of the previous stage (e.g. positional encoding).
        :param mask: attention mask tensor passed to each layer.
        :return: the normalized output of the final encoder layer.
        """
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, mask)
        return self.norm(out)

# ---- Demo: instantiate an 8-layer encoder and run it on the PE output ----
head = 8        # number of attention heads
size = 512     # model dimension seen by EncoderLayer / LayerNorm
d_model = 512  # embedding dimension
dropout = 0.2  # dropout probability used inside the sublayers
d_ff = 64      # hidden width of the position-wise feed-forward sublayer

# Multi-head attention sublayer
attn = MultiHeadedAttention(head, d_model)
# Position-wise feed-forward sublayer
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
# The sublayers inside an encoder layer are NOT shared between layers,
# so each one is handed to EncoderLayer as a deep copy.
c = copy.deepcopy  # shorthand for the deep-copy function
layer = EncoderLayer(size, c(attn), c(ff), dropout)
# Number of encoder layers in the stack
N = 8
# NOTE: torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4;
# a plain tensor behaves identically, so the wrapper is dropped.
mask = torch.zeros(8, 4, 4)

# Run the encoder on the positional-encoding result imported above.
x = pe_result
en = Encoder(layer, N)
en_result = en(x, mask)
print(en_result)
print(en_result.shape)

'''
tensor([[[ 1.8475,  0.3161, -0.0556,  ..., -0.4661, -0.6603, -1.1974],
         [-0.4137,  0.1930,  1.8347,  ...,  1.3668, -0.0689,  0.8014],
         [ 1.1263,  1.8678,  1.1257,  ..., -0.5364,  1.3183,  0.3189],
         [-0.2730, -0.9545, -0.1976,  ...,  0.0648,  1.4785,  0.6461]],

        [[-1.1482,  0.2537,  0.5268,  ...,  0.1398, -0.5102,  2.0777],
         [ 0.7164,  0.6231, -0.7073,  ...,  0.2521, -0.8158, -0.2535],
         [ 0.4761,  0.1412, -2.1816,  ...,  0.7627, -2.2520,  1.1982],
         [-1.4989,  0.5146, -0.0740,  ..., -0.9476, -0.0949,  0.1447]]],
       grad_fn=<AddBackward0>)
torch.Size([2, 4, 512]) 
'''