import torch
import copy
import torch.nn as nn
from learn_multihead_attention import clones, pe_result, MultiHeadedAttention
from learn_feedforward import PositionwiseFeedForward
from learn_decoderlayer import DecoderLayer, en_result
from learn_layernorm import LayerNorm
# Variable wrapper from torch.autograd (a deprecated no-op since PyTorch 0.4).
from torch.autograd import Variable


# The Decoder class implements the full decoder stack.
class Decoder(nn.Module):
    """Transformer decoder: a stack of N identical decoder layers
    followed by one final normalization layer."""

    def __init__(self, layer, N):
        """Build the stack.

        layer -- a single decoder layer instance to be replicated
        N     -- how many copies of that layer the stack contains
        """
        super().__init__()
        # N independent deep copies of the supplied layer.
        self.layers = clones(layer, N)
        # Closing normalization, applied after the data has traversed
        # every layer; layer.size is the embedding dimension.
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, source_mask, target_mask):
        """Run the decoder stack.

        x           -- embedded representation of the target sequence
        memory      -- output of the encoder stack
        source_mask -- mask tensor for the source data
        target_mask -- mask tensor for the target data

        Returns the normalized output of the last decoder layer.
        """
        out = x
        # Feed the running representation through each layer in turn,
        # then normalize once at the end.
        for sublayer in self.layers:
            out = sublayer(out, memory, source_mask, target_mask)
        return self.norm(out)

# Instantiation parameters: the single decoder layer and the stack depth N.
d_model = 512            # model / embedding dimension
size = 512               # kept for parity with the decoder-layer examples
head = 8                 # number of attention heads
d_ff = 64                # hidden width of the feed-forward sublayer
dropout = 0.2
N = 8                    # number of stacked decoder layers
c = copy.deepcopy        # shorthand for deep-copying sub-modules

# One multi-head attention module; deep copies of it serve as both the
# self-attention and the source-attention of the decoder layer.
attn = MultiHeadedAttention(head, d_model)
# Position-wise feed-forward module.
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
# A single decoder layer built from independent copies of attn and ff.
layer = DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout)

# Demo inputs — identical in shape to the inputs of a single decoder layer.
# x: target-side embeddings (token embedding + positional encoding).
x = pe_result
print("pe_result:", pe_result, pe_result.shape)
'''
pe_result: tensor([[[ -6.3314,   0.0000,  28.8388,  ...,  -1.3242,   0.0000,   8.9486],
         [-15.5073,  32.6991, -54.3432,  ...,  -0.0000,  30.8404,  -0.0000],
         [ 35.0168, -41.6491,  63.5541,  ...,  -7.1930,  -6.7864,  -1.4227],
         [ -4.3796, -15.3667, -27.2328,  ..., -11.2531,  19.3089,  -1.4406]],

        [[ 44.1043,  37.8490, -19.9735,  ..., -30.9300,  52.4097,   0.0000],
         [  5.5130,   0.0000,  13.3778,  ..., -44.0106, -26.0569,  -1.5895],
         [ 27.4017,  13.0755,   6.4080,  ...,   8.8293, -45.7528, -16.2449],
         [ -0.0000,  31.5947,  -0.0000,  ..., -18.9879,  -8.6824,  -2.6704]]],
       grad_fn=<MulBackward0>) torch.Size([2, 4, 512])'''

# memory: the encoder stack's output.
memory = en_result
print("en_result:", en_result, en_result.shape)
'''
en_result: tensor([[[-0.5250,  0.1888,  1.1794,  ..., -0.0333,  0.0340,  0.3595],
         [-0.7821,  1.3562, -2.1269,  ...,  0.0491,  1.3170,  0.0827],
         [ 1.2290, -1.6342,  2.6097,  ..., -0.3223, -0.2235, -0.1178],
         [-0.3080, -0.6263, -1.1543,  ..., -0.4985,  0.8867,  0.0533]],

        [[ 1.9802,  1.6643, -1.0832,  ..., -1.4233,  2.3126,  0.0756],
         [ 0.3175,  0.0876,  0.5658,  ..., -1.8632, -0.7575,  0.0457],
         [ 1.3525,  0.6584,  0.4524,  ...,  0.3704, -1.7200, -0.5108],
         [-0.0564,  1.3340,  0.0055,  ..., -0.8755, -0.3413, -0.0382]]],
       grad_fn=<AddBackward0>) torch.Size([2, 4, 512])'''

# In practice source_mask and target_mask differ; for this demo the same
# all-zeros mask is used for both.
# NOTE: torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4,
# so the mask tensor is created directly instead of being wrapped.
mask = torch.zeros(8, 4, 4)
source_mask = target_mask = mask

# Build the decoder stack and run the demo inputs through it.
de = Decoder(layer, N)
de_result = de(x, memory, source_mask, target_mask)
print("de_result", de_result, de_result.shape)
'''
de_result tensor([[[-0.1672, -0.1198,  1.3687,  ..., -0.0244,  0.1114,  0.1968],
         [-0.4294,  1.3999, -2.0240,  ...,  0.1219,  1.3932, -0.1229],
         [ 1.5403, -1.6742,  2.6985,  ..., -0.2863, -0.1918, -0.1604],
         [ 0.0169, -0.5504, -1.1232,  ..., -0.4793,  1.0310, -0.0538]],

        [[ 1.8533,  1.5726, -0.8511,  ..., -1.2859,  2.2288, -0.1163],
         [ 0.2511, -0.1675,  0.6576,  ..., -1.6873, -1.0113, -0.1759],
         [ 1.1293,  0.5432,  0.3826,  ...,  0.2558, -1.8499, -0.9678],
         [-0.0072,  1.2828,  0.0968,  ..., -0.7719, -0.4136, -0.2768]]],
       grad_fn=<AddBackward0>) torch.Size([2, 4, 512])'''
