from Transformer_encoder import *

# 解码器层类 DecoderLayer 实现思路分析
# init函数 (self, size, self_attn, src_attn, feed_forward, dropout)
    # 词嵌入维度尺寸大小size 自注意力机制层对象self_attn 一般注意力机制层对象src_attn 前馈全连接层对象feed_forward
    # clones3子层连接结构 self.sublayer = clones(SublayerConnection(size,dropout),3)
# forward函数 (self, x, memory, source_mask, target_mask)
    # 数据经过子层连接结构1 self.sublayer[0](x, lambda x:self.self_attn(x, x, x, target_mask))
    # 数据经过子层连接结构2 self.sublayer[1](x, lambda x:self.src_attn(x, m, m, source_mask))
    # 数据经过子层连接结构3 self.sublayer[2](x, self.feed_forward)

class DecoderLayer(nn.Module):
    """One Transformer decoder layer.

    Three residual sublayer connections wrap, in order:
      1. masked self-attention over the target sequence (q = k = v),
      2. encoder-decoder ("source") attention (q from target, k = v from memory),
      3. a position-wise feed-forward network.
    SublayerConnection comes from Transformer_encoder — presumably
    residual + norm + dropout; confirm against that module.
    """

    def __init__(self, embedding_dim, self_attention, src_attention, feedforward, dropout):
        # embedding_dim: word-embedding size of this layer
        # self_attention: attention module for the target sequence (q = k = v)
        # src_attention: attention module over the encoder memory (q != k = v)
        # feedforward: position-wise feed-forward module
        # dropout: dropout rate passed to each SublayerConnection
        super().__init__()
        self.embedding_dim = embedding_dim
        self.self_attention = self_attention
        self.src_attention = src_attention
        self.feedforward = feedforward
        # Three identical residual sublayer wrappers, one per sub-block above.
        self.sublayer = clones(SublayerConnection(embedding_dim, dropout), 3)

    def forward(self, y, encoder_result, target_mask, source_mask):
        # y: target-side activations; encoder_result: encoder output (memory).
        m = encoder_result
        self_attn = lambda t: self.self_attention(t, t, t, target_mask)
        cross_attn = lambda t: self.src_attention(t, m, m, source_mask)
        # Sublayer 1: masked self-attention.
        y = self.sublayer[0](y, self_attn)
        # Sublayer 2: attention over the encoder memory.
        y = self.sublayer[1](y, cross_attn)
        # Sublayer 3: feed-forward network.
        y = self.sublayer[2](y, self.feedforward)
        return y

# 解码器类 Decoder 实现思路分析
# init函数 (self, layer, N):
# self.layers clones N个解码器层clones(layer, N)
# self.norm 定义规范化层 LayerNorm(layer.size)
# forward函数 (self, x, memory, source_mask, target_mask)
# 数据依次经过各个子层  x = layer(x, memory, source_mask, target_mask)
# 数据最后经过规范化层  return self.norm(x)
# 返回处理好的数据

class Decoder(nn.Module):
    """Stack of N identical decoder layers followed by a final layer norm."""

    def __init__(self, layer, N):
        # layer: a DecoderLayer instance used as the template for the stack
        # N: number of cloned decoder layers
        super().__init__()
        # N deep copies of the template layer.
        self.layers = clones(layer, N)
        # Fix: normalize over the layer's own embedding size instead of the
        # module-level `embedding_dim` global, which may not exist here or may
        # disagree with the layer actually passed in.
        self.norm = LayerNorm(layer.embedding_dim)

    def forward(self, y, encoder_result, target_mask, source_mask):
        """Run `y` through every decoder layer in order, then normalize.

        y: target-side activations
        encoder_result: encoder output (memory) attended to by each layer
        target_mask: mask for decoder self-attention
        source_mask: mask for encoder-decoder attention
        """
        # Data passes through each decoder layer in sequence.
        for layer in self.layers:
            y = layer(y, encoder_result, target_mask, source_mask)
        # Final normalization of the stacked output.
        return self.norm(y)

def test_decoder():
    """Smoke test: build a 6-layer decoder, feed it the encoder test's output,
    print and return the decoded tensor."""
    y = torch.tensor([[10, 20, 30, 40, 50, 60], [70, 80, 90, 100, 110, 120]])
    # untest_input comes from Transformer_encoder — presumably embeds + adds
    # positional encoding; confirm against that module.
    y = untest_input(y)
    # Encoder side: reuse the encoder smoke test's output as memory.
    encoder_result = test_encoder()

    # Decoder side: two independent multi-head attention modules
    # (index 0 = self-attention, index 1 = encoder-decoder attention).
    attention = clones(MutiHeadAttention(8, embedding_dim, dropout), 2)
    feedforward = PositionwiseFeedForward(embedding_dim, d_ff)
    decoder_layer = DecoderLayer(embedding_dim, attention[0], attention[1], feedforward, dropout)
    decoder = Decoder(decoder_layer, 6)
    # Masks: target is 6x6 (target length 6), source is 6x4 (memory length 4).
    target_mask = torch.zeros(heads, 6, 6)
    source_mask = torch.zeros(heads, 6, 4)
    decoder_result = decoder(y, encoder_result, target_mask, source_mask)
    # Labels fixed: this is the *decoded* (解码) output, not the encoded one.
    print(f'{"*"*30}解码后x的值{"*"*30}')
    print(decoder_result)
    print(f'解码后x的形状：{decoder_result.shape}')
    return decoder_result

if __name__ == '__main__':
    # Fix: run this module's own decoder smoke test; the original called
    # test_encoder(), which never exercises the Decoder defined above.
    test_decoder()
