from Transformer_moduls import *

# EncoderLayer implementation notes
# __init__(self, size, self_attn, feed_forward, dropout):
    # store the multi-head attention object self_attn and the feed-forward object feed_forward; size is the embedding dimension (512)
    # clone two sublayer connections: self.sublayer = clones(SublayerConnection(size, dropout), 2)
# forward(self, x, mask):
    # pass data through sublayer connection 1: self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
    # pass data through sublayer connection 2: self.sublayer[1](x, self.feed_forward)
class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Applies multi-head self-attention and a position-wise feed-forward
    network, each wrapped in its own residual sublayer connection.
    """

    def __init__(self, embedding_dim, self_attention, feedforward, dropout):
        super().__init__()
        # Multi-head self-attention module
        self.self_attention = self_attention
        # Position-wise feed-forward module
        self.feedforward = feedforward
        # Model/embedding dimension (e.g. 512)
        self.embedding_dim = embedding_dim
        # Two residual sublayer connections: [0] for attention, [1] for feed-forward
        self.sublayer = clones(SublayerConnection(embedding_dim, dropout), 2)

    def forward(self, x, mask):
        # Sublayer 1: self-attention with query = key = value = x
        attend = lambda t: self.self_attention(t, t, t, mask)
        x = self.sublayer[0](x, attend)
        # Sublayer 2: feed-forward network
        return self.sublayer[1](x, self.feedforward)

# Encoder implementation notes
# __init__(self, layer, N):
    # clone N encoder-layer objects: self.layers = clones(layer, N)
    # create the normalization layer: self.norm = LayerNorm(layer.size)
# forward(self, x, mask):
    # pass the data through all N layers: x = layer(x, mask)
    # return the normalized output: return self.norm(x)
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final layer norm."""

    def __init__(self,layer,N):
        # layer: a single EncoderLayer instance to be cloned N times
        # N: number of encoder layers in the stack

        super().__init__()

        # N (deep-copied) encoder layers
        self.layers = clones(layer,N)

        # Final normalization layer.
        # FIX: was LayerNorm(embedding_dim), which silently relied on a
        # module-level global from the star import; derive the size from the
        # layer itself so the encoder works for any embedding dimension.
        self.norm = LayerNorm(layer.embedding_dim)

    def forward(self,x,mask):
        # Pass the data through each of the N layers in order
        for layer in self.layers:
            x = layer(x,mask)

        # Return the normalized output
        return self.norm(x)

def test_encoder():
    """Smoke test: push a small embedded batch through a 6-layer encoder and print the result."""
    # Build a (2, 4) batch of token ids and embed it
    tokens = torch.arange(8).reshape(2, -1)
    embedded = untest_input(tokens)

    # Assemble one encoder layer, then stack 6 copies into an encoder
    attention = MutiHeadAttention(8, embedding_dim, dropout)
    ffn = PositionwiseFeedForward(embedding_dim, d_ff)
    layer = EncoderLayer(embedding_dim, attention, ffn, dropout)
    encoder = Encoder(layer, 6)

    # A real padding mask belongs here; an all-zero mask keeps the demo simple
    mask = torch.zeros(heads, 4, 4)
    encoder_result = encoder(embedded, mask)

    print(f'{"*"*30}编码后的值{"*"*30}')
    print(encoder_result)
    print(f'编码后的形状：{encoder_result.shape}')
    return encoder_result


if __name__ == '__main__':
    # Run the encoder smoke test when executed as a script.
    test_encoder()

