import torch.nn as nn

from multi_head_attention_layer import MultiHeadAttention
from add_and_norm_layer import AddAndNorm
from feed_forward_layer import FeedForwardNet

"""
编码器
"""


class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Composes two residual sublayers via ``AddAndNorm``:
    multi-head self-attention followed by a position-wise
    feed-forward network.

    Args:
        d_model: model/embedding dimension fed to every sublayer.
        num_heads: number of attention heads.
        d_ff: hidden width of the feed-forward network.
    """

    def __init__(self, d_model=512, num_heads=8, d_ff=2048):
        super().__init__()
        # Sublayer 1: multi-head self-attention wrapped in add & norm.
        self.multi_head_attention = MultiHeadAttention(d_model, num_heads)
        self.add_and_norm1 = AddAndNorm(d_model)
        # Sublayer 2: feed-forward network wrapped in add & norm.
        self.feed_forward = FeedForwardNet(d_model, d_ff)
        self.add_and_norm2 = AddAndNorm(d_model)

    def forward(self, x):
        """Apply attention then feed-forward, each through its AddAndNorm.

        ``AddAndNorm`` receives the input and the sublayer callable and is
        responsible for the residual connection and normalization.
        """
        attn_out = self.add_and_norm1(x, self.multi_head_attention)
        return self.add_and_norm2(attn_out, self.feed_forward)


if __name__ == '__main__':
    import torch
    from embedding_add_positional_encoding import EmbeddingAddPositionalEncoding

    # Demo: random token ids -> embedding + positional encoding -> encoder.
    token_ids = torch.randint(0, 10, (4, 8))
    embed_pe = EmbeddingAddPositionalEncoding(vocab_size=10, embedding_size=512, seq_len=8, d_model=512)
    output = embed_pe(token_ids)

    # Pass through a single encoder layer.
    memory = EncoderLayer()(output)
    print(memory)

    # Chain six independently initialized encoder layers (untrained weights).
    for _ in range(6):
        output = EncoderLayer()(output)
    print(output)
