import torch
from torch import nn
from transformer.MultiHeadAttention import MultiHeadAttention
from transformer.AddNorm import AddNorm
from transformer.PointWiseFFN import PointWiseFFN


class EncoderBlock(nn.Module):
    """One Transformer encoder layer.

    Applies multi-head attention followed by a position-wise feed-forward
    network, each sub-layer wrapped in a residual add & layer-norm
    (post-norm arrangement: ``norm(x + sublayer(x))``).
    """

    def __init__(self, query_size, key_size, value_size, num_head, hidden_dims, dropout):
        super().__init__()
        # Sub-layer 1: multi-head attention + residual add & norm.
        self.attention1 = MultiHeadAttention(query_size, key_size, value_size, num_head, hidden_dims, dropout)
        self.add_norm1 = AddNorm(query_size, dropout)
        # Sub-layer 2: position-wise FFN + residual add & norm.
        # The FFN maps query_size -> hidden_dims -> query_size, so the
        # feature dimension is preserved across the block.
        self.ffn = PointWiseFFN(query_size, hidden_dims, query_size)
        self.add_norm2 = AddNorm(query_size, dropout)

    def forward(self, queries, keys, values, valid_lens):
        """Run one encoder layer; output has the same shape as `queries`.

        `valid_lens` masks out padding positions inside the attention
        sub-layer (semantics defined by MultiHeadAttention).
        """
        attn_out = self.attention1(queries, keys, values, valid_lens)
        normed = self.add_norm1(queries, attn_out)
        return self.add_norm2(normed, self.ffn(normed))


# if __name__ == '__main__':
#     x = torch.randn(10, 20, 40)  # (batch_size,seq_len,embedding_dims)
#     block = EncoderBlock(40, 40, 40, 4, 100, 0.5)
#     result = block(x, x, x, torch.randint(0, 5, (10,)))
#     print(result.shape)  # (10, 20, 40)
