import torch.nn as nn

from embedding_layer import EmbeddingLayer
from positional_encoding_layer import positional_encoding


class EmbeddingAddPositionalEncoding(nn.Module):
    """Token embedding followed by addition of a positional-encoding table.

    The positional-encoding table depends only on ``seq_len`` and ``d_model``,
    so it is computed once at construction time and registered as a
    non-persistent buffer. This fixes two defects of the previous version:
    the table was rebuilt on every ``forward`` call (pure waste), and it was
    created on the CPU each time, which would raise a device mismatch as soon
    as the module was moved to a GPU. ``persistent=False`` keeps it out of the
    ``state_dict``, so existing checkpoints remain loadable.
    """

    def __init__(self, vocab_size=4096, embedding_size=512, seq_len=512, d_model=512):
        """Build the embedding layer and precompute the positional table.

        Args:
            vocab_size: number of distinct token ids accepted by the embedding.
            embedding_size: dimensionality of the token embedding vectors.
            seq_len: sequence length the positional table is built for.
            d_model: dimensionality of the positional-encoding vectors.
                NOTE(review): the addition in ``forward`` only works when this
                is broadcast-compatible with ``embedding_size`` — confirm
                callers always pass matching values.
        """
        super().__init__()
        self.seq_len = seq_len
        self.d_model = d_model
        self.embedding = EmbeddingLayer(vocab_size, embedding_size)
        # Computed once here instead of per forward call; register_buffer makes
        # the table follow .to(device)/.cuda() along with the parameters.
        # NOTE(review): assumes positional_encoding returns a torch.Tensor whose
        # shape broadcasts against the embedding output — verify against
        # positional_encoding_layer.
        self.register_buffer(
            "pe", positional_encoding(seq_len, d_model), persistent=False
        )

    def forward(self, x):
        """Embed token ids and add the precomputed positional encoding.

        Args:
            x: integer tensor of token indices (fed straight to the embedding).

        Returns:
            Embedding output plus the positional table, under the same
            broadcasting rules as the original implementation.
        """
        return self.embedding(x) + self.pe


if __name__ == '__main__':
    import torch

    # Smoke test: a batch of 3 sequences, each 5 tokens long, drawn from a
    # 10-token vocabulary; embedding and model dims are both 8 so the add
    # in forward() lines up.
    token_ids = torch.randint(0, 10, (3, 5))
    module = EmbeddingAddPositionalEncoding(
        vocab_size=10, embedding_size=8, seq_len=5, d_model=8
    )
    print(module(token_ids))
