import math

import torch
from torch import nn
from EncoderDecoder import Encoder
from transformer.PositionEncoding import PositionEncoding
from EncoderBlock import EncoderBlock


class TransformerEncoder(Encoder):
    """Transformer encoder: token embedding + sinusoidal positional encoding
    followed by a stack of self-attention encoder blocks.
    """

    def __init__(self, vocab_size, embedding_dims, num_layers, key_size, value_size, num_head, hidden_dims, dropout):
        """
        Args:
            vocab_size: size of the source vocabulary.
            embedding_dims: model dimension (d_model) shared by the embedding,
                positional encoding, and every encoder block.
            num_layers: number of stacked ``EncoderBlock`` layers.
            key_size: attention key projection size.
            value_size: attention value projection size.
            num_head: number of attention heads.
            hidden_dims: hidden size of the position-wise feed-forward network.
            dropout: dropout probability used by the positional encoding and blocks.
        """
        super().__init__()

        # Keep d_model on the module: forward() needs it for the sqrt(d_model)
        # embedding rescale, and the input token tensor does not carry it
        # (its last dimension is seq_len, not d_model).
        self.embedding_dims = embedding_dims
        self.embedding = nn.Embedding(vocab_size, embedding_dims)
        self.position_enc = PositionEncoding(embedding_dims, max_len=1000, dropout=dropout)

        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module(f"encoder_block{i}",
                                 EncoderBlock(embedding_dims, key_size, value_size, num_head, hidden_dims, dropout))

    def forward(self, x, valid_lens):
        """Encode a batch of token-id sequences.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).
            valid_lens: per-sequence valid lengths used for attention masking
                inside each encoder block.

        Returns:
            Encoded representations of shape (batch, seq_len, embedding_dims).
        """
        # BUG FIX: the original used x.shape[-1] as d_model, but x holds raw
        # token ids, so its last dimension is seq_len — the embedding was
        # being scaled by sqrt(seq_len). Scale by sqrt(d_model) instead, as
        # in "Attention Is All You Need", so embedding magnitudes are not
        # dwarfed by the positional encoding.
        x = self.position_enc(self.embedding(x) * math.sqrt(self.embedding_dims))
        # (Removed leftover debug print(x.shape) that ran on every forward pass.)
        for blk in self.blks:
            x = blk(x, x, x, valid_lens)
        return x


# if __name__ == '__main__':
#     vocab_size = 10
#     embedding_dims = 100
#     inputs = torch.randint(0, 10, (20, 9))
#     encoder = TransformerEncoder(vocab_size, embedding_dims, 2,
#                                  embedding_dims, embedding_dims, 2, 100, 0.5)
#
#     valid_lens = torch.randint(0, 8, (20,))
#     results = encoder(inputs, valid_lens)
#     print(results.shape)
