import math

import torch
from torch import nn
from EncoderDecoder import Decoder
from transformer.PositionEncoding import PositionEncoding
from DecoderBlock import DecoderBlock


class TransformerDecoder(Decoder):
    """Transformer decoder: embeds target tokens, adds positional encoding,
    runs the result through a stack of DecoderBlocks, and projects to
    vocabulary logits.

    Args:
        vocab_size: size of the target vocabulary.
        embedding_dims: model dimension (d_model) of embeddings and blocks.
        num_layers: number of stacked DecoderBlocks.
        key_size: per-head key projection size passed to each DecoderBlock.
        value_size: per-head value projection size passed to each DecoderBlock.
        num_head: number of attention heads.
        hidden_dims: hidden width of the position-wise feed-forward network.
        dropout: dropout probability (shared by positional encoding and blocks).
    """

    def __init__(self, vocab_size, embedding_dims, num_layers, key_size, value_size, num_head, hidden_dims, dropout):
        super().__init__()

        self.embedding = nn.Embedding(vocab_size, embedding_dims)
        self.position_enc = PositionEncoding(embedding_dims, max_len=1000, dropout=dropout)

        self.blks = nn.Sequential()
        for i in range(num_layers):
            # Fixed copy-paste: these are decoder blocks, not encoder blocks.
            # NOTE(review): this renames the submodules, so state_dict keys of
            # previously saved checkpoints change from "encoder_block*".
            self.blks.add_module(f"decoder_block{i}",
                                 DecoderBlock(embedding_dims, key_size, value_size, num_head, hidden_dims, dropout))

        # Final projection onto the vocabulary (logits). Name "ln" kept for
        # checkpoint compatibility, although it is a Linear, not a LayerNorm.
        self.ln = nn.Linear(embedding_dims, vocab_size)

    def forward(self, x, state):
        """Run the decoder.

        Args:
            x: (batch_size, seq_len) tensor of target token ids.
            state: decoder state (per-block caches / encoder outputs) threaded
                through each DecoderBlock.

        Returns:
            Tuple of (logits with shape (batch_size, seq_len, vocab_size),
            updated state).
        """
        # BUG FIX: the original read d_model from x.shape[-1], but at this
        # point x holds raw token ids of shape (batch, seq_len), so it scaled
        # the embedding by sqrt(seq_len) instead of sqrt(d_model). The sqrt
        # scaling exists to keep embedding magnitudes comparable to the
        # positional encoding ("Attention Is All You Need", section 3.4).
        d_model = self.embedding.embedding_dim
        x = self.position_enc(self.embedding(x) * math.sqrt(d_model))
        # (Removed leftover debug print of x.shape.)
        for blk in self.blks:
            x, state = blk(x, state)
        return self.ln(x), state


# if __name__ == '__main__':
#     enc_inputs = torch.randn(10, 9, 20)
#     enc_valid_lens = torch.randint(1, 6, (10,))
#     state = (enc_inputs, enc_valid_lens)
#
#     # Decoder input: (batch_size, seq_len) token ids
#     dec_inputs = torch.randint(0, 10, (10, 12))
#
#     decoder = TransformerDecoder(10, 20, 2, 20,
#                                  20, 2, 30, 0.5)
#     dec_outputs, state = decoder(dec_inputs, state)
#     print(dec_outputs.shape)  # (10, 12, 10)
