from torch import nn

from model.MyTransformer import MyTransformer
from model.Embedding import PositionalEncoding, TokenEmbedding


class TranslationModel(nn.Module):
    """Encoder-decoder translation model.

    Wraps ``MyTransformer`` with source/target token embeddings, a shared
    positional encoding, and a final linear layer projecting decoder outputs
    onto the target vocabulary.

    All sequence tensors use the time-first layout
    ``[seq_len, batch_size, ...]`` (see shape comments in :meth:`forward`).

    Args:
        src_vocab_size: size of the source-language vocabulary.
        tgt_vocab_size: size of the target-language vocabulary.
        d_model: embedding / model dimension.
        nhead: number of attention heads.
        num_encoder_layers: number of transformer encoder layers.
        num_decoder_layers: number of transformer decoder layers.
        dim_feedforward: hidden size of the position-wise feed-forward nets.
        dropout: dropout probability used throughout.
    """

    def __init__(self, src_vocab_size,
                 tgt_vocab_size,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1):
        # Zero-arg super() is the modern Python 3 idiom.
        super().__init__()
        self.my_transformer = MyTransformer(d_model=d_model,
                                            nhead=nhead,
                                            num_encoder_layers=num_encoder_layers,
                                            num_decoder_layers=num_decoder_layers,
                                            dim_feedforward=dim_feedforward,
                                            dropout=dropout)

        # One positional-encoding module is shared by source and target streams.
        self.pos_embedding = PositionalEncoding(d_model=d_model, drop_out=dropout)

        self.src_token_embedding = TokenEmbedding(src_vocab_size, d_model)
        self.tgt_token_embedding = TokenEmbedding(tgt_vocab_size, d_model)
        # Projects decoder outputs to per-token logits over the target vocab.
        self.classification = nn.Linear(d_model, tgt_vocab_size)

    def _embed_src(self, src):
        """Token-embed then positionally encode a source batch.

        Returns a tensor of shape [src_len, batch_size, d_model].
        """
        return self.pos_embedding(self.src_token_embedding(src))

    def _embed_tgt(self, tgt):
        """Token-embed then positionally encode a target batch.

        Returns a tensor of shape [tgt_len, batch_size, d_model].
        """
        return self.pos_embedding(self.tgt_token_embedding(tgt))

    def forward(self, src=None, tgt=None,
                src_mask=None,
                tgt_mask=None,
                memory_mask=None,
                src_key_padding_mask=None,
                tgt_key_padding_mask=None,
                memory_key_padding_mask=None):
        """Run the full encoder-decoder pass and return vocabulary logits.

        Args:
            src: source token ids — assumed shape [src_len, batch_size],
                per the shape comments in the original code (TODO confirm
                against TokenEmbedding).
            tgt: target token ids — assumed shape [tgt_len, batch_size].
            src_mask, tgt_mask, memory_mask: attention masks, forwarded
                unchanged to ``MyTransformer``.
            src_key_padding_mask, tgt_key_padding_mask,
            memory_key_padding_mask: padding masks, forwarded unchanged.

        Returns:
            logits of shape [tgt_len, batch_size, tgt_vocab_size].
        """
        # src_embed shape [src_len, batch_size, embed_dim]
        src_embed = self._embed_src(src)
        # tgt_embed shape [tgt_len, batch_size, embed_dim]
        tgt_embed = self._embed_tgt(tgt)
        # out shape [tgt_len, batch_size, embed_dim]
        out = self.my_transformer(src=src_embed,
                                  tgt=tgt_embed,
                                  src_mask=src_mask,
                                  tgt_mask=tgt_mask,
                                  memory_mask=memory_mask,
                                  src_key_padding_mask=src_key_padding_mask,
                                  tgt_key_padding_mask=tgt_key_padding_mask,
                                  memory_key_padding_mask=memory_key_padding_mask)
        # logits shape [tgt_len, batch_size, tgt_vocab_size]
        return self.classification(out)

    def encoder(self, src):
        """Encode a source batch into memory (e.g. for step-wise decoding).

        Note: no attention/padding masks are applied here — callers that
        need them must use :meth:`forward`.
        """
        src_embed = self._embed_src(src)
        memory = self.my_transformer.encoder(src_embed)
        return memory

    def decoder(self, tgt, memory):
        """Decode target tokens against precomputed encoder ``memory``.

        Returns decoder hidden states (NOT vocabulary logits — apply
        ``self.classification`` to obtain logits).
        """
        tgt_embed = self._embed_tgt(tgt)
        outs = self.my_transformer.decoder(tgt_embed, memory=memory)
        return outs
