import torch
import torch.nn as nn
import math
from tokenizer import ChineseTokenizer, EnglishTokenizer
from config import DIM_MODEL, NUM_HEADS, NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, PROCESSED_DATA_DIR


class PositionEncoding(nn.Module):
    """Additive sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes pe[pos, 2i]   = sin(pos / 10000^(2i / d_model))
                pe[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
    once at construction and adds the first `seq_len` rows to the input.
    """

    def __init__(self, d_model, max_len=500):
        """
        Args:
            d_model: embedding dimension (odd values are supported).
            max_len: longest sequence length the table covers.
        """
        super().__init__()
        # Vectorized build replaces the original O(max_len * d_model)
        # Python double loop; the computed values are identical.
        position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)   # [max_len, 1]
        div_term = torch.pow(
            10000.0, torch.arange(0, d_model, 2, dtype=torch.float) / d_model
        )                                                                  # [ceil(d_model / 2)]
        pe = torch.zeros(max_len, d_model, dtype=torch.float)
        pe[:, 0::2] = torch.sin(position / div_term)
        # Slice div_term so an odd d_model no longer fails: the original loop
        # wrote pe[pos, _2i + 1] and raised IndexError when _2i + 1 == d_model.
        pe[:, 1::2] = torch.cos(position / div_term[: d_model // 2])

        # Registered as a buffer so it moves with .to(device) / state_dict
        # but is not a trainable parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to `x`.

        Args:
            x: embeddings of shape [batch_size, seq_len, d_model],
               with seq_len <= max_len.

        Returns:
            Tensor of the same shape with positions added.
        """
        seq_len = x.shape[1]
        # pe[:seq_len] is [seq_len, d_model]; broadcasting adds it to
        # every sequence in the batch.
        return x + self.pe[:seq_len]


class TranslationModel(nn.Module):
    """Seq2seq Transformer that translates Chinese (source) into English (target).

    Pipeline: token embedding -> additive positional encoding ->
    nn.Transformer encoder/decoder (batch_first) -> linear projection
    onto the English vocabulary.
    """

    def __init__(self, zh_vocab_size, en_vocab_size, zh_padding_idx, en_padding_idx):
        """
        Args:
            zh_vocab_size: size of the Chinese (source) vocabulary.
            en_vocab_size: size of the English (target) vocabulary.
            zh_padding_idx: pad token index in the Chinese vocabulary.
            en_padding_idx: pad token index in the English vocabulary.
        """
        super().__init__()
        self.src_embedding = nn.Embedding(num_embeddings=zh_vocab_size,
                                          embedding_dim=DIM_MODEL,
                                          padding_idx=zh_padding_idx)
        self.tgt_embedding = nn.Embedding(num_embeddings=en_vocab_size,
                                          embedding_dim=DIM_MODEL,
                                          padding_idx=en_padding_idx)
        # Shared positional-encoding table for both source and target.
        self.position_encoding = PositionEncoding(DIM_MODEL)
        self.transformer = nn.Transformer(d_model=DIM_MODEL,
                                          nhead=NUM_HEADS,
                                          num_encoder_layers=NUM_ENCODER_LAYERS,
                                          num_decoder_layers=NUM_DECODER_LAYERS,
                                          batch_first=True)
        # Projects decoder states onto per-token English vocabulary logits.
        self.linear = nn.Linear(DIM_MODEL, en_vocab_size)

    def encoder(self, src, src_pad_mask):
        """Encode source ids [batch, src_len] into memory [batch, src_len, d_model]."""
        embedded = self.position_encoding(self.src_embedding(src))
        return self.transformer.encoder(src=embedded,
                                        src_key_padding_mask=src_pad_mask)

    def decoder(self, tgt, memory, tgt_mask, tgt_pad_mask, src_pad_mask):
        """Decode target ids against encoder memory.

        Args:
            tgt: target token ids, [batch, tgt_len].
            memory: encoder output, [batch, src_len, d_model].
            tgt_mask: causal attention mask over target positions.
            tgt_pad_mask: padding mask for the target sequence.
            src_pad_mask: padding mask for the source sequence (applied to memory).

        Returns:
            Logits of shape [batch, tgt_len, en_vocab_size].
        """
        embedded = self.position_encoding(self.tgt_embedding(tgt))
        hidden = self.transformer.decoder(tgt=embedded,
                                          memory=memory,
                                          tgt_mask=tgt_mask,
                                          tgt_key_padding_mask=tgt_pad_mask,
                                          memory_key_padding_mask=src_pad_mask)
        return self.linear(hidden)

    def forward(self, src, tgt, src_pad_mask, tgt_pad_mask, tgt_mask):
        """Full encode-decode pass; returns [batch, tgt_len, en_vocab_size] logits."""
        memory = self.encoder(src, src_pad_mask)
        return self.decoder(tgt, memory, tgt_mask, tgt_pad_mask, src_pad_mask)


if __name__ == '__main__':
    # Smoke test: load both vocabularies, build the model, print its structure.
    source_tok = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    target_tok = EnglishTokenizer.from_vocab(PROCESSED_DATA_DIR / 'en_vocab.txt')
    print(TranslationModel(source_tok.vocab_size, target_tok.vocab_size,
                           source_tok.pad_token_index, target_tok.pad_token_index))
