import torch
from torch import nn
from d2l import torch as d2l
from Transformer import transformer as tf

class BertEncoder(nn.Module):
    """BERT encoder: token + segment + learned positional embeddings
    followed by a stack of Transformer encoder blocks.

    Unlike the original Transformer, BERT uses a *learnable* positional
    embedding (``self.pos_embedding``) instead of fixed sinusoids.
    """
    def __init__(self, vocab_size, num_hiddens, norm_shape,
                 ffn_num_input, ffn_num_hiddens, num_heads, num_layers, dropout,
                 max_len=1000, key_size=768, query_size=768, value_size=768, **kwargs):
        # BUG FIX: was `super.__init__(**kwargs)` — `super` without parentheses
        # is the builtin type itself, so this raised a TypeError at construction.
        super().__init__(**kwargs)
        self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
        # Segment embedding distinguishes sentence A (0) from sentence B (1).
        self.segment_embedding = nn.Embedding(2, num_hiddens)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module(str(i), tf.EncoderBlock(
                key_size, query_size, value_size, num_hiddens, norm_shape,
                ffn_num_input, ffn_num_hiddens, num_heads, dropout, True))
        # Learnable positional embedding; max_len bounds the sequence length.
        self.pos_embedding = nn.Parameter(torch.randn(1, max_len, num_hiddens))

    def forward(self, tokens, segments, valid_lens):
        """Encode `tokens` (batch, seq_len) with segment ids `segments`.

        Returns the contextual representations of shape
        (batch, seq_len, num_hiddens).
        """
        # Shape of X stays (batch, seq_len, num_hiddens) throughout.
        X = self.token_embedding(tokens) + self.segment_embedding(segments)
        # Slice the positional table to the actual sequence length.
        X = X + self.pos_embedding.data[:, :X.shape[1], :]
        for blk in self.blks:
            X = blk(X, valid_lens)
        return X

class MaskLM(nn.Module):
    """Masked-language-model head of BERT.

    Gathers the encoder outputs at the masked positions and maps each
    through a one-hidden-layer MLP to vocabulary logits.
    """
    def __init__(self, vocab_size, num_hiddens, num_inputs=768, **kwargs):
        # BUG FIX: was `super.__init__(**kwargs)` (missing call parentheses),
        # which raised a TypeError when the module was instantiated.
        super().__init__(**kwargs)
        self.mlp = nn.Sequential(
            nn.Linear(num_inputs, num_hiddens),
            nn.ReLU(),
            nn.LayerNorm(num_hiddens),
            nn.Linear(num_hiddens, vocab_size)
        )

    def forward(self, X, pre_positions):
        """Predict vocabulary logits at masked positions.

        Args:
            X: encoder output, shape (batch, seq_len, num_inputs).
            pre_positions: indices of masked tokens, shape
                (batch, num_pre_positions).

        Returns:
            Logits of shape (batch, num_pre_positions, vocab_size).
        """
        num_pre_positions = pre_positions.shape[1]
        pre_positions = pre_positions.reshape(-1)
        batch_size = X.shape[0]
        # Repeat each batch index once per masked position so the two flat
        # index vectors line up for fancy indexing, e.g. batch 0's positions
        # pair with batch_idx [0, 0, ...], batch 1's with [1, 1, ...].
        batch_idx = torch.arange(0, batch_size)
        batch_idx = torch.repeat_interleave(batch_idx, num_pre_positions)
        mask_X = X[batch_idx, pre_positions]
        # BUG FIX: original reshaped to (batch_size, num_pre_positions),
        # dropping the feature dimension — a RuntimeError, since the gathered
        # tensor has batch_size*num_pre_positions*num_inputs elements.
        # The trailing -1 keeps the hidden dimension intact.
        mask_X = mask_X.reshape((batch_size, num_pre_positions, -1))
        mlm_Y_hat = self.mlp(mask_X)
        return mlm_Y_hat

class NextSentencePred(nn.Module):
    """Next-sentence-prediction head: a binary classifier over a pooled
    representation (BERT feeds it the transformed <cls> token)."""
    def __init__(self, num_inputs, **kwargs):
        # BUG FIX: was `super.__init__(**kwargs)` (missing call parentheses),
        # which raised a TypeError when the module was instantiated.
        super().__init__(**kwargs)
        # Two logits: "is next sentence" vs "is not".
        self.output = nn.Linear(num_inputs, 2)

    def forward(self, X):
        """Return (batch, 2) logits for input X of shape (batch, num_inputs)."""
        return self.output(X)

class BERTModel(nn.Module):
    """Full BERT pretraining model: encoder + MLM head + NSP head."""
    def __init__(self, vocab_size, num_hiddens, norm_shape,
                 ffn_num_input, ffn_num_hiddens, num_heads, num_layers, dropout,
                 max_len=1000, key_size=768, query_size=768, value_size=768,
                 hid_in_features=768, mlm_in_features=768, nsp_in_features=768):
        # BUG FIX: was `super.__init__()` (missing call parentheses), which
        # raised a TypeError when the model was instantiated.
        super().__init__()
        self.encoder = BertEncoder(vocab_size, num_hiddens, norm_shape,
                                   ffn_num_input, ffn_num_hiddens, num_heads, num_layers, dropout,
                                   max_len=max_len, key_size=key_size, query_size=query_size,
                                   value_size=value_size)
        # Pooler: maps the <cls> representation before the NSP classifier.
        self.hidden = nn.Sequential(nn.Linear(hid_in_features, num_hiddens), nn.Tanh())
        self.mlm = MaskLM(vocab_size, num_hiddens, mlm_in_features)
        self.nsp = NextSentencePred(nsp_in_features)

    def forward(self, tokens, segments, valid_lens=None, pred_positions=None):
        """Run the full pretraining forward pass.

        Args:
            tokens: token ids, shape (batch, seq_len).
            segments: segment ids (0/1), same shape as `tokens`.
            valid_lens: optional per-example valid lengths for masking.
            pred_positions: optional masked positions; when None the MLM
                head is skipped and `mlm_Y_hat` is None.

        Returns:
            (encoded_X, mlm_Y_hat, nsp_Y_hat).
        """
        encoded_X = self.encoder(tokens, segments, valid_lens)
        if pred_positions is not None:
            mlm_Y_hat = self.mlm(encoded_X, pred_positions)
        else:
            mlm_Y_hat = None
        # NSP classifies from the <cls> token (sequence position 0),
        # passed through the tanh pooler.
        nsp_Y_hat = self.nsp(self.hidden(encoded_X[:, 0, :]))
        return encoded_X, mlm_Y_hat, nsp_Y_hat
        