from transformers import BertConfig

from models.language_model._base import LMModule
from utils import Params
import torch.nn as nn
import torch


class Word2VecLMModule(LMModule):
    """Language-model module that maps token ids to dense embeddings.

    A plain trainable embedding table; padded positions are zeroed via the
    input mask. No contextualization is applied.
    """

    def __init__(self, params: Params):
        super(Word2VecLMModule, self).__init__(params=params)

        # NOTE: a gensim-trained word2vec matrix could be plugged in here
        # as the initial weight of this embedding table.
        cfg = self.params.config
        self.emb = nn.Embedding(
            num_embeddings=cfg.vocab_size,
            embedding_dim=cfg.hidden_size,
        )

    def forward(self, input_ids, input_mask, **kwargs):
        """Embed token ids and zero out masked (padding) positions.

        Args:
            input_ids: [N, T] integer token ids.
            input_mask: [N, T] mask — 1 for real tokens, 0 for padding.

        Returns:
            [N, T, E] embedding tensor with padded positions zeroed.
        """
        embedded = self.emb(input_ids)  # [N, T] -> [N, T, E]
        mask = input_mask.unsqueeze(-1).to(embedded.dtype)
        return embedded * mask


if __name__ == '__main__':
    # Smoke test: embed two short token sequences and check the output shape.
    params = Params(
        config=BertConfig(
            vocab_size=100,
            hidden_size=12,
        )
    )
    module = Word2VecLMModule(params)
    token_ids = torch.tensor([
        [1, 3, 4, 1, 5],
        [3, 4, 1, 0, 0]
    ])
    token_mask = torch.tensor([
        [1, 1, 1, 1, 1],
        [1, 1, 1, 0, 0]
    ])
    out = module(token_ids, token_mask)
    # Expect [2, 5, 12]: 2 samples, 5 tokens each, mapped to 12-dim embeddings.
    print(out.shape)
    print(out)
