|
from typing import List, Optional, Tuple, Union

import torch
from transformers import BertModel
from transformers.models.bert.modeling_bert import (
    BertEmbeddings,
    BertEncoder,
    BertForMaskedLM,
    BertPooler,
    MaskedLMOutput,
)


class BertEmbeddingsV2(BertEmbeddings):
    """BERT embeddings that derive position ids from input_ids, so that
    padding tokens always map to position 0 (RoBERTa-style numbering)."""

    def __init__(self, config):
        super().__init__(config)
        self.pad_token_id = config.pad_token_id
        # Rebuild the position-embedding table with padding_idx=0: row 0 is
        # reserved for padding positions and receives no gradient updates.
        self.position_embeddings = torch.nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=0
        )

    def forward(
        self,
        input_ids: torch.LongTensor,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        # Respect caller-supplied embeddings/position ids instead of
        # silently recomputing them.
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if position_ids is None:
            position_ids = self.create_position_ids_from_input_ids(
                input_ids, past_key_values_length
            )
        position_embeddings = self.position_embeddings(position_ids)
        # Unlike the stock BertEmbeddings, this variant adds no token-type
        # embeddings; token_type_ids is accepted only for interface
        # compatibility with BertModel.
        embeddings = inputs_embeds + position_embeddings
        return self.dropout(self.LayerNorm(embeddings))

    def create_position_ids_from_input_ids(
        self, input_ids: torch.LongTensor, past_key_values_length: int = 0
    ) -> torch.Tensor:
        # Number non-pad tokens 1, 2, ... (offset by any cached prefix
        # length) and keep padding tokens at position 0, matching the
        # padding_idx set above.
        # e.g. with pad_token_id == 0: [[7, 9, 0, 0]] -> [[1, 2, 0, 0]]
        mask = input_ids.ne(self.pad_token_id).int()
        return (torch.cumsum(mask, dim=1) + past_key_values_length).long() * mask


class BertModelV2(BertModel):
    def __init__(self, config):
        super().__init__(config)
        # Swap in the pad-aware embeddings; encoder and pooler are unchanged.
        self.embeddings = BertEmbeddingsV2(config)
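

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal demo of the pad-aware position ids. The checkpoint name
# "bert-base-uncased" (whose pad_token_id is 0) and the sample token ids
# are assumptions chosen for illustration, not requirements of the
# classes above.
if __name__ == "__main__":
    model = BertModelV2.from_pretrained("bert-base-uncased")
    input_ids = torch.tensor([[101, 7592, 2088, 102, 0, 0]])  # [CLS] hello world [SEP] [PAD] [PAD]
    # Non-pad tokens are numbered from 1; both [PAD] positions stay at 0.
    print(model.embeddings.create_position_ids_from_input_ids(input_ids))
    # tensor([[1, 2, 3, 4, 0, 0]])
    outputs = model(input_ids=input_ids, attention_mask=input_ids.ne(0).long())
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 6, 768])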