import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.models.bert.modeling_bert import (
    BertPreTrainedModel,
    BertModel,
    BERT_INPUTS_DOCSTRING,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    TokenClassifierOutput,
    _CONFIG_FOR_DOC,
)
from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings_to_model_forward,
)


class DateEmbedding(nn.Module):
    """Construct the embeddings for the creation date."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.date_vocab_size, config.date_hidden_size, padding_idx=config.pad_token_id
        )
        self.position_embeddings = nn.Embedding(config.date_max_position_embeddings, config.date_hidden_size)

        self.LayerNorm = nn.LayerNorm(config.date_hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, date_max_position_embeddings) is registered as a buffer so it moves
        # with the module and is saved in its state dict.
        self.register_buffer("position_ids", torch.arange(config.date_max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        # Note: dense and activation are defined here but not used in forward().
        self.dense = nn.Linear(config.date_hidden_size, config.date_hidden_size)
        self.activation = nn.Tanh()

    def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
        if input_ids is not None:
            input_shape = input_ids.shape
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        # Max-pool over the date sequence dimension:
        # (batch, date_seq_len, date_hidden_size) -> (batch, date_hidden_size).
        max_over_time = torch.max(embeddings, dim=1)[0]
        return max_over_time
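
# Note: DateEmbedding maps date token ids of shape (batch_size, date_seq_len) to a single
# pooled vector of shape (batch_size, date_hidden_size), which BERTWithDateLayerTokenClassification
# below repeats across the token dimension and concatenates with each token's BERT hidden state.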


class BERTWithDateLayerTokenClassification(BertPreTrainedModel):
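    """Token classification on top of BERT with a creation-date signal.

    The pooled date vector produced by :class:`DateEmbedding` is repeated along the token
    dimension and concatenated with each token's BERT hidden state before the linear
    classification layer.
    """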

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config, add_pooling_layer=False)
        self.date_embedding = DateEmbedding(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # The classifier consumes each token's BERT hidden state concatenated with the pooled date vector.
        self.classifier = nn.Linear(config.date_hidden_size + config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        input_date_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``.
        input_date_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, date_sequence_length)`):
            Token ids of the creation date, embedded and pooled by :class:`DateEmbedding`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pool the date tokens into one vector per example and broadcast it across the
        # token dimension so it can be concatenated with every token's hidden state.
        seq_length = outputs[0].size(1)
        date_output = self.date_embedding(input_date_ids)
        sequence_output = torch.cat((outputs[0], date_output.unsqueeze(1).repeat(1, seq_length, 1)), dim=2)

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only compute the loss on tokens that are not masked out by the attention mask.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
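

# Minimal usage sketch (illustrative, not from the original module). It assumes a BertConfig
# extended with the date-specific fields read by DateEmbedding; BertConfig keeps unknown
# keyword arguments as attributes, so they can be passed directly. The sizes below are
# deliberately tiny and arbitrary, chosen only for a quick smoke test.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(
        vocab_size=1000,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=128,
        num_labels=5,
        date_vocab_size=64,
        date_hidden_size=32,
        date_max_position_embeddings=16,
    )
    model = BERTWithDateLayerTokenClassification(config)
    model.eval()

    input_ids = torch.randint(0, config.vocab_size, (2, 12))            # (batch, seq_len)
    attention_mask = torch.ones_like(input_ids)
    input_date_ids = torch.randint(0, config.date_vocab_size, (2, 4))   # (batch, date_seq_len)
    labels = torch.randint(0, config.num_labels, (2, 12))

    with torch.no_grad():
        outputs = model(
            input_ids=input_ids,
            input_date_ids=input_date_ids,
            attention_mask=attention_mask,
            labels=labels,
        )
    print(outputs.loss, outputs.logits.shape)  # logits: (2, 12, num_labels)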