import torch
from torch import nn
from torch.nn import LayerNorm

from transformers import BertModel
from transformers.modeling_outputs import TokenClassifierOutput
from transformers.models.bert.modeling_bert import BertPreTrainedModel


BertLayerNorm = LayerNorm


def log_sum_exp_batch(log_Tensor, axis=-1):
    """Numerically stable log-sum-exp over `axis` for a 3-D batch of scores.

    Expects a tensor of shape (batch_size, ..., num_labels); the max is
    subtracted before exponentiating to avoid overflow, then added back.
    """
    max_score = torch.max(log_Tensor, axis)[0]
    return max_score + torch.log(
        torch.exp(log_Tensor - max_score.view(log_Tensor.shape[0], -1, 1)).sum(axis)
    )
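
# A minimal sanity-check sketch (illustrative, not part of the original model):
# for a 3-D score tensor the helper above should agree with torch.logsumexp
# over the same axis, e.g.
#
#     scores = torch.randn(2, 5, 5)
#     assert torch.allclose(log_sum_exp_batch(scores),
#                           torch.logsumexp(scores, dim=-1), atol=1e-5)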


class BERT_CRF_NER(BertPreTrainedModel):
    """BERT encoder with a CRF output layer for token-level NER tagging."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        # Take the encoder width from the config instead of hardcoding 768,
        # so checkpoints other than bert-base also work.
        self.hidden_size = config.hidden_size
        self.start_label_id = config.start_label_id
        self.stop_label_id = config.stop_label_id
        self.num_labels = config.num_classes

        # Stored for callers' convenience; not used by the model itself.
        self.batch_size = config.batch_size

        # The pooler is unused for token classification, so skip it.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = torch.nn.Dropout(0.2)
        self.hidden2label = nn.Linear(self.hidden_size, self.num_labels)

        # transitions[i, j] is the (log-space) score of moving from tag j to tag i.
        self.transitions = nn.Parameter(
            torch.randn(self.num_labels, self.num_labels))

        # Forbid transitions into the start label and out of the stop label.
        self.transitions.data[self.start_label_id, :] = -10000
        self.transitions.data[:, self.stop_label_id] = -10000

        nn.init.xavier_uniform_(self.hidden2label.weight)
        nn.init.constant_(self.hidden2label.bias, 0.0)

    def init_bert_weights(self, module):
        """Initialize the weights (mirrors transformers' BERT initialization)."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # cf. the original TF BERT, which uses truncated_normal here.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def _forward_alg(self, feats):
        """Alpha (forward) recursion: log partition over all label sequences.

        `feats` has shape (batch_size, T, num_labels).
        """
        T = feats.shape[1]
        batch_size = feats.shape[0]

        # log_alpha[:, 0, j] = log p(z_0 = j); every path starts at start_label_id.
        log_alpha = torch.full((batch_size, 1, self.num_labels), -10000., device=self.device)
        log_alpha[:, 0, self.start_label_id] = 0

        # log_alpha_t = logsumexp over z_{t-1} of (transitions + log_alpha_{t-1}) + feats_t
        for t in range(1, T):
            log_alpha = (log_sum_exp_batch(self.transitions + log_alpha, axis=-1) + feats[:, t]).unsqueeze(1)

        # Sum out the final label to get log Z(x) per sequence.
        log_prob_all_barX = log_sum_exp_batch(log_alpha)
        return log_prob_all_barX

    def _get_bert_features(self, input_ids,
                           attention_mask,
                           token_type_ids,
                           position_ids,
                           head_mask,
                           inputs_embeds,
                           output_attentions,
                           output_hidden_states,
                           return_dict):
        """sentences -> BERT -> dropout -> linear -> per-token emission feats"""
        bert_seq_out = self.bert(input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 position_ids=position_ids,
                                 head_mask=head_mask,
                                 inputs_embeds=inputs_embeds,
                                 output_attentions=output_attentions,
                                 output_hidden_states=output_hidden_states,
                                 return_dict=return_dict)

        # Last hidden state -> dropout -> projection to label space.
        bert_seq_out_last = bert_seq_out[0]
        bert_seq_out_last = self.dropout(bert_seq_out_last)
        bert_feats = self.hidden2label(bert_seq_out_last)
        return bert_feats, bert_seq_out

    def _score_sentence(self, feats, label_ids):
        """Score of a given label sequence:

        log p(x_{1:T}, z_{1:T}) = sum_t [ log p(z_t | z_{t-1}) + log p(x_t | z_t) ]
        """
        T = feats.shape[1]
        batch_size = feats.shape[0]

        # Flatten the (num_labels, num_labels) transition matrix per batch item
        # so a single gather can pick out transitions[z_t, z_{t-1}].
        batch_transitions = self.transitions.expand(batch_size, self.num_labels, self.num_labels)
        batch_transitions = batch_transitions.flatten(1)

        score = torch.zeros((feats.shape[0], 1)).to(self.device)
        for t in range(1, T):
            score = score + \
                batch_transitions.gather(-1, (label_ids[:, t] * self.num_labels + label_ids[:, t-1]).view(-1, 1)) + \
                feats[:, t].gather(-1, label_ids[:, t].view(-1, 1)).view(-1, 1)
        return score

    def _viterbi_decode(self, feats):
        """Max-product (Viterbi) decoding: argmax_z p(z_{0:T} | x_{0:T})."""
        T = feats.shape[1]
        batch_size = feats.shape[0]

        # log_delta[b, 0, j]: best score of any path ending in tag j at time t.
        log_delta = torch.full((batch_size, 1, self.num_labels), -10000., device=self.device)
        log_delta[:, 0, self.start_label_id] = 0

        # psi[b, t, j]: best previous tag, given that tag j is taken at time t.
        psi = torch.zeros((batch_size, T, self.num_labels), dtype=torch.long).to(self.device)
        for t in range(1, T):
            # Maximise over the previous tag, then add the emission scores.
            log_delta, psi[:, t] = torch.max(self.transitions + log_delta, -1)
            log_delta = (log_delta + feats[:, t]).unsqueeze(1)

        # Backtrace: start from the best final tag, then follow psi backwards.
        path = torch.zeros((batch_size, T), dtype=torch.long).to(self.device)
        # squeeze(1) rather than squeeze() so batch_size == 1 is handled correctly.
        max_logLL_allz_allx, path[:, -1] = torch.max(log_delta.squeeze(1), -1)
        for t in range(T-2, -1, -1):
            path[:, t] = psi[:, t+1].gather(-1, path[:, t+1].view(-1, 1)).squeeze(-1)

        return max_logLL_allz_allx, path
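
    # Shape note (descriptive comment, an addition to the original code):
    # `feats` is (batch, T, num_labels); `_viterbi_decode` returns the best path
    # score per sequence, shape (batch,), and the decoded label ids, shape
    # (batch, T), where position 0 corresponds to the artificial start label.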

    def neg_log_likelihood(self, input_ids,
                           attention_mask,
                           token_type_ids,
                           position_ids,
                           head_mask,
                           inputs_embeds,
                           output_attentions,
                           output_hidden_states,
                           return_dict,
                           label_ids):
        """CRF negative log-likelihood: log Z(x) minus the gold path score."""
        bert_feats, _ = self._get_bert_features(input_ids,
                                                attention_mask,
                                                token_type_ids,
                                                position_ids,
                                                head_mask,
                                                inputs_embeds,
                                                output_attentions,
                                                output_hidden_states,
                                                return_dict)

        # Log partition function over all label sequences.
        forward_score = self._forward_alg(bert_feats)
        # Score of the gold label sequence.
        gold_score = self._score_sentence(bert_feats, label_ids)
        return torch.mean(forward_score - gold_score)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        inference_mode=False,
    ):
        bert_feats, bert_out = self._get_bert_features(input_ids,
                                                       attention_mask,
                                                       token_type_ids,
                                                       position_ids,
                                                       head_mask,
                                                       inputs_embeds,
                                                       output_attentions,
                                                       output_hidden_states,
                                                       return_dict)

        # Viterbi-decoded label sequences are returned in place of logits.
        score, label_seq_ids = self._viterbi_decode(bert_feats)

        # Only compute the CRF loss when training labels are available.
        neg_log_likelihood = None
        if not inference_mode and labels is not None:
            neg_log_likelihood = self.neg_log_likelihood(input_ids,
                                                         attention_mask,
                                                         token_type_ids,
                                                         position_ids,
                                                         head_mask,
                                                         inputs_embeds,
                                                         output_attentions,
                                                         output_hidden_states,
                                                         return_dict,
                                                         labels)

        return TokenClassifierOutput(
            loss=neg_log_likelihood,
            logits=label_seq_ids,
            hidden_states=bert_out.hidden_states,
            attentions=bert_out.attentions,
        )
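

if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It assumes the
    # caller attaches the custom attributes this class reads (start_label_id,
    # stop_label_id, num_classes, batch_size) to a BertConfig, and it uses a
    # randomly initialised model rather than pretrained weights so it runs
    # offline. The tag layout below is hypothetical.
    from transformers import BertConfig

    config = BertConfig()
    config.start_label_id = 0   # e.g. [START], [STOP], O, B-ENT, I-ENT
    config.stop_label_id = 1
    config.num_classes = 5
    config.batch_size = 2

    model = BERT_CRF_NER(config)

    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    attention_mask = torch.ones_like(input_ids)
    labels = torch.randint(2, config.num_classes, (2, 16))
    # The alpha recursion starts from start_label_id, so the gold sequence
    # mirrors that convention at position 0.
    labels[:, 0] = config.start_label_id

    out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
    print("loss:", out.loss.item())
    print("decoded paths:", out.logits.shape)  # (batch_size, seq_len)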