satyaalmasian committed
Commit 89c6521
1 Parent(s): 2a8c10b

Upload BERTWithDateLayerTokenClassification.py

BERTWithDateLayerTokenClassification.py ADDED
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, \
    BERT_INPUTS_DOCSTRING, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, TokenClassifierOutput, _CONFIG_FOR_DOC
from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings_to_model_forward,
)


class DateEmebdding(nn.Module):
    """Construct the embeddings for the creation date."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.date_vocab_size, config.date_hidden_size,
                                            padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.date_max_position_embeddings, config.date_hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.date_hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.date_max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.dense = nn.Linear(config.date_hidden_size, config.date_hidden_size)
        self.activation = nn.Tanh()

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.shape
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length: seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        # Max-pool over the date sequence so each example is reduced to a single date vector.
        max_over_time = torch.max(embeddings, 1)[0]
        return max_over_time


class BERTWithDateLayerTokenClassification(BertPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config, add_pooling_layer=False)
        self.date_embedding = DateEmebdding(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # The classifier sees each BERT token representation concatenated with the pooled date embedding.
        self.classifier = nn.Linear(config.date_hidden_size + config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        input_date_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        _, seq_length = input_ids.shape
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Embed the creation date and append the pooled date vector to every token representation.
        date_output = self.date_embedding(input_date_ids)
        sequence_output = torch.cat((outputs[0], date_output.unsqueeze(1).repeat(1, seq_length, 1)), 2)

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
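

if __name__ == "__main__":
    # Minimal usage sketch showing how the classes above fit together. The date_* config
    # fields are the ones this file reads; the sizes below are illustrative assumptions
    # only -- a real checkpoint supplies its own values through config.json.
    from transformers import BertConfig

    config = BertConfig(num_labels=5)
    config.date_vocab_size = 32                # assumed date vocabulary size
    config.date_hidden_size = 32               # assumed width of the date embedding
    config.date_max_position_embeddings = 10   # assumed max length of an encoded date

    model = BERTWithDateLayerTokenClassification(config)
    model.eval()

    input_ids = torch.randint(0, config.vocab_size, (2, 16))            # dummy token ids
    input_date_ids = torch.randint(1, config.date_vocab_size, (2, 8))   # dummy encoded creation dates
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        outputs = model(input_ids=input_ids, input_date_ids=input_date_ids, attention_mask=attention_mask)
    print(outputs.logits.shape)  # torch.Size([2, 16, 5])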