DeepLearning101 committed
Commit 3169cc9
1 Parent(s): d131d1a

Upload kg.py

Files changed (1)
  1. models/kg.py +247 -0
models/kg.py ADDED
@@ -0,0 +1,247 @@
# -*- coding: utf-8 -*-
# @Time   : 2022/2/17 11:26 AM
# @Author : JianingWang
# @File   : kg.py
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from collections import OrderedDict
from transformers.models.bert import BertPreTrainedModel, BertModel
from transformers.models.bert.modeling_bert import BertOnlyMLMHead


class MLPLayer(nn.Module):
    """
    Head for getting sentence representations over RoBERTa/BERT's CLS representation.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = self.activation(x)
        return x


class Similarity(nn.Module):
    """
    Dot product or cosine similarity, scaled by a temperature.
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        return self.cos(x, y) / self.temp

class BertForPretrainWithKG(BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.cls = BertOnlyMLMHead(config)
        # One token-classification head per entity type
        self.classifiers = nn.ModuleList([nn.Linear(config.hidden_size, config.num_ner_labels) for _ in range(config.entity_type_num)])
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        ner_labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs.last_hidden_state
        # mlm head
        prediction_scores = self.cls(sequence_output)
        # ner heads: (batch, entity_type_num, seq_len, num_ner_labels)
        sequence_output = self.dropout(sequence_output)
        ner_logits = torch.stack([classifier(sequence_output) for classifier in self.classifiers]).movedim(1, 0)

        # losses
        masked_lm_loss, ner_loss, total_loss = None, None, None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if ner_labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active (non-padding) positions in the loss
            active_loss = attention_mask.repeat(self.config.entity_type_num, 1, 1).view(-1) == 1
            active_logits = ner_logits.reshape(-1, self.config.num_ner_labels)
            active_labels = torch.where(
                active_loss, ner_labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(ner_labels)
            )
            ner_loss = loss_fct(active_logits, active_labels)

        # Both labels and ner_labels are expected during pretraining; the outputs below assume both losses exist.
        if masked_lm_loss is not None and ner_loss is not None:
            total_loss = masked_lm_loss + ner_loss * 4

        return OrderedDict([
            ("loss", total_loss),
            ("mlm_loss", masked_lm_loss.unsqueeze(0)),
            ("ner_loss", ner_loss.unsqueeze(0)),
            ("logits", prediction_scores.argmax(2)),
            ("ner_logits", ner_logits.argmax(3)),
        ])
        # MaskedLMOutput(
        #     loss=total_loss,
        #     logits=prediction_scores.argmax(2),
        #     ner_l
        #     hidden_states=outputs.hidden_states,
        #     attentions=outputs.attentions,
        # )

class BertForPretrainWithKGV2(BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.cls = BertOnlyMLMHead(config)
        self.classifiers = nn.ModuleList([nn.Linear(config.hidden_size, config.num_ner_labels) for _ in range(config.entity_type_num)])
        self.mlp = MLPLayer(config)
        self.sim = Similarity(0.05)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        ner_labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs.last_hidden_state
        # mlm head
        prediction_scores = self.cls(sequence_output)
        # ner heads: (batch, entity_type_num, seq_len, num_ner_labels)
        sequence_output = self.dropout(sequence_output)
        ner_logits = torch.stack([classifier(sequence_output) for classifier in self.classifiers]).movedim(1, 0)

        # losses
        masked_lm_loss, ner_loss, total_loss = None, None, None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if ner_labels is not None:
            loss_fct = CrossEntropyLoss()
            active_logits = ner_logits.reshape(-1, self.config.num_ner_labels)
            # padding positions carry the label -100 (CrossEntropyLoss's ignore_index)
            ner_loss = loss_fct(active_logits, ner_labels.view(-1))

        if masked_lm_loss is not None:
            total_loss = masked_lm_loss

        if ner_loss is not None:
            total_loss = ner_loss if total_loss is None else total_loss + ner_loss

        # contrastive (SimCSE-style) loss on the CLS representation
        # cls_hidden = outputs.pooler_output
        cls_hidden = sequence_output[:, 0]
        simcse_loss = self.simcse_unsup_loss2(cls_hidden)
        if simcse_loss is not None:
            total_loss = simcse_loss * 10 if total_loss is None else total_loss + simcse_loss * 10

        ner_out = ner_logits.argmax(3)
        return OrderedDict([
            ("loss", total_loss),
            ("mlm_loss", masked_lm_loss.unsqueeze(0)),
            ("ner_loss", ner_loss.unsqueeze(0)),
            ("logits", prediction_scores.argmax(2)),
            ("ner_logits", ner_out.view(ner_out.shape[0], -1)),
            ("simcse_loss", simcse_loss.unsqueeze(0)),
        ])

    def simcse_unsup_loss2(self, pooler_output):
        # Assumes each example appears twice in a row in the batch (two dropout views of the same sentence)
        pooler_output = pooler_output.view((-1, 2, pooler_output.size(-1)))
        pooler_output = self.mlp(pooler_output)
        z1, z2 = pooler_output[:, 0], pooler_output[:, 1]
        cos_sim = self.sim(z1.unsqueeze(1), z2.unsqueeze(0))
        labels = torch.arange(cos_sim.size(0)).long().to(pooler_output.device)
        loss_fct = nn.CrossEntropyLoss()
        loss = loss_fct(cos_sim, labels)
        return loss

    @staticmethod
    def simcse_unsup_loss(y_pred: torch.Tensor) -> torch.Tensor:
        # Build the targets for y_pred: [1, 0, 3, 2, ..., batch_size-1, batch_size-2]
        y_true = torch.arange(y_pred.shape[0], device=y_pred.device)
        y_true = (y_true - y_true % 2 * 2) + 1
        # Pairwise similarities within the batch give the similarity matrix
        sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=-1)
        # sim = torch.mm(y_pred, y_pred.transpose(0, 1))
        # Push the diagonal to a very large negative value so a sample never matches itself
        sim = sim - torch.eye(y_pred.shape[0], device=y_pred.device) * 1e12
        # Divide the similarity matrix by the temperature
        sim = sim / 0.05
        # Cross-entropy between the similarity matrix and y_true
        loss = F.cross_entropy(sim, y_true)
        print(loss)
        return loss
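
Usage note: both model classes read two fields that a stock BertConfig does not define, num_ner_labels and entity_type_num, so they must be set on the config before instantiation. The sketch below is a minimal, hypothetical smoke test; the checkpoint name, the label counts, the import path models.kg, and the (batch, entity_type_num, seq_len) shape used for ner_labels are illustrative assumptions, not taken from this repository.

import torch
from transformers import BertConfig, BertTokenizerFast
from models.kg import BertForPretrainWithKG  # assumes the repo root is on PYTHONPATH

# Assumed values: 3 entity types with 5 NER tags each; any BERT checkpoint works.
config = BertConfig.from_pretrained("bert-base-uncased")
config.num_ner_labels = 5
config.entity_type_num = 3

model = BertForPretrainWithKG.from_pretrained("bert-base-uncased", config=config)
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

batch = tokenizer(["knowledge graph enhanced pretraining"], return_tensors="pt")
seq_len = batch["input_ids"].shape[1]
labels = batch["input_ids"].clone()  # unmasked MLM labels are enough for a smoke test
ner_labels = torch.zeros(1, config.entity_type_num, seq_len, dtype=torch.long)

out = model(**batch, labels=labels, ner_labels=ner_labels)
print(out["loss"], out["ner_logits"].shape)  # scalar loss, (1, 3, seq_len)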