CarlosMalaga commited on
Commit
1102db0
1 Parent(s): 1e2a3a3

Upload 18 files

Browse files
models/reader-exteded-small/added_tokens.json ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "--NME--": 128001,
3
+ "[E-0]": 128002,
4
+ "[E-10]": 128012,
5
+ "[E-11]": 128013,
6
+ "[E-12]": 128014,
7
+ "[E-13]": 128015,
8
+ "[E-14]": 128016,
9
+ "[E-15]": 128017,
10
+ "[E-16]": 128018,
11
+ "[E-17]": 128019,
12
+ "[E-18]": 128020,
13
+ "[E-19]": 128021,
14
+ "[E-1]": 128003,
15
+ "[E-20]": 128022,
16
+ "[E-21]": 128023,
17
+ "[E-22]": 128024,
18
+ "[E-23]": 128025,
19
+ "[E-24]": 128026,
20
+ "[E-25]": 128027,
21
+ "[E-26]": 128028,
22
+ "[E-27]": 128029,
23
+ "[E-28]": 128030,
24
+ "[E-29]": 128031,
25
+ "[E-2]": 128004,
26
+ "[E-30]": 128032,
27
+ "[E-31]": 128033,
28
+ "[E-32]": 128034,
29
+ "[E-33]": 128035,
30
+ "[E-34]": 128036,
31
+ "[E-35]": 128037,
32
+ "[E-36]": 128038,
33
+ "[E-37]": 128039,
34
+ "[E-38]": 128040,
35
+ "[E-39]": 128041,
36
+ "[E-3]": 128005,
37
+ "[E-40]": 128042,
38
+ "[E-41]": 128043,
39
+ "[E-42]": 128044,
40
+ "[E-43]": 128045,
41
+ "[E-44]": 128046,
42
+ "[E-45]": 128047,
43
+ "[E-46]": 128048,
44
+ "[E-47]": 128049,
45
+ "[E-48]": 128050,
46
+ "[E-49]": 128051,
47
+ "[E-4]": 128006,
48
+ "[E-50]": 128052,
49
+ "[E-51]": 128053,
50
+ "[E-52]": 128054,
51
+ "[E-53]": 128055,
52
+ "[E-54]": 128056,
53
+ "[E-55]": 128057,
54
+ "[E-56]": 128058,
55
+ "[E-57]": 128059,
56
+ "[E-58]": 128060,
57
+ "[E-59]": 128061,
58
+ "[E-5]": 128007,
59
+ "[E-60]": 128062,
60
+ "[E-61]": 128063,
61
+ "[E-62]": 128064,
62
+ "[E-63]": 128065,
63
+ "[E-64]": 128066,
64
+ "[E-65]": 128067,
65
+ "[E-66]": 128068,
66
+ "[E-67]": 128069,
67
+ "[E-68]": 128070,
68
+ "[E-69]": 128071,
69
+ "[E-6]": 128008,
70
+ "[E-70]": 128072,
71
+ "[E-71]": 128073,
72
+ "[E-72]": 128074,
73
+ "[E-73]": 128075,
74
+ "[E-74]": 128076,
75
+ "[E-75]": 128077,
76
+ "[E-76]": 128078,
77
+ "[E-77]": 128079,
78
+ "[E-78]": 128080,
79
+ "[E-79]": 128081,
80
+ "[E-7]": 128009,
81
+ "[E-80]": 128082,
82
+ "[E-81]": 128083,
83
+ "[E-82]": 128084,
84
+ "[E-83]": 128085,
85
+ "[E-84]": 128086,
86
+ "[E-85]": 128087,
87
+ "[E-86]": 128088,
88
+ "[E-87]": 128089,
89
+ "[E-88]": 128090,
90
+ "[E-89]": 128091,
91
+ "[E-8]": 128010,
92
+ "[E-90]": 128092,
93
+ "[E-91]": 128093,
94
+ "[E-92]": 128094,
95
+ "[E-93]": 128095,
96
+ "[E-94]": 128096,
97
+ "[E-95]": 128097,
98
+ "[E-96]": 128098,
99
+ "[E-97]": 128099,
100
+ "[E-98]": 128100,
101
+ "[E-99]": 128101,
102
+ "[E-9]": 128011,
103
+ "[MASK]": 128000
104
+ }
models/reader-exteded-small/config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/home/carlos/amr-parsing-master/sentence-similarity/relik-main/experiments/models/relik-small-best/17-47-48/wandb/run-20240430_174753-g4pz35kp/files/hf_model/hf_model",
3
+ "activation": "gelu",
4
+ "add_entity_embedding": null,
5
+ "additional_special_symbols": 101,
6
+ "additional_special_symbols_types": 0,
7
+ "architectures": [
8
+ "RelikReaderSpanModel"
9
+ ],
10
+ "auto_map": {
11
+ "AutoModel": "modeling_relik.RelikReaderSpanModel"
12
+ },
13
+ "default_reader_class": null,
14
+ "entity_type_loss": false,
15
+ "linears_hidden_size": 512,
16
+ "model_type": "relik-reader",
17
+ "num_layers": null,
18
+ "torch_dtype": "float32",
19
+ "training": true,
20
+ "transformer_model": "microsoft/deberta-v3-small",
21
+ "transformers_version": "4.33.3",
22
+ "use_last_k_layers": 1
23
+ }
models/reader-exteded-small/configuration_relik.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from transformers import AutoConfig
4
+ from transformers.configuration_utils import PretrainedConfig
5
+
6
+
7
+ class RelikReaderConfig(PretrainedConfig):
8
+ model_type = "relik-reader"
9
+
10
+ def __init__(
11
+ self,
12
+ transformer_model: str = "microsoft/deberta-v3-base",
13
+ additional_special_symbols: int = 101,
14
+ additional_special_symbols_types: Optional[int] = 0,
15
+ num_layers: Optional[int] = None,
16
+ activation: str = "gelu",
17
+ linears_hidden_size: Optional[int] = 512,
18
+ use_last_k_layers: int = 1,
19
+ entity_type_loss: bool = False,
20
+ add_entity_embedding: bool = None,
21
+ training: bool = False,
22
+ default_reader_class: Optional[str] = None,
23
+ **kwargs
24
+ ) -> None:
25
+ # TODO: add name_or_path to kwargs
26
+ self.transformer_model = transformer_model
27
+ self.additional_special_symbols = additional_special_symbols
28
+ self.additional_special_symbols_types = additional_special_symbols_types
29
+ self.num_layers = num_layers
30
+ self.activation = activation
31
+ self.linears_hidden_size = linears_hidden_size
32
+ self.use_last_k_layers = use_last_k_layers
33
+ self.entity_type_loss = entity_type_loss
34
+ self.add_entity_embedding = (
35
+ True
36
+ if add_entity_embedding is None and entity_type_loss
37
+ else add_entity_embedding
38
+ )
39
+ self.training = training
40
+ self.default_reader_class = default_reader_class
41
+ super().__init__(**kwargs)
42
+
43
+
44
+ AutoConfig.register("relik-reader", RelikReaderConfig)
models/reader-exteded-small/modeling_relik.py ADDED
@@ -0,0 +1,980 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional
2
+
3
+ import torch
4
+ from transformers import AutoModel, PreTrainedModel
5
+ from transformers.activations import ClippedGELUActivation, GELUActivation
6
+ from transformers.configuration_utils import PretrainedConfig
7
+ from transformers.modeling_utils import PoolerEndLogits
8
+
9
+ from .configuration_relik import RelikReaderConfig
10
+
11
+ torch.set_float32_matmul_precision('medium')
12
+
13
+ class RelikReaderSample:
14
+ def __init__(self, **kwargs):
15
+ super().__setattr__("_d", {})
16
+ self._d = kwargs
17
+
18
+ def __getattribute__(self, item):
19
+ return super(RelikReaderSample, self).__getattribute__(item)
20
+
21
+ def __getattr__(self, item):
22
+ if item.startswith("__") and item.endswith("__"):
23
+ # this is likely some python library-specific variable (such as __deepcopy__ for copy)
24
+ # better follow standard behavior here
25
+ raise AttributeError(item)
26
+ elif item in self._d:
27
+ return self._d[item]
28
+ else:
29
+ return None
30
+
31
+ def __setattr__(self, key, value):
32
+ if key in self._d:
33
+ self._d[key] = value
34
+ else:
35
+ super().__setattr__(key, value)
36
+
37
+
38
+ activation2functions = {
39
+ "relu": torch.nn.ReLU(),
40
+ "gelu": GELUActivation(),
41
+ "gelu_10": ClippedGELUActivation(-10, 10),
42
+ }
43
+
44
+
45
+ class PoolerEndLogitsBi(PoolerEndLogits):
46
+ def __init__(self, config: PretrainedConfig):
47
+ super().__init__(config)
48
+ self.dense_1 = torch.nn.Linear(config.hidden_size, 2)
49
+
50
+ def forward(
51
+ self,
52
+ hidden_states: torch.FloatTensor,
53
+ start_states: Optional[torch.FloatTensor] = None,
54
+ start_positions: Optional[torch.LongTensor] = None,
55
+ p_mask: Optional[torch.FloatTensor] = None,
56
+ ) -> torch.FloatTensor:
57
+ if p_mask is not None:
58
+ p_mask = p_mask.unsqueeze(-1)
59
+ logits = super().forward(
60
+ hidden_states,
61
+ start_states,
62
+ start_positions,
63
+ p_mask,
64
+ )
65
+ return logits
66
+
67
+
68
+ class RelikReaderSpanModel(PreTrainedModel):
69
+ config_class = RelikReaderConfig
70
+
71
+ def __init__(self, config: RelikReaderConfig, *args, **kwargs):
72
+ super().__init__(config)
73
+ # Transformer model declaration
74
+ self.config = config
75
+ self.transformer_model = (
76
+ AutoModel.from_pretrained(self.config.transformer_model)
77
+ if self.config.num_layers is None
78
+ else AutoModel.from_pretrained(
79
+ self.config.transformer_model, num_hidden_layers=self.config.num_layers
80
+ )
81
+ )
82
+ self.transformer_model.resize_token_embeddings(
83
+ self.transformer_model.config.vocab_size
84
+ + self.config.additional_special_symbols,
85
+ pad_to_multiple_of=8,
86
+ )
87
+
88
+ self.activation = self.config.activation
89
+ self.linears_hidden_size = self.config.linears_hidden_size
90
+ self.use_last_k_layers = self.config.use_last_k_layers
91
+
92
+ # named entity detection layers
93
+ self.ned_start_classifier = self._get_projection_layer(
94
+ self.activation, last_hidden=2, layer_norm=False
95
+ )
96
+ self.ned_end_classifier = PoolerEndLogits(self.transformer_model.config)
97
+
98
+ # END entity disambiguation layer
99
+ self.ed_start_projector = self._get_projection_layer(self.activation)
100
+ self.ed_end_projector = self._get_projection_layer(self.activation)
101
+
102
+ self.training = self.config.training
103
+
104
+ # criterion
105
+ self.criterion = torch.nn.CrossEntropyLoss()
106
+
107
+ def _get_projection_layer(
108
+ self,
109
+ activation: str,
110
+ last_hidden: Optional[int] = None,
111
+ input_hidden=None,
112
+ layer_norm: bool = True,
113
+ ) -> torch.nn.Sequential:
114
+ head_components = [
115
+ torch.nn.Dropout(0.1),
116
+ torch.nn.Linear(
117
+ self.transformer_model.config.hidden_size * self.use_last_k_layers
118
+ if input_hidden is None
119
+ else input_hidden,
120
+ self.linears_hidden_size,
121
+ ),
122
+ activation2functions[activation],
123
+ torch.nn.Dropout(0.1),
124
+ torch.nn.Linear(
125
+ self.linears_hidden_size,
126
+ self.linears_hidden_size if last_hidden is None else last_hidden,
127
+ ),
128
+ ]
129
+
130
+ if layer_norm:
131
+ head_components.append(
132
+ torch.nn.LayerNorm(
133
+ self.linears_hidden_size if last_hidden is None else last_hidden,
134
+ self.transformer_model.config.layer_norm_eps,
135
+ )
136
+ )
137
+
138
+ return torch.nn.Sequential(*head_components)
139
+
140
+ def _mask_logits(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
141
+ mask = mask.unsqueeze(-1)
142
+ if next(self.parameters()).dtype == torch.float16:
143
+ logits = logits * (1 - mask) - 65500 * mask
144
+ else:
145
+ logits = logits * (1 - mask) - 1e30 * mask
146
+ return logits
147
+
148
+ def _get_model_features(
149
+ self,
150
+ input_ids: torch.Tensor,
151
+ attention_mask: torch.Tensor,
152
+ token_type_ids: Optional[torch.Tensor],
153
+ ):
154
+ model_input = {
155
+ "input_ids": input_ids,
156
+ "attention_mask": attention_mask,
157
+ "output_hidden_states": self.use_last_k_layers > 1,
158
+ }
159
+
160
+ if token_type_ids is not None:
161
+ model_input["token_type_ids"] = token_type_ids
162
+
163
+ model_output = self.transformer_model(**model_input)
164
+
165
+ if self.use_last_k_layers > 1:
166
+ model_features = torch.cat(
167
+ model_output[1][-self.use_last_k_layers :], dim=-1
168
+ )
169
+ else:
170
+ model_features = model_output[0]
171
+
172
+ return model_features
173
+
174
+ def compute_ned_end_logits(
175
+ self,
176
+ start_predictions,
177
+ start_labels,
178
+ model_features,
179
+ prediction_mask,
180
+ batch_size,
181
+ ) -> Optional[torch.Tensor]:
182
+ # todo: maybe when constraining on the spans,
183
+ # we should not use a prediction_mask for the end tokens.
184
+ # at least we should not during training imo
185
+ start_positions = start_labels if self.training else start_predictions
186
+ start_positions_indices = (
187
+ torch.arange(start_positions.size(1), device=start_positions.device)
188
+ .unsqueeze(0)
189
+ .expand(batch_size, -1)[start_positions > 0]
190
+ ).to(start_positions.device)
191
+
192
+ if len(start_positions_indices) > 0:
193
+ expanded_features = model_features.repeat_interleave(
194
+ torch.sum(start_positions > 0, dim=-1), dim=0
195
+ )
196
+ expanded_prediction_mask = prediction_mask.repeat_interleave(
197
+ torch.sum(start_positions > 0, dim=-1), dim=0
198
+ )
199
+ end_logits = self.ned_end_classifier(
200
+ hidden_states=expanded_features,
201
+ start_positions=start_positions_indices,
202
+ p_mask=expanded_prediction_mask,
203
+ )
204
+
205
+ return end_logits
206
+
207
+ return None
208
+
209
+ def compute_classification_logits(
210
+ self,
211
+ model_features,
212
+ special_symbols_mask,
213
+ prediction_mask,
214
+ batch_size,
215
+ start_positions=None,
216
+ end_positions=None,
217
+ ) -> torch.Tensor:
218
+ if start_positions is None or end_positions is None:
219
+ start_positions = torch.zeros_like(prediction_mask)
220
+ end_positions = torch.zeros_like(prediction_mask)
221
+
222
+ model_start_features = self.ed_start_projector(model_features)
223
+ model_end_features = self.ed_end_projector(model_features)
224
+ model_end_features[start_positions > 0] = model_end_features[end_positions > 0]
225
+
226
+ model_ed_features = torch.cat(
227
+ [model_start_features, model_end_features], dim=-1
228
+ )
229
+
230
+ # computing ed features
231
+ classes_representations = torch.sum(special_symbols_mask, dim=1)[0].item()
232
+ special_symbols_representation = model_ed_features[special_symbols_mask].view(
233
+ batch_size, classes_representations, -1
234
+ )
235
+
236
+ logits = torch.bmm(
237
+ model_ed_features,
238
+ torch.permute(special_symbols_representation, (0, 2, 1)),
239
+ )
240
+
241
+ logits = self._mask_logits(logits, prediction_mask)
242
+
243
+ return logits
244
+
245
+ def forward(
246
+ self,
247
+ input_ids: torch.Tensor,
248
+ attention_mask: torch.Tensor,
249
+ token_type_ids: Optional[torch.Tensor] = None,
250
+ prediction_mask: Optional[torch.Tensor] = None,
251
+ special_symbols_mask: Optional[torch.Tensor] = None,
252
+ start_labels: Optional[torch.Tensor] = None,
253
+ end_labels: Optional[torch.Tensor] = None,
254
+ use_predefined_spans: bool = False,
255
+ *args,
256
+ **kwargs,
257
+ ) -> Dict[str, Any]:
258
+ batch_size, seq_len = input_ids.shape
259
+
260
+ model_features = self._get_model_features(
261
+ input_ids, attention_mask, token_type_ids
262
+ )
263
+
264
+ ned_start_labels = None
265
+
266
+ # named entity detection if required
267
+ if use_predefined_spans: # no need to compute spans
268
+ ned_start_logits, ned_start_probabilities, ned_start_predictions = (
269
+ None,
270
+ None,
271
+ torch.clone(start_labels)
272
+ if start_labels is not None
273
+ else torch.zeros_like(input_ids),
274
+ )
275
+ ned_end_logits, ned_end_probabilities, ned_end_predictions = (
276
+ None,
277
+ None,
278
+ torch.clone(end_labels)
279
+ if end_labels is not None
280
+ else torch.zeros_like(input_ids),
281
+ )
282
+
283
+ ned_start_predictions[ned_start_predictions > 0] = 1
284
+ ned_end_predictions[ned_end_predictions > 0] = 1
285
+
286
+ else: # compute spans
287
+ # start boundary prediction
288
+ ned_start_logits = self.ned_start_classifier(model_features)
289
+ ned_start_logits = self._mask_logits(ned_start_logits, prediction_mask)
290
+ ned_start_probabilities = torch.softmax(ned_start_logits, dim=-1)
291
+ ned_start_predictions = ned_start_probabilities.argmax(dim=-1)
292
+
293
+ # end boundary prediction
294
+ ned_start_labels = (
295
+ torch.zeros_like(start_labels) if start_labels is not None else None
296
+ )
297
+
298
+ if ned_start_labels is not None:
299
+ ned_start_labels[start_labels == -100] = -100
300
+ ned_start_labels[start_labels > 0] = 1
301
+
302
+ ned_end_logits = self.compute_ned_end_logits(
303
+ ned_start_predictions,
304
+ ned_start_labels,
305
+ model_features,
306
+ prediction_mask,
307
+ batch_size,
308
+ )
309
+
310
+ if ned_end_logits is not None:
311
+ ned_end_probabilities = torch.softmax(ned_end_logits, dim=-1)
312
+ ned_end_predictions = torch.argmax(ned_end_probabilities, dim=-1)
313
+ else:
314
+ ned_end_logits, ned_end_probabilities = None, None
315
+ ned_end_predictions = ned_start_predictions.new_zeros(batch_size)
316
+
317
+ # flattening end predictions
318
+ # (flattening can happen only if the
319
+ # end boundaries were not predicted using the gold labels)
320
+ if not self.training and ned_end_logits is not None:
321
+ flattened_end_predictions = torch.zeros_like(ned_start_predictions)
322
+
323
+ row_indices, start_positions = torch.where(ned_start_predictions > 0)
324
+ ned_end_predictions[ned_end_predictions<start_positions] = start_positions[ned_end_predictions<start_positions]
325
+
326
+ end_spans_repeated = (row_indices + 1)* seq_len + ned_end_predictions
327
+ cummax_values, _ = end_spans_repeated.cummax(dim=0)
328
+
329
+ end_spans_repeated = (end_spans_repeated > torch.cat((end_spans_repeated[:1], cummax_values[:-1])))
330
+ end_spans_repeated[0] = True
331
+
332
+ ned_start_predictions[row_indices[~end_spans_repeated], start_positions[~end_spans_repeated]] = 0
333
+
334
+ row_indices, start_positions, ned_end_predictions = row_indices[end_spans_repeated], start_positions[end_spans_repeated], ned_end_predictions[end_spans_repeated]
335
+
336
+ flattened_end_predictions[row_indices, ned_end_predictions] = 1
337
+
338
+ total_start_predictions, total_end_predictions = ned_start_predictions.sum(), flattened_end_predictions.sum()
339
+
340
+ assert (
341
+ total_start_predictions == 0
342
+ or total_start_predictions == total_end_predictions
343
+ ), (
344
+ f"Total number of start predictions = {total_start_predictions}. "
345
+ f"Total number of end predictions = {total_end_predictions}"
346
+ )
347
+ ned_end_predictions = flattened_end_predictions
348
+ else:
349
+ ned_end_predictions = torch.zeros_like(ned_start_predictions)
350
+
351
+ start_position, end_position = (
352
+ (start_labels, end_labels)
353
+ if self.training
354
+ else (ned_start_predictions, ned_end_predictions)
355
+ )
356
+
357
+ # Entity disambiguation
358
+ ed_logits = self.compute_classification_logits(
359
+ model_features,
360
+ special_symbols_mask,
361
+ prediction_mask,
362
+ batch_size,
363
+ start_position,
364
+ end_position,
365
+ )
366
+ ed_probabilities = torch.softmax(ed_logits, dim=-1)
367
+ ed_predictions = torch.argmax(ed_probabilities, dim=-1)
368
+
369
+ # output build
370
+ output_dict = dict(
371
+ batch_size=batch_size,
372
+ ned_start_logits=ned_start_logits,
373
+ ned_start_probabilities=ned_start_probabilities,
374
+ ned_start_predictions=ned_start_predictions,
375
+ ned_end_logits=ned_end_logits,
376
+ ned_end_probabilities=ned_end_probabilities,
377
+ ned_end_predictions=ned_end_predictions,
378
+ ed_logits=ed_logits,
379
+ ed_probabilities=ed_probabilities,
380
+ ed_predictions=ed_predictions,
381
+ )
382
+
383
+ # compute loss if labels
384
+ if start_labels is not None and end_labels is not None and self.training:
385
+ # named entity detection loss
386
+
387
+ # start
388
+ if ned_start_logits is not None:
389
+ ned_start_loss = self.criterion(
390
+ ned_start_logits.view(-1, ned_start_logits.shape[-1]),
391
+ ned_start_labels.view(-1),
392
+ )
393
+ else:
394
+ ned_start_loss = 0
395
+
396
+ # end
397
+ if ned_end_logits is not None:
398
+ ned_end_labels = torch.zeros_like(end_labels)
399
+ ned_end_labels[end_labels == -100] = -100
400
+ ned_end_labels[end_labels > 0] = 1
401
+
402
+ ned_end_loss = self.criterion(
403
+ ned_end_logits,
404
+ (
405
+ torch.arange(
406
+ ned_end_labels.size(1), device=ned_end_labels.device
407
+ )
408
+ .unsqueeze(0)
409
+ .expand(batch_size, -1)[ned_end_labels > 0]
410
+ ).to(ned_end_labels.device),
411
+ )
412
+
413
+ else:
414
+ ned_end_loss = 0
415
+
416
+ # entity disambiguation loss
417
+ start_labels[ned_start_labels != 1] = -100
418
+ ed_labels = torch.clone(start_labels)
419
+ ed_labels[end_labels > 0] = end_labels[end_labels > 0]
420
+ ed_loss = self.criterion(
421
+ ed_logits.view(-1, ed_logits.shape[-1]),
422
+ ed_labels.view(-1),
423
+ )
424
+
425
+ output_dict["ned_start_loss"] = ned_start_loss
426
+ output_dict["ned_end_loss"] = ned_end_loss
427
+ output_dict["ed_loss"] = ed_loss
428
+
429
+ output_dict["loss"] = ned_start_loss + ned_end_loss + ed_loss
430
+
431
+ return output_dict
432
+
433
+
434
+ class RelikReaderREModel(PreTrainedModel):
435
+ config_class = RelikReaderConfig
436
+
437
+ def __init__(self, config, *args, **kwargs):
438
+ super().__init__(config)
439
+ # Transformer model declaration
440
+ # self.transformer_model_name = transformer_model
441
+ self.config = config
442
+ self.transformer_model = (
443
+ AutoModel.from_pretrained(config.transformer_model)
444
+ if config.num_layers is None
445
+ else AutoModel.from_pretrained(
446
+ config.transformer_model, num_hidden_layers=config.num_layers
447
+ )
448
+ )
449
+ self.transformer_model.resize_token_embeddings(
450
+ self.transformer_model.config.vocab_size
451
+ + config.additional_special_symbols
452
+ + config.additional_special_symbols_types,
453
+ pad_to_multiple_of=8,
454
+ )
455
+
456
+ # named entity detection layers
457
+ self.ned_start_classifier = self._get_projection_layer(
458
+ config.activation, last_hidden=2, layer_norm=False
459
+ )
460
+
461
+ self.ned_end_classifier = PoolerEndLogitsBi(self.transformer_model.config)
462
+
463
+ self.relation_disambiguation_loss = (
464
+ config.relation_disambiguation_loss
465
+ if hasattr(config, "relation_disambiguation_loss")
466
+ else False
467
+ )
468
+
469
+ if self.config.entity_type_loss and self.config.add_entity_embedding:
470
+ input_hidden_ents = 3 * self.transformer_model.config.hidden_size
471
+ else:
472
+ input_hidden_ents = 2 * self.transformer_model.config.hidden_size
473
+
474
+ self.re_subject_projector = self._get_projection_layer(
475
+ config.activation, input_hidden=input_hidden_ents
476
+ )
477
+ self.re_object_projector = self._get_projection_layer(
478
+ config.activation, input_hidden=input_hidden_ents
479
+ )
480
+ self.re_relation_projector = self._get_projection_layer(config.activation)
481
+
482
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
483
+ self.re_entities_projector = self._get_projection_layer(
484
+ config.activation,
485
+ input_hidden=2 * self.transformer_model.config.hidden_size,
486
+ )
487
+ self.re_definition_projector = self._get_projection_layer(
488
+ config.activation,
489
+ )
490
+
491
+ self.re_classifier = self._get_projection_layer(
492
+ config.activation,
493
+ input_hidden=config.linears_hidden_size,
494
+ last_hidden=2,
495
+ layer_norm=False,
496
+ )
497
+
498
+ self.training = config.training
499
+
500
+ # criterion
501
+ self.criterion = torch.nn.CrossEntropyLoss()
502
+ self.criterion_type = torch.nn.BCEWithLogitsLoss()
503
+
504
+ def _get_projection_layer(
505
+ self,
506
+ activation: str,
507
+ last_hidden: Optional[int] = None,
508
+ input_hidden=None,
509
+ layer_norm: bool = True,
510
+ ) -> torch.nn.Sequential:
511
+ head_components = [
512
+ torch.nn.Dropout(0.1),
513
+ torch.nn.Linear(
514
+ self.transformer_model.config.hidden_size
515
+ * self.config.use_last_k_layers
516
+ if input_hidden is None
517
+ else input_hidden,
518
+ self.config.linears_hidden_size,
519
+ ),
520
+ activation2functions[activation],
521
+ torch.nn.Dropout(0.1),
522
+ torch.nn.Linear(
523
+ self.config.linears_hidden_size,
524
+ self.config.linears_hidden_size if last_hidden is None else last_hidden,
525
+ ),
526
+ ]
527
+
528
+ if layer_norm:
529
+ head_components.append(
530
+ torch.nn.LayerNorm(
531
+ self.config.linears_hidden_size
532
+ if last_hidden is None
533
+ else last_hidden,
534
+ self.transformer_model.config.layer_norm_eps,
535
+ )
536
+ )
537
+
538
+ return torch.nn.Sequential(*head_components)
539
+
540
+ def _mask_logits(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
541
+ mask = mask.unsqueeze(-1)
542
+ if next(self.parameters()).dtype == torch.float16:
543
+ logits = logits * (1 - mask) - 65500 * mask
544
+ else:
545
+ logits = logits * (1 - mask) - 1e30 * mask
546
+ return logits
547
+
548
+ def _get_model_features(
549
+ self,
550
+ input_ids: torch.Tensor,
551
+ attention_mask: torch.Tensor,
552
+ token_type_ids: Optional[torch.Tensor],
553
+ ):
554
+ model_input = {
555
+ "input_ids": input_ids,
556
+ "attention_mask": attention_mask,
557
+ "output_hidden_states": self.config.use_last_k_layers > 1,
558
+ }
559
+
560
+ if token_type_ids is not None:
561
+ model_input["token_type_ids"] = token_type_ids
562
+
563
+ model_output = self.transformer_model(**model_input)
564
+
565
+ if self.config.use_last_k_layers > 1:
566
+ model_features = torch.cat(
567
+ model_output[1][-self.config.use_last_k_layers :], dim=-1
568
+ )
569
+ else:
570
+ model_features = model_output[0]
571
+
572
+ return model_features
573
+
574
+ def compute_ned_end_logits(
575
+ self,
576
+ start_predictions,
577
+ start_labels,
578
+ model_features,
579
+ prediction_mask,
580
+ batch_size,
581
+ mask_preceding: bool = False,
582
+ ) -> Optional[torch.Tensor]:
583
+ # todo: maybe when constraining on the spans,
584
+ # we should not use a prediction_mask for the end tokens.
585
+ # at least we should not during training imo
586
+ start_positions = start_labels if self.training else start_predictions
587
+ start_positions_indices = (
588
+ torch.arange(start_positions.size(1), device=start_positions.device)
589
+ .unsqueeze(0)
590
+ .expand(batch_size, -1)[start_positions > 0]
591
+ ).to(start_positions.device)
592
+
593
+ if len(start_positions_indices) > 0:
594
+ expanded_features = model_features.repeat_interleave(
595
+ torch.sum(start_positions > 0, dim=-1), dim=0
596
+ )
597
+ expanded_prediction_mask = prediction_mask.repeat_interleave(
598
+ torch.sum(start_positions > 0, dim=-1), dim=0
599
+ )
600
+ if mask_preceding:
601
+ expanded_prediction_mask[
602
+ torch.arange(
603
+ expanded_prediction_mask.shape[1],
604
+ device=expanded_prediction_mask.device,
605
+ )
606
+ < start_positions_indices.unsqueeze(1)
607
+ ] = 1
608
+ end_logits = self.ned_end_classifier(
609
+ hidden_states=expanded_features,
610
+ start_positions=start_positions_indices,
611
+ p_mask=expanded_prediction_mask,
612
+ )
613
+
614
+ return end_logits
615
+
616
+ return None
617
+
618
+ def compute_relation_logits(
619
+ self,
620
+ model_entity_features,
621
+ special_symbols_features,
622
+ ) -> torch.Tensor:
623
+ model_subject_features = self.re_subject_projector(model_entity_features)
624
+ model_object_features = self.re_object_projector(model_entity_features)
625
+ special_symbols_start_representation = self.re_relation_projector(
626
+ special_symbols_features
627
+ )
628
+ re_logits = torch.einsum(
629
+ "bse,bde,bfe->bsdfe",
630
+ model_subject_features,
631
+ model_object_features,
632
+ special_symbols_start_representation,
633
+ )
634
+ re_logits = self.re_classifier(re_logits)
635
+
636
+ return re_logits
637
+
638
+ def compute_entity_logits(
639
+ self,
640
+ model_entity_features,
641
+ special_symbols_features,
642
+ ) -> torch.Tensor:
643
+ model_ed_features = self.re_entities_projector(model_entity_features)
644
+ special_symbols_ed_representation = self.re_definition_projector(
645
+ special_symbols_features
646
+ )
647
+
648
+ logits = torch.bmm(
649
+ model_ed_features,
650
+ torch.permute(special_symbols_ed_representation, (0, 2, 1)),
651
+ )
652
+ logits = self._mask_logits(
653
+ logits, (model_entity_features == -100).all(2).long()
654
+ )
655
+ return logits
656
+
657
+ def compute_loss(self, logits, labels, mask=None):
658
+ logits = logits.reshape(-1, logits.shape[-1])
659
+ labels = labels.reshape(-1).long()
660
+ if mask is not None:
661
+ return self.criterion(logits[mask], labels[mask])
662
+ return self.criterion(logits, labels)
663
+
664
+ def compute_ned_type_loss(
665
+ self,
666
+ disambiguation_labels,
667
+ re_ned_entities_logits,
668
+ ned_type_logits,
669
+ re_entities_logits,
670
+ entity_types,
671
+ mask,
672
+ ):
673
+ if self.config.entity_type_loss and self.relation_disambiguation_loss:
674
+ return self.criterion_type(
675
+ re_ned_entities_logits[disambiguation_labels != -100],
676
+ disambiguation_labels[disambiguation_labels != -100],
677
+ )
678
+ if self.config.entity_type_loss:
679
+ return self.criterion_type(
680
+ ned_type_logits[mask],
681
+ disambiguation_labels[:, :, :entity_types][mask],
682
+ )
683
+
684
+ if self.relation_disambiguation_loss:
685
+ return self.criterion_type(
686
+ re_entities_logits[disambiguation_labels != -100],
687
+ disambiguation_labels[disambiguation_labels != -100],
688
+ )
689
+ return 0
690
+
691
+ def compute_relation_loss(self, relation_labels, re_logits):
692
+ return self.compute_loss(
693
+ re_logits, relation_labels, relation_labels.view(-1) != -100
694
+ )
695
+
696
+ def forward(
697
+ self,
698
+ input_ids: torch.Tensor,
699
+ attention_mask: torch.Tensor,
700
+ token_type_ids: torch.Tensor,
701
+ prediction_mask: Optional[torch.Tensor] = None,
702
+ special_symbols_mask: Optional[torch.Tensor] = None,
703
+ special_symbols_mask_entities: Optional[torch.Tensor] = None,
704
+ start_labels: Optional[torch.Tensor] = None,
705
+ end_labels: Optional[torch.Tensor] = None,
706
+ disambiguation_labels: Optional[torch.Tensor] = None,
707
+ relation_labels: Optional[torch.Tensor] = None,
708
+ relation_threshold: float = 0.5,
709
+ is_validation: bool = False,
710
+ is_prediction: bool = False,
711
+ use_predefined_spans: bool = False,
712
+ *args,
713
+ **kwargs,
714
+ ) -> Dict[str, Any]:
715
+ batch_size = input_ids.shape[0]
716
+
717
+ model_features = self._get_model_features(
718
+ input_ids, attention_mask, token_type_ids
719
+ )
720
+
721
+ # named entity detection
722
+ if use_predefined_spans:
723
+ ned_start_logits, ned_start_probabilities, ned_start_predictions = (
724
+ None,
725
+ None,
726
+ torch.zeros_like(start_labels),
727
+ )
728
+ ned_end_logits, ned_end_probabilities, ned_end_predictions = (
729
+ None,
730
+ None,
731
+ torch.zeros_like(end_labels),
732
+ )
733
+
734
+ ned_start_predictions[start_labels > 0] = 1
735
+ ned_end_predictions[end_labels > 0] = 1
736
+ ned_end_predictions = ned_end_predictions[~(end_labels == -100).all(2)]
737
+ ned_start_labels = start_labels
738
+ ned_start_labels[start_labels > 0] = 1
739
+ else:
740
+ # start boundary prediction
741
+ ned_start_logits = self.ned_start_classifier(model_features)
742
+ if is_validation or is_prediction:
743
+ ned_start_logits = self._mask_logits(
744
+ ned_start_logits, prediction_mask
745
+ ) # why?
746
+ ned_start_probabilities = torch.softmax(ned_start_logits, dim=-1)
747
+ ned_start_predictions = ned_start_probabilities.argmax(dim=-1)
748
+
749
+ # end boundary prediction
750
+ ned_start_labels = (
751
+ torch.zeros_like(start_labels) if start_labels is not None else None
752
+ )
753
+
754
+ # start_labels contain entity id at their position, we just need 1 for start of entity
755
+ if ned_start_labels is not None:
756
+ ned_start_labels[start_labels == -100] = -100
757
+ ned_start_labels[start_labels > 0] = 1
758
+
759
+ # compute end logits only if there are any start predictions.
760
+ # For each start prediction, n end predictions are made
761
+ ned_end_logits = self.compute_ned_end_logits(
762
+ ned_start_predictions,
763
+ ned_start_labels,
764
+ model_features,
765
+ prediction_mask,
766
+ batch_size,
767
+ True,
768
+ )
769
+
770
+ if ned_end_logits is not None:
771
+ # For each start prediction, n end predictions are made based on
772
+ # binary classification ie. argmax at each position.
773
+ ned_end_probabilities = torch.softmax(ned_end_logits, dim=-1)
774
+ ned_end_predictions = ned_end_probabilities.argmax(dim=-1)
775
+ else:
776
+ ned_end_logits, ned_end_probabilities = None, None
777
+ ned_end_predictions = torch.zeros_like(ned_start_predictions)
778
+
779
+ if is_prediction or is_validation:
780
+ end_preds_count = ned_end_predictions.sum(1)
781
+ # If there are no end predictions for a start prediction, remove the start prediction
782
+ if (end_preds_count == 0).any() and (ned_start_predictions > 0).any():
783
+ ned_start_predictions[ned_start_predictions == 1] = (
784
+ end_preds_count != 0
785
+ ).long()
786
+ ned_end_predictions = ned_end_predictions[end_preds_count != 0]
787
+
788
+ if end_labels is not None:
789
+ end_labels = end_labels[~(end_labels == -100).all(2)]
790
+
791
+ start_position, end_position = (
792
+ (start_labels, end_labels)
793
+ if (not is_prediction and not is_validation)
794
+ else (ned_start_predictions, ned_end_predictions)
795
+ )
796
+
797
+ start_counts = (start_position > 0).sum(1)
798
+ if (start_counts > 0).any():
799
+ ned_end_predictions = ned_end_predictions.split(start_counts.tolist())
800
+ # limit to 30 predictions per document using start_counts, by setting all po after sum is 30 to 0
801
+ # if is_validation or is_prediction:
802
+ # ned_start_predictions[ned_start_predictions == 1] = start_counts
803
+ # We can only predict relations if we have start and end predictions
804
+ if (end_position > 0).sum() > 0:
805
+ ends_count = (end_position > 0).sum(1)
806
+ model_subject_features = torch.cat(
807
+ [
808
+ torch.repeat_interleave(
809
+ model_features[start_position > 0], ends_count, dim=0
810
+ ), # start position features
811
+ torch.repeat_interleave(model_features, start_counts, dim=0)[
812
+ end_position > 0
813
+ ], # end position features
814
+ ],
815
+ dim=-1,
816
+ )
817
+ ents_count = torch.nn.utils.rnn.pad_sequence(
818
+ torch.split(ends_count, start_counts.tolist()),
819
+ batch_first=True,
820
+ padding_value=0,
821
+ ).sum(1)
822
+ model_subject_features = torch.nn.utils.rnn.pad_sequence(
823
+ torch.split(model_subject_features, ents_count.tolist()),
824
+ batch_first=True,
825
+ padding_value=-100,
826
+ )
827
+
828
+ # if is_validation or is_prediction:
829
+ # model_subject_features = model_subject_features[:, :30, :]
830
+
831
+ # entity disambiguation. Here relation_disambiguation_loss would only be useful to
832
+ # reduce the number of candidate relations for the next step, but currently unused.
833
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
834
+ (re_ned_entities_logits) = self.compute_entity_logits(
835
+ model_subject_features,
836
+ model_features[
837
+ special_symbols_mask | special_symbols_mask_entities
838
+ ].view(batch_size, -1, model_features.shape[-1]),
839
+ )
840
+ entity_types = torch.sum(special_symbols_mask_entities, dim=1)[0].item()
841
+ ned_type_logits = re_ned_entities_logits[:, :, :entity_types]
842
+ re_entities_logits = re_ned_entities_logits[:, :, entity_types:]
843
+
844
+ if self.config.entity_type_loss:
845
+ ned_type_probabilities = torch.sigmoid(ned_type_logits)
846
+ ned_type_predictions = ned_type_probabilities.argmax(dim=-1)
847
+
848
+ if self.config.add_entity_embedding:
849
+ special_symbols_representation = model_features[
850
+ special_symbols_mask_entities
851
+ ].view(batch_size, entity_types, -1)
852
+
853
+ entities_representation = torch.einsum(
854
+ "bsp,bpe->bse",
855
+ ned_type_probabilities,
856
+ special_symbols_representation,
857
+ )
858
+ model_subject_features = torch.cat(
859
+ [model_subject_features, entities_representation], dim=-1
860
+ )
861
+ re_entities_probabilities = torch.sigmoid(re_entities_logits)
862
+ re_entities_predictions = re_entities_probabilities.round()
863
+ else:
864
+ (
865
+ ned_type_logits,
866
+ ned_type_probabilities,
867
+ re_entities_logits,
868
+ re_entities_probabilities,
869
+ ) = (None, None, None, None)
870
+ ned_type_predictions, re_entities_predictions = (
871
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
872
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
873
+ )
874
+
875
+ # Compute relation logits
876
+ re_logits = self.compute_relation_logits(
877
+ model_subject_features,
878
+ model_features[special_symbols_mask].view(
879
+ batch_size, -1, model_features.shape[-1]
880
+ ),
881
+ )
882
+
883
+ re_probabilities = torch.softmax(re_logits, dim=-1)
884
+ # we set a thresshold instead of argmax in cause it needs to be tweaked
885
+ re_predictions = re_probabilities[:, :, :, :, 1] > relation_threshold
886
+ # re_predictions = re_probabilities.argmax(dim=-1)
887
+ re_probabilities = re_probabilities[:, :, :, :, 1]
888
+ # re_logits, re_probabilities, re_predictions = (
889
+ # torch.zeros(
890
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
891
+ # ).to(input_ids.device),
892
+ # torch.zeros(
893
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
894
+ # ).to(input_ids.device),
895
+ # torch.zeros(
896
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
897
+ # ).to(input_ids.device),
898
+ # )
899
+
900
+ else:
901
+ (
902
+ ned_type_logits,
903
+ ned_type_probabilities,
904
+ re_entities_logits,
905
+ re_entities_probabilities,
906
+ ) = (None, None, None, None)
907
+ ned_type_predictions, re_entities_predictions = (
908
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
909
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
910
+ )
911
+ re_logits, re_probabilities, re_predictions = (
912
+ torch.zeros(
913
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
914
+ ).to(input_ids.device),
915
+ torch.zeros(
916
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
917
+ ).to(input_ids.device),
918
+ torch.zeros(
919
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
920
+ ).to(input_ids.device),
921
+ )
922
+
923
+ # output build
924
+ output_dict = dict(
925
+ batch_size=batch_size,
926
+ ned_start_logits=ned_start_logits,
927
+ ned_start_probabilities=ned_start_probabilities,
928
+ ned_start_predictions=ned_start_predictions,
929
+ ned_end_logits=ned_end_logits,
930
+ ned_end_probabilities=ned_end_probabilities,
931
+ ned_end_predictions=ned_end_predictions,
932
+ ned_type_logits=ned_type_logits,
933
+ ned_type_probabilities=ned_type_probabilities,
934
+ ned_type_predictions=ned_type_predictions,
935
+ re_entities_logits=re_entities_logits,
936
+ re_entities_probabilities=re_entities_probabilities,
937
+ re_entities_predictions=re_entities_predictions,
938
+ re_logits=re_logits,
939
+ re_probabilities=re_probabilities,
940
+ re_predictions=re_predictions,
941
+ )
942
+
943
+ if (
944
+ start_labels is not None
945
+ and end_labels is not None
946
+ and relation_labels is not None
947
+ and is_prediction is False
948
+ ):
949
+ ned_start_loss = self.compute_loss(ned_start_logits, ned_start_labels)
950
+ end_labels[end_labels > 0] = 1
951
+ ned_end_loss = self.compute_loss(ned_end_logits, end_labels)
952
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
953
+ ned_type_loss = self.compute_ned_type_loss(
954
+ disambiguation_labels,
955
+ re_ned_entities_logits,
956
+ ned_type_logits,
957
+ re_entities_logits,
958
+ entity_types,
959
+ (model_subject_features != -100).all(2),
960
+ )
961
+ relation_loss = self.compute_relation_loss(relation_labels, re_logits)
962
+ # compute loss. We can skip the relation loss if we are in the first epochs (optional)
963
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
964
+ output_dict["loss"] = (
965
+ ned_start_loss + ned_end_loss + relation_loss + ned_type_loss
966
+ ) / 4
967
+ output_dict["ned_type_loss"] = ned_type_loss
968
+ else:
969
+ # output_dict["loss"] = ((1 / 4) * (ned_start_loss + ned_end_loss)) + (
970
+ # (1 / 2) * relation_loss
971
+ # )
972
+ output_dict["loss"] = ((1 / 16) * (ned_start_loss + ned_end_loss)) + (
973
+ (7 / 8) * relation_loss
974
+ )
975
+
976
+ output_dict["ned_start_loss"] = ned_start_loss
977
+ output_dict["ned_end_loss"] = ned_end_loss
978
+ output_dict["re_loss"] = relation_loss
979
+
980
+ return output_dict
models/reader-exteded-small/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:245f8768c1b1c43fb10a7a48fd8dfaa6595ff141858d9d38305e48af1df99549
3
+ size 577161722
models/reader-exteded-small/special_tokens_map.json ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "--NME--",
4
+ "[E-0]",
5
+ "[E-1]",
6
+ "[E-2]",
7
+ "[E-3]",
8
+ "[E-4]",
9
+ "[E-5]",
10
+ "[E-6]",
11
+ "[E-7]",
12
+ "[E-8]",
13
+ "[E-9]",
14
+ "[E-10]",
15
+ "[E-11]",
16
+ "[E-12]",
17
+ "[E-13]",
18
+ "[E-14]",
19
+ "[E-15]",
20
+ "[E-16]",
21
+ "[E-17]",
22
+ "[E-18]",
23
+ "[E-19]",
24
+ "[E-20]",
25
+ "[E-21]",
26
+ "[E-22]",
27
+ "[E-23]",
28
+ "[E-24]",
29
+ "[E-25]",
30
+ "[E-26]",
31
+ "[E-27]",
32
+ "[E-28]",
33
+ "[E-29]",
34
+ "[E-30]",
35
+ "[E-31]",
36
+ "[E-32]",
37
+ "[E-33]",
38
+ "[E-34]",
39
+ "[E-35]",
40
+ "[E-36]",
41
+ "[E-37]",
42
+ "[E-38]",
43
+ "[E-39]",
44
+ "[E-40]",
45
+ "[E-41]",
46
+ "[E-42]",
47
+ "[E-43]",
48
+ "[E-44]",
49
+ "[E-45]",
50
+ "[E-46]",
51
+ "[E-47]",
52
+ "[E-48]",
53
+ "[E-49]",
54
+ "[E-50]",
55
+ "[E-51]",
56
+ "[E-52]",
57
+ "[E-53]",
58
+ "[E-54]",
59
+ "[E-55]",
60
+ "[E-56]",
61
+ "[E-57]",
62
+ "[E-58]",
63
+ "[E-59]",
64
+ "[E-60]",
65
+ "[E-61]",
66
+ "[E-62]",
67
+ "[E-63]",
68
+ "[E-64]",
69
+ "[E-65]",
70
+ "[E-66]",
71
+ "[E-67]",
72
+ "[E-68]",
73
+ "[E-69]",
74
+ "[E-70]",
75
+ "[E-71]",
76
+ "[E-72]",
77
+ "[E-73]",
78
+ "[E-74]",
79
+ "[E-75]",
80
+ "[E-76]",
81
+ "[E-77]",
82
+ "[E-78]",
83
+ "[E-79]",
84
+ "[E-80]",
85
+ "[E-81]",
86
+ "[E-82]",
87
+ "[E-83]",
88
+ "[E-84]",
89
+ "[E-85]",
90
+ "[E-86]",
91
+ "[E-87]",
92
+ "[E-88]",
93
+ "[E-89]",
94
+ "[E-90]",
95
+ "[E-91]",
96
+ "[E-92]",
97
+ "[E-93]",
98
+ "[E-94]",
99
+ "[E-95]",
100
+ "[E-96]",
101
+ "[E-97]",
102
+ "[E-98]",
103
+ "[E-99]"
104
+ ],
105
+ "bos_token": "[CLS]",
106
+ "cls_token": "[CLS]",
107
+ "eos_token": "[SEP]",
108
+ "mask_token": "[MASK]",
109
+ "pad_token": "[PAD]",
110
+ "sep_token": "[SEP]",
111
+ "unk_token": "[UNK]"
112
+ }
models/reader-exteded-small/spm.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
models/reader-exteded-small/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/reader-exteded-small/tokenizer_config.json ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": true,
3
+ "additional_special_tokens": [
4
+ "--NME--",
5
+ "[E-0]",
6
+ "[E-1]",
7
+ "[E-2]",
8
+ "[E-3]",
9
+ "[E-4]",
10
+ "[E-5]",
11
+ "[E-6]",
12
+ "[E-7]",
13
+ "[E-8]",
14
+ "[E-9]",
15
+ "[E-10]",
16
+ "[E-11]",
17
+ "[E-12]",
18
+ "[E-13]",
19
+ "[E-14]",
20
+ "[E-15]",
21
+ "[E-16]",
22
+ "[E-17]",
23
+ "[E-18]",
24
+ "[E-19]",
25
+ "[E-20]",
26
+ "[E-21]",
27
+ "[E-22]",
28
+ "[E-23]",
29
+ "[E-24]",
30
+ "[E-25]",
31
+ "[E-26]",
32
+ "[E-27]",
33
+ "[E-28]",
34
+ "[E-29]",
35
+ "[E-30]",
36
+ "[E-31]",
37
+ "[E-32]",
38
+ "[E-33]",
39
+ "[E-34]",
40
+ "[E-35]",
41
+ "[E-36]",
42
+ "[E-37]",
43
+ "[E-38]",
44
+ "[E-39]",
45
+ "[E-40]",
46
+ "[E-41]",
47
+ "[E-42]",
48
+ "[E-43]",
49
+ "[E-44]",
50
+ "[E-45]",
51
+ "[E-46]",
52
+ "[E-47]",
53
+ "[E-48]",
54
+ "[E-49]",
55
+ "[E-50]",
56
+ "[E-51]",
57
+ "[E-52]",
58
+ "[E-53]",
59
+ "[E-54]",
60
+ "[E-55]",
61
+ "[E-56]",
62
+ "[E-57]",
63
+ "[E-58]",
64
+ "[E-59]",
65
+ "[E-60]",
66
+ "[E-61]",
67
+ "[E-62]",
68
+ "[E-63]",
69
+ "[E-64]",
70
+ "[E-65]",
71
+ "[E-66]",
72
+ "[E-67]",
73
+ "[E-68]",
74
+ "[E-69]",
75
+ "[E-70]",
76
+ "[E-71]",
77
+ "[E-72]",
78
+ "[E-73]",
79
+ "[E-74]",
80
+ "[E-75]",
81
+ "[E-76]",
82
+ "[E-77]",
83
+ "[E-78]",
84
+ "[E-79]",
85
+ "[E-80]",
86
+ "[E-81]",
87
+ "[E-82]",
88
+ "[E-83]",
89
+ "[E-84]",
90
+ "[E-85]",
91
+ "[E-86]",
92
+ "[E-87]",
93
+ "[E-88]",
94
+ "[E-89]",
95
+ "[E-90]",
96
+ "[E-91]",
97
+ "[E-92]",
98
+ "[E-93]",
99
+ "[E-94]",
100
+ "[E-95]",
101
+ "[E-96]",
102
+ "[E-97]",
103
+ "[E-98]",
104
+ "[E-99]"
105
+ ],
106
+ "bos_token": "[CLS]",
107
+ "clean_up_tokenization_spaces": true,
108
+ "cls_token": "[CLS]",
109
+ "do_lower_case": false,
110
+ "eos_token": "[SEP]",
111
+ "mask_token": "[MASK]",
112
+ "model_max_length": 1000000000000000019884624838656,
113
+ "pad_token": "[PAD]",
114
+ "sep_token": "[SEP]",
115
+ "sp_model_kwargs": {},
116
+ "split_by_punct": false,
117
+ "tokenizer_class": "DebertaV2Tokenizer",
118
+ "unk_token": "[UNK]",
119
+ "vocab_type": "spm"
120
+ }
models/reader-extended-large/added_tokens.json ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "--NME--": 128001,
3
+ "[E-0]": 128002,
4
+ "[E-10]": 128012,
5
+ "[E-11]": 128013,
6
+ "[E-12]": 128014,
7
+ "[E-13]": 128015,
8
+ "[E-14]": 128016,
9
+ "[E-15]": 128017,
10
+ "[E-16]": 128018,
11
+ "[E-17]": 128019,
12
+ "[E-18]": 128020,
13
+ "[E-19]": 128021,
14
+ "[E-1]": 128003,
15
+ "[E-20]": 128022,
16
+ "[E-21]": 128023,
17
+ "[E-22]": 128024,
18
+ "[E-23]": 128025,
19
+ "[E-24]": 128026,
20
+ "[E-25]": 128027,
21
+ "[E-26]": 128028,
22
+ "[E-27]": 128029,
23
+ "[E-28]": 128030,
24
+ "[E-29]": 128031,
25
+ "[E-2]": 128004,
26
+ "[E-30]": 128032,
27
+ "[E-31]": 128033,
28
+ "[E-32]": 128034,
29
+ "[E-33]": 128035,
30
+ "[E-34]": 128036,
31
+ "[E-35]": 128037,
32
+ "[E-36]": 128038,
33
+ "[E-37]": 128039,
34
+ "[E-38]": 128040,
35
+ "[E-39]": 128041,
36
+ "[E-3]": 128005,
37
+ "[E-40]": 128042,
38
+ "[E-41]": 128043,
39
+ "[E-42]": 128044,
40
+ "[E-43]": 128045,
41
+ "[E-44]": 128046,
42
+ "[E-45]": 128047,
43
+ "[E-46]": 128048,
44
+ "[E-47]": 128049,
45
+ "[E-48]": 128050,
46
+ "[E-49]": 128051,
47
+ "[E-4]": 128006,
48
+ "[E-50]": 128052,
49
+ "[E-51]": 128053,
50
+ "[E-52]": 128054,
51
+ "[E-53]": 128055,
52
+ "[E-54]": 128056,
53
+ "[E-55]": 128057,
54
+ "[E-56]": 128058,
55
+ "[E-57]": 128059,
56
+ "[E-58]": 128060,
57
+ "[E-59]": 128061,
58
+ "[E-5]": 128007,
59
+ "[E-60]": 128062,
60
+ "[E-61]": 128063,
61
+ "[E-62]": 128064,
62
+ "[E-63]": 128065,
63
+ "[E-64]": 128066,
64
+ "[E-65]": 128067,
65
+ "[E-66]": 128068,
66
+ "[E-67]": 128069,
67
+ "[E-68]": 128070,
68
+ "[E-69]": 128071,
69
+ "[E-6]": 128008,
70
+ "[E-70]": 128072,
71
+ "[E-71]": 128073,
72
+ "[E-72]": 128074,
73
+ "[E-73]": 128075,
74
+ "[E-74]": 128076,
75
+ "[E-75]": 128077,
76
+ "[E-76]": 128078,
77
+ "[E-77]": 128079,
78
+ "[E-78]": 128080,
79
+ "[E-79]": 128081,
80
+ "[E-7]": 128009,
81
+ "[E-80]": 128082,
82
+ "[E-81]": 128083,
83
+ "[E-82]": 128084,
84
+ "[E-83]": 128085,
85
+ "[E-84]": 128086,
86
+ "[E-85]": 128087,
87
+ "[E-86]": 128088,
88
+ "[E-87]": 128089,
89
+ "[E-88]": 128090,
90
+ "[E-89]": 128091,
91
+ "[E-8]": 128010,
92
+ "[E-90]": 128092,
93
+ "[E-91]": 128093,
94
+ "[E-92]": 128094,
95
+ "[E-93]": 128095,
96
+ "[E-94]": 128096,
97
+ "[E-95]": 128097,
98
+ "[E-96]": 128098,
99
+ "[E-97]": 128099,
100
+ "[E-98]": 128100,
101
+ "[E-99]": 128101,
102
+ "[E-9]": 128011,
103
+ "[MASK]": 128000
104
+ }
models/reader-extended-large/config.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation": "gelu",
3
+ "add_entity_embedding": null,
4
+ "additional_special_symbols": 101,
5
+ "additional_special_symbols_types": 0,
6
+ "architectures": [
7
+ "RelikReaderSpanModel"
8
+ ],
9
+ "auto_map": {
10
+ "AutoModel": "modeling_relik.RelikReaderSpanModel"
11
+ },
12
+ "default_reader_class": null,
13
+ "entity_type_loss": false,
14
+ "linears_hidden_size": 512,
15
+ "model_type": "relik-reader",
16
+ "num_layers": null,
17
+ "torch_dtype": "float32",
18
+ "training": true,
19
+ "transformer_model": "microsoft/deberta-v3-large",
20
+ "transformers_version": "4.33.3",
21
+ "use_last_k_layers": 1
22
+ }
models/reader-extended-large/configuration_relik.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from transformers import AutoConfig
4
+ from transformers.configuration_utils import PretrainedConfig
5
+
6
+
7
+ class RelikReaderConfig(PretrainedConfig):
8
+ model_type = "relik-reader"
9
+
10
+ def __init__(
11
+ self,
12
+ transformer_model: str = "microsoft/deberta-v3-base",
13
+ additional_special_symbols: int = 101,
14
+ additional_special_symbols_types: Optional[int] = 0,
15
+ num_layers: Optional[int] = None,
16
+ activation: str = "gelu",
17
+ linears_hidden_size: Optional[int] = 512,
18
+ use_last_k_layers: int = 1,
19
+ entity_type_loss: bool = False,
20
+ add_entity_embedding: bool = None,
21
+ training: bool = False,
22
+ default_reader_class: Optional[str] = None,
23
+ **kwargs
24
+ ) -> None:
25
+ # TODO: add name_or_path to kwargs
26
+ self.transformer_model = transformer_model
27
+ self.additional_special_symbols = additional_special_symbols
28
+ self.additional_special_symbols_types = additional_special_symbols_types
29
+ self.num_layers = num_layers
30
+ self.activation = activation
31
+ self.linears_hidden_size = linears_hidden_size
32
+ self.use_last_k_layers = use_last_k_layers
33
+ self.entity_type_loss = entity_type_loss
34
+ self.add_entity_embedding = (
35
+ True
36
+ if add_entity_embedding is None and entity_type_loss
37
+ else add_entity_embedding
38
+ )
39
+ self.training = training
40
+ self.default_reader_class = default_reader_class
41
+ super().__init__(**kwargs)
42
+
43
+
44
+ AutoConfig.register("relik-reader", RelikReaderConfig)
models/reader-extended-large/modeling_relik.py ADDED
@@ -0,0 +1,980 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional
2
+
3
+ import torch
4
+ from transformers import AutoModel, PreTrainedModel
5
+ from transformers.activations import ClippedGELUActivation, GELUActivation
6
+ from transformers.configuration_utils import PretrainedConfig
7
+ from transformers.modeling_utils import PoolerEndLogits
8
+
9
+ from .configuration_relik import RelikReaderConfig
10
+
11
+ torch.set_float32_matmul_precision('medium')
12
+
13
+ class RelikReaderSample:
14
+ def __init__(self, **kwargs):
15
+ super().__setattr__("_d", {})
16
+ self._d = kwargs
17
+
18
+ def __getattribute__(self, item):
19
+ return super(RelikReaderSample, self).__getattribute__(item)
20
+
21
+ def __getattr__(self, item):
22
+ if item.startswith("__") and item.endswith("__"):
23
+ # this is likely some python library-specific variable (such as __deepcopy__ for copy)
24
+ # better follow standard behavior here
25
+ raise AttributeError(item)
26
+ elif item in self._d:
27
+ return self._d[item]
28
+ else:
29
+ return None
30
+
31
+ def __setattr__(self, key, value):
32
+ if key in self._d:
33
+ self._d[key] = value
34
+ else:
35
+ super().__setattr__(key, value)
36
+
37
+
38
+ activation2functions = {
39
+ "relu": torch.nn.ReLU(),
40
+ "gelu": GELUActivation(),
41
+ "gelu_10": ClippedGELUActivation(-10, 10),
42
+ }
43
+
44
+
45
+ class PoolerEndLogitsBi(PoolerEndLogits):
46
+ def __init__(self, config: PretrainedConfig):
47
+ super().__init__(config)
48
+ self.dense_1 = torch.nn.Linear(config.hidden_size, 2)
49
+
50
+ def forward(
51
+ self,
52
+ hidden_states: torch.FloatTensor,
53
+ start_states: Optional[torch.FloatTensor] = None,
54
+ start_positions: Optional[torch.LongTensor] = None,
55
+ p_mask: Optional[torch.FloatTensor] = None,
56
+ ) -> torch.FloatTensor:
57
+ if p_mask is not None:
58
+ p_mask = p_mask.unsqueeze(-1)
59
+ logits = super().forward(
60
+ hidden_states,
61
+ start_states,
62
+ start_positions,
63
+ p_mask,
64
+ )
65
+ return logits
66
+
67
+
68
+ class RelikReaderSpanModel(PreTrainedModel):
69
+ config_class = RelikReaderConfig
70
+
71
+ def __init__(self, config: RelikReaderConfig, *args, **kwargs):
72
+ super().__init__(config)
73
+ # Transformer model declaration
74
+ self.config = config
75
+ self.transformer_model = (
76
+ AutoModel.from_pretrained(self.config.transformer_model)
77
+ if self.config.num_layers is None
78
+ else AutoModel.from_pretrained(
79
+ self.config.transformer_model, num_hidden_layers=self.config.num_layers
80
+ )
81
+ )
82
+ self.transformer_model.resize_token_embeddings(
83
+ self.transformer_model.config.vocab_size
84
+ + self.config.additional_special_symbols,
85
+ pad_to_multiple_of=8,
86
+ )
87
+
88
+ self.activation = self.config.activation
89
+ self.linears_hidden_size = self.config.linears_hidden_size
90
+ self.use_last_k_layers = self.config.use_last_k_layers
91
+
92
+ # named entity detection layers
93
+ self.ned_start_classifier = self._get_projection_layer(
94
+ self.activation, last_hidden=2, layer_norm=False
95
+ )
96
+ self.ned_end_classifier = PoolerEndLogits(self.transformer_model.config)
97
+
98
+ # END entity disambiguation layer
99
+ self.ed_start_projector = self._get_projection_layer(self.activation)
100
+ self.ed_end_projector = self._get_projection_layer(self.activation)
101
+
102
+ self.training = self.config.training
103
+
104
+ # criterion
105
+ self.criterion = torch.nn.CrossEntropyLoss()
106
+
107
+ def _get_projection_layer(
108
+ self,
109
+ activation: str,
110
+ last_hidden: Optional[int] = None,
111
+ input_hidden=None,
112
+ layer_norm: bool = True,
113
+ ) -> torch.nn.Sequential:
114
+ head_components = [
115
+ torch.nn.Dropout(0.1),
116
+ torch.nn.Linear(
117
+ self.transformer_model.config.hidden_size * self.use_last_k_layers
118
+ if input_hidden is None
119
+ else input_hidden,
120
+ self.linears_hidden_size,
121
+ ),
122
+ activation2functions[activation],
123
+ torch.nn.Dropout(0.1),
124
+ torch.nn.Linear(
125
+ self.linears_hidden_size,
126
+ self.linears_hidden_size if last_hidden is None else last_hidden,
127
+ ),
128
+ ]
129
+
130
+ if layer_norm:
131
+ head_components.append(
132
+ torch.nn.LayerNorm(
133
+ self.linears_hidden_size if last_hidden is None else last_hidden,
134
+ self.transformer_model.config.layer_norm_eps,
135
+ )
136
+ )
137
+
138
+ return torch.nn.Sequential(*head_components)
139
+
140
+ def _mask_logits(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
141
+ mask = mask.unsqueeze(-1)
142
+ if next(self.parameters()).dtype == torch.float16:
143
+ logits = logits * (1 - mask) - 65500 * mask
144
+ else:
145
+ logits = logits * (1 - mask) - 1e30 * mask
146
+ return logits
147
+
148
+ def _get_model_features(
149
+ self,
150
+ input_ids: torch.Tensor,
151
+ attention_mask: torch.Tensor,
152
+ token_type_ids: Optional[torch.Tensor],
153
+ ):
154
+ model_input = {
155
+ "input_ids": input_ids,
156
+ "attention_mask": attention_mask,
157
+ "output_hidden_states": self.use_last_k_layers > 1,
158
+ }
159
+
160
+ if token_type_ids is not None:
161
+ model_input["token_type_ids"] = token_type_ids
162
+
163
+ model_output = self.transformer_model(**model_input)
164
+
165
+ if self.use_last_k_layers > 1:
166
+ model_features = torch.cat(
167
+ model_output[1][-self.use_last_k_layers :], dim=-1
168
+ )
169
+ else:
170
+ model_features = model_output[0]
171
+
172
+ return model_features
173
+
174
+ def compute_ned_end_logits(
175
+ self,
176
+ start_predictions,
177
+ start_labels,
178
+ model_features,
179
+ prediction_mask,
180
+ batch_size,
181
+ ) -> Optional[torch.Tensor]:
182
+ # todo: maybe when constraining on the spans,
183
+ # we should not use a prediction_mask for the end tokens.
184
+ # at least we should not during training imo
185
+ start_positions = start_labels if self.training else start_predictions
186
+ start_positions_indices = (
187
+ torch.arange(start_positions.size(1), device=start_positions.device)
188
+ .unsqueeze(0)
189
+ .expand(batch_size, -1)[start_positions > 0]
190
+ ).to(start_positions.device)
191
+
192
+ if len(start_positions_indices) > 0:
193
+ expanded_features = model_features.repeat_interleave(
194
+ torch.sum(start_positions > 0, dim=-1), dim=0
195
+ )
196
+ expanded_prediction_mask = prediction_mask.repeat_interleave(
197
+ torch.sum(start_positions > 0, dim=-1), dim=0
198
+ )
199
+ end_logits = self.ned_end_classifier(
200
+ hidden_states=expanded_features,
201
+ start_positions=start_positions_indices,
202
+ p_mask=expanded_prediction_mask,
203
+ )
204
+
205
+ return end_logits
206
+
207
+ return None
208
+
209
+ def compute_classification_logits(
210
+ self,
211
+ model_features,
212
+ special_symbols_mask,
213
+ prediction_mask,
214
+ batch_size,
215
+ start_positions=None,
216
+ end_positions=None,
217
+ ) -> torch.Tensor:
218
+ if start_positions is None or end_positions is None:
219
+ start_positions = torch.zeros_like(prediction_mask)
220
+ end_positions = torch.zeros_like(prediction_mask)
221
+
222
+ model_start_features = self.ed_start_projector(model_features)
223
+ model_end_features = self.ed_end_projector(model_features)
224
+ model_end_features[start_positions > 0] = model_end_features[end_positions > 0]
225
+
226
+ model_ed_features = torch.cat(
227
+ [model_start_features, model_end_features], dim=-1
228
+ )
229
+
230
+ # computing ed features
231
+ classes_representations = torch.sum(special_symbols_mask, dim=1)[0].item()
232
+ special_symbols_representation = model_ed_features[special_symbols_mask].view(
233
+ batch_size, classes_representations, -1
234
+ )
235
+
236
+ logits = torch.bmm(
237
+ model_ed_features,
238
+ torch.permute(special_symbols_representation, (0, 2, 1)),
239
+ )
240
+
241
+ logits = self._mask_logits(logits, prediction_mask)
242
+
243
+ return logits
244
+
245
+ def forward(
246
+ self,
247
+ input_ids: torch.Tensor,
248
+ attention_mask: torch.Tensor,
249
+ token_type_ids: Optional[torch.Tensor] = None,
250
+ prediction_mask: Optional[torch.Tensor] = None,
251
+ special_symbols_mask: Optional[torch.Tensor] = None,
252
+ start_labels: Optional[torch.Tensor] = None,
253
+ end_labels: Optional[torch.Tensor] = None,
254
+ use_predefined_spans: bool = False,
255
+ *args,
256
+ **kwargs,
257
+ ) -> Dict[str, Any]:
258
+ batch_size, seq_len = input_ids.shape
259
+
260
+ model_features = self._get_model_features(
261
+ input_ids, attention_mask, token_type_ids
262
+ )
263
+
264
+ ned_start_labels = None
265
+
266
+ # named entity detection if required
267
+ if use_predefined_spans: # no need to compute spans
268
+ ned_start_logits, ned_start_probabilities, ned_start_predictions = (
269
+ None,
270
+ None,
271
+ torch.clone(start_labels)
272
+ if start_labels is not None
273
+ else torch.zeros_like(input_ids),
274
+ )
275
+ ned_end_logits, ned_end_probabilities, ned_end_predictions = (
276
+ None,
277
+ None,
278
+ torch.clone(end_labels)
279
+ if end_labels is not None
280
+ else torch.zeros_like(input_ids),
281
+ )
282
+
283
+ ned_start_predictions[ned_start_predictions > 0] = 1
284
+ ned_end_predictions[ned_end_predictions > 0] = 1
285
+
286
+ else: # compute spans
287
+ # start boundary prediction
288
+ ned_start_logits = self.ned_start_classifier(model_features)
289
+ ned_start_logits = self._mask_logits(ned_start_logits, prediction_mask)
290
+ ned_start_probabilities = torch.softmax(ned_start_logits, dim=-1)
291
+ ned_start_predictions = ned_start_probabilities.argmax(dim=-1)
292
+
293
+ # end boundary prediction
294
+ ned_start_labels = (
295
+ torch.zeros_like(start_labels) if start_labels is not None else None
296
+ )
297
+
298
+ if ned_start_labels is not None:
299
+ ned_start_labels[start_labels == -100] = -100
300
+ ned_start_labels[start_labels > 0] = 1
301
+
302
+ ned_end_logits = self.compute_ned_end_logits(
303
+ ned_start_predictions,
304
+ ned_start_labels,
305
+ model_features,
306
+ prediction_mask,
307
+ batch_size,
308
+ )
309
+
310
+ if ned_end_logits is not None:
311
+ ned_end_probabilities = torch.softmax(ned_end_logits, dim=-1)
312
+ ned_end_predictions = torch.argmax(ned_end_probabilities, dim=-1)
313
+ else:
314
+ ned_end_logits, ned_end_probabilities = None, None
315
+ ned_end_predictions = ned_start_predictions.new_zeros(batch_size)
316
+
317
+ # flattening end predictions
318
+ # (flattening can happen only if the
319
+ # end boundaries were not predicted using the gold labels)
320
+ if not self.training and ned_end_logits is not None:
321
+ flattened_end_predictions = torch.zeros_like(ned_start_predictions)
322
+
323
+ row_indices, start_positions = torch.where(ned_start_predictions > 0)
324
+ ned_end_predictions[ned_end_predictions < start_positions] = start_positions[ned_end_predictions < start_positions]
325
+
326
+ end_spans_repeated = (row_indices + 1) * seq_len + ned_end_predictions
327
+ cummax_values, _ = end_spans_repeated.cummax(dim=0)
328
+
329
+ end_spans_repeated = (end_spans_repeated > torch.cat((end_spans_repeated[:1], cummax_values[:-1])))
330
+ end_spans_repeated[0] = True
331
+
332
+ ned_start_predictions[row_indices[~end_spans_repeated], start_positions[~end_spans_repeated]] = 0
333
+
334
+ row_indices, start_positions, ned_end_predictions = row_indices[end_spans_repeated], start_positions[end_spans_repeated], ned_end_predictions[end_spans_repeated]
335
+
336
+ flattened_end_predictions[row_indices, ned_end_predictions] = 1
337
+
338
+ total_start_predictions, total_end_predictions = ned_start_predictions.sum(), flattened_end_predictions.sum()
339
+
340
+ assert (
341
+ total_start_predictions == 0
342
+ or total_start_predictions == total_end_predictions
343
+ ), (
344
+ f"Total number of start predictions = {total_start_predictions}. "
345
+ f"Total number of end predictions = {total_end_predictions}"
346
+ )
347
+ ned_end_predictions = flattened_end_predictions
348
+ else:
349
+ ned_end_predictions = torch.zeros_like(ned_start_predictions)
350
+
351
+ start_position, end_position = (
352
+ (start_labels, end_labels)
353
+ if self.training
354
+ else (ned_start_predictions, ned_end_predictions)
355
+ )
356
+
357
+ # Entity disambiguation
358
+ ed_logits = self.compute_classification_logits(
359
+ model_features,
360
+ special_symbols_mask,
361
+ prediction_mask,
362
+ batch_size,
363
+ start_position,
364
+ end_position,
365
+ )
366
+ ed_probabilities = torch.softmax(ed_logits, dim=-1)
367
+ ed_predictions = torch.argmax(ed_probabilities, dim=-1)
368
+
369
+ # output build
370
+ output_dict = dict(
371
+ batch_size=batch_size,
372
+ ned_start_logits=ned_start_logits,
373
+ ned_start_probabilities=ned_start_probabilities,
374
+ ned_start_predictions=ned_start_predictions,
375
+ ned_end_logits=ned_end_logits,
376
+ ned_end_probabilities=ned_end_probabilities,
377
+ ned_end_predictions=ned_end_predictions,
378
+ ed_logits=ed_logits,
379
+ ed_probabilities=ed_probabilities,
380
+ ed_predictions=ed_predictions,
381
+ )
382
+
383
+ # compute loss if labels
384
+ if start_labels is not None and end_labels is not None and self.training:
385
+ # named entity detection loss
386
+
387
+ # start
388
+ if ned_start_logits is not None:
389
+ ned_start_loss = self.criterion(
390
+ ned_start_logits.view(-1, ned_start_logits.shape[-1]),
391
+ ned_start_labels.view(-1),
392
+ )
393
+ else:
394
+ ned_start_loss = 0
395
+
396
+ # end
397
+ if ned_end_logits is not None:
398
+ ned_end_labels = torch.zeros_like(end_labels)
399
+ ned_end_labels[end_labels == -100] = -100
400
+ ned_end_labels[end_labels > 0] = 1
401
+
402
+ ned_end_loss = self.criterion(
403
+ ned_end_logits,
404
+ (
405
+ torch.arange(
406
+ ned_end_labels.size(1), device=ned_end_labels.device
407
+ )
408
+ .unsqueeze(0)
409
+ .expand(batch_size, -1)[ned_end_labels > 0]
410
+ ).to(ned_end_labels.device),
411
+ )
412
+
413
+ else:
414
+ ned_end_loss = 0
415
+
416
+ # entity disambiguation loss
417
+ start_labels[ned_start_labels != 1] = -100
418
+ ed_labels = torch.clone(start_labels)
419
+ ed_labels[end_labels > 0] = end_labels[end_labels > 0]
420
+ ed_loss = self.criterion(
421
+ ed_logits.view(-1, ed_logits.shape[-1]),
422
+ ed_labels.view(-1),
423
+ )
424
+
425
+ output_dict["ned_start_loss"] = ned_start_loss
426
+ output_dict["ned_end_loss"] = ned_end_loss
427
+ output_dict["ed_loss"] = ed_loss
428
+
429
+ output_dict["loss"] = ned_start_loss + ned_end_loss + ed_loss
430
+
431
+ return output_dict
432
+
433
+
434
+ class RelikReaderREModel(PreTrainedModel):
435
+ config_class = RelikReaderConfig
436
+
437
+ def __init__(self, config, *args, **kwargs):
438
+ super().__init__(config)
439
+ # Transformer model declaration
440
+ # self.transformer_model_name = transformer_model
441
+ self.config = config
442
+ self.transformer_model = (
443
+ AutoModel.from_pretrained(config.transformer_model)
444
+ if config.num_layers is None
445
+ else AutoModel.from_pretrained(
446
+ config.transformer_model, num_hidden_layers=config.num_layers
447
+ )
448
+ )
449
+ self.transformer_model.resize_token_embeddings(
450
+ self.transformer_model.config.vocab_size
451
+ + config.additional_special_symbols
452
+ + config.additional_special_symbols_types,
453
+ pad_to_multiple_of=8,
454
+ )
455
+
456
+ # named entity detection layers
457
+ self.ned_start_classifier = self._get_projection_layer(
458
+ config.activation, last_hidden=2, layer_norm=False
459
+ )
460
+
461
+ self.ned_end_classifier = PoolerEndLogitsBi(self.transformer_model.config)
462
+
463
+ self.relation_disambiguation_loss = (
464
+ config.relation_disambiguation_loss
465
+ if hasattr(config, "relation_disambiguation_loss")
466
+ else False
467
+ )
468
+
469
+ if self.config.entity_type_loss and self.config.add_entity_embedding:
470
+ input_hidden_ents = 3 * self.transformer_model.config.hidden_size
471
+ else:
472
+ input_hidden_ents = 2 * self.transformer_model.config.hidden_size
473
+
474
+ self.re_subject_projector = self._get_projection_layer(
475
+ config.activation, input_hidden=input_hidden_ents
476
+ )
477
+ self.re_object_projector = self._get_projection_layer(
478
+ config.activation, input_hidden=input_hidden_ents
479
+ )
480
+ self.re_relation_projector = self._get_projection_layer(config.activation)
481
+
482
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
483
+ self.re_entities_projector = self._get_projection_layer(
484
+ config.activation,
485
+ input_hidden=2 * self.transformer_model.config.hidden_size,
486
+ )
487
+ self.re_definition_projector = self._get_projection_layer(
488
+ config.activation,
489
+ )
490
+
491
+ self.re_classifier = self._get_projection_layer(
492
+ config.activation,
493
+ input_hidden=config.linears_hidden_size,
494
+ last_hidden=2,
495
+ layer_norm=False,
496
+ )
497
+
498
+ self.training = config.training
499
+
500
+ # criterion
501
+ self.criterion = torch.nn.CrossEntropyLoss()
502
+ self.criterion_type = torch.nn.BCEWithLogitsLoss()
503
+
504
+ def _get_projection_layer(
505
+ self,
506
+ activation: str,
507
+ last_hidden: Optional[int] = None,
508
+ input_hidden=None,
509
+ layer_norm: bool = True,
510
+ ) -> torch.nn.Sequential:
511
+ head_components = [
512
+ torch.nn.Dropout(0.1),
513
+ torch.nn.Linear(
514
+ self.transformer_model.config.hidden_size
515
+ * self.config.use_last_k_layers
516
+ if input_hidden is None
517
+ else input_hidden,
518
+ self.config.linears_hidden_size,
519
+ ),
520
+ activation2functions[activation],
521
+ torch.nn.Dropout(0.1),
522
+ torch.nn.Linear(
523
+ self.config.linears_hidden_size,
524
+ self.config.linears_hidden_size if last_hidden is None else last_hidden,
525
+ ),
526
+ ]
527
+
528
+ if layer_norm:
529
+ head_components.append(
530
+ torch.nn.LayerNorm(
531
+ self.config.linears_hidden_size
532
+ if last_hidden is None
533
+ else last_hidden,
534
+ self.transformer_model.config.layer_norm_eps,
535
+ )
536
+ )
537
+
538
+ return torch.nn.Sequential(*head_components)
539
+
540
+ def _mask_logits(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
541
+ mask = mask.unsqueeze(-1)
542
+ if next(self.parameters()).dtype == torch.float16:
543
+ logits = logits * (1 - mask) - 65500 * mask
544
+ else:
545
+ logits = logits * (1 - mask) - 1e30 * mask
546
+ return logits
547
+
548
+ def _get_model_features(
549
+ self,
550
+ input_ids: torch.Tensor,
551
+ attention_mask: torch.Tensor,
552
+ token_type_ids: Optional[torch.Tensor],
553
+ ):
554
+ model_input = {
555
+ "input_ids": input_ids,
556
+ "attention_mask": attention_mask,
557
+ "output_hidden_states": self.config.use_last_k_layers > 1,
558
+ }
559
+
560
+ if token_type_ids is not None:
561
+ model_input["token_type_ids"] = token_type_ids
562
+
563
+ model_output = self.transformer_model(**model_input)
564
+
565
+ if self.config.use_last_k_layers > 1:
566
+ model_features = torch.cat(
567
+ model_output[1][-self.config.use_last_k_layers :], dim=-1
568
+ )
569
+ else:
570
+ model_features = model_output[0]
571
+
572
+ return model_features
573
+
574
+ def compute_ned_end_logits(
575
+ self,
576
+ start_predictions,
577
+ start_labels,
578
+ model_features,
579
+ prediction_mask,
580
+ batch_size,
581
+ mask_preceding: bool = False,
582
+ ) -> Optional[torch.Tensor]:
583
+ # todo: maybe when constraining on the spans,
584
+ # we should not use a prediction_mask for the end tokens.
585
+ # at least we should not during training imo
586
+ start_positions = start_labels if self.training else start_predictions
587
+ start_positions_indices = (
588
+ torch.arange(start_positions.size(1), device=start_positions.device)
589
+ .unsqueeze(0)
590
+ .expand(batch_size, -1)[start_positions > 0]
591
+ ).to(start_positions.device)
592
+
593
+ if len(start_positions_indices) > 0:
594
+ expanded_features = model_features.repeat_interleave(
595
+ torch.sum(start_positions > 0, dim=-1), dim=0
596
+ )
597
+ expanded_prediction_mask = prediction_mask.repeat_interleave(
598
+ torch.sum(start_positions > 0, dim=-1), dim=0
599
+ )
600
+ if mask_preceding:
601
+ expanded_prediction_mask[
602
+ torch.arange(
603
+ expanded_prediction_mask.shape[1],
604
+ device=expanded_prediction_mask.device,
605
+ )
606
+ < start_positions_indices.unsqueeze(1)
607
+ ] = 1
608
+ end_logits = self.ned_end_classifier(
609
+ hidden_states=expanded_features,
610
+ start_positions=start_positions_indices,
611
+ p_mask=expanded_prediction_mask,
612
+ )
613
+
614
+ return end_logits
615
+
616
+ return None
617
+
618
+ def compute_relation_logits(
619
+ self,
620
+ model_entity_features,
621
+ special_symbols_features,
622
+ ) -> torch.Tensor:
623
+ model_subject_features = self.re_subject_projector(model_entity_features)
624
+ model_object_features = self.re_object_projector(model_entity_features)
625
+ special_symbols_start_representation = self.re_relation_projector(
626
+ special_symbols_features
627
+ )
628
+ re_logits = torch.einsum(
629
+ "bse,bde,bfe->bsdfe",
630
+ model_subject_features,
631
+ model_object_features,
632
+ special_symbols_start_representation,
633
+ )
634
+ re_logits = self.re_classifier(re_logits)
635
+
636
+ return re_logits
637
+
638
+ def compute_entity_logits(
639
+ self,
640
+ model_entity_features,
641
+ special_symbols_features,
642
+ ) -> torch.Tensor:
643
+ model_ed_features = self.re_entities_projector(model_entity_features)
644
+ special_symbols_ed_representation = self.re_definition_projector(
645
+ special_symbols_features
646
+ )
647
+
648
+ logits = torch.bmm(
649
+ model_ed_features,
650
+ torch.permute(special_symbols_ed_representation, (0, 2, 1)),
651
+ )
652
+ logits = self._mask_logits(
653
+ logits, (model_entity_features == -100).all(2).long()
654
+ )
655
+ return logits
656
+
657
+ def compute_loss(self, logits, labels, mask=None):
658
+ logits = logits.reshape(-1, logits.shape[-1])
659
+ labels = labels.reshape(-1).long()
660
+ if mask is not None:
661
+ return self.criterion(logits[mask], labels[mask])
662
+ return self.criterion(logits, labels)
663
+
664
+ def compute_ned_type_loss(
665
+ self,
666
+ disambiguation_labels,
667
+ re_ned_entities_logits,
668
+ ned_type_logits,
669
+ re_entities_logits,
670
+ entity_types,
671
+ mask,
672
+ ):
673
+ if self.config.entity_type_loss and self.relation_disambiguation_loss:
674
+ return self.criterion_type(
675
+ re_ned_entities_logits[disambiguation_labels != -100],
676
+ disambiguation_labels[disambiguation_labels != -100],
677
+ )
678
+ if self.config.entity_type_loss:
679
+ return self.criterion_type(
680
+ ned_type_logits[mask],
681
+ disambiguation_labels[:, :, :entity_types][mask],
682
+ )
683
+
684
+ if self.relation_disambiguation_loss:
685
+ return self.criterion_type(
686
+ re_entities_logits[disambiguation_labels != -100],
687
+ disambiguation_labels[disambiguation_labels != -100],
688
+ )
689
+ return 0
690
+
691
+ def compute_relation_loss(self, relation_labels, re_logits):
692
+ return self.compute_loss(
693
+ re_logits, relation_labels, relation_labels.view(-1) != -100
694
+ )
695
+
696
+ def forward(
697
+ self,
698
+ input_ids: torch.Tensor,
699
+ attention_mask: torch.Tensor,
700
+ token_type_ids: torch.Tensor,
701
+ prediction_mask: Optional[torch.Tensor] = None,
702
+ special_symbols_mask: Optional[torch.Tensor] = None,
703
+ special_symbols_mask_entities: Optional[torch.Tensor] = None,
704
+ start_labels: Optional[torch.Tensor] = None,
705
+ end_labels: Optional[torch.Tensor] = None,
706
+ disambiguation_labels: Optional[torch.Tensor] = None,
707
+ relation_labels: Optional[torch.Tensor] = None,
708
+ relation_threshold: float = 0.5,
709
+ is_validation: bool = False,
710
+ is_prediction: bool = False,
711
+ use_predefined_spans: bool = False,
712
+ *args,
713
+ **kwargs,
714
+ ) -> Dict[str, Any]:
715
+ batch_size = input_ids.shape[0]
716
+
717
+ model_features = self._get_model_features(
718
+ input_ids, attention_mask, token_type_ids
719
+ )
720
+
721
+ # named entity detection
722
+ if use_predefined_spans:
723
+ ned_start_logits, ned_start_probabilities, ned_start_predictions = (
724
+ None,
725
+ None,
726
+ torch.zeros_like(start_labels),
727
+ )
728
+ ned_end_logits, ned_end_probabilities, ned_end_predictions = (
729
+ None,
730
+ None,
731
+ torch.zeros_like(end_labels),
732
+ )
733
+
734
+ ned_start_predictions[start_labels > 0] = 1
735
+ ned_end_predictions[end_labels > 0] = 1
736
+ ned_end_predictions = ned_end_predictions[~(end_labels == -100).all(2)]
737
+ ned_start_labels = start_labels
738
+ ned_start_labels[start_labels > 0] = 1
739
+ else:
740
+ # start boundary prediction
741
+ ned_start_logits = self.ned_start_classifier(model_features)
742
+ if is_validation or is_prediction:
743
+ ned_start_logits = self._mask_logits(
744
+ ned_start_logits, prediction_mask
745
+ ) # why?
746
+ ned_start_probabilities = torch.softmax(ned_start_logits, dim=-1)
747
+ ned_start_predictions = ned_start_probabilities.argmax(dim=-1)
748
+
749
+ # end boundary prediction
750
+ ned_start_labels = (
751
+ torch.zeros_like(start_labels) if start_labels is not None else None
752
+ )
753
+
754
+ # start_labels contain the entity id at each position; we just need 1 to mark the start of an entity
755
+ if ned_start_labels is not None:
756
+ ned_start_labels[start_labels == -100] = -100
757
+ ned_start_labels[start_labels > 0] = 1
758
+
759
+ # compute end logits only if there are any start predictions.
760
+ # For each start prediction, n end predictions are made
761
+ ned_end_logits = self.compute_ned_end_logits(
762
+ ned_start_predictions,
763
+ ned_start_labels,
764
+ model_features,
765
+ prediction_mask,
766
+ batch_size,
767
+ True,
768
+ )
769
+
770
+ if ned_end_logits is not None:
771
+ # For each start prediction, n end predictions are made based on
772
+ # binary classification, i.e. argmax at each position.
773
+ ned_end_probabilities = torch.softmax(ned_end_logits, dim=-1)
774
+ ned_end_predictions = ned_end_probabilities.argmax(dim=-1)
775
+ else:
776
+ ned_end_logits, ned_end_probabilities = None, None
777
+ ned_end_predictions = torch.zeros_like(ned_start_predictions)
778
+
779
+ if is_prediction or is_validation:
780
+ end_preds_count = ned_end_predictions.sum(1)
781
+ # If there are no end predictions for a start prediction, remove the start prediction
782
+ if (end_preds_count == 0).any() and (ned_start_predictions > 0).any():
783
+ ned_start_predictions[ned_start_predictions == 1] = (
784
+ end_preds_count != 0
785
+ ).long()
786
+ ned_end_predictions = ned_end_predictions[end_preds_count != 0]
787
+
788
+ if end_labels is not None:
789
+ end_labels = end_labels[~(end_labels == -100).all(2)]
790
+
791
+ start_position, end_position = (
792
+ (start_labels, end_labels)
793
+ if (not is_prediction and not is_validation)
794
+ else (ned_start_predictions, ned_end_predictions)
795
+ )
796
+
797
+ start_counts = (start_position > 0).sum(1)
798
+ if (start_counts > 0).any():
799
+ ned_end_predictions = ned_end_predictions.split(start_counts.tolist())
800
+ # limit to 30 predictions per document using start_counts, by setting all positions after the cumulative sum reaches 30 to 0
801
+ # if is_validation or is_prediction:
802
+ # ned_start_predictions[ned_start_predictions == 1] = start_counts
803
+ # We can only predict relations if we have start and end predictions
804
+ if (end_position > 0).sum() > 0:
805
+ ends_count = (end_position > 0).sum(1)
806
+ model_subject_features = torch.cat(
807
+ [
808
+ torch.repeat_interleave(
809
+ model_features[start_position > 0], ends_count, dim=0
810
+ ), # start position features
811
+ torch.repeat_interleave(model_features, start_counts, dim=0)[
812
+ end_position > 0
813
+ ], # end position features
814
+ ],
815
+ dim=-1,
816
+ )
817
+ ents_count = torch.nn.utils.rnn.pad_sequence(
818
+ torch.split(ends_count, start_counts.tolist()),
819
+ batch_first=True,
820
+ padding_value=0,
821
+ ).sum(1)
822
+ model_subject_features = torch.nn.utils.rnn.pad_sequence(
823
+ torch.split(model_subject_features, ents_count.tolist()),
824
+ batch_first=True,
825
+ padding_value=-100,
826
+ )
827
+
828
+ # if is_validation or is_prediction:
829
+ # model_subject_features = model_subject_features[:, :30, :]
830
+
831
+ # entity disambiguation. Here relation_disambiguation_loss would only be useful to
832
+ # reduce the number of candidate relations for the next step, but currently unused.
833
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
834
+ (re_ned_entities_logits) = self.compute_entity_logits(
835
+ model_subject_features,
836
+ model_features[
837
+ special_symbols_mask | special_symbols_mask_entities
838
+ ].view(batch_size, -1, model_features.shape[-1]),
839
+ )
840
+ entity_types = torch.sum(special_symbols_mask_entities, dim=1)[0].item()
841
+ ned_type_logits = re_ned_entities_logits[:, :, :entity_types]
842
+ re_entities_logits = re_ned_entities_logits[:, :, entity_types:]
843
+
844
+ if self.config.entity_type_loss:
845
+ ned_type_probabilities = torch.sigmoid(ned_type_logits)
846
+ ned_type_predictions = ned_type_probabilities.argmax(dim=-1)
847
+
848
+ if self.config.add_entity_embedding:
849
+ special_symbols_representation = model_features[
850
+ special_symbols_mask_entities
851
+ ].view(batch_size, entity_types, -1)
852
+
853
+ entities_representation = torch.einsum(
854
+ "bsp,bpe->bse",
855
+ ned_type_probabilities,
856
+ special_symbols_representation,
857
+ )
858
+ model_subject_features = torch.cat(
859
+ [model_subject_features, entities_representation], dim=-1
860
+ )
861
+ re_entities_probabilities = torch.sigmoid(re_entities_logits)
862
+ re_entities_predictions = re_entities_probabilities.round()
863
+ else:
864
+ (
865
+ ned_type_logits,
866
+ ned_type_probabilities,
867
+ re_entities_logits,
868
+ re_entities_probabilities,
869
+ ) = (None, None, None, None)
870
+ ned_type_predictions, re_entities_predictions = (
871
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
872
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
873
+ )
874
+
875
+ # Compute relation logits
876
+ re_logits = self.compute_relation_logits(
877
+ model_subject_features,
878
+ model_features[special_symbols_mask].view(
879
+ batch_size, -1, model_features.shape[-1]
880
+ ),
881
+ )
882
+
883
+ re_probabilities = torch.softmax(re_logits, dim=-1)
884
+ # we set a threshold instead of argmax in case it needs to be tweaked
885
+ re_predictions = re_probabilities[:, :, :, :, 1] > relation_threshold
886
+ # re_predictions = re_probabilities.argmax(dim=-1)
887
+ re_probabilities = re_probabilities[:, :, :, :, 1]
888
+ # re_logits, re_probabilities, re_predictions = (
889
+ # torch.zeros(
890
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
891
+ # ).to(input_ids.device),
892
+ # torch.zeros(
893
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
894
+ # ).to(input_ids.device),
895
+ # torch.zeros(
896
+ # [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
897
+ # ).to(input_ids.device),
898
+ # )
899
+
900
+ else:
901
+ (
902
+ ned_type_logits,
903
+ ned_type_probabilities,
904
+ re_entities_logits,
905
+ re_entities_probabilities,
906
+ ) = (None, None, None, None)
907
+ ned_type_predictions, re_entities_predictions = (
908
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
909
+ torch.zeros([batch_size, 1], dtype=torch.long).to(input_ids.device),
910
+ )
911
+ re_logits, re_probabilities, re_predictions = (
912
+ torch.zeros(
913
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
914
+ ).to(input_ids.device),
915
+ torch.zeros(
916
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
917
+ ).to(input_ids.device),
918
+ torch.zeros(
919
+ [batch_size, 1, 1, special_symbols_mask.sum(1)[0]], dtype=torch.long
920
+ ).to(input_ids.device),
921
+ )
922
+
923
+ # output build
924
+ output_dict = dict(
925
+ batch_size=batch_size,
926
+ ned_start_logits=ned_start_logits,
927
+ ned_start_probabilities=ned_start_probabilities,
928
+ ned_start_predictions=ned_start_predictions,
929
+ ned_end_logits=ned_end_logits,
930
+ ned_end_probabilities=ned_end_probabilities,
931
+ ned_end_predictions=ned_end_predictions,
932
+ ned_type_logits=ned_type_logits,
933
+ ned_type_probabilities=ned_type_probabilities,
934
+ ned_type_predictions=ned_type_predictions,
935
+ re_entities_logits=re_entities_logits,
936
+ re_entities_probabilities=re_entities_probabilities,
937
+ re_entities_predictions=re_entities_predictions,
938
+ re_logits=re_logits,
939
+ re_probabilities=re_probabilities,
940
+ re_predictions=re_predictions,
941
+ )
942
+
943
+ if (
944
+ start_labels is not None
945
+ and end_labels is not None
946
+ and relation_labels is not None
947
+ and is_prediction is False
948
+ ):
949
+ ned_start_loss = self.compute_loss(ned_start_logits, ned_start_labels)
950
+ end_labels[end_labels > 0] = 1
951
+ ned_end_loss = self.compute_loss(ned_end_logits, end_labels)
952
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
953
+ ned_type_loss = self.compute_ned_type_loss(
954
+ disambiguation_labels,
955
+ re_ned_entities_logits,
956
+ ned_type_logits,
957
+ re_entities_logits,
958
+ entity_types,
959
+ (model_subject_features != -100).all(2),
960
+ )
961
+ relation_loss = self.compute_relation_loss(relation_labels, re_logits)
962
+ # compute loss. We can skip the relation loss if we are in the first epochs (optional)
963
+ if self.config.entity_type_loss or self.relation_disambiguation_loss:
964
+ output_dict["loss"] = (
965
+ ned_start_loss + ned_end_loss + relation_loss + ned_type_loss
966
+ ) / 4
967
+ output_dict["ned_type_loss"] = ned_type_loss
968
+ else:
969
+ # output_dict["loss"] = ((1 / 4) * (ned_start_loss + ned_end_loss)) + (
970
+ # (1 / 2) * relation_loss
971
+ # )
972
+ output_dict["loss"] = ((1 / 16) * (ned_start_loss + ned_end_loss)) + (
973
+ (7 / 8) * relation_loss
974
+ )
975
+
976
+ output_dict["ned_start_loss"] = ned_start_loss
977
+ output_dict["ned_end_loss"] = ned_end_loss
978
+ output_dict["re_loss"] = relation_loss
979
+
980
+ return output_dict
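
Below, a minimal usage sketch for the span reader defined above: it builds a RelikReaderConfig directly and runs a forward pass on dummy tensors. Everything outside the forward signature is an assumption: the package name relik_reader is hypothetical, the config field values are illustrative rather than the ones shipped with the checkpoints, and RelikReaderConfig (defined in the companion configuration_relik.py) is assumed to accept these fields as keyword arguments.

    import torch

    # Hypothetical package layout: modeling_relik.py and configuration_relik.py copied
    # into a local package so the relative import inside modeling_relik.py resolves
    # (loading through transformers' trust_remote_code machinery works the same way).
    from relik_reader.configuration_relik import RelikReaderConfig
    from relik_reader.modeling_relik import RelikReaderSpanModel

    # Illustrative hyper-parameters only; the uploaded config.json files are authoritative.
    config = RelikReaderConfig(
        transformer_model="microsoft/deberta-v3-small",
        additional_special_symbols=101,
        linears_hidden_size=512,
        use_last_k_layers=1,
        activation="gelu",
        num_layers=None,
        training=False,
    )
    model = RelikReaderSpanModel(config).eval()

    batch_size, seq_len = 1, 16
    special_symbols_mask = torch.zeros(batch_size, seq_len, dtype=torch.bool)
    special_symbols_mask[:, -3:] = True  # pretend the last 3 tokens are candidate-entity symbols

    with torch.no_grad():
        out = model(
            input_ids=torch.randint(0, 1000, (batch_size, seq_len)),
            attention_mask=torch.ones(batch_size, seq_len, dtype=torch.long),
            prediction_mask=torch.zeros(batch_size, seq_len, dtype=torch.long),
            special_symbols_mask=special_symbols_mask,
        )
    # ned_start/ned_end predictions mark span boundaries; ed_predictions gives, per token,
    # the index of the highest-scoring candidate symbol.
    print(out["ned_start_predictions"].shape, out["ed_predictions"].shape)
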
models/reader-extended-large/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:425244d83d202ff1b0765155e679a9265e3d2af0e71d19d2ba8d853913bd4577
3
+ size 1753453946
models/reader-extended-large/special_tokens_map.json ADDED
@@ -0,0 +1,112 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "--NME--",
4
+ "[E-0]",
5
+ "[E-1]",
6
+ "[E-2]",
7
+ "[E-3]",
8
+ "[E-4]",
9
+ "[E-5]",
10
+ "[E-6]",
11
+ "[E-7]",
12
+ "[E-8]",
13
+ "[E-9]",
14
+ "[E-10]",
15
+ "[E-11]",
16
+ "[E-12]",
17
+ "[E-13]",
18
+ "[E-14]",
19
+ "[E-15]",
20
+ "[E-16]",
21
+ "[E-17]",
22
+ "[E-18]",
23
+ "[E-19]",
24
+ "[E-20]",
25
+ "[E-21]",
26
+ "[E-22]",
27
+ "[E-23]",
28
+ "[E-24]",
29
+ "[E-25]",
30
+ "[E-26]",
31
+ "[E-27]",
32
+ "[E-28]",
33
+ "[E-29]",
34
+ "[E-30]",
35
+ "[E-31]",
36
+ "[E-32]",
37
+ "[E-33]",
38
+ "[E-34]",
39
+ "[E-35]",
40
+ "[E-36]",
41
+ "[E-37]",
42
+ "[E-38]",
43
+ "[E-39]",
44
+ "[E-40]",
45
+ "[E-41]",
46
+ "[E-42]",
47
+ "[E-43]",
48
+ "[E-44]",
49
+ "[E-45]",
50
+ "[E-46]",
51
+ "[E-47]",
52
+ "[E-48]",
53
+ "[E-49]",
54
+ "[E-50]",
55
+ "[E-51]",
56
+ "[E-52]",
57
+ "[E-53]",
58
+ "[E-54]",
59
+ "[E-55]",
60
+ "[E-56]",
61
+ "[E-57]",
62
+ "[E-58]",
63
+ "[E-59]",
64
+ "[E-60]",
65
+ "[E-61]",
66
+ "[E-62]",
67
+ "[E-63]",
68
+ "[E-64]",
69
+ "[E-65]",
70
+ "[E-66]",
71
+ "[E-67]",
72
+ "[E-68]",
73
+ "[E-69]",
74
+ "[E-70]",
75
+ "[E-71]",
76
+ "[E-72]",
77
+ "[E-73]",
78
+ "[E-74]",
79
+ "[E-75]",
80
+ "[E-76]",
81
+ "[E-77]",
82
+ "[E-78]",
83
+ "[E-79]",
84
+ "[E-80]",
85
+ "[E-81]",
86
+ "[E-82]",
87
+ "[E-83]",
88
+ "[E-84]",
89
+ "[E-85]",
90
+ "[E-86]",
91
+ "[E-87]",
92
+ "[E-88]",
93
+ "[E-89]",
94
+ "[E-90]",
95
+ "[E-91]",
96
+ "[E-92]",
97
+ "[E-93]",
98
+ "[E-94]",
99
+ "[E-95]",
100
+ "[E-96]",
101
+ "[E-97]",
102
+ "[E-98]",
103
+ "[E-99]"
104
+ ],
105
+ "bos_token": "[CLS]",
106
+ "cls_token": "[CLS]",
107
+ "eos_token": "[SEP]",
108
+ "mask_token": "[MASK]",
109
+ "pad_token": "[PAD]",
110
+ "sep_token": "[SEP]",
111
+ "unk_token": "[UNK]"
112
+ }
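
The map above registers "--NME--" plus the hundred span markers [E-0] through [E-99] as additional special tokens, 101 extra symbols in total. As a hedged sketch (the base checkpoint name is a placeholder, and the reader code above actually resizes to vocab_size + additional_special_symbols rather than len(tokenizer)), this is how such tokens are typically added to a DeBERTa-v3 tokenizer and how an encoder's embedding matrix is grown to match, mirroring the resize_token_embeddings(..., pad_to_multiple_of=8) call in modeling_relik.py:

    from transformers import AutoModel, AutoTokenizer

    base = "microsoft/deberta-v3-small"  # placeholder; the uploaded readers ship their own tokenizer files
    tokenizer = AutoTokenizer.from_pretrained(base)
    extra = ["--NME--"] + [f"[E-{i}]" for i in range(100)]  # 101 additional special symbols
    tokenizer.add_special_tokens({"additional_special_tokens": extra})

    encoder = AutoModel.from_pretrained(base)
    # Grow the embedding matrix to cover the new symbols; padding the vocabulary size to a
    # multiple of 8 keeps the matmuls tensor-core friendly.
    encoder.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8)
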
models/reader-extended-large/spm.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
models/reader-extended-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/reader-extended-large/tokenizer_config.json ADDED
@@ -0,0 +1,120 @@
1
+ {
2
+ "add_prefix_space": true,
3
+ "additional_special_tokens": [
4
+ "--NME--",
5
+ "[E-0]",
6
+ "[E-1]",
7
+ "[E-2]",
8
+ "[E-3]",
9
+ "[E-4]",
10
+ "[E-5]",
11
+ "[E-6]",
12
+ "[E-7]",
13
+ "[E-8]",
14
+ "[E-9]",
15
+ "[E-10]",
16
+ "[E-11]",
17
+ "[E-12]",
18
+ "[E-13]",
19
+ "[E-14]",
20
+ "[E-15]",
21
+ "[E-16]",
22
+ "[E-17]",
23
+ "[E-18]",
24
+ "[E-19]",
25
+ "[E-20]",
26
+ "[E-21]",
27
+ "[E-22]",
28
+ "[E-23]",
29
+ "[E-24]",
30
+ "[E-25]",
31
+ "[E-26]",
32
+ "[E-27]",
33
+ "[E-28]",
34
+ "[E-29]",
35
+ "[E-30]",
36
+ "[E-31]",
37
+ "[E-32]",
38
+ "[E-33]",
39
+ "[E-34]",
40
+ "[E-35]",
41
+ "[E-36]",
42
+ "[E-37]",
43
+ "[E-38]",
44
+ "[E-39]",
45
+ "[E-40]",
46
+ "[E-41]",
47
+ "[E-42]",
48
+ "[E-43]",
49
+ "[E-44]",
50
+ "[E-45]",
51
+ "[E-46]",
52
+ "[E-47]",
53
+ "[E-48]",
54
+ "[E-49]",
55
+ "[E-50]",
56
+ "[E-51]",
57
+ "[E-52]",
58
+ "[E-53]",
59
+ "[E-54]",
60
+ "[E-55]",
61
+ "[E-56]",
62
+ "[E-57]",
63
+ "[E-58]",
64
+ "[E-59]",
65
+ "[E-60]",
66
+ "[E-61]",
67
+ "[E-62]",
68
+ "[E-63]",
69
+ "[E-64]",
70
+ "[E-65]",
71
+ "[E-66]",
72
+ "[E-67]",
73
+ "[E-68]",
74
+ "[E-69]",
75
+ "[E-70]",
76
+ "[E-71]",
77
+ "[E-72]",
78
+ "[E-73]",
79
+ "[E-74]",
80
+ "[E-75]",
81
+ "[E-76]",
82
+ "[E-77]",
83
+ "[E-78]",
84
+ "[E-79]",
85
+ "[E-80]",
86
+ "[E-81]",
87
+ "[E-82]",
88
+ "[E-83]",
89
+ "[E-84]",
90
+ "[E-85]",
91
+ "[E-86]",
92
+ "[E-87]",
93
+ "[E-88]",
94
+ "[E-89]",
95
+ "[E-90]",
96
+ "[E-91]",
97
+ "[E-92]",
98
+ "[E-93]",
99
+ "[E-94]",
100
+ "[E-95]",
101
+ "[E-96]",
102
+ "[E-97]",
103
+ "[E-98]",
104
+ "[E-99]"
105
+ ],
106
+ "bos_token": "[CLS]",
107
+ "clean_up_tokenization_spaces": true,
108
+ "cls_token": "[CLS]",
109
+ "do_lower_case": false,
110
+ "eos_token": "[SEP]",
111
+ "mask_token": "[MASK]",
112
+ "model_max_length": 1000000000000000019884624838656,
113
+ "pad_token": "[PAD]",
114
+ "sep_token": "[SEP]",
115
+ "sp_model_kwargs": {},
116
+ "split_by_punct": false,
117
+ "tokenizer_class": "DebertaV2Tokenizer",
118
+ "unk_token": "[UNK]",
119
+ "vocab_type": "spm"
120
+ }