Quentin Lhoest committed
Commit e4c37ba
Parent: 99c0835

Release: 1.18.1

Commit from https://github.com/huggingface/datasets/commit/218e496519ff14b4bc69ea559616af6f2ef89e57

Files changed (1)
  1. lama.py +350 -350 (the removed and re-added lines are identical; the file is shown once below)
lama.py CHANGED
@@ -1,350 +1,350 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """The LAMA Dataset"""
-
-
- import json
- from fnmatch import fnmatch
-
- import datasets
-
-
- _CITATION = """@inproceedings{petroni2019language,
-   title={Language Models as Knowledge Bases?},
-   author={F. Petroni, T. Rockt{\"{a}}schel, A. H. Miller, P. Lewis, A. Bakhtin, Y. Wu and S. Riedel},
-   booktitle={In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2019},
-   year={2019}
- }
- @inproceedings{petroni2020how,
-   title={How Context Affects Language Models' Factual Predictions},
-   author={Fabio Petroni and Patrick Lewis and Aleksandra Piktus and Tim Rockt{\"a}schel and Yuxiang Wu and Alexander H. Miller and Sebastian Riedel},
-   booktitle={Automated Knowledge Base Construction},
-   year={2020},
-   url={https://openreview.net/forum?id=025X0zPfn}
- }
- """
-
-
- _DESCRIPTION = """LAMA is a dataset used to probe and analyze the factual and commonsense knowledge contained in pretrained language models. See https://github.com/facebookresearch/LAMA.
- """
-
- _HOMEPAGE = "https://github.com/facebookresearch/LAMA"
-
- _LICENSE = "The Creative Commons Attribution-Noncommercial 4.0 International License. see https://github.com/facebookresearch/LAMA/blob/master/LICENSE"
-
- _RELATIONS_URL = "https://s3.amazonaws.com/datasets.huggingface.co/lama/relations.jsonl"
-
- _DATA_URL = "https://dl.fbaipublicfiles.com/LAMA/negated_data.tar.gz"
-
-
- class Lama(datasets.GeneratorBasedBuilder):
-     """Lama Dataset"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="trex", version=VERSION, description="The TRex part of the Lama dataset"),
-         datasets.BuilderConfig(name="squad", version=VERSION, description="The Squad part of the Lama dataset"),
-         datasets.BuilderConfig(
-             name="google_re", version=VERSION, description="The Google_re part of the Lama dataset"
-         ),
-         datasets.BuilderConfig(
-             name="conceptnet", version=VERSION, description="The Conceptnet part of the Lama dataset"
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "trex"
-
-     def _info(self):
-         if self.config.name == "trex":
-             features = datasets.Features(
-                 {
-                     "uuid": datasets.Value("string"),
-                     "obj_uri": datasets.Value("string"),
-                     "obj_label": datasets.Value("string"),
-                     "sub_uri": datasets.Value("string"),
-                     "sub_label": datasets.Value("string"),
-                     "predicate_id": datasets.Value("string"),
-                     "sub_surface": datasets.Value("string"),
-                     "obj_surface": datasets.Value("string"),
-                     "masked_sentence": datasets.Value("string"),
-                     "template": datasets.Value("string"),
-                     "template_negated": datasets.Value("string"),
-                     "label": datasets.Value("string"),
-                     "description": datasets.Value("string"),
-                     "type": datasets.Value("string"),
-                 }
-             )
-             return datasets.DatasetInfo(
-                 description=_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-             )
-         elif self.config.name == "conceptnet":
-             features = datasets.Features(
-                 {
-                     "uuid": datasets.Value("string"),
-                     "sub": datasets.Value("string"),
-                     "obj": datasets.Value("string"),
-                     "pred": datasets.Value("string"),
-                     "obj_label": datasets.Value("string"),
-                     "masked_sentence": datasets.Value("string"),
-                     "negated": datasets.Value("string"),
-                 }
-             )
-             return datasets.DatasetInfo(
-                 description=_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-             )
-         elif self.config.name == "squad":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "sub_label": datasets.Value("string"),
-                     "obj_label": datasets.Value("string"),
-                     "negated": datasets.Value("string"),
-                     "masked_sentence": datasets.Value("string"),
-                 }
-             )
-             return datasets.DatasetInfo(
-                 description=_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-             )
-         elif self.config.name == "google_re":
-             features = datasets.Features(
-                 {
-                     "pred": datasets.Value("string"),
-                     "sub": datasets.Value("string"),
-                     "obj": datasets.Value("string"),
-                     "evidences": datasets.Value("string"),
-                     "judgments": datasets.Value("string"),
-                     "sub_w": datasets.Value("string"),
-                     "sub_label": datasets.Value("string"),
-                     "sub_aliases": datasets.Value("string"),
-                     "obj_w": datasets.Value("string"),
-                     "obj_label": datasets.Value("string"),
-                     "obj_aliases": datasets.Value("string"),
-                     "uuid": datasets.Value("string"),
-                     "masked_sentence": datasets.Value("string"),
-                     "template": datasets.Value("string"),
-                     "template_negated": datasets.Value("string"),
-                 }
-             )
-             return datasets.DatasetInfo(
-                 description=_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-             )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         archive = dl_manager.download(_DATA_URL)
-         if self.config.name == "trex":
-             relations_path = dl_manager.download(_RELATIONS_URL)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "filepaths": ["TREx/*"],
-                         "files": dl_manager.iter_archive(archive),
-                         "relations_path": relations_path,
-                     },
-                 ),
-             ]
-         elif self.config.name == "google_re":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "filepaths": [
-                             "Google_RE/date_of_birth_test.jsonl",
-                             "Google_RE/place_of_birth_test.jsonl",
-                             "Google_RE/place_of_death_test.jsonl",
-                         ],
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-         elif self.config.name == "conceptnet":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "filepaths": ["ConceptNet/test.jsonl"],
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-         elif self.config.name == "squad":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "filepaths": ["Squad/test.jsonl"],
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-     def _generate_examples(self, filepaths, files, relations_path=None):
-         """Yields examples from the LAMA dataset."""
-         filepaths = list(filepaths)
-         if self.config.name == "trex":
-             all_rels = {}
-             with open(relations_path, encoding="utf-8") as f:
-                 for row in f:
-                     data = json.loads(row)
-                     all_rels[data["relation"]] = data
-             id_ = -1
-             inside_trec_directory = False
-             for path, f in files:
-                 if any(fnmatch(path, pattern) for pattern in filepaths):
-                     inside_trec_directory = True
-                     for row in f:
-                         data = json.loads(row)
-                         pred = all_rels.get(data["predicate_id"], {})
-                         for evidences in data["evidences"]:
-                             id_ += 1
-                             yield id_, {
-                                 "uuid": str(data["uuid"]),
-                                 "obj_uri": str(data["obj_uri"]),
-                                 "obj_label": str(data["obj_label"]),
-                                 "sub_uri": str(data["sub_uri"]),
-                                 "sub_label": str(data["sub_label"]),
-                                 "predicate_id": str(data["predicate_id"]),
-                                 "sub_surface": str(evidences["sub_surface"]),
-                                 "obj_surface": str(evidences["obj_surface"]),
-                                 "masked_sentence": str(evidences["masked_sentence"]),
-                                 "template": str(pred.get("template", "")),
-                                 "template_negated": str(pred.get("template_negated", "")),
-                                 "label": str(pred.get("label", "")),
-                                 "description": str(pred.get("description", "")),
-                                 "type": str(pred.get("type", "")),
-                             }
-                 elif inside_trec_directory:
-                     break
-         elif self.config.name == "conceptnet":
-             id_ = -1
-             for path, f in files:
-                 if not filepaths:
-                     break
-                 if path in list(filepaths):
-                     for row in f:
-                         data = json.loads(row)
-                         if data.get("negated") is not None:
-                             for masked_sentence, negated in zip(data["masked_sentences"], data["negated"]):
-                                 id_ += 1
-                                 yield id_, {
-                                     "uuid": str(data["uuid"]),
-                                     "sub": str(data.get("sub", "")),
-                                     "obj": str(data.get("obj", "")),
-                                     "pred": str(data["pred"]),
-                                     "obj_label": str(data["obj_label"]),
-                                     "masked_sentence": str(masked_sentence),
-                                     "negated": str(negated),
-                                 }
-                         else:
-                             for masked_sentence in data["masked_sentences"]:
-                                 id_ += 1
-                                 yield id_, {
-                                     "uuid": str(data["uuid"]),
-                                     "sub": str(data.get("sub", "")),
-                                     "obj": str(data.get("obj", "")),
-                                     "pred": str(data["pred"]),
-                                     "obj_label": str(data["obj_label"]),
-                                     "masked_sentence": str(masked_sentence),
-                                     "negated": str(""),
-                                 }
-                     filepaths.remove(path)
-         elif self.config.name == "squad":
-             id_ = -1
-             for path, f in files:
-                 if not filepaths:
-                     break
-                 if path in filepaths:
-                     for row in f:
-                         data = json.loads(row)
-                         for masked_sentence in data["masked_sentences"]:
-                             id_ += 1
-                             yield id_, {
-                                 "id": str(data["id"]),
-                                 "sub_label": str(data["sub_label"]),
-                                 "obj_label": str(data["obj_label"]),
-                                 "negated": str(data.get("negated", "")),
-                                 "masked_sentence": str(masked_sentence),
-                             }
-                     filepaths.remove(path)
-         elif self.config.name == "google_re":
-             id_ = -1
-             for path, f in files:
-                 if not filepaths:
-                     break
-                 if path in filepaths:
-                     # from https://github.com/facebookresearch/LAMA/blob/master/scripts/run_experiments.py
-                     if "place_of_birth" in path:
-                         pred = {
-                             "relation": "place_of_birth",
-                             "template": "[X] was born in [Y] .",
-                             "template_negated": "[X] was not born in [Y] .",
-                         }
-                     elif "date_of_birth" in path:
-                         pred = {
-                             "relation": "date_of_birth",
-                             "template": "[X] (born [Y]).",
-                             "template_negated": "[X] (not born [Y]).",
-                         }
-                     else:
-                         pred = {
-                             "relation": "place_of_death",
-                             "template": "[X] died in [Y] .",
-                             "template_negated": "[X] did not die in [Y] .",
-                         }
-                     for row in f:
-                         data = json.loads(row)
-                         for masked_sentence in data["masked_sentences"]:
-                             id_ += 1
-                             yield id_, {
-                                 "pred": str(data["pred"]),
-                                 "sub": str(data["sub"]),
-                                 "obj": str(data["obj"]),
-                                 "evidences": str(data["evidences"]),
-                                 "judgments": str(data["judgments"]),
-                                 "sub_w": str(data["sub_w"]),
-                                 "sub_label": str(data["sub_label"]),
-                                 "sub_aliases": str(data["sub_aliases"]),
-                                 "obj_w": str(data["obj_w"]),
-                                 "obj_label": str(data["obj_label"]),
-                                 "obj_aliases": str(data["obj_aliases"]),
-                                 "uuid": str(data["uuid"]),
-                                 "masked_sentence": str(masked_sentence),
-                                 "template": str(pred["template"]),
-                                 "template_negated": str(pred["template_negated"]),
-                             }
-                     filepaths.remove(path)
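
For context, a dataset script like this one is consumed through `datasets.load_dataset`. Below is a minimal usage sketch, assuming a `datasets` release contemporary with this commit (around 1.18.1), when `"lama"` still resolved to this canonical loading script:

from datasets import load_dataset

# "trex" is DEFAULT_CONFIG_NAME; "squad", "google_re", and "conceptnet"
# mirror the BUILDER_CONFIGS list above. The script only defines a TRAIN split.
lama = load_dataset("lama", "trex", split="train")

# Every feature is declared as a string in _info(), so fields can be
# printed directly.
example = lama[0]
print(example["masked_sentence"])
print(example["template"])

Note that recent `datasets` versions no longer run standalone loading scripts by default, so this exact call may require an older release or explicit remote-code trust; treat the snippet as illustrative rather than as part of the commit.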