Dataset: EMBO/SourceData
Languages: English
Commit 4ed364d (1 parent: e3797e4)
Committed by drAbreu and davidkartchner

Add dataloader for full SourceData (including entity links) (#4)


- Add dataloader for full SourceData (including entity links) (0367281d98d9e4c81a2bb98caf2f621fb4f6bbc0)


Co-authored-by: David Kartchner <davidkartchner@users.noreply.huggingface.co>
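
For orientation, here is how the new configuration is meant to be consumed once this commit is on the Hub. This is a minimal sketch, assuming only what the diff below establishes (the dataset id EMBO/SourceData and the new "FULL" config name); everything else is standard `datasets` usage:

    from datasets import load_dataset

    # Load the document-level records added by this commit:
    # papers with figures, panels, and entity-link annotations.
    sd_full = load_dataset("EMBO/SourceData", "FULL")

    print(sd_full)                     # DatasetDict with train/test/validation splits
    print(sd_full["train"][0]["doi"])  # one record per paper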

Files changed (1):
  1. SourceData.py +217 -46
SourceData.py CHANGED
@@ -19,10 +19,12 @@
 from __future__ import absolute_import, division, print_function
 
 import json
+import os
 import datasets
 
 _BASE_URL = "https://huggingface.co/datasets/EMBO/SourceData/resolve/main/"
 
+
 class SourceData(datasets.GeneratorBasedBuilder):
     """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""
 
@@ -45,19 +47,26 @@ class SourceData(datasets.GeneratorBasedBuilder):
         "B-DISEASE",
         "I-DISEASE",
         "B-CELL_LINE",
-        "I-CELL_LINE"
+        "I-CELL_LINE",
+    ]
+    _SEMANTIC_ROLES = [
+        "O",
+        "B-CONTROLLED_VAR",
+        "I-CONTROLLED_VAR",
+        "B-MEASURED_VAR",
+        "I-MEASURED_VAR",
     ]
-    _SEMANTIC_ROLES = ["O", "B-CONTROLLED_VAR", "I-CONTROLLED_VAR", "B-MEASURED_VAR", "I-MEASURED_VAR"]
     _PANEL_START_NAMES = ["O", "B-PANEL_START", "I-PANEL_START"]
     _ROLES_MULTI = ["O", "GENEPROD", "SMALL_MOLECULE"]
 
     _CITATION = """\
-    @Unpublished{
-        huggingface: dataset,
-        title = {SourceData NLP},
-        authors={Thomas Lemberger & Jorge Abreu-Vicente, EMBO},
-        year={2023}
-    }
+    @article{abreu2023sourcedata,
+      title={The SourceData-NLP dataset: integrating curation into scientific publishing
+             for training large language models},
+      author={Abreu-Vicente, Jorge and Sonntag, Hannah and Eidens, Thomas and Lemberger, Thomas},
+      journal={arXiv preprint arXiv:2310.20440},
+      year={2023}
+    }
     """
 
     _DESCRIPTION = """\
@@ -70,32 +79,73 @@ class SourceData(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "NER"
 
-    _LATEST_VERSION = "1.0.0"
+    _LATEST_VERSION = "2.0.3"  # Should this be updated to 2.0.3
 
     def _info(self):
-        VERSION = self.config.version if self.config.version not in ["0.0.0", "latest"] else self._LATEST_VERSION
+        VERSION = (
+            self.config.version
+            if self.config.version not in ["0.0.0", "latest"]
+            else self._LATEST_VERSION
+        )
         self._URLS = {
             "NER": f"{_BASE_URL}token_classification/v_{VERSION}/ner/",
             "PANELIZATION": f"{_BASE_URL}token_classification/v_{VERSION}/panelization/",
             "ROLES_GP": f"{_BASE_URL}token_classification/v_{VERSION}/roles_gene/",
             "ROLES_SM": f"{_BASE_URL}token_classification/v_{VERSION}/roles_small_mol/",
             "ROLES_MULTI": f"{_BASE_URL}token_classification/v_{VERSION}/roles_multi/",
+            "FULL": os.path.join(
+                _BASE_URL,
+                "bigbio",
+                # f"v_{VERSION}",
+            ),
         }
         self.BUILDER_CONFIGS = [
-            datasets.BuilderConfig(name="NER", version=VERSION, description="Dataset for named-entity recognition."),
-            datasets.BuilderConfig(name="PANELIZATION", version=VERSION, description="Dataset to separate figure captions into panels."),
-            datasets.BuilderConfig(name="ROLES_GP", version=VERSION, description="Dataset for semantic roles of gene products."),
-            datasets.BuilderConfig(name="ROLES_SM", version=VERSION, description="Dataset for semantic roles of small molecules."),
-            datasets.BuilderConfig(name="ROLES_MULTI", version=VERSION, description="Dataset to train roles. ROLES_GP and ROLES_SM at once."),
+            datasets.BuilderConfig(
+                name="NER",
+                version=VERSION,
+                description="Dataset for named-entity recognition.",
+            ),
+            datasets.BuilderConfig(
+                name="PANELIZATION",
+                version=VERSION,
+                description="Dataset to separate figure captions into panels.",
+            ),
+            datasets.BuilderConfig(
+                name="ROLES_GP",
+                version=VERSION,
+                description="Dataset for semantic roles of gene products.",
+            ),
+            datasets.BuilderConfig(
+                name="ROLES_SM",
+                version=VERSION,
+                description="Dataset for semantic roles of small molecules.",
+            ),
+            datasets.BuilderConfig(
+                name="ROLES_MULTI",
+                version=VERSION,
+                description="Dataset to train roles. ROLES_GP and ROLES_SM at once.",
+            ),
+            datasets.BuilderConfig(
+                name="FULL",
+                version=VERSION,
+                description="Full dataset including all NER + entity linking annotations, links to figure images, etc.",
+            ),
+            # datasets.BuilderConfig(
+            #     name="BIGBIO_KB",
+            #     version=VERSION,
+            #     description="Full dataset formatted according to BigBio KB schema (see https://huggingface.co/bigbio). Includes all NER + entity linking annotations.",
+            # ),
         ]
-
+
         if self.config.name in ["NER", "default"]:
             features = datasets.Features(
                 {
                     "words": datasets.Sequence(feature=datasets.Value("string")),
                     "labels": datasets.Sequence(
-                        feature=datasets.ClassLabel(num_classes=len(self._NER_LABEL_NAMES),
-                                                    names=self._NER_LABEL_NAMES)
+                        feature=datasets.ClassLabel(
+                            num_classes=len(self._NER_LABEL_NAMES),
+                            names=self._NER_LABEL_NAMES,
+                        )
                     ),
                     # "is_category": datasets.Sequence(feature=datasets.Value("int8")),
                     "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
@@ -109,7 +159,7 @@ class SourceData(datasets.GeneratorBasedBuilder):
                     "labels": datasets.Sequence(
                         feature=datasets.ClassLabel(
                             num_classes=len(self._SEMANTIC_ROLES),
-                            names=self._SEMANTIC_ROLES
+                            names=self._SEMANTIC_ROLES,
                         )
                     ),
                     # "is_category": datasets.Sequence(feature=datasets.Value("int8")),
@@ -124,7 +174,7 @@ class SourceData(datasets.GeneratorBasedBuilder):
                     "labels": datasets.Sequence(
                         feature=datasets.ClassLabel(
                             num_classes=len(self._SEMANTIC_ROLES),
-                            names=self._SEMANTIC_ROLES
+                            names=self._SEMANTIC_ROLES,
                         )
                     ),
                     # "is_category": datasets.Sequence(feature=datasets.Value("int8")),
@@ -139,13 +189,12 @@ class SourceData(datasets.GeneratorBasedBuilder):
                     "labels": datasets.Sequence(
                         feature=datasets.ClassLabel(
                             num_classes=len(self._SEMANTIC_ROLES),
-                            names=self._SEMANTIC_ROLES
+                            names=self._SEMANTIC_ROLES,
                         )
                     ),
                     "is_category": datasets.Sequence(
                         feature=datasets.ClassLabel(
-                            num_classes=len(self._ROLES_MULTI),
-                            names=self._ROLES_MULTI
+                            num_classes=len(self._ROLES_MULTI), names=self._ROLES_MULTI
                         )
                     ),
                     "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
@@ -157,13 +206,57 @@ class SourceData(datasets.GeneratorBasedBuilder):
                 {
                     "words": datasets.Sequence(feature=datasets.Value("string")),
                     "labels": datasets.Sequence(
-                        feature=datasets.ClassLabel(num_classes=len(self._PANEL_START_NAMES),
-                                                    names=self._PANEL_START_NAMES)
+                        feature=datasets.ClassLabel(
+                            num_classes=len(self._PANEL_START_NAMES),
+                            names=self._PANEL_START_NAMES,
+                        )
                     ),
                     "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                 }
             )
 
+        elif self.config.name == "FULL":
+            features = datasets.Features(
+                {
+                    "doi": datasets.Value("string"),
+                    "abstract": datasets.Value("string"),
+                    # "split": datasets.Value("string"),
+                    "figures": [
+                        {
+                            "fig_id": datasets.Value("string"),
+                            "label": datasets.Value("string"),
+                            "fig_graphic_url": datasets.Value("string"),
+                            "panels": [
+                                {
+                                    "panel_id": datasets.Value("string"),
+                                    "text": datasets.Value("string"),
+                                    "panel_graphic_url": datasets.Value("string"),
+                                    "entities": [
+                                        {
+                                            "annotation_id": datasets.Value("string"),
+                                            "source": datasets.Value("string"),
+                                            "category": datasets.Value("string"),
+                                            "entity_type": datasets.Value("string"),
+                                            "role": datasets.Value("string"),
+                                            "text": datasets.Value("string"),
+                                            "ext_ids": datasets.Value("string"),
+                                            "norm_text": datasets.Value("string"),
+                                            "ext_dbs": datasets.Value("string"),
+                                            "in_caption": datasets.Value("bool"),
+                                            "ext_names": datasets.Value("string"),
+                                            "ext_tax_ids": datasets.Value("string"),
+                                            "ext_tax_names": datasets.Value("string"),
+                                            "ext_urls": datasets.Value("string"),
+                                            "offsets": [datasets.Value("int64")],
+                                        }
+                                    ],
+                                }
+                            ],
+                        }
+                    ],
+                }
+            )
+
         return datasets.DatasetInfo(
             description=self._DESCRIPTION,
             features=features,
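
To make the nested FULL schema concrete, one record has the following shape. The field names come from the features above; every value here is invented for illustration:

    example = {
        "doi": "10.1234/hypothetical.2023",
        "abstract": "Text of the abstract ...",
        "figures": [
            {
                "fig_id": "fig_1",
                "label": "Figure 1",
                "fig_graphic_url": "https://example.org/fig1.jpg",
                "panels": [
                    {
                        "panel_id": "panel_1A",
                        "text": "(A) GFP fluorescence in HeLa cells ...",
                        "panel_graphic_url": "https://example.org/fig1-A.jpg",
                        "entities": [
                            {
                                "annotation_id": "tag_42",
                                "category": "gene_product",
                                "entity_type": "geneprod",
                                "role": "measured_var",
                                "text": "GFP",
                                "offsets": [4, 7],  # character span in the panel text
                                # ... remaining string fields as defined above
                            }
                        ],
                    }
                ],
            }
        ],
    }

Note that the nested levels are declared with plain Python lists rather than `datasets.Sequence`; a `Sequence` of dicts would be transposed into a dict of lists, whereas the list syntax preserves the list-of-dicts layout sketched here.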
@@ -172,38 +265,49 @@ class SourceData(datasets.GeneratorBasedBuilder):
             license=self._LICENSE,
             citation=self._CITATION,
         )
-
+
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         """Returns SplitGenerators.
-        Uses local files if a data_dir is specified. Otherwise downloads the files from their official url."""
+        Uses local files if a data_dir is specified. Otherwise downloads the files from their official url.
+        """
 
         try:
             config_name = self.config.name if self.config.name != "default" else "NER"
-            urls = [
-                self._URLS[config_name]+"train.jsonl",
-                self._URLS[config_name]+"test.jsonl",
-                self._URLS[config_name]+"validation.jsonl"
-            ]
-            data_files = dl_manager.download(urls)
+
+            if config_name == "FULL":
+                url = os.path.join(
+                    self._URLS[config_name],
+                    # "source_data_full.zip"
+                    "source_data_json_splits_2.0.2.zip",
+                )
+                data_dir = dl_manager.download_and_extract(url)
+                data_files = [
+                    os.path.join(data_dir, filename)
+                    for filename in ["train.jsonl", "test.jsonl", "validation.jsonl"]
+                ]
+            else:
+                urls = [
+                    os.path.join(self._URLS[config_name], "train.jsonl"),
+                    os.path.join(self._URLS[config_name], "test.jsonl"),
+                    os.path.join(self._URLS[config_name], "validation.jsonl"),
+                ]
+                data_files = dl_manager.download(urls)
         except:
             raise ValueError(f"unkonwn config name: {self.config.name}")
-
+
         return [
            datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_files[0]},
+                gen_kwargs={"filepath": data_files[0]},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": data_files[1]},
+                gen_kwargs={"filepath": data_files[1]},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": data_files[2]},
+                gen_kwargs={"filepath": data_files[2]},
             ),
         ]
 
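One observation on the download logic: URL components are now assembled with `os.path.join`, which happens to work because the base URL ends in "/" and builds run with POSIX-style separators. A quick sketch of the behavior being relied on:

    import os

    _BASE_URL = "https://huggingface.co/datasets/EMBO/SourceData/resolve/main/"

    full_root = os.path.join(_BASE_URL, "bigbio")
    # POSIX: https://huggingface.co/datasets/EMBO/SourceData/resolve/main/bigbio

    # Caveat: full_root no longer ends in "/", so a second join inserts the
    # OS separator -- on Windows that would yield "...bigbio\source_data...".
    # Plain string concatenation (as the old code did) or
    # urllib.parse.urljoin is safer for URLs.
    print(os.path.join(full_root, "source_data_json_splits_2.0.2.zip"))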
@@ -212,40 +316,45 @@ class SourceData(datasets.GeneratorBasedBuilder):
         It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         The key is not important, it's more here for legacy reason (legacy from tfds)"""
 
+        no_panels = 0
+        no_entities = 0
+        has_panels = 0
+        has_entities = 0
+
         with open(filepath, encoding="utf-8") as f:
             # logger.info("⏳ Generating examples from = %s", filepath)
             for id_, row in enumerate(f):
-                data = json.loads(row)
+                data = json.loads(row.strip())
                 if self.config.name in ["NER", "default"]:
                     yield id_, {
                         "words": data["words"],
                         "labels": data["labels"],
                         "tag_mask": data["is_category"],
-                        "text": data["text"]
+                        "text": data["text"],
                     }
                 elif self.config.name == "ROLES_GP":
                     yield id_, {
                         "words": data["words"],
                         "labels": data["labels"],
                         "tag_mask": data["is_category"],
-                        "text": data["text"]
+                        "text": data["text"],
                     }
                 elif self.config.name == "ROLES_MULTI":
                     labels = data["labels"]
-                    tag_mask = [1 if t!=0 else 0 for t in labels]
+                    tag_mask = [1 if t != 0 else 0 for t in labels]
                     yield id_, {
                         "words": data["words"],
                         "labels": data["labels"],
                         "tag_mask": tag_mask,
                         "is_category": data["is_category"],
-                        "text": data["text"]
+                        "text": data["text"],
                     }
                 elif self.config.name == "ROLES_SM":
                     yield id_, {
                         "words": data["words"],
                         "labels": data["labels"],
                         "tag_mask": data["is_category"],
-                        "text": data["text"]
+                        "text": data["text"],
                     }
                 elif self.config.name == "PANELIZATION":
                     labels = data["labels"]
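
In the ROLES_MULTI branch above, `tag_mask` flags every token whose label is not "O" (label id 0 in both `_SEMANTIC_ROLES` and `_ROLES_MULTI`). A tiny worked example of the expression used in the loader:

    # Integer label ids as stored in the JSONL rows; 0 means "O".
    labels = [0, 1, 2, 0, 3, 0]

    # Same list comprehension as in _generate_examples.
    tag_mask = [1 if t != 0 else 0 for t in labels]
    # -> [0, 1, 1, 0, 1, 0]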
@@ -256,4 +365,66 @@ class SourceData(datasets.GeneratorBasedBuilder):
                         "tag_mask": tag_mask,
                     }
 
+                elif self.config.name == "FULL":
+                    doc_figs = data["figures"]
+                    all_figures = []
+                    for fig in doc_figs:
+                        all_panels = []
+                        figure = {
+                            "fig_id": fig["fig_id"],
+                            "label": fig["label"],
+                            "fig_graphic_url": fig["fig_graphic_url"],
+                        }
+
+                        for p in fig["panels"]:
+                            panel = {
+                                "panel_id": p["panel_id"],
+                                "text": p["text"].strip(),
+                                "panel_graphic_url": p["panel_graphic_url"],
+                                "entities": [
+                                    {
+                                        "annotation_id": t["tag_id"],
+                                        "source": t["source"],
+                                        "category": t["category"],
+                                        "entity_type": t["entity_type"],
+                                        "role": t["role"],
+                                        "text": t["text"],
+                                        "ext_ids": t["ext_ids"],
+                                        "norm_text": t["norm_text"],
+                                        "ext_dbs": t["ext_dbs"],
+                                        "in_caption": bool(t["in_caption"]),
+                                        "ext_names": t["ext_names"],
+                                        "ext_tax_ids": t["ext_tax_ids"],
+                                        "ext_tax_names": t["ext_tax_names"],
+                                        "ext_urls": t["ext_urls"],
+                                        "offsets": t["local_offsets"],
+                                    }
+                                    for t in p["tags"]
+                                ],
+                            }
+                            for e in panel["entities"]:
+                                assert type(e["offsets"]) == list
+                            if len(panel["entities"]) == 0:
+                                no_entities += 1
+                                continue
+                            else:
+                                has_entities += 1
+                            all_panels.append(panel)
+
+                        figure["panels"] = all_panels
+
+                        # Pass on all figures that aren't split into panels
+                        if len(all_panels) == 0:
+                            no_panels += 1
+                            continue
+                        else:
+                            has_panels += 1
+                        all_figures.append(figure)
+
+                    output = {
+                        "doi": data["doi"],
+                        "abstract": data["abstract"],
+                        "figures": all_figures,
+                    }
+                    yield id_, output
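
Finally, a consumption sketch for the FULL config that mirrors the document -> figure -> panel -> entity nesting yielded above. The config and field names come from this diff; the rest is ordinary iteration (and, as in the loader, `offsets` is a list of integers local to the panel text):

    from datasets import load_dataset

    sd_full = load_dataset("EMBO/SourceData", "FULL")

    for doc in sd_full["train"].select(range(3)):
        print(doc["doi"])
        for fig in doc["figures"]:
            for panel in fig["panels"]:
                for ent in panel["entities"]:
                    print("  ", ent["entity_type"], ent["role"],
                          repr(ent["text"]), ent["offsets"])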