Datasets:

Languages:
English
ArXiv:
License:
gabrielaltay committed on
Commit
276d3b7
1 Parent(s): 174bebf

upload biored/biored.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. biored.py +78 -47
biored.py CHANGED
@@ -21,6 +21,7 @@ on a set of 600 PubMed articles
21
 
22
  import itertools
23
  import os
 
24
  from typing import Dict, List, Tuple
25
 
26
  import datasets
@@ -30,7 +31,7 @@ from .bigbiohub import kb_features
30
  from .bigbiohub import BigBioConfig
31
  from .bigbiohub import Tasks
32
 
33
- _LANGUAGES = ['English']
34
  _PUBMED = True
35
  _LOCAL = False
36
  _CITATION = """\
@@ -65,13 +66,13 @@ on a set of 600 PubMed articles
65
 
66
  _HOMEPAGE = "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/"
67
 
68
- _LICENSE = 'License information unavailable'
69
 
70
  _URLS = {
71
  _DATASETNAME: "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/BIORED.zip",
72
  }
73
 
74
- _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
75
 
76
  _SOURCE_VERSION = "1.0.0"
77
 
@@ -81,7 +82,8 @@ logger = datasets.utils.logging.get_logger(__name__)
81
 
82
 
83
  class BioredDataset(datasets.GeneratorBasedBuilder):
84
- """Relation Extraction corpus with multiple entity types (e.g., gene/protein, disease, chemical) and relation pairs (e.g., gene-disease; chemical-chemical), on a set of 600 PubMed articles"""
 
85
 
86
  # For bigbio_kb, this dataset uses a naming convention as
87
  # uid_[title/abstract/relation/entity_id]_[entity/relation_uid]
@@ -108,6 +110,15 @@ class BioredDataset(datasets.GeneratorBasedBuilder):
108
 
109
  DEFAULT_CONFIG_NAME = _DATASETNAME + "_source"
110
 
 
 
 
 
 
 
 
 
 
111
  def _info(self) -> datasets.DatasetInfo:
112
 
113
  if self.config.schema == "source":
@@ -127,9 +138,7 @@ class BioredDataset(datasets.GeneratorBasedBuilder):
127
  "text": datasets.Sequence(datasets.Value("string")),
128
  "offsets": datasets.Sequence([datasets.Value("int32")]),
129
  "concept_id": datasets.Value("string"),
130
- "semantic_type_id": datasets.Sequence(
131
- datasets.Value("string")
132
- ),
133
  }
134
  ],
135
  "relations": [
@@ -198,51 +207,77 @@ class BioredDataset(datasets.GeneratorBasedBuilder):
198
  with open(filepath, "r", encoding="utf8") as fstream:
199
  uid = itertools.count(0)
200
  for raw_document in self.generate_raw_docs(fstream):
201
- entities_in_doc = dict()
202
  document = self.parse_raw_doc(raw_document)
203
- pmid = document.pop("pmid")
204
  document["id"] = str(next(uid))
205
  document["document_id"] = pmid
206
- entities_ = []
207
- relations_ = []
208
- for entity in document["entities"]:
209
- temp_id = document["id"] + "_" + str(entity["concept_id"])
210
- curr_entity_count = entities_in_doc.get(temp_id, 0)
211
- entities_.append(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  {
213
- "id": temp_id + "_" + str(curr_entity_count),
214
- "type": entity["semantic_type_id"],
215
  "text": entity["text"],
216
- "normalized": [],
217
  "offsets": entity["offsets"],
218
  }
219
  )
220
- entities_in_doc[temp_id] = curr_entity_count + 1
 
 
 
 
 
221
  rel_uid = itertools.count(0)
222
  for relation in document["relations"]:
223
- relations_.append(
224
- {
225
- "id": document["id"]
226
- + "_relation_"
227
- + str(next(rel_uid)),
228
- "type": relation["type"],
229
- "arg1_id": document["id"]
230
- + "_"
231
- + str(relation["concept_1"])
232
- + "_0",
233
- "arg2_id": document["id"]
234
- + "_"
235
- + str(relation["concept_2"])
236
- + "_0",
237
- "normalized": [],
238
- }
239
- )
240
  for passage in document["passages"]:
241
  passage["id"] = document["id"] + "_" + passage["type"]
242
- document["entities"] = entities_
243
- document["relations"] = relations_
 
244
  document["events"] = []
245
  document["coreferences"] = []
 
246
  yield document["document_id"], document
247
 
248
  def generate_raw_docs(self, fstream):
@@ -301,20 +336,16 @@ class BioredDataset(datasets.GeneratorBasedBuilder):
301
  # Entities handled here
302
  start_idx = _type_ind
303
  end_idx, mention, semantic_type_id, entity_ids = rest
304
- entity = [
305
  {
306
  "offsets": [[int(start_idx), int(end_idx)]],
307
  "text": [mention],
308
- "semantic_type_id": semantic_type_id.split(","),
309
- "concept_id": entity_id,
310
  }
311
- for entity_id in entity_ids.split(",")
312
- ]
313
- entities.extend(entity)
314
- else:
315
- logger.warn(
316
- f"Skipping annotation in Document ID: {_pmid}. Unexpected format"
317
  )
 
 
318
  return {
319
  "pmid": pmid,
320
  "passages": passages,
 
21
 
22
  import itertools
23
  import os
24
+ from collections import defaultdict
25
  from typing import Dict, List, Tuple
26
 
27
  import datasets
 
31
  from .bigbiohub import BigBioConfig
32
  from .bigbiohub import Tasks
33
 
34
+ _LANGUAGES = ["English"]
35
  _PUBMED = True
36
  _LOCAL = False
37
  _CITATION = """\
 
66
 
67
  _HOMEPAGE = "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/"
68
 
69
+ _LICENSE = "License information unavailable"
70
 
71
  _URLS = {
72
  _DATASETNAME: "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/BIORED.zip",
73
  }
74
 
75
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION, Tasks.RELATION_EXTRACTION]
76
 
77
  _SOURCE_VERSION = "1.0.0"
78
 
 
82
 
83
 
84
  class BioredDataset(datasets.GeneratorBasedBuilder):
85
+ """Relation Extraction corpus with multiple entity types (e.g., gene/protein, disease, chemical) and
86
+ relation pairs (e.g., gene-disease; chemical-chemical), on a set of 600 PubMed articles"""
87
 
88
  # For bigbio_kb, this dataset uses a naming convention as
89
  # uid_[title/abstract/relation/entity_id]_[entity/relation_uid]
 
110
 
111
  DEFAULT_CONFIG_NAME = _DATASETNAME + "_source"
112
 
113
+ TYPE_TO_DATABASE = {
114
+ "CellLine": "Cellosaurus",
115
+ "ChemicalEntity": "MESH",
116
+ "DiseaseOrPhenotypicFeature": "MESH", # Some diseases are normalized to OMIM (~ handled by special rules)
117
+ "GeneOrGeneProduct": "NCBIGene",
118
+ "OrganismTaxon": "NCBITaxon",
119
+ "SequenceVariant": "dbSNP", # Not all variants are normalized to dbSNP (~ handled by special rules)
120
+ }
121
+
122
  def _info(self) -> datasets.DatasetInfo:
123
 
124
  if self.config.schema == "source":
 
138
  "text": datasets.Sequence(datasets.Value("string")),
139
  "offsets": datasets.Sequence([datasets.Value("int32")]),
140
  "concept_id": datasets.Value("string"),
141
+ "semantic_type_id": datasets.Value("string"),
 
 
142
  }
143
  ],
144
  "relations": [
 
207
  with open(filepath, "r", encoding="utf8") as fstream:
208
  uid = itertools.count(0)
209
  for raw_document in self.generate_raw_docs(fstream):
 
210
  document = self.parse_raw_doc(raw_document)
211
+ pmid = str(document.pop("pmid"))
212
  document["id"] = str(next(uid))
213
  document["document_id"] = pmid
214
+
215
+ # Parse entities
216
+ entities = []
217
+ entity_id_to_mentions = defaultdict(list) # Maps database ids to mention ids
218
+ for i, entity in enumerate(document["entities"]):
219
+ internal_id = pmid + "_" + str(i)
220
+
221
+ # Some entities are normalized to multiple database ids, therefore we
222
+ # may have multiple identifiers per mention
223
+ normalized_entity_ids = []
224
+ for database_id in entity["concept_id"].split(","):
225
+ database_id = database_id.strip()
226
+ entity_type = entity["semantic_type_id"]
227
+
228
+ # First check special db_name and database id assignment rules
229
+ if entity_type == "DiseaseOrPhenotypicFeature" and database_id.lower().startswith("omim"):
230
+ db_name = "OMIM"
231
+ database_id = database_id.split(":")[-1]
232
+ elif entity_type == "SequenceVariant" and not database_id.startswith("rs"):
233
+ db_name = "custom"
234
+
235
+ # If no special rule applies -> just take the default db_name for the entity type
236
+ else:
237
+ db_name = self.TYPE_TO_DATABASE[entity_type]
238
+
239
+ normalized_entity_ids.append({"db_name": db_name, "db_id": database_id})
240
+ entity_id_to_mentions[database_id].append(internal_id)
241
+
242
+ entities.append(
243
  {
244
+ "id": internal_id,
245
+ "type": entity_type,
246
  "text": entity["text"],
247
+ "normalized": normalized_entity_ids,
248
  "offsets": entity["offsets"],
249
  }
250
  )
251
+
252
+ # BioRed provides abstract-level annotations for entity-linked relation pairs rather than
253
+ # materializing links between all surface form mentions of relation. For example document 11009181
254
+ # in train has (Positive_Correlation, D007980, D004409). Analogous to BC5CDR we enumerate all
255
+ # mention pairs concerning the entities in the triple.
256
+ relations = []
257
  rel_uid = itertools.count(0)
258
  for relation in document["relations"]:
259
+ head_mentions = entity_id_to_mentions[str(relation["concept_1"])]
260
+ tail_mentions = entity_id_to_mentions[str(relation["concept_2"])]
261
+
262
+ for head, tail in itertools.product(head_mentions, tail_mentions):
263
+ relations.append(
264
+ {
265
+ "id": document["id"] + "_relation_" + str(next(rel_uid)),
266
+ "type": relation["type"],
267
+ "arg1_id": head,
268
+ "arg2_id": tail,
269
+ "normalized": [],
270
+ }
271
+ )
272
+
 
 
 
273
  for passage in document["passages"]:
274
  passage["id"] = document["id"] + "_" + passage["type"]
275
+
276
+ document["entities"] = entities
277
+ document["relations"] = relations
278
  document["events"] = []
279
  document["coreferences"] = []
280
+
281
  yield document["document_id"], document
282
 
283
  def generate_raw_docs(self, fstream):
 
336
  # Entities handled here
337
  start_idx = _type_ind
338
  end_idx, mention, semantic_type_id, entity_ids = rest
339
+ entities.append(
340
  {
341
  "offsets": [[int(start_idx), int(end_idx)]],
342
  "text": [mention],
343
+ "semantic_type_id": semantic_type_id,
344
+ "concept_id": entity_ids,
345
  }
 
 
 
 
 
 
346
  )
347
+ else:
348
+ logger.warn(f"Skipping annotation in Document ID: {_pmid}. Unexpected format")
349
  return {
350
  "pmid": pmid,
351
  "passages": passages,