gabrielaltay committed on
Commit: af21eb1
Parent(s): 537a276

upload hub_repos/bioid/bioid.py to hub from bigbio repo

Files changed (1):
  bioid.py +372 -0
bioid.py ADDED
@@ -0,0 +1,372 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
from typing import Dict, Iterator, List, Tuple

import bioc
import datasets
import pandas as pd

from .bigbiohub import BigBioConfig, Tasks, kb_features

_LOCAL = False
_PUBMED = True
_LANGUAGES = ["English"]

_CITATION = """\
@inproceedings{arighi2017bio,
  title={Bio-ID track overview},
  author={Arighi, Cecilia and Hirschman, Lynette and Lemberger, Thomas and Bayer, Samuel and Liechti, Robin and Comeau, Donald and Wu, Cathy},
  booktitle={Proc. BioCreative Workshop},
  volume={482},
  pages={376},
  year={2017}
}
"""

_DATASETNAME = "bioid"
_DISPLAYNAME = "BIOID"

_DESCRIPTION = """\
The Bio-ID track focuses on entity tagging and ID assignment to selected bioentity types.
The task is to annotate text from figure legends with the entity types and IDs for taxon (organism),
gene, protein, miRNA, small molecules, cellular components, cell types and cell lines,
tissues and organs. The track draws on SourceData annotated figure legends (by panel),
in BioC format, and the corresponding full text articles (also BioC format) provided for context.
"""

_HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-1/"

_LICENSE = "UNKNOWN"

_URLS = {
    _DATASETNAME: "https://biocreative.bioinformatics.udel.edu/media/store/files/2017/BioIDtraining_2.tar.gz",
}

_SUPPORTED_TASKS = [
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.NAMED_ENTITY_DISAMBIGUATION,
]

_SOURCE_VERSION = "2.0.0"

_BIGBIO_VERSION = "1.0.0"


class BioidDataset(datasets.GeneratorBasedBuilder):
    """BioID (BioCreative VI Track 1): entity tagging and ID assignment in figure captions."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bioid_source",
            version=SOURCE_VERSION,
            description="bioid source schema",
            schema="source",
            subset_id="bioid",
        ),
        BigBioConfig(
            name="bioid_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bioid BigBio schema",
            schema="bigbio_kb",
            subset_id="bioid",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bioid_source"

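    # Entity-type labels that appear as the prefix of `obj` when an annotation
    # carries no database identifier (see `get_entity` below).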
    ENTITY_TYPES_NOT_NORMALIZED = [
        "cell",
        "gene",
        "molecule",
        "protein",
        "subcellular",
        "tissue",
        "organism",
    ]

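    # Map database-name prefixes found in `obj` to canonical entity types.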
    DB_NAME_TO_ENTITY_TYPE = {
        "BAO": "assay",  # https://www.ebi.ac.uk/ols/ontologies/bao
        "CHEBI": "chemical",
        "CL": "cell",  # https://www.ebi.ac.uk/ols/ontologies/cl
        "Corum": "protein",  # https://mips.helmholtz-muenchen.de/corum/
        "GO": "gene",  # https://geneontology.org/
        "PubChem": "chemical",
        "Rfam": "rna",  # https://rfam.org/
        "Uberon": "anatomy",
        "Cellosaurus": "cell",
        "NCBI gene": "gene",
        "NCBI taxon": "species",
        "Uniprot": "protein",
    }

    def _info(self) -> datasets.DatasetInfo:

        # The source schema keeps all keys/information/labels as close to the
        # original dataset as possible.
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "sourcedata_document": datasets.Value("string"),
                    "doi": datasets.Value("string"),
                    "pmc_id": datasets.Value("string"),
                    "figure": datasets.Value("string"),
                    "sourcedata_figure_dir": datasets.Value("string"),
                    "passages": [
                        {
                            "text": datasets.Value("string"),
                            "offset": datasets.Value("int32"),
                            "annotations": [
                                {
                                    "thomas_article": datasets.Value("string"),
                                    "doi": datasets.Value("string"),
                                    "don_article": datasets.Value("int32"),
                                    "figure": datasets.Value("string"),
                                    "annot id": datasets.Value("int32"),
                                    "paper id": datasets.Value("int32"),
                                    "first left": datasets.Value("int32"),
                                    "last right": datasets.Value("int32"),
                                    "length": datasets.Value("int32"),
                                    "byte length": datasets.Value("int32"),
                                    "left alphanum": datasets.Value("string"),
                                    "text": datasets.Value("string"),
                                    "right alphanum": datasets.Value("string"),
                                    "obj": datasets.Value("string"),
                                    "overlap": datasets.Value("string"),
                                    "identical span": datasets.Value("string"),
                                    "overlap_label_count": datasets.Value("int32"),
                                }
                            ],
                        }
                    ],
                }
            )

        # The harmonized BigBio knowledge-base schema shared by NER/NED datasets.
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        # The BioID training release has no predefined train/val/test splits,
        # so all of the data is exposed as a single TRAIN split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            ),
        ]

    def load_annotations(self, path: str) -> Dict[str, Dict]:
        """
        Load annotations from `annotations.csv`, because the ones in the
        BioC XML files have offset issues.

        Returns a mapping: article id -> figure -> list of CSV records.
        """

        df = pd.read_csv(path, sep=",")

        df.fillna(-1, inplace=True)

        annotations: Dict[str, Dict] = {}

        for record in df.to_dict("records"):

            article_id = str(record["don_article"])

            if article_id not in annotations:
                annotations[article_id] = {}

            figure = record["figure"]

            # NB: check the per-article dict, not the top-level one; otherwise
            # the list is reset on every record and only the last one survives.
            if figure not in annotations[article_id]:
                annotations[article_id][figure] = []

            annotations[article_id][figure].append(record)

        return annotations

    def load_data(self, data_dir: str) -> List[Dict]:
        """
        Compose text from BioC files with annotations from `annotations.csv`.
        We load annotations from `annotations.csv` because the ones in the
        BioC XML files have offset issues.
        """

        text_dir = os.path.join(data_dir, "BioIDtraining_2", "caption_bioc")
        annotation_file = os.path.join(data_dir, "BioIDtraining_2", "annotations.csv")

        annotations = self.load_annotations(path=annotation_file)

        data = []

        for file_name in os.listdir(text_dir):

            if file_name.startswith(".") or not file_name.endswith(".xml"):
                continue

            collection = bioc.load(os.path.join(text_dir, file_name))

            for document in collection.documents:

                item = document.infons

                assert (
                    len(document.passages) == 1
                ), "Document contains more than one passage (figure caption). This is not expected!"

                passage = document.passages[0]

                article_id = document.infons["pmc_id"]
                figure = document.infons["sourcedata_figure_dir"]

                try:
                    passage.annotations = annotations[article_id][figure]
                except KeyError:
                    passage.annotations = []

                item["passages"] = [
                    {
                        "text": passage.text,
                        "annotations": passage.annotations,
                        "offset": passage.offset,
                    }
                ]

                data.append(item)

        return data

    def get_entity(self, normalization: str) -> Tuple[str, List[Dict]]:
        """Compile normalization information from an annotation's `obj` value."""
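        # Examples of `normalization` values and what this method returns:
        #   "NCBI gene:9341"        -> ("gene", [{"db_name": "NCBI gene", "db_id": "9341"}])
        #   "Uberon:UBERON:0001891" -> ("anatomy", [{"db_name": "Uberon", "db_id": "0001891"}])
        #   "protein:<free text>"   -> ("protein", [])  # type hint only, no db id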

        db_name_ids = normalization.split(":")

        db_ids = None

        # IDs from Cellosaurus do not carry a db-name prefix.
        if len(db_name_ids) == 1:
            db_name = "Cellosaurus"
            db_ids = db_name_ids[0].split("|")
        else:
            # quirk in the data
            if db_name_ids[0] == "CVCL_6412|CL":
                db_name = "Cellosaurus"
                db_ids = ["CVCL_6412"]
            else:
                db_name = db_name_ids[0]
                # db_name hints at the entity type: skip if it provides no normalization
                if db_name not in self.ENTITY_TYPES_NOT_NORMALIZED:
                    # e.g. Uberon:UBERON:0001891
                    #      NCBI gene:9341
                    db_id_idx = 2 if db_name == "Uberon" else 1
                    db_ids = [i.split(":")[db_id_idx] for i in normalization.split("|")]

        normalized = (
            [{"db_name": db_name, "db_id": i} for i in db_ids]
            if db_ids is not None
            else []
        )

        # Ideally we would have canonical entity types with a dedicated enum like `Tasks`.
        if db_name in self.ENTITY_TYPES_NOT_NORMALIZED:
            entity_type = db_name
        else:
            entity_type = self.DB_NAME_TO_ENTITY_TYPE[db_name]

        return entity_type, normalized

    def _generate_examples(
        self, data_dir: str, split: str
    ) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""

        data = self.load_data(data_dir=data_dir)

        if self.config.schema == "source":
            for uid, document in enumerate(data):
                yield uid, document

        elif self.config.schema == "bigbio_kb":

            uid = 0  # global unique id

            for document in data:

                kb_document = {
                    "id": uid,
                    "document_id": document["pmc_id"],
                    "passages": [],
                    "entities": [],
                    "relations": [],
                    "events": [],
                    "coreferences": [],
                }

                uid += 1

                for passage in document["passages"]:
                    kb_document["passages"].append(
                        {
                            "id": uid,
                            "type": "figure_caption",
                            "text": [passage["text"]],
                            "offsets": [[0, len(passage["text"])]],
                        }
                    )
                    uid += 1

                    for a in passage["annotations"]:

                        entity_type, normalized = self.get_entity(a["obj"])

                        kb_document["entities"].append(
                            {
                                "id": uid,
                                "text": [a["text"]],
                                "type": entity_type,
                                "offsets": [[a["first left"], a["last right"]]],
                                "normalized": normalized,
                            }
                        )

                        uid += 1

                yield uid, kb_document
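
For reference, a minimal usage sketch. Assumptions not shown in this commit: the script is hosted on the Hub as `bigbio/bioid` alongside its `bigbiohub.py` helper, and recent `datasets` versions may additionally require `trust_remote_code=True` to run the loading script.

import datasets

# Source schema: per-figure records close to the original release.
source = datasets.load_dataset("bigbio/bioid", name="bioid_source", split="train")

# Harmonized BigBio knowledge-base schema with passages/entities.
kb = datasets.load_dataset("bigbio/bioid", name="bioid_bigbio_kb", split="train")

print(kb[0]["entities"][0])  # e.g. {"id": ..., "type": ..., "offsets": ..., "normalized": [...]}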