Datasets: osiris

Languages: English
Multilinguality: monolingual
License: cc-by-3.0 (Creative Commons Attribution 3.0 Unported)
gabrielaltay committed · commit a0626f1 · 1 parent: 9751996

upload hubscripts/osiris_hub.py to hub from bigbio repo

Files changed (1): osiris.py (+328, -0)
osiris.py ADDED
@@ -0,0 +1,328 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import itertools
import os
import uuid
import xml.etree.ElementTree as ET
from typing import List

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@ARTICLE{Furlong2008,
  author = {Laura I Furlong and Holger Dach and Martin Hofmann-Apitius and Ferran Sanz},
  title = {OSIRISv1.2: a named entity recognition system for sequence variants
    of genes in biomedical literature.},
  journal = {BMC Bioinformatics},
  year = {2008},
  volume = {9},
  pages = {84},
  doi = {10.1186/1471-2105-9-84},
  pii = {1471-2105-9-84},
  pmid = {18251998},
  timestamp = {2013.01.15},
  url = {http://dx.doi.org/10.1186/1471-2105-9-84}
}
"""

_DATASETNAME = "osiris"
_DISPLAYNAME = "OSIRIS"

_DESCRIPTION = """\
The OSIRIS corpus is a set of MEDLINE abstracts manually annotated
with human variation mentions. The corpus is distributed under the terms
of the Creative Commons Attribution 3.0 Unported License,
which permits unrestricted use, distribution, and reproduction in any medium,
provided the original work is properly cited (Furlong et al., BMC Bioinformatics 2008, 9:84).
"""

_HOMEPAGE = "https://sites.google.com/site/laurafurlongweb/databases-and-tools/corpora/"

_LICENSE = 'Creative Commons Attribution 3.0 Unported'

_URLS = {
    _DATASETNAME: [
        "https://github.com/rockt/SETH/blob/master/resources/OSIRIS/corpus.xml?raw=true"
    ]
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]

_SOURCE_VERSION = "1.2.0"

_BIGBIO_VERSION = "1.0.0"


class Osiris(datasets.GeneratorBasedBuilder):
    """
    The OSIRIS corpus is a set of MEDLINE abstracts manually annotated
    with human variation mentions.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    # You will be able to load the "source" or "bigbio" configurations with
    # ds_source = datasets.load_dataset('my_dataset', name='source')
    # ds_bigbio = datasets.load_dataset('my_dataset', name='bigbio')

    # For local datasets you can make use of the `data_dir` and `data_files` kwargs
    # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
    # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
    # ds_bigbio = datasets.load_dataset('my_dataset', name='bigbio', data_dir="/path/to/data/files")

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="osiris_source",
            version=SOURCE_VERSION,
            description="osiris source schema",
            schema="source",
            subset_id="osiris",
        ),
        BigBioConfig(
            name="osiris_bigbio_kb",
            version=BIGBIO_VERSION,
            description="osiris BigBio schema",
            schema="bigbio_kb",
            subset_id="osiris",
        ),
    ]

    DEFAULT_CONFIG_NAME = "osiris_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":

            features = datasets.Features(
                {
                    "Pmid": datasets.Value("string"),
                    # Title and Abstract mirror what _generate_examples yields:
                    # the passage text plus its [start, end] character offsets
                    "Title": {
                        "offsets": [[datasets.Value("int32")]],
                        "text": datasets.Value("string"),
                    },
                    "Abstract": {
                        "offsets": [[datasets.Value("int32")]],
                        "text": datasets.Value("string"),
                    },
                    "genes": [
                        {
                            "g_id": datasets.Value("string"),
                            "g_lex": datasets.Value("string"),
                            "offsets": [[datasets.Value("int32")]],
                        }
                    ],
                    "variants": [
                        {
                            "v_id": datasets.Value("string"),
                            "v_lex": datasets.Value("string"),
                            "v_norm": datasets.Value("string"),
                            "offsets": [[datasets.Value("int32")]],
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:

        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[0],
                    "split": "data",
                },
            )
        ]

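    # For illustration (hypothetical cache path): download() was given a list,
    # so it returns a list of local paths, e.g.
    # ["~/.cache/huggingface/datasets/downloads/ab12cd..."]; data_dir[0] is
    # therefore the local copy of corpus.xml.
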
    def _get_offsets(self, parent: ET.Element, child: ET.Element) -> List[int]:
        """
        Retrieves [start, end] character offsets for child's text within the
        space-joined text of parent's non-Pmid children, i.e. relative to the
        "<Title> <Abstract>" string that the passages are built from.
        """
        parent_text = " ".join(
            [
                " ".join([t for t in c.itertext()])
                for c in list(parent)
                if c.tag != "Pmid"
            ]
        )
        child_text = " ".join([t for t in child.itertext()])
        start = parent_text.index(child_text)
        end = start + len(child_text)
        return [start, end]

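    # Worked example (hypothetical record, not from the corpus): for
    # <Article><Pmid>1</Pmid><Title>BRCA1 in cancer</Title>
    # <Abstract>The gene ...</Abstract></Article>, parent_text is
    # "BRCA1 in cancer The gene ..." and _get_offsets(article, title)
    # returns [0, 15]; the abstract's offsets then start at 16, one past
    # the joining space.
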
    def _get_dict(self, elem: ET.Element) -> dict:
        """
        Retrieves dict from XML element.
        """
        elem_d = dict()
        for child in elem:
            elem_d[child.tag] = {}
            elem_d[child.tag]["text"] = " ".join([t for t in child.itertext()])

            if child.tag != "Pmid":
                elem_d[child.tag]["offsets"] = self._get_offsets(elem, child)

            for c in child:
                # setdefault keeps annotations collected from an earlier
                # sibling (e.g. genes found in the Title) when the same tag
                # also appears under the Abstract
                elem_d.setdefault(c.tag, [])

            for c in child:
                # c.attrib is mutated in place, so c_dict and c.attrib are
                # the same dict
                c_dict = c.attrib
                c_dict["offsets"] = self._get_offsets(elem, c)
                elem_d[c.tag].append(c_dict)

        return elem_d

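    # For illustration (hypothetical values): _get_dict returns a mapping like
    # {
    #     "Pmid": {"text": "18251998"},
    #     "Title": {"text": "...", "offsets": [0, 15]},
    #     "Abstract": {"text": "...", "offsets": [16, 120]},
    #     "gene": [{"g_id": "672", "g_lex": "BRCA1", "offsets": [0, 5]}],
    #     "variant": [{"v_id": "rs28897696", "v_lex": "A1708E", "v_norm": "...",
    #                  "offsets": [30, 36]}],
    # }
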
    def _handle_missing_variants(self, row: dict) -> dict:
        """
        If no variant is present in the row, this function adds one placeholder
        variant with no data (to make looping through items possible) and
        returns the new row. These placeholder variants are filtered out again
        when building bigbio_kb entities. Otherwise the row is returned
        unchanged.
        """

        if "variant" not in row:
            row["variant"] = [
                {"v_id": "", "v_lex": "", "v_norm": "", "offsets": [0, 0]}
            ]
        return row

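    # For illustration: a row that arrives without any "variant" key leaves
    # this method as row["variant"] == [{"v_id": "", "v_lex": "", "v_norm": "",
    # "offsets": [0, 0]}]; _get_entities later drops this placeholder via its
    # v_id != "" guard, while the source schema yields it as-is.
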
    def _get_entities(self, row: dict) -> List[dict]:
        """
        Retrieves two lists of dicts for genes and variants.
        After that, chains both together.
        """
        genes = [
            {
                "id": str(uuid.uuid4()),
                "offsets": [gene["offsets"]],
                "text": [gene["g_lex"]],
                "type": "gene",
                "normalized": [{"db_name": "NCBI Gene", "db_id": gene["g_id"]}],
            }
            for gene in row["gene"]
        ]

        variants = [
            {
                "id": str(uuid.uuid4()),
                "offsets": [variant["offsets"]],
                "text": [variant["v_lex"]],
                "type": "variant",
                "normalized": [
                    {
                        # variants without a dbSNP id (v_id == "No") fall back
                        # to their HGVS-like normalized form
                        "db_name": "HGVS-like" if variant["v_id"] == "No" else "dbSNP",
                        "db_id": variant["v_norm"]
                        if variant["v_id"] == "No"
                        else variant["v_id"],
                    }
                ],
            }
            for variant in row["variant"]
            # skip the placeholder rows added by _handle_missing_variants
            if variant["v_id"] != ""
        ]
        return list(itertools.chain(genes, variants))

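    # For illustration (hypothetical ids): one entity produced above looks like
    # {
    #     "id": "6f1d0c9a-...",
    #     "offsets": [[30, 36]],
    #     "text": ["A1708E"],
    #     "type": "variant",
    #     "normalized": [{"db_name": "dbSNP", "db_id": "rs28897696"}],
    # }
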
    def _generate_examples(self, filepath, split):

        root = ET.parse(filepath).getroot()
        uid = 0
        if self.config.schema == "source":
            for elem in list(root):
                row = self._get_dict(elem)

                # handling missing variants data
                row = self._handle_missing_variants(row)
                uid += 1
                yield uid, {
                    "Pmid": row["Pmid"]["text"],
                    "Title": {
                        "offsets": [row["Title"]["offsets"]],
                        "text": row["Title"]["text"],
                    },
                    "Abstract": {
                        "offsets": [row["Abstract"]["offsets"]],
                        "text": row["Abstract"]["text"],
                    },
                    "genes": [
                        {
                            "g_id": gene["g_id"],
                            "g_lex": gene["g_lex"],
                            "offsets": [gene["offsets"]],
                        }
                        for gene in row["gene"]
                    ],
                    "variants": [
                        {
                            "v_id": variant["v_id"],
                            "v_lex": variant["v_lex"],
                            "v_norm": variant["v_norm"],
                            "offsets": [variant["offsets"]],
                        }
                        for variant in row["variant"]
                    ],
                }

299
+ elif self.config.schema == "bigbio_kb":
300
+
301
+ for elem in list(root):
302
+ row = self._get_dict(elem)
303
+
304
+ # handling missing variants data
305
+ row = self._handle_missing_variants(row)
306
+ uid += 1
307
+ yield uid, {
308
+ "id": str(uid),
309
+ "document_id": row["Pmid"]["text"],
310
+ "passages": [
311
+ {
312
+ "id": str(uuid.uuid4()),
313
+ "type": "title",
314
+ "text": [row["Title"]["text"]],
315
+ "offsets": [row["Title"]["offsets"]],
316
+ },
317
+ {
318
+ "id": str(uuid.uuid4()),
319
+ "type": "abstract",
320
+ "text": [row["Abstract"]["text"]],
321
+ "offsets": [row["Abstract"]["offsets"]],
322
+ },
323
+ ],
324
+ "entities": self._get_entities(row),
325
+ "relations": [],
326
+ "events": [],
327
+ "coreferences": [],
328
+ }
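
A quick way to sanity-check the uploaded script (a minimal sketch: the repo id
bigbio/osiris is assumed from the BigBio naming convention, and the config
names come from BUILDER_CONFIGS above):

import datasets

# source schema: one record per abstract with the raw gene/variant fields
ds_source = datasets.load_dataset("bigbio/osiris", name="osiris_source")

# bigbio_kb schema: harmonized passages/entities representation
ds_bigbio = datasets.load_dataset("bigbio/osiris", name="osiris_bigbio_kb")

example = ds_bigbio["train"][0]
print(example["document_id"])
for entity in example["entities"]:
    print(entity["type"], entity["text"], entity["normalized"])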