gabrielaltay committed on
Commit
bbbdde0
1 Parent(s): 532a53b

upload hubscripts/citation_gia_test_collection_hub.py to hub from bigbio repo

Files changed (1)
  1. citation_gia_test_collection.py +350 -0
citation_gia_test_collection.py ADDED
@@ -0,0 +1,350 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+ from typing import List
+
+ import datasets
+ import xml.etree.ElementTree as ET
+ import uuid
+ import html
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{Wei2015,
+     title = {
+         {GNormPlus}: An Integrative Approach for Tagging Genes, Gene Families,
+         and Protein Domains
+     },
+     author = {Chih-Hsuan Wei and Hung-Yu Kao and Zhiyong Lu},
+     year = 2015,
+     journal = {{BioMed} Research International},
+     publisher = {Hindawi Limited},
+     volume = 2015,
+     pages = {1--7},
+     doi = {10.1155/2015/918710},
+     url = {https://doi.org/10.1155/2015/918710}
+ }
+ """
+
+ _DATASETNAME = "citation_gia_test_collection"
+ _DISPLAYNAME = "Citation GIA Test Collection"
+
+ _DESCRIPTION = """\
+ The Citation GIA Test Collection was created for gene indexing at the NLM and
+ includes 151 PubMed abstracts with both mention-level and document-level
+ annotations. The abstracts were selected for their focus on human genes.
+ """
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/gnormplus/"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: [
+         "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/download/GNormPlus/GNormPlusCorpus.zip"
+     ]
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class CitationGIATestCollection(datasets.GeneratorBasedBuilder):
+     """
+     The Citation GIA Test Collection was created for gene indexing at the NLM
+     and includes 151 PubMed abstracts with both mention-level and
+     document-level annotations. The abstracts were selected for their focus
+     on human genes.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="citation_gia_test_collection_source",
+             version=SOURCE_VERSION,
+             description="citation_gia_test_collection source schema",
+             schema="source",
+             subset_id="citation_gia_test_collection",
+         ),
+         BigBioConfig(
+             name="citation_gia_test_collection_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="citation_gia_test_collection BigBio schema",
+             schema="bigbio_kb",
+             subset_id="citation_gia_test_collection",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "citation_gia_test_collection_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "passages": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+
+         urls = _URLS[_DATASETNAME]
+         # download_and_extract is given a list of URLs, so it returns a list
+         # of local paths; data_dir[0] is the extracted GNormPlusCorpus zip.
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir[0], "GNormPlusCorpus/NLMIAT.BioC.xml"
+                     ),
+                     "split": "NLMIAT",
+                 },
+             ),
+         ]
+
+     def _get_entities(self, annot_d: dict) -> dict:
+         """
+         Converts an annotation dict to an entity dict.
+         """
+         ent = {
+             "id": str(uuid.uuid4()),
+             "type": annot_d["type"],
+             "text": [annot_d["text"]],
+             "offsets": [annot_d["offsets"]],
+             "normalized": [
+                 {
+                     "db_name": "NCBI Gene" if annot_d["type"].isdigit() else "",
+                     "db_id": annot_d["type"] if annot_d["type"].isdigit() else "",
+                 }
+             ],
+         }
+
+         return ent
+
+     def _get_offsets_entities(
+         self, parent_text: str, child_text: str, offset: int
+     ) -> List[int]:
+         """
+         Extracts child text offsets from parent text for entities.
+         Some offsets present in the dataset were wrong, mainly because of
+         string encodings, and a small fraction of parent strings do not
+         contain their respective child strings; hence the few assertion
+         errors in the entity offset checking test.
+         """
+         if child_text in parent_text:
+             index = parent_text.index(child_text)
+             start = index + offset
+
+         else:
+             start = offset
+         end = start + len(child_text)
+
+         return [start, end]
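+
+     # Illustration (hypothetical values): with parent_text="the BRCA1 gene",
+     # child_text="BRCA1" and offset=100, the child is found at index 4, so
+     # _get_offsets_entities returns [104, 109]; if child_text were absent
+     # from parent_text, start would fall back to the raw offset, giving
+     # [100, 105].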
+
+     def _process_annot(self, annot: ET.Element, passages: dict) -> dict:
+         """
+         Converts an annotation XML Element to a Python dict.
+         """
+         parent_text = " ".join([p["text"] for p in passages.values()])
+         annot_d = dict()
+         a_d = {a.tag: a.text for a in annot}
+
+         for a in list(annot):
+
+             if a.tag == "location":
+                 offset = int(a.attrib["offset"])
+                 annot_d["offsets"] = self._get_offsets_entities(
+                     html.escape(parent_text[offset:]), html.escape(a_d["text"]), offset
+                 )
+
+             elif a.tag != "infon":
+                 annot_d[a.tag] = html.escape(a.text)
+
+             else:
+                 annot_d[a.attrib["key"]] = html.escape(a.text)
+
+         return annot_d
+
+     def _parse_elem(self, elem: ET.Element) -> dict:
+         """
+         Converts a document XML Element to a Python dict.
+         """
+         elem_d = dict()
+         passages = dict()
+         annotations = elem.findall(".//annotation")
+         elem_d["entities"] = []
+
+         for child in elem:
+             elem_d[child.tag] = []
+
+         for child in elem:
+             if child.tag == "passage":
+                 elem_d[child.tag].append(
+                     {
+                         c.tag: html.escape(
+                             " ".join(
+                                 list(
+                                     filter(
+                                         lambda item: item,
+                                         [t.strip("\n") for t in c.itertext()],
+                                     )
+                                 )
+                             )
+                         )
+                         for c in child
+                     }
+                 )
+
+             elif child.tag == "id":
+                 elem_d[child.tag] = html.escape(child.text)
+
+         for passage in elem_d["passage"]:
+             infon = passage["infon"]
+             passage.pop("infon", None)
+             passages[infon] = passage
+
+         elem_d["passages"] = passages
+         elem_d.pop("passage", None)
+
+         for a in annotations:
+             elem_d["entities"].append(self._process_annot(a, elem_d["passages"]))
+
+         return elem_d
+
+     def _generate_examples(self, filepath, split):
+
+         root = ET.parse(filepath).getroot()
+
+         if self.config.schema == "source":
+             uid = 0
+             for elem in root.findall("document"):
+                 row = self._parse_elem(elem)
+                 uid += 1
+                 passages = row["passages"]
+                 yield uid, {
+                     "id": str(uid),
+                     "passages": [
+                         {
+                             "id": str(uuid.uuid4()),
+                             "type": "title",
+                             "text": [passages["title"]["text"]],
+                             "offsets": [
+                                 [
+                                     int(passages["title"]["offset"]),
+                                     int(passages["title"]["offset"])
+                                     + len(passages["title"]["text"]),
+                                 ]
+                             ],
+                         },
+                         {
+                             "id": str(uuid.uuid4()),
+                             "type": "abstract",
+                             "text": [passages["abstract"]["text"]],
+                             "offsets": [
+                                 [
+                                     int(passages["abstract"]["offset"]),
+                                     int(passages["abstract"]["offset"])
+                                     + len(passages["abstract"]["text"]),
+                                 ]
+                             ],
+                         },
+                     ],
+                     "entities": [self._get_entities(a) for a in row["entities"]],
+                 }
+
+         elif self.config.schema == "bigbio_kb":
+             uid = 0
+             for elem in root.findall("document"):
+                 row = self._parse_elem(elem)
+                 uid += 1
+                 passages = row["passages"]
+                 yield uid, {
+                     "id": str(uid),
+                     "document_id": str(uuid.uuid4()),
+                     "passages": [
+                         {
+                             "id": str(uuid.uuid4()),
+                             "type": "title",
+                             "text": [passages["title"]["text"]],
+                             "offsets": [
+                                 [
+                                     int(passages["title"]["offset"]),
+                                     int(passages["title"]["offset"])
+                                     + len(passages["title"]["text"]),
+                                 ]
+                             ],
+                         },
+                         {
+                             "id": str(uuid.uuid4()),
+                             "type": "abstract",
+                             "text": [passages["abstract"]["text"]],
+                             "offsets": [
+                                 [
+                                     int(passages["abstract"]["offset"]),
+                                     int(passages["abstract"]["offset"])
+                                     + len(passages["abstract"]["text"]),
+                                 ]
+                             ],
+                         },
+                     ],
+                     "entities": [self._get_entities(a) for a in row["entities"]],
+                     "relations": [],
+                     "events": [],
+                     "coreferences": [],
+                 }
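
For reference, a minimal sketch of loading the dataset through the datasets library, assuming the script is hosted at bigbio/citation_gia_test_collection on the Hub (the config names come from BUILDER_CONFIGS above; trust_remote_code is needed on newer datasets releases for script-based datasets):

    from datasets import load_dataset

    # Harmonized BigBio view; pass "citation_gia_test_collection_source"
    # instead to get the source schema.
    data = load_dataset(
        "bigbio/citation_gia_test_collection",
        name="citation_gia_test_collection_bigbio_kb",
        trust_remote_code=True,
    )

    # The script defines a single TEST split built from NLMIAT.BioC.xml.
    for doc in data["test"]:
        print(doc["document_id"], len(doc["entities"]))
        break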