gabrielaltay committed
Commit 3ff7e19
Parent: 09fe515

upload hubscripts/gnormplus_hub.py to hub from bigbio repo

Files changed (1)
  1. gnormplus.py +260 -0
gnormplus.py ADDED
@@ -0,0 +1,260 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import itertools
+ import os
+ import re
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from bioc import biocxml
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ from .bigbiohub import get_texts_and_offsets_from_bioc_ann
+
+ _LANGUAGES = ["English"]
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @Article{Wei2015,
+ author={Wei, Chih-Hsuan and Kao, Hung-Yu and Lu, Zhiyong},
+ title={GNormPlus: An Integrative Approach for Tagging Genes, Gene Families, and Protein Domains},
+ journal={BioMed Research International},
+ year={2015},
+ month={Aug},
+ day={25},
+ publisher={Hindawi Publishing Corporation},
+ volume={2015},
+ pages={918710},
+ issn={2314-6133},
+ doi={10.1155/2015/918710},
+ url={https://doi.org/10.1155/2015/918710}
+ }
+ """
+
+ _DATASETNAME = "gnormplus"
+ _DISPLAYNAME = "GNormPlus"
+
+ _DESCRIPTION = """\
+ We re-annotated two existing gene corpora. The BioCreative II GN corpus is a widely used data set for benchmarking GN
+ tools and includes document-level annotations for a total of 543 articles (281 in the training set and 262 in the
+ test set). The Citation GIA Test Collection was created for gene indexing at the NLM and includes 151 PubMed abstracts
+ with both mention-level and document-level annotations. Both corpora were selected because they focus on human genes.
+ For both corpora, we added annotations of gene families and protein domains. For the BioCreative GN corpus, we also
+ added mention-level gene annotations. As a result, our new corpus contains a total of 694 PubMed articles. PubTator
+ was used as our annotation tool along with the BioC format.
+ """
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/gnormplus/"
+
+ _LICENSE = "License information unavailable"
+
+ _URLS = {
+     _DATASETNAME: "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/download/GNormPlus/GNormPlusCorpus.zip"
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class GnormplusDataset(datasets.GeneratorBasedBuilder):
+     """Dataset loader for the GNormPlus corpus."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="gnormplus_source",
+             version=SOURCE_VERSION,
+             description="gnormplus source schema",
+             schema="source",
+             subset_id="gnormplus",
+         ),
+         BigBioConfig(
+             name="gnormplus_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="gnormplus BigBio schema",
+             schema="bigbio_kb",
+             subset_id="gnormplus",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "gnormplus_source"
+
+     _re_tax_id = re.compile(r"(?P<db_id>\d+)\(Tax:(?P<tax_id>\d+)\)")
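+     # e.g. "7157(Tax:9606)" parses to db_id="7157", tax_id="9606" (illustrative gene/taxonomy ids)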
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "doc_id": datasets.Value("string"),
+                     "passages": [
+                         {
+                             "text": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "location": {
+                                 "offset": datasets.Value("int64"),
+                                 "length": datasets.Value("int64"),
+                             },
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                     "tax_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+         else:
+             raise NotImplementedError(self.config.schema)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
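+     # A record under the "source" schema then looks roughly like (illustrative values):
+     # {
+     #     "doc_id": "1234567",
+     #     "passages": [{"text": "...", "type": "title",
+     #                   "location": {"offset": 0, "length": 44}}],
+     #     "entities": [{"id": "0", "type": "Gene", "text": ["BRCA1"],
+     #                   "offsets": [[10, 15]],
+     #                   "normalized": [{"db_name": "NCBI", "db_id": "672", "tax_id": "9606"}]}],
+     # }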
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "GNormPlusCorpus/BC2GNtrain.BioC.xml"
+                     ),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "GNormPlusCorpus/BC2GNtest.BioC.xml"
+                     ),
+                 },
+             ),
+         ]
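+         # NOTE: only the re-annotated BC2GN train/test files are read here; the
+         # Citation GIA Test Collection mentioned in _DESCRIPTION is not loaded.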
+
+     def _parse_bioc_entity(self, uid, bioc_ann, db_id_key="NCBI", insert_tax_id=False):
+         offsets, texts = get_texts_and_offsets_from_bioc_ann(bioc_ann)
+         _type = bioc_ann.infons["type"]
+
+         # parse db ids
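+         # e.g. a Gene annotation may carry infons like
+         # {"type": "Gene", "Gene": "7157(Tax:9606)"} (illustrative ids)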
+         normalized = []
+         if _type in bioc_ann.infons:
+             for _id in bioc_ann.infons[_type].split(","):
+                 match = self._re_tax_id.match(_id)
+                 if match:
+                     _id = match.group("db_id")
+
+                 n = {"db_name": db_id_key, "db_id": _id}
+                 if insert_tax_id:
+                     n["tax_id"] = match.group("tax_id") if match else None
+
+                 normalized.append(n)
+         return {
+             "id": uid,
+             "offsets": offsets,
+             "text": texts,
+             "type": _type,
+             "normalized": normalized,
+         }
+
+     def _generate_examples(self, filepath) -> Tuple[int, Dict]:
+         # running string ids, shared by the document, its passages, and its entities
+         uid = map(str, itertools.count(start=0, step=1))
+
+         with open(filepath, "r") as fp:
+             collection = biocxml.load(fp)
+
+         for idx, document in enumerate(collection.documents):
+             if self.config.schema == "source":
+                 features = {
+                     "doc_id": document.id,
+                     "passages": [
+                         {
+                             "text": passage.text,
+                             "type": passage.infons["type"],
+                             "location": {
+                                 "offset": passage.offset,
+                                 "length": passage.total_span.length,
+                             },
+                         }
+                         for passage in document.passages
+                     ],
+                     "entities": [
+                         self._parse_bioc_entity(
+                             next(uid), entity, insert_tax_id=True
+                         )
+                         for passage in document.passages
+                         for entity in passage.annotations
+                     ],
+                 }
+                 yield idx, features
+             elif self.config.schema == "bigbio_kb":
+                 # Source passage offsets do not form one contiguous document;
+                 # recompute contiguous spans for this schema.
+                 passage_spans = []
+                 start = 0
+                 for passage in document.passages:
+                     end = start + len(passage.text)
+                     passage_spans.append((start, end))
+                     start = end + 1
+
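+                 # e.g. a 44-char title gives (0, 44); the next passage then
+                 # starts at 45, leaving one separator character between passages.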
+                 features = {
+                     "id": next(uid),
+                     "document_id": document.id,
+                     "passages": [
+                         {
+                             "id": next(uid),
+                             "type": passage.infons["type"],
+                             "text": [passage.text],
+                             "offsets": [span],
+                         }
+                         for passage, span in zip(document.passages, passage_spans)
+                     ],
+                     "entities": [
+                         self._parse_bioc_entity(next(uid), entity)
+                         for passage in document.passages
+                         for entity in passage.annotations
+                     ],
+                     "events": [],
+                     "coreferences": [],
+                     "relations": [],
+                 }
+                 yield idx, features
+             else:
+                 raise NotImplementedError(self.config.schema)
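
For reference, loading the published dataset might look like this (a minimal sketch; the Hub repo id "bigbio/gnormplus" is an assumption, not part of this commit):

    from datasets import load_dataset

    # Source view (default config) and harmonized BigBio view:
    source = load_dataset("bigbio/gnormplus", name="gnormplus_source")
    kb = load_dataset("bigbio/gnormplus", name="gnormplus_bigbio_kb")
    print(kb["train"][0]["entities"][0])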