gabrielaltay committed on
Commit
b95f4ec
1 Parent(s): e33226d

upload hubscripts/bioscope_hub.py to hub from bigbio repo

Files changed (1)
  1. bioscope.py +332 -0
bioscope.py ADDED
@@ -0,0 +1,332 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ BioScope
+ ---
+ The corpus consists of three parts, namely medical free texts, biological full
+ papers and biological scientific abstracts. The dataset contains annotations at
+ the token level for negative and speculative keywords and at the sentence level
+ for their linguistic scope. The annotation process was carried out by two
+ independent linguist annotators and a chief linguist - also responsible for
+ setting up the annotation guidelines - who resolved cases where the annotators
+ disagreed. The resulting corpus consists of more than 20.000 sentences that were
+ considered for annotation and over 10% of them actually contain one (or more)
+ linguistic annotation suggesting negation or uncertainty.
+ """
+
+ import os
+ import re
+ import xml.etree.ElementTree as ET
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{vincze2008bioscope,
+   title={The BioScope corpus: biomedical texts annotated for uncertainty, negation and their scopes},
+   author={Vincze, Veronika and Szarvas, Gy{\"o}rgy and Farkas, Rich{\'a}rd and M{\'o}ra, Gy{\"o}rgy and Csirik, J{\'a}nos},
+   journal={BMC bioinformatics},
+   volume={9},
+   number={11},
+   pages={1--9},
+   year={2008},
+   publisher={BioMed Central}
+ }
+ """
+
+ _DATASETNAME = "bioscope"
+ _DISPLAYNAME = "BioScope"
+
+
+ _DESCRIPTION = """\
+ The BioScope corpus consists of medical and biological texts annotated for
+ negation, speculation and their linguistic scope. This was done to allow a
+ comparison between the development of systems for negation/hedge detection and
+ scope resolution. The BioScope corpus was annotated by two independent linguists
+ following the guidelines written by our linguist expert before the annotation of
+ the corpus was initiated.
+ """
+
+ _HOMEPAGE = "https://rgai.inf.u-szeged.hu/node/105"
+
+ _LICENSE = 'Creative Commons Attribution 2.0 Generic'
+
+ _URLS = {
+     _DATASETNAME: "https://rgai.sed.hu/sites/rgai.sed.hu/files/bioscope.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class BioscopeDataset(datasets.GeneratorBasedBuilder):
+     """The BioScope corpus consists of medical and biological texts annotated for negation, speculation and their linguistic scope."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="bioscope_source",
+             version=SOURCE_VERSION,
+             description="bioscope source schema",
+             schema="source",
+             subset_id="bioscope",
+         ),
+         BigBioConfig(
+             name="bioscope_abstracts_source",
+             version=SOURCE_VERSION,
+             description="bioscope source schema",
+             schema="source",
+             subset_id="bioscope_abstracts",
+         ),
+         BigBioConfig(
+             name="bioscope_papers_source",
+             version=SOURCE_VERSION,
+             description="bioscope source schema",
+             schema="source",
+             subset_id="bioscope_papers",
+         ),
+         BigBioConfig(
+             name="bioscope_medical_texts_source",
+             version=SOURCE_VERSION,
+             description="bioscope source schema",
+             schema="source",
+             subset_id="bioscope_medical_texts",
+         ),
+         BigBioConfig(
+             name="bioscope_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="bioscope BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bioscope",
+         ),
+         BigBioConfig(
+             name="bioscope_abstracts_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="bioscope BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bioscope_abstracts",
+         ),
+         BigBioConfig(
+             name="bioscope_papers_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="bioscope BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bioscope_papers",
+         ),
+         BigBioConfig(
+             name="bioscope_medical_texts_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="bioscope BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bioscope_medical_texts",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "bioscope_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "document_type": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_files": data_dir,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, data_files: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         sentences = self._load_sentences(data_files)
+         if self.config.schema == "source":
+             for guid, sentence_tuple in enumerate(sentences):
+                 document_type, sentence = sentence_tuple
+                 example = self._create_example(sentence_tuple)
+                 example["document_type"] = f"{document_type}_{sentence.attrib['id']}"
+                 example["text"] = "".join(sentence_tuple[1].itertext())
+                 yield guid, example
+
+         elif self.config.schema == "bigbio_kb":
+             for guid, sentence_tuple in enumerate(sentences):
+                 document_type, sentence = sentence_tuple
+                 example = self._create_example(sentence_tuple)
+                 example["id"] = guid
+                 example["passages"] = [
+                     {
+                         "id": f"{document_type}_{sentence.attrib['id']}",
+                         "type": document_type,
+                         "text": ["".join(sentence.itertext())],
+                         "offsets": [(0, len("".join(sentence.itertext())))],
+                     }
+                 ]
+                 example["events"] = []
+                 example["coreferences"] = []
+                 example["relations"] = []
+                 yield guid, example
232
+ def _load_sentences(self, data_files: Path) -> List:
233
+ """
234
+ Returns a list of tuples (Document type, iterator from dataset)
235
+ """
236
+ if self.config.subset_id.__contains__("abstracts"):
237
+ sentences = self._concat_iterators(
238
+ (
239
+ "Abstract",
240
+ ET.parse(os.path.join(data_files, "abstracts.xml"))
241
+ .getroot()
242
+ .iter("sentence"),
243
+ )
244
+ )
245
+ elif self.config.subset_id.__contains__("papers"):
246
+ sentences = self._concat_iterators(
247
+ (
248
+ "Paper",
249
+ ET.parse(os.path.join(data_files, "full_papers.xml"))
250
+ .getroot()
251
+ .iter("sentence"),
252
+ )
253
+ )
254
+ elif self.config.subset_id.__contains__("medical_texts"):
255
+ sentences = self._concat_iterators(
256
+ (
257
+ "Medical text",
258
+ ET.parse(
259
+ os.path.join(
260
+ data_files, "clinical_merger/clinical_records_anon.xml"
261
+ )
262
+ )
263
+ .getroot()
264
+ .iter("sentence"),
265
+ )
266
+ )
267
+ else:
268
+ abstracts = (
269
+ ET.parse(os.path.join(data_files, "abstracts.xml"))
270
+ .getroot()
271
+ .iter("sentence")
272
+ )
273
+ papers = (
274
+ ET.parse(os.path.join(data_files, "full_papers.xml"))
275
+ .getroot()
276
+ .iter("sentence")
277
+ )
278
+ medical_texts = (
279
+ ET.parse(
280
+ os.path.join(
281
+ data_files, "clinical_merger/clinical_records_anon.xml"
282
+ )
283
+ )
284
+ .getroot()
285
+ .iter("sentence")
286
+ )
287
+ sentences = self._concat_iterators(
288
+ ("Abstract", abstracts),
289
+ ("Paper", papers),
290
+ ("Medical text", medical_texts),
291
+ )
292
+ return sentences
+
+     @staticmethod
+     def _concat_iterators(*iterator_tuple):
+         for document_type, iterator in iterator_tuple:
+             for element in iterator:
+                 yield document_type, element
+
+     def _create_example(self, sentence_tuple):
+         document_type, sentence = sentence_tuple
+         document_type_prefix = document_type[0]
+
+         example = {}
+         example["document_id"] = f"{document_type_prefix}_{sentence.attrib['id']}"
+         example["entities"] = self._extract_entities(sentence, document_type_prefix)
+         return example
+
+     def _extract_entities(self, sentence, document_type_prefix):
+         text = "".join(sentence.itertext())
+         entities = []
+         xcopes = dict([(xcope.attrib["id"], xcope) for xcope in sentence.iter("xcope")])
+         cues = dict([(cue.attrib["ref"], cue) for cue in sentence.iter("cue")])
+         for idx, xcope in xcopes.items():
+             # X2.140.2 has no annotation in raw data
+             if cues.get(idx) is None:
+                 continue
+             entities.append(
+                 {
+                     "id": f"{document_type_prefix}_{idx}",
+                     "type": cues.get(idx).attrib["type"],
+                     "text": ["".join(xcope.itertext())],
+                     "offsets": self._extract_offsets(
+                         text=text, entity_text="".join(xcope.itertext())
+                     ),
+                     "normalized": [],
+                 }
+             )
+         return entities
+
+     def _extract_offsets(self, text, entity_text):
+         return [(text.find(entity_text), text.find(entity_text) + len(entity_text))]
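
For context, a minimal usage sketch of the configs this script defines, once the loading script and its bigbiohub.py helper are available locally; the local file path, chosen config names, and printed fields are illustrative assumptions, not part of this commit:

    from datasets import load_dataset

    # Source schema over all three sub-corpora; the script defines a single "train" split.
    source = load_dataset("bioscope.py", name="bioscope_source", split="train")

    # Only the abstracts sub-corpus, harmonized into the BigBio KB schema.
    kb = load_dataset("bioscope.py", name="bioscope_abstracts_bigbio_kb", split="train")

    # Each example corresponds to one annotated sentence.
    print(source[0]["document_id"], kb[0]["passages"][0]["type"])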