gabrielaltay committed
Commit 92e0543
1 Parent(s): 8bed104

upload hubscripts/bioinfer_hub.py to hub from bigbio repo

Files changed (1):
  bioinfer.py (+259, -0)

bioinfer.py ADDED
@@ -0,0 +1,259 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The authors present BioInfer (Bio Information Extraction Resource), a new public
resource providing an annotated corpus of biomedical English. We describe an
annotation scheme capturing named entities and their relationships along with a
dependency analysis of sentence syntax. We further present ontologies defining
the types of entities and relationships annotated in the corpus. Currently, the
corpus contains 1100 sentences from abstracts of biomedical research articles
annotated for relationships, named entities, as well as syntactic dependencies.
"""

import os
import xml.etree.ElementTree as ET
from typing import Dict, List, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@article{pyysalo2007bioinfer,
    title = {BioInfer: a corpus for information extraction in the biomedical domain},
    author = {
        Pyysalo, Sampo and Ginter, Filip and Heimonen, Juho and Bj{\"o}rne, Jari
        and Boberg, Jorma and J{\"a}rvinen, Jouni and Salakoski, Tapio
    },
    year = 2007,
    journal = {BMC bioinformatics},
    publisher = {BioMed Central},
    volume = 8,
    number = 1,
    pages = {1--24}
}
"""

_DATASETNAME = "bioinfer"
_DISPLAYNAME = "BioInfer"

_DESCRIPTION = """\
A corpus targeted at protein, gene, and RNA relationships which serves as a
resource for the development of information extraction systems and their
components such as parsers and domain analyzers. Currently, the corpus contains
1100 sentences from abstracts of biomedical research articles annotated for
relationships, named entities, as well as syntactic dependencies.
"""

_HOMEPAGE = "https://github.com/metalrt/ppi-dataset"

_LICENSE = 'Creative Commons Attribution 2.0 Generic'

_URLS = {
    _DATASETNAME: "https://github.com/metalrt/ppi-dataset/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [Tasks.RELATION_EXTRACTION, Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class BioinferDataset(datasets.GeneratorBasedBuilder):
    """
    1100 sentences from abstracts of biomedical research articles annotated
    for relationships, named entities, as well as syntactic dependencies.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bioinfer_source",
            version=SOURCE_VERSION,
            description="BioInfer source schema",
            schema="source",
            subset_id="bioinfer",
        ),
        BigBioConfig(
            name="bioinfer_bigbio_kb",
            version=BIGBIO_VERSION,
            description="BioInfer BigBio schema",
            schema="bigbio_kb",
            subset_id="bioinfer",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bioinfer_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "id": datasets.Value("string"),
                            "offsets": [[datasets.Value("int32")]],
                            "text": [datasets.Value("string")],
                            "type": datasets.Value("string"),
                            "normalized": [
                                {
                                    "db_name": datasets.Value("string"),
                                    "db_id": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                    "relations": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arg1_id": datasets.Value("string"),
                            "arg2_id": datasets.Value("string"),
                            "normalized": [
                                {
                                    "db_name": datasets.Value("string"),
                                    "db_id": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "ppi-dataset-master/csv_output/BioInfer-train.xml"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "ppi-dataset-master/csv_output/BioInfer-test.xml"
                    ),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
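        # Shape of the converted XML as assumed by the parsing code below
        # (attribute names are taken from the accesses in _create_example,
        # _add_entity, and _add_interaction; values are illustrative):
        #
        #   <sentence id="..." text="...">
        #     <entity id="..." charOffset="0-6" text="..." type="..." />
        #     <interaction id="..." type="..." e1="..." e2="..." />
        #   </sentence>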
        tree = ET.parse(filepath)
        root = tree.getroot()
        if self.config.schema == "source":
            for guid, sentence in enumerate(root.iter("sentence")):
                example = self._create_example(sentence)
                example["text"] = sentence.attrib["text"]
                example["type"] = "Sentence"
                yield guid, example

        elif self.config.schema == "bigbio_kb":
            for guid, sentence in enumerate(root.iter("sentence")):
                example = self._create_example(sentence)
                example["passages"] = [
                    {
                        "id": f"{sentence.attrib['id']}__text",
                        "type": "Sentence",
                        "text": [sentence.attrib["text"]],
                        "offsets": [(0, len(sentence.attrib["text"]))],
                    }
                ]
                example["events"] = []
                example["coreferences"] = []
                example["id"] = str(guid)  # the kb schema declares ids as strings
                yield guid, example

    def _create_example(self, sentence):
        example = {}
        example["document_id"] = sentence.attrib["id"]
        example["entities"] = []
        example["relations"] = []
        for tag in sentence:
            if tag.tag == "entity":
                example["entities"].append(self._add_entity(tag))
            elif tag.tag == "interaction":
                example["relations"].append(self._add_interaction(tag))
            else:
                raise ValueError(f"unknown tag: {tag.tag}")
        return example

    @staticmethod
    def _add_entity(entity):
        offsets = [
            [int(o) for o in offset.split("-")]
            for offset in entity.attrib["charOffset"].split(",")
        ]
        # For multiple offsets, split entity text accordingly
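        # Illustrative (hypothetical) values: charOffset="0-5,20-27" yields
        # offsets [[0, 5], [20, 27]]; the entity's "text" attribute then holds
        # both surface chunks separated by spaces, so the loop below slices
        # chunk by chunk (chunk_len = end - start) and skips the separators.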
        if len(offsets) > 1:
            text = []
            i = 0
            for start, end in offsets:
                chunk_len = end - start
                text.append(entity.attrib["text"][i : chunk_len + i])
                i += chunk_len
                while (
                    i < len(entity.attrib["text"]) and entity.attrib["text"][i] == " "
                ):
                    i += 1
        else:
            text = [entity.attrib["text"]]
        return {
            "id": entity.attrib["id"],
            "offsets": offsets,
            "text": text,
            "type": entity.attrib["type"],
            "normalized": [],  # empty list to match the declared list feature
        }

    @staticmethod
    def _add_interaction(interaction):
        return {
            "id": interaction.attrib["id"],
            "type": interaction.attrib["type"],
            "arg1_id": interaction.attrib["e1"],
            "arg2_id": interaction.attrib["e2"],
            "normalized": [],  # empty list to match the declared list feature
        }
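
For reference, a minimal usage sketch (not part of the committed file), assuming bioinfer.py and its bigbiohub helper module sit together in a local directory; the config names come from BUILDER_CONFIGS above, and "path/to/bioinfer.py" is a placeholder:

from datasets import load_dataset

# Source schema (the default config): stays close to the converted XML.
source = load_dataset("path/to/bioinfer.py", name="bioinfer_source")

# Harmonized BigBio KB schema: passages, entities, relations, events, coreferences.
kb = load_dataset("path/to/bioinfer.py", name="bioinfer_bigbio_kb")

print(source["train"][0]["entities"])  # entity mentions of the first train sentence
print(kb["test"][0]["relations"])      # interaction pairs of the first test sentence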