gabrielaltay committed
Commit: f987ba2
1 Parent(s): 35ac26d

upload hubscripts/biored_hub.py to hub from bigbio repo

Files changed (1)
  1. biored.py +323 -0
biored.py ADDED
@@ -0,0 +1,323 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Relation Extraction corpus with multiple entity types (e.g., gene/protein,
+ disease, chemical) and relation pairs (e.g., gene-disease; chemical-chemical),
+ on a set of 600 PubMed articles
+ """
+
+ import itertools
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from bioc import pubtator
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2204-04263,
+   author     = {Ling Luo and
+                 Po{-}Ting Lai and
+                 Chih{-}Hsuan Wei and
+                 Cecilia N. Arighi and
+                 Zhiyong Lu},
+   title      = {BioRED: {A} Comprehensive Biomedical Relation Extraction Dataset},
+   journal    = {CoRR},
+   volume     = {abs/2204.04263},
+   year       = {2022},
+   url        = {https://doi.org/10.48550/arXiv.2204.04263},
+   doi        = {10.48550/arXiv.2204.04263},
+   eprinttype = {arXiv},
+   eprint     = {2204.04263},
+   timestamp  = {Wed, 11 May 2022 15:24:37 +0200},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2204-04263.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DATASETNAME = "biored"
+ _DISPLAYNAME = "BioRED"
+
+ _DESCRIPTION = """\
+ Relation Extraction corpus with multiple entity types (e.g., gene/protein,
+ disease, chemical) and relation pairs (e.g., gene-disease; chemical-chemical),
+ on a set of 600 PubMed articles
+ """
+
+ _HOMEPAGE = "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: "https://ftp.ncbi.nlm.nih.gov/pub/lu/BioRED/BIORED.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ class BioredDataset(datasets.GeneratorBasedBuilder):
+     """Relation Extraction corpus with multiple entity types (e.g., gene/protein, disease, chemical) and relation pairs (e.g., gene-disease; chemical-chemical), on a set of 600 PubMed articles"""
+
+     # For bigbio_kb, this dataset uses a naming convention as
+     # uid_[title/abstract/relation/entity_id]_[entity/relation_uid]
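+     # e.g. within the document assigned uid "0", mentions of a concept such as
+     # D003920 are numbered in order ("0_D003920_0", "0_D003920_1", ...), relations
+     # are numbered "0_relation_0", "0_relation_1", ..., and passages get ids such
+     # as "0_title" and "0_abstract"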
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name=_DATASETNAME + "_source",
+             version=SOURCE_VERSION,
+             description=_DATASETNAME + " source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         BigBioConfig(
+             name=_DATASETNAME + "_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description=_DATASETNAME + " BigBio schema",
+             schema="bigbio_kb",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = _DATASETNAME + "_source"
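+     # i.e. the two available config names resolve to "biored_source" and
+     # "biored_bigbio_kb", and "biored_source" is used when no config is given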
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+
+             features = datasets.Features(
+                 {
+                     "pmid": datasets.Value("string"),
+                     "passages": [
+                         {
+                             "type": datasets.Value("string"),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "concept_id": datasets.Value("string"),
+                             "semantic_type_id": datasets.Sequence(
+                                 datasets.Value("string")
+                             ),
+                         }
+                     ],
+                     "relations": [
+                         {
+                             "novel": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "concept_1": datasets.Value("string"),
+                             "concept_2": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "BioRED", "Train.PubTator"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "BioRED", "Test.PubTator"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "BioRED", "Dev.PubTator"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             with open(filepath, "r", encoding="utf8") as fstream:
+                 for raw_document in self.generate_raw_docs(fstream):
+                     document = self.parse_raw_doc(raw_document)
+                     yield document["pmid"], document
+
+         elif self.config.schema == "bigbio_kb":
+             with open(filepath, "r", encoding="utf8") as fstream:
+                 uid = itertools.count(0)
+                 for raw_document in self.generate_raw_docs(fstream):
+                     entities_in_doc = dict()
+                     document = self.parse_raw_doc(raw_document)
+                     pmid = document.pop("pmid")
+                     document["id"] = str(next(uid))
+                     document["document_id"] = pmid
+                     entities_ = []
+                     relations_ = []
+                     for entity in document["entities"]:
+                         temp_id = document["id"] + "_" + str(entity["concept_id"])
+                         curr_entity_count = entities_in_doc.get(temp_id, 0)
+                         entities_.append(
+                             {
+                                 "id": temp_id + "_" + str(curr_entity_count),
+                                 "type": entity["semantic_type_id"],
+                                 "text": entity["text"],
+                                 "normalized": [],
+                                 "offsets": entity["offsets"],
+                             }
+                         )
+                         entities_in_doc[temp_id] = curr_entity_count + 1
+                     rel_uid = itertools.count(0)
+                     for relation in document["relations"]:
+                         relations_.append(
+                             {
+                                 "id": document["id"]
+                                 + "_relation_"
+                                 + str(next(rel_uid)),
+                                 "type": relation["type"],
+                                 "arg1_id": document["id"]
+                                 + "_"
+                                 + str(relation["concept_1"])
+                                 + "_0",
+                                 "arg2_id": document["id"]
+                                 + "_"
+                                 + str(relation["concept_2"])
+                                 + "_0",
+                                 "normalized": [],
+                             }
+                         )
+                     for passage in document["passages"]:
+                         passage["id"] = document["id"] + "_" + passage["type"]
+                     document["entities"] = entities_
+                     document["relations"] = relations_
+                     document["events"] = []
+                     document["coreferences"] = []
+                     yield document["document_id"], document
+
+     def generate_raw_docs(self, fstream):
+         """
+         Given a filestream, this function yields documents from it
+         """
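+         # consecutive documents in the PubTator file are separated by blank lines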
+         raw_document = []
+         for line in fstream:
+             if line.strip():
+                 raw_document.append(line.strip())
+             elif raw_document:
+                 yield raw_document
+                 raw_document = []
+         if raw_document:
+             yield raw_document
+
+     def parse_raw_doc(self, raw_doc):
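+         # Expected PubTator record layout (as assumed by the parsing below):
+         #   raw_doc[0]:      "<pmid>|t|<title>"
+         #   raw_doc[1]:      "<pmid>|a|<abstract>"
+         #   entity lines:    <pmid> \t <start> \t <end> \t <mention> \t <semantic_type(s)> \t <concept_id(s)>
+         #   relation lines:  <pmid> \t <relation_type> \t <concept_1> \t <concept_2> \t <novel>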
+         pmid, _, title = raw_doc[0].split("|")
+         pmid = int(pmid)
+         _, _, abstract = raw_doc[1].split("|")
+         passages = [
+             {"type": "title", "text": [title], "offsets": [[0, len(title)]]},
+             {
+                 "type": "abstract",
+                 "text": [abstract],
+                 "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
+             },
+         ]
+         entities = []
+         relations = []
+         for line in raw_doc[2:]:
+             mentions = line.split("\t")
+             (_pmid, _type_ind, *rest) = mentions
+             if _type_ind in [
+                 "Positive_Correlation",
+                 "Association",
+                 "Negative_Correlation",
+                 "Bind",
+                 "Conversion",
+                 "Cotreatment",
+                 "Cause",
+                 "Comparison",
+                 "Drug_Interaction",
+             ]:
+                 # Relations handled here
+                 relation_type = _type_ind
+                 concept_1, concept_2, novel = rest
+                 relation = {
+                     "type": relation_type,
+                     "concept_1": concept_1,
+                     "concept_2": concept_2,
+                     "novel": novel,
+                 }
+                 relations.append(relation)
+             elif _type_ind.isnumeric():
+                 # Entities handled here
+                 start_idx = _type_ind
+                 end_idx, mention, semantic_type_id, entity_ids = rest
+                 entity = [
+                     {
+                         "offsets": [[int(start_idx), int(end_idx)]],
+                         "text": [mention],
+                         "semantic_type_id": semantic_type_id.split(","),
+                         "concept_id": entity_id,
+                     }
+                     for entity_id in entity_ids.split(",")
+                 ]
+                 entities.extend(entity)
+             else:
+                 logger.warn(
+                     f"Skipping annotation in Document ID: {_pmid}. Unexpected format"
+                 )
+         return {
+             "pmid": pmid,
+             "passages": passages,
+             "entities": entities,
+             "relations": relations,
+         }
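
A minimal loading sketch, assuming the script lands in a Hub dataset repo named bigbio/biored with the bigbiohub.py helper module uploaded alongside it; the config names come straight from BUILDER_CONFIGS above, and recent releases of the datasets library may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# source schema: stays close to the original PubTator annotations
biored_source = load_dataset("bigbio/biored", name="biored_source")

# bigbio_kb schema: harmonized passages / entities / relations / events / coreferences
biored_kb = load_dataset("bigbio/biored", name="biored_bigbio_kb")

print(biored_kb["train"][0]["entities"][0])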