Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
gabrielaltay committed on
Commit
eedd3bf
·
1 Parent(s): 419e29a

upload hubscripts/bionlp_st_2013_ge_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. bionlp_st_2013_ge.py +238 -0
bionlp_st_2013_ge.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
from pathlib import Path
from typing import List

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import kb_features
from .bigbiohub import parse_brat_file
25
+ _DATASETNAME = "bionlp_st_2013_ge"
26
+ _DISPLAYNAME = "BioNLP 2013 GE"
27
+
28
+ _SOURCE_VIEW_NAME = "source"
29
+ _UNIFIED_VIEW_NAME = "bigbio"
30
+
31
+ _LANGUAGES = ['English']
32
+ _PUBMED = True
33
+ _LOCAL = False
34
+ _CITATION = """\
35
+ @inproceedings{kim-etal-2013-genia,
36
+ title = "The {G}enia Event Extraction Shared Task, 2013 Edition - Overview",
37
+ author = "Kim, Jin-Dong and
38
+ Wang, Yue and
39
+ Yasunori, Yamamoto",
40
+ booktitle = "Proceedings of the {B}io{NLP} Shared Task 2013 Workshop",
41
+ month = aug,
42
+ year = "2013",
43
+ address = "Sofia, Bulgaria",
44
+ publisher = "Association for Computational Linguistics",
45
+ url = "https://aclanthology.org/W13-2002",
46
+ pages = "8--15",
47
+ }
48
+ """
49
+
50
+ _DESCRIPTION = """\
51
+ The BioNLP-ST GE task has been promoting development of fine-grained
52
+ information extraction (IE) from biomedical
53
+ documents, since 2009. Particularly, it has focused on the domain of
54
+ NFkB as a model domain of Biomedical IE
55
+ """
56
+
57
+ _HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2013-ge"
58
+
59
+ _LICENSE = 'GENIA Project License for Annotated Corpora'
60
+
61
+ _URLs = {
62
+ "source": "https://github.com/openbiocorpora/bionlp-st-2013-ge/archive/refs/heads/master.zip",
63
+ "bigbio_kb": "https://github.com/openbiocorpora/bionlp-st-2013-ge/archive/refs/heads/master.zip",
64
+ }
65
+
66
+ _SUPPORTED_TASKS = [
67
+ Tasks.EVENT_EXTRACTION,
68
+ Tasks.NAMED_ENTITY_RECOGNITION,
69
+ Tasks.RELATION_EXTRACTION,
70
+ Tasks.COREFERENCE_RESOLUTION,
71
+ ]
72
+ _SOURCE_VERSION = "1.0.0"
73
+ _BIGBIO_VERSION = "1.0.0"
74
+
75
+
76
class bionlp_st_2013_ge(datasets.GeneratorBasedBuilder):
    """The BioNLP-ST GE task has been promoting development of fine-grained information extraction (IE) from biomedical
    documents, since 2009. Particularly, it has focused on the domain of NFkB as a model domain of Biomedical IE"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    # One config per schema: raw brat-style "source" fields and the unified
    # BigBio KB schema.
    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bionlp_st_2013_ge_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2013_ge source schema",
            schema="source",
            subset_id="bionlp_st_2013_ge",
        ),
        BigBioConfig(
            name="bionlp_st_2013_ge_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bionlp_st_2013_ge BigBio schema",
            schema="bigbio_kb",
            subset_id="bionlp_st_2013_ge",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bionlp_st_2013_ge_source"

    def _info(self):
        """
        - `features` defines the schema of the parsed data set. The schema depends on the
        chosen `config`: If it is `_SOURCE_VIEW_NAME` the schema is the schema of the
        original data. If `config` is `_UNIFIED_VIEW_NAME`, then the schema is the
        canonical KB-task schema defined in `biomedical/schemas/kb.py`.
        """
        if self.config.schema == "source":
            # The source schema mirrors the brat standoff format: one record
            # per document, with lists of T/E/R/Equiv/A-M/N annotation lines.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                        {
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                            "id": datasets.Value("string"),
                        }
                    ],
                    "events": [  # E line in brat
                        {
                            "trigger": datasets.Value(
                                "string"
                            ),  # refers to the text_bound_annotation of the trigger,
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arguments": datasets.Sequence(
                                {
                                    "role": datasets.Value("string"),
                                    "ref_id": datasets.Value("string"),
                                }
                            ),
                        }
                    ],
                    "relations": [  # R line in brat
                        {
                            "id": datasets.Value("string"),
                            "head": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "tail": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "type": datasets.Value("string"),
                        }
                    ],
                    "equivalences": [  # Equiv line in brat
                        {
                            "id": datasets.Value("string"),
                            "ref_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                    "attributes": [  # M or A lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ],
                    "normalizations": [  # N lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "resource_name": datasets.Value(
                                "string"
                            ),  # Name of the resource, e.g. "Wikipedia"
                            "cuid": datasets.Value(
                                "string"
                            ),  # ID in the resource, e.g. 534366
                            "text": datasets.Value(
                                "string"
                            ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                        }
                    ],
                },
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the openbiocorpora archive once and point each split at
        its brat directory inside the extracted tree."""
        my_urls = _URLs[self.config.schema]
        data_dir = Path(dl_manager.download_and_extract(my_urls))
        # FIX: the original wrapped these constant path components in
        # f-strings with no placeholders; plain strings are equivalent.
        original_data = data_dir / "bionlp-st-2013-ge-master" / "original-data"
        data_files = {
            "train": original_data / "train",
            "dev": original_data / "devel",
            "test": original_data / "test",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_files": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_files": data_files["test"]},
            ),
        ]

    def _generate_examples(self, data_files: Path):
        """Yield (key, example) pairs, one per brat document in the split dir.

        FIX: the original called ``parsing.parse_brat_file`` /
        ``parsing.brat_parse_to_bigbio_kb`` but never imported a ``parsing``
        module anywhere in this file, so every config raised ``NameError`` at
        generation time. The brat helpers are provided by ``.bigbiohub`` (the
        module this script already imports its config/feature helpers from)
        and are now imported directly at the top of the file.
        """
        if self.config.schema == "source":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = parse_brat_file(txt_file)
                example["id"] = str(guid)
                yield guid, example
        elif self.config.schema == "bigbio_kb":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = brat_parse_to_bigbio_kb(parse_brat_file(txt_file))
                example["id"] = str(guid)
                yield guid, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")