gabrielaltay commited on
Commit
6cac892
1 Parent(s): a2d8ede

upload hubscripts/bionlp_shared_task_2009_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. bionlp_shared_task_2009.py +231 -0
bionlp_shared_task_2009.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
from pathlib import Path
from typing import Dict, List

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import kb_features
from .bigbiohub import parse_brat_file
25
+
26
+ _LANGUAGES = ['English']
27
+ _PUBMED = True
28
+ _LOCAL = False
29
+ _CITATION = """\
30
+ @inproceedings{kim-etal-2009-overview,
31
+ title = "Overview of {B}io{NLP}{'}09 Shared Task on Event Extraction",
32
+ author = "Kim, Jin-Dong and
33
+ Ohta, Tomoko and
34
+ Pyysalo, Sampo and
35
+ Kano, Yoshinobu and
36
+ Tsujii, Jun{'}ichi",
37
+ booktitle = "Proceedings of the {B}io{NLP} 2009 Workshop Companion Volume for Shared Task",
38
+ month = jun,
39
+ year = "2009",
40
+ address = "Boulder, Colorado",
41
+ publisher = "Association for Computational Linguistics",
42
+ url = "https://aclanthology.org/W09-1401",
43
+ pages = "1--9",
44
+ }
45
+ """
46
+
47
+ _DATASETNAME = "bionlp_shared_task_2009"
48
+ _DISPLAYNAME = "BioNLP 2009"
49
+
50
+ _DESCRIPTION = """\
51
+ The BioNLP Shared Task 2009 was organized by GENIA Project and its corpora were curated based
52
+ on the annotations of the publicly available GENIA Event corpus and an unreleased (blind) section
53
+ of the GENIA Event corpus annotations, used for evaluation.
54
+ """
55
+
56
+ _HOMEPAGE = "http://www.geniaproject.org/shared-tasks/bionlp-shared-task-2009"
57
+
58
+ _LICENSE = 'GENIA Project License for Annotated Corpora'
59
+
60
+ _URL_BASE = "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/BioNLP-ST-2009/"
61
+ _URLS = {
62
+ _DATASETNAME: {
63
+ "train": _URL_BASE + "bionlp09_shared_task_training_data_rev2.tar.gz",
64
+ "test": _URL_BASE
65
+ + "bionlp09_shared_task_test_data_without_gold_annotation.tar.gz",
66
+ "dev": _URL_BASE + "bionlp09_shared_task_development_data_rev1.tar.gz",
67
+ },
68
+ }
69
+
70
+ _SUPPORTED_TASKS = [
71
+ Tasks.NAMED_ENTITY_RECOGNITION,
72
+ Tasks.EVENT_EXTRACTION,
73
+ Tasks.COREFERENCE_RESOLUTION,
74
+ ]
75
+
76
+ _SOURCE_VERSION = "1.0.0"
77
+
78
+ _BIGBIO_VERSION = "1.0.0"
79
+
80
+ # https://2011.bionlp-st.org/bionlp-shared-task-2011/genia-event-extraction-genia
81
+
82
+
83
+ class BioNLPSharedTask2009(datasets.GeneratorBasedBuilder):
84
+ """TODO: Short description of my dataset."""
85
+
86
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
87
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
88
+
89
+ BUILDER_CONFIGS = [
90
+ BigBioConfig(
91
+ name="bionlp_shared_task_2009_source",
92
+ version=SOURCE_VERSION,
93
+ description="bionlp_shared_task_2009 source schema",
94
+ schema="source",
95
+ subset_id="bionlp_shared_task_2009",
96
+ ),
97
+ BigBioConfig(
98
+ name="bionlp_shared_task_2009_bigbio_kb",
99
+ version=BIGBIO_VERSION,
100
+ description="bionlp_shared_task_2009 BigBio schema",
101
+ schema="bigbio_kb",
102
+ subset_id="bionlp_shared_task_2009",
103
+ ),
104
+ ]
105
+
106
+ DEFAULT_CONFIG_NAME = "bionlp_shared_task_2009_source"
107
+
108
+ _ROLE_MAPPING = {
109
+ "Theme2": "Theme",
110
+ "Theme3": "Theme",
111
+ "Theme4": "Theme",
112
+ "Site2": "Site",
113
+ }
114
+
115
+ def _info(self) -> datasets.DatasetInfo:
116
+
117
+ if self.config.schema == "source":
118
+ features = datasets.Features(
119
+ {
120
+ "document_id": datasets.Value("string"),
121
+ "text": datasets.Value("string"),
122
+ "text_bound_annotations": [
123
+ {
124
+ "id": datasets.Value("string"),
125
+ "offsets": [[datasets.Value("int64")]],
126
+ "text": [datasets.Value("string")],
127
+ "type": datasets.Value("string"),
128
+ }
129
+ ],
130
+ "events": [
131
+ {
132
+ "arguments": [
133
+ {
134
+ "ref_id": datasets.Value("string"),
135
+ "role": datasets.Value("string"),
136
+ }
137
+ ],
138
+ "id": datasets.Value("string"),
139
+ "trigger": datasets.Value("string"),
140
+ "type": datasets.Value("string"),
141
+ }
142
+ ],
143
+ "relations": [
144
+ {
145
+ "id": datasets.Value("string"),
146
+ "type": datasets.Value("string"),
147
+ "arg1_id": datasets.Value("string"),
148
+ "arg2_id": datasets.Value("string"),
149
+ "normalized": [
150
+ {
151
+ "db_name": datasets.Value("string"),
152
+ "db_id": datasets.Value("string"),
153
+ }
154
+ ],
155
+ }
156
+ ],
157
+ "equivalences": [datasets.Value("string")],
158
+ "attributes": [datasets.Value("string")],
159
+ "normalizations": [datasets.Value("string")],
160
+ }
161
+ )
162
+
163
+ elif self.config.schema == "bigbio_kb":
164
+ features = kb_features
165
+
166
+ return datasets.DatasetInfo(
167
+ description=_DESCRIPTION,
168
+ features=features,
169
+ homepage=_HOMEPAGE,
170
+ license=str(_LICENSE),
171
+ citation=_CITATION,
172
+ )
173
+
174
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
175
+ urls = _URLS[_DATASETNAME]
176
+ data_dir_train = dl_manager.download_and_extract(urls["train"])
177
+ data_dir_test = dl_manager.download_and_extract(urls["test"])
178
+ data_dir_dev = dl_manager.download_and_extract(urls["dev"])
179
+
180
+ return [
181
+ datasets.SplitGenerator(
182
+ name=datasets.Split.TRAIN,
183
+ gen_kwargs={
184
+ "filepath": data_dir_train,
185
+ "split": "train",
186
+ },
187
+ ),
188
+ datasets.SplitGenerator(
189
+ name=datasets.Split.TEST,
190
+ gen_kwargs={
191
+ "filepath": data_dir_test,
192
+ "split": "test",
193
+ },
194
+ ),
195
+ datasets.SplitGenerator(
196
+ name=datasets.Split.VALIDATION,
197
+ gen_kwargs={
198
+ "filepath": data_dir_dev,
199
+ "split": "dev",
200
+ },
201
+ ),
202
+ ]
203
+
204
+ def _standardize_arguments_roles(self, kb_example: Dict) -> Dict:
205
+
206
+ for event in kb_example["events"]:
207
+ for argument in event["arguments"]:
208
+ role = argument["role"]
209
+ argument["role"] = self._ROLE_MAPPING.get(role, role)
210
+
211
+ return kb_example
212
+
213
+ def _generate_examples(self, filepath, split):
214
+
215
+ filepath = Path(filepath)
216
+ txt_files: List[Path] = [
217
+ file for file in filepath.iterdir() if file.suffix == ".txt"
218
+ ]
219
+
220
+ if self.config.schema == "source":
221
+ for i, file in enumerate(txt_files):
222
+ brat_content = parse_brat_file(file)
223
+ yield i, brat_content
224
+
225
+ elif self.config.schema == "bigbio_kb":
226
+ for i, file in enumerate(txt_files):
227
+ brat_content = parse_brat_file(file)
228
+ kb_example = brat_parse_to_bigbio_kb(brat_content)
229
+ kb_example = self._standardize_arguments_roles(kb_example)
230
+ kb_example["id"] = kb_example["document_id"]
231
+ yield i, kb_example