gabrielaltay committed
Commit 6918fe9
1 Parent(s): d1c9c6b

upload hubscripts/medal_hub.py to hub from bigbio repo

Files changed (1)
  1. medal.py +245 -0
medal.py ADDED
@@ -0,0 +1,245 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The Repository for Medical Dataset for Abbreviation Disambiguation for Natural Language Understanding (MeDAL) is
+ a large medical text dataset curated for abbreviation disambiguation, designed for natural language understanding
+ pre-training in the medical domain. This script loads the MeDAL dataset in the bigbio KB schema and/or source schema.
+ """
+
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{wen-etal-2020-medal,
+     title = {MeDAL: Medical Abbreviation Disambiguation Dataset for Natural Language Understanding Pretraining},
+     author = {Wen, Zhi and Lu, Xing Han and Reddy, Siva},
+     booktitle = {Proceedings of the 3rd Clinical Natural Language Processing Workshop},
+     month = {Nov},
+     year = {2020},
+     address = {Online},
+     publisher = {Association for Computational Linguistics},
+     url = {https://www.aclweb.org/anthology/2020.clinicalnlp-1.15},
+     pages = {130--135},
+ }
+ """
+
+ _DATASETNAME = "medal"
+ _DISPLAYNAME = "MeDAL"
+
+ _DESCRIPTION = """\
+ The Repository for Medical Dataset for Abbreviation Disambiguation for Natural Language Understanding (MeDAL) is
+ a large medical text dataset curated for abbreviation disambiguation, designed for natural language understanding
+ pre-training in the medical domain.
+ """
+
+ _HOMEPAGE = "https://github.com/BruceWen120/medal"
+
+ _LICENSE = 'National Library of Medicine Terms and Conditions'
+
+ _URL = "https://zenodo.org/record/4482922/files/"
+ _URLS = {
+     "train": _URL + "train.csv",
+     "test": _URL + "test.csv",
+     "valid": _URL + "valid.csv",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class MedalDataset(datasets.GeneratorBasedBuilder):
+     """The Repository for Medical Dataset for Abbreviation Disambiguation for Natural Language Understanding (MeDAL) is
+     a large medical text dataset curated for abbreviation disambiguation, designed for natural language understanding
+     pre-training in the medical domain."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="medal_source",
+             version=SOURCE_VERSION,
+             description="MeDAL source schema",
+             schema="source",
+             subset_id="medal",
+         ),
+         BigBioConfig(
+             name="medal_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="MeDAL BigBio schema",
+             schema="bigbio_kb",
+             subset_id="medal",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "medal_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "abstract_id": datasets.Value("int32"),
+                     "text": datasets.Value("string"),
+                     "location": datasets.Sequence(datasets.Value("int32")),
+                     "label": datasets.Sequence(datasets.Value("string")),
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls_to_dl = _URLS
+         try:
+             dl_dir = dl_manager.download_and_extract(urls_to_dl)
+         except Exception:
+             logger.warning(
+                 "This dataset is downloaded through Zenodo, which can be flaky. If this download failed, try a few times before reporting an issue."
+             )
+             raise
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["valid"], "split": "val"},
+             ),
+         ]
+
+     def _generate_offsets(self, text, location):
+         """Generate character offsets from text and a word location.
+
+         Parameters
+         ----------
+         text : str
+             Abstract text
+         location : int
+             0-based word index of the abbreviation in the abstract
+
+         Returns
+         -------
+         dict
+             "word": str,
+             "offsets": tuple (int, int)
+         """
+         words = text.split(" ")
+         word = words[location]
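+         # Character start offset: combined length of the preceding words plus
+         # one separating space per preceding word (hence the "+ location").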
+         offset_start = sum(len(word) for word in words[0:location]) + location
+         offset_end = offset_start + len(word)
+
+         # return word and offsets
+         return {"word": word, "offsets": (offset_start, offset_end)}
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         with open(filepath, encoding="utf-8") as file:
+             data = pd.read_csv(
+                 file,
+                 sep=",",
+                 dtype={"ABSTRACT_ID": str, "TEXT": str, "LOCATION": int, "LABEL": str},
+             )
+
+         if self.config.schema == "source":
+             for id_, row in enumerate(data.itertuples()):
+                 yield id_, {
+                     "abstract_id": int(row.ABSTRACT_ID),
+                     "text": row.TEXT,
+                     "location": [row.LOCATION],
+                     "label": [row.LABEL],
+                 }
+         elif self.config.schema == "bigbio_kb":
+             uid = 0  # global unique id
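+             # Each record consumes three consecutive ids: one for the
+             # document, one for its single passage, one for its entity.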
+             for id_, row in enumerate(data.itertuples()):
+                 word_offsets = self._generate_offsets(row.TEXT, row.LOCATION)
+                 example = {
+                     "id": str(uid),
+                     "document_id": row.ABSTRACT_ID,
+                     "passages": [],
+                     "entities": [],
+                     "relations": [],
+                     "events": [],
+                     "coreferences": [],
+                 }
+                 uid += 1
+
+                 example["passages"].append(
+                     {
+                         "id": str(uid),
+                         "type": "PubMed abstract",
+                         "text": [row.TEXT],
+                         "offsets": [(0, len(row.TEXT))],
+                     }
+                 )
+
+                 uid += 1
+
+                 example["entities"].append(
+                     {
+                         "id": str(uid),
+                         "type": "abbreviation",
+                         "text": [word_offsets["word"]],
+                         "offsets": [word_offsets["offsets"]],
+                         "normalized": [
+                             {
+                                 "db_name": "medal",
+                                 "db_id": row.LABEL,
+                             }
+                         ],
+                     }
+                 )
+                 uid += 1
+                 yield id_, example