Commit 9275449 (1 parent: 8bc74fd), committed by gabrielaltay

upload hubscripts/medhop_hub.py to hub from bigbio repo

Files changed (1)
  1. medhop.py +212 -0
medhop.py ADDED
@@ -0,0 +1,212 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

import datasets

from .bigbiohub import qa_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@article{welbl-etal-2018-constructing,
    title = {Constructing Datasets for Multi-hop Reading Comprehension Across Documents},
    author = {Welbl, Johannes and Stenetorp, Pontus and Riedel, Sebastian},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {6},
    year = {2018},
    address = {Cambridge, MA},
    publisher = {MIT Press},
    url = {https://aclanthology.org/Q18-1021},
    doi = {10.1162/tacl_a_00021},
    pages = {287--302},
    abstract = {
        Most Reading Comprehension methods limit themselves to queries which
        can be answered using a single sentence, paragraph, or document.
        Enabling models to combine disjoint pieces of textual evidence would
        extend the scope of machine comprehension methods, but currently no
        resources exist to train and test this capability. We propose a novel
        task to encourage the development of models for text understanding
        across multiple documents and to investigate the limits of existing
        methods. In our task, a model learns to seek and combine evidence
        -- effectively performing multihop, alias multi-step, inference.
        We devise a methodology to produce datasets for this task, given a
        collection of query-answer pairs and thematically linked documents.
        Two datasets from different domains are induced, and we identify
        potential pitfalls and devise circumvention strategies. We evaluate
        two previously proposed competitive models and find that one can
        integrate information across documents. However, both models
        struggle to select relevant information; and providing documents
        guaranteed to be relevant greatly improves their performance. While
        the models outperform several strong baselines, their best accuracy
        reaches 54.5% on an annotated test set, compared to human
        performance at 85.0%, leaving ample room for improvement.
    },
}
"""

_DESCRIPTION = """\
With the same format as WikiHop, this dataset is based on research paper
abstracts from PubMed, and the queries are about interactions between
pairs of drugs. The correct answer has to be inferred by combining
information from a chain of reactions of drugs and proteins.
"""

_DATASETNAME = "medhop"
_DISPLAYNAME = "MedHop"

_HOMEPAGE = "http://qangaroo.cs.ucl.ac.uk/"

_LICENSE = 'Creative Commons Attribution Share Alike 3.0 Unported'

_BASE_GDRIVE = "https://drive.google.com/uc?export=download&confirm=yTib&id="

_URLs = {
    "source": _BASE_GDRIVE + "1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA",
    "bigbio_qa": _BASE_GDRIVE + "1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA",
}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

class MedHopDataset(datasets.GeneratorBasedBuilder):
    """MedHop"""

    DEFAULT_CONFIG_NAME = "medhop_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="medhop_source",
            version=SOURCE_VERSION,
            description="MedHop source schema",
            schema="source",
            subset_id="MedHop",
        ),
        BigBioConfig(
            name="medhop_bigbio_qa",
            version=BIGBIO_VERSION,
            description="MedHop BigBio schema",
            schema="bigbio_qa",
            subset_id="MedHop",
        ),
    ]

    def _info(self):

        if self.config.schema == "source":

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "candidates": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                    "supports": datasets.Sequence(datasets.Value("string")),
                    "query": datasets.Value("string"),
                }
            )

        # simplified schema for QA tasks
        elif self.config.schema == "bigbio_qa":

            features = qa_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        my_urls = _URLs[self.config.schema]
        data_dir = dl_manager.download_and_extract(my_urls)
        data_dir += "/qangaroo_v1.1/medhop/"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.json"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.json"),
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""

        if self.config.schema == "source":

            with open(filepath, encoding="utf-8") as file:

                data = json.load(file)

                for i, record in enumerate(data):

                    yield i, {
                        "id": record["id"],
                        "candidates": record["candidates"],
                        "answer": record["answer"],
                        "supports": record["supports"],
                        "query": record["query"],
                    }

        elif self.config.schema == "bigbio_qa":

            with open(filepath, encoding="utf-8") as file:

                uid = 0

                data = json.load(file)

                for record in data:

                    record["type"] = "multiple_choice"

                    yield uid, {
                        "id": record["id"],
                        "document_id": record["id"],
                        "question_id": record["id"],
                        "question": record["query"],
                        "type": record["type"],
                        "context": " ".join(record["supports"]),
                        "answer": [record["answer"]],
                        "choices": record["candidates"],
                    }

                    uid += 1
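
For reference, a minimal sketch of how the two configurations defined in this script might be loaded with the datasets library once the file is on the Hub. The repository id "bigbio/medhop" is an assumption (it mirrors _DATASETNAME above); point it at the actual dataset repository, and note that recent versions of the datasets library may also require trust_remote_code=True when loading script-based datasets.

from datasets import load_dataset

# Hypothetical repository id; adjust to wherever medhop.py is actually hosted.
REPO_ID = "bigbio/medhop"

# Source schema: original QAngaroo fields (id, query, answer, candidates, supports).
medhop_source = load_dataset(REPO_ID, name="medhop_source")

# BigBio QA schema: normalized question/context/choices/answer records.
medhop_qa = load_dataset(REPO_ID, name="medhop_bigbio_qa")

example = medhop_qa["train"][0]
print(example["question"])  # the multi-hop drug-interaction query
print(example["choices"])   # candidate answers
print(example["answer"])    # gold answer, wrapped in a single-element list

Note that _split_generators above only defines train and validation splits (from train.json and dev.json), so there is no "test" split to request.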