Modalities: Text
Languages: English
Libraries: Datasets
License:
Commit fe23b87 by gabrielaltay · 1 parent: 5b87902

upload hubscripts/mediqa_rqe_hub.py to hub from bigbio repo

Files changed (1):
  mediqa_rqe.py  +155 -0
mediqa_rqe.py ADDED
@@ -0,0 +1,155 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterator, Tuple
from xml.etree import ElementTree as ET

import datasets

from .bigbiohub import pairs_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = False
_LOCAL = False
_CITATION = """\
@inproceedings{MEDIQA2019,
    author = {Asma {Ben Abacha} and Chaitanya Shivade and Dina Demner{-}Fushman},
    title = {Overview of the MEDIQA 2019 Shared Task on Textual Inference, Question Entailment and Question Answering},
    booktitle = {ACL-BioNLP 2019},
    year = {2019}
}
"""

_DATASETNAME = "mediqa_rqe"
_DISPLAYNAME = "MEDIQA RQE"

_DESCRIPTION = """\
The MEDIQA challenge is an ACL-BioNLP 2019 shared task aiming to attract further research efforts in Natural Language Inference (NLI), Recognizing Question Entailment (RQE), and their applications in medical Question Answering (QA).
Mailing List: https://groups.google.com/forum/#!forum/bionlp-mediqa

The objective of the RQE task is to identify entailment between two questions in the context of QA. We use the following definition of question entailment: “a question A entails a question B if every answer to B is also a complete or partial answer to A” [1]
[1] A. Ben Abacha & D. Demner-Fushman. “Recognizing Question Entailment for Medical Question Answering”. AMIA 2016.
"""

_HOMEPAGE = "https://sites.google.com/view/mediqa2019"
_LICENSE = 'License information unavailable'
_URLS = {
    _DATASETNAME: "https://github.com/abachaa/MEDIQA2019/archive/refs/heads/master.zip"
}

_SUPPORTED_TASKS = [Tasks.TEXT_PAIRS_CLASSIFICATION]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class MediqaRQEDataset(datasets.GeneratorBasedBuilder):
    """MediqaRQE Dataset"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        # Source Schema
        BigBioConfig(
            name="mediqa_rqe_source",
            version=SOURCE_VERSION,
            description="MEDIQA RQE source schema",
            schema="source",
            subset_id="mediqa_rqe_source",
        ),
        # BigBio Schema
        BigBioConfig(
            name="mediqa_rqe_bigbio_pairs",
            version=BIGBIO_VERSION,
            description="MEDIQA RQE BigBio schema",
            schema="bigbio_pairs",
            subset_id="mediqa_rqe_bigbio_pairs",
        ),
    ]

    DEFAULT_CONFIG_NAME = "mediqa_rqe_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pid": datasets.Value("string"),
                    "value": datasets.Value("string"),
                    "chq": datasets.Value("string"),
                    "faq": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_pairs":
            features = pairs_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir
                    / "MEDIQA2019-master/MEDIQA_Task2_RQE/MEDIQA2019-Task2-RQE-TrainingSet-AMIA2016.xml"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir
                    / "MEDIQA2019-master/MEDIQA_Task2_RQE/MEDIQA2019-Task2-RQE-ValidationSet-AMIA2016.xml"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir
                    / "MEDIQA2019-master/MEDIQA_Task2_RQE/MEDIQA2019-Task2-RQE-TestSet-wLabels.xml"
                },
            ),
        ]

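    # Expected layout of each record in the RQE XML files, as implied by the
    # parsing in _generate_examples below (illustrative sketch, not copied from
    # the data files):
    #
    #   <pair pid="..." value="...">
    #       <chq>consumer health question text</chq>
    #       <faq>reference FAQ question text</faq>
    #   </pair>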
    def _generate_examples(self, filepath: Path) -> Iterator[Tuple[str, Dict]]:
        dom = ET.parse(filepath).getroot()
        for row in dom.iterfind("pair"):
            pid = row.attrib["pid"]
            value = row.attrib["value"]
            chq = row.find("chq").text.strip()
            faq = row.find("faq").text.strip()

            if self.config.schema == "source":
                yield pid, {"pid": pid, "value": value, "chq": chq, "faq": faq}
            elif self.config.schema == "bigbio_pairs":
                yield pid, {
                    "id": pid,
                    "document_id": pid,
                    "text_1": chq,
                    "text_2": faq,
                    "label": value,
                }
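
For reference, a minimal usage sketch. It assumes the script above is saved locally as mediqa_rqe.py next to the bigbiohub.py helper it imports from the bigbio repo, and that your installed datasets version still supports script-based loading (recent releases may additionally require trust_remote_code=True):

import datasets

# Source schema: pid / value / chq / faq, as declared in _info().
source = datasets.load_dataset("mediqa_rqe.py", name="mediqa_rqe_source")
print(source["train"][0])

# BigBio pairs schema: id / document_id / text_1 / text_2 / label.
pairs = datasets.load_dataset("mediqa_rqe.py", name="mediqa_rqe_bigbio_pairs")
print(pairs["validation"][0])

The source config yields the raw fields parsed from the XML, while the bigbio_pairs config maps the same records onto the shared BigBio pairs schema used across BigBio datasets.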