kleinay committed
Commit
5e2e206
1 Parent(s): 69f1338

First version of qa_srl2020 datasets script

Files changed (1)
  1. qa_srl2020.py +226 -0
qa_srl2020.py ADDED
@@ -0,0 +1,226 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """A dataset loading script for the QASRL-GS dataset (Roit et al., ACL 2020)."""
+ 
+ 
+ import datasets
+ from pathlib import Path
+ from typing import List
+ import pandas as pd
+ 
+ 
+ _CITATION = """\
+ @inproceedings{roit2020controlled,
+     title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
+     author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
+     booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
+     pages={7008--7013},
+     year={2020}
+ }
+ """
+ 
+ 
+ _DESCRIPTION = """\
+ The dataset contains question-answer pairs modeling verbal predicate-argument structure.
+ The questions start with wh-words (Who, What, Where, When, etc.) and contain a verb predicate from the sentence; the answers are phrases in the sentence.
+ This dataset, a.k.a. "QASRL-GS" (Gold Standard) or "QASRL-2020", was constructed via controlled crowdsourcing.
+ See the paper for details: "Controlled Crowdsourcing for High-Quality QA-SRL Annotation", Roit et al., 2020.
+ """
+ 
+ _HOMEPAGE = "https://github.com/plroit/qasrl-gs"
+ 
+ _LICENSE = """MIT License
+ 
+ Copyright (c) 2020 plroit
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE."""
+ 
+ 
+ _URLs = {
+     "sentences": {
+         "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
+         "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
+         "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
+         "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
+     },
+     "qasrl-annotations": {
+         "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
+         "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
+         "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
+         "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
+     },
+ }
+ 
+ SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
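+ # A span feature is a [start, end] pair of integer indices (cf. `span_from_str` below).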
+ 
+ # TODO: The dataset name usually matches the script name, with CamelCase instead of snake_case
+ class QaSrl2020(datasets.GeneratorBasedBuilder):
+     """QA-SRL2020: Question-Answer driven SRL gold-standard dataset.
+     Notice: this dataset generally follows the format of the `qa_srl` and `kleinay/qa_srl2018` datasets.
+     However, it extends the features to include the "is_verbal" and "verb_form" fields, as in the `kleinay/qanom` dataset, which accounts for nominalizations.
+     Nevertheless, these fields can be ignored, since for all data points in QASRL-2020, "is_verbal" == True and "verb_form" is equivalent to the "predicate" feature."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text", version=VERSION, description="This provides the QASRL-2020 (QASRL-GS) dataset"
+         ),
+     ]
+ 
+     DEFAULT_CONFIG_NAME = (
+         "plain_text"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+     )
+ 
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentence": datasets.Value("string"),
+                 "sent_id": datasets.Value("string"),
+                 "predicate_idx": datasets.Value("int32"),
+                 "predicate": datasets.Value("string"),
+                 "is_verbal": datasets.Value("bool"),
+                 "verb_form": datasets.Value("string"),
+                 "question": datasets.Sequence(datasets.Value("string")),
+                 "answers": datasets.Sequence(datasets.Value("string")),
+                 "answer_ranges": datasets.Sequence(SpanFeatureType),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types (defined above)
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+ 
+     def _prepare_wiktionary_verb_inflections(self, dl_manager):
+         """Download a Wiktionary-based verb-inflection lexicon and index it by surface form."""
+         wiktionary_url = "https://raw.githubusercontent.com/nafitzgerald/nrl-qasrl/master/data/wiktionary/en_verb_inflections.txt"
+         wiktionary_path = dl_manager.download(wiktionary_url)
+         verb_map = {}
+         with open(wiktionary_path, "r", encoding="utf-8") as f:
+             for line in f:
+                 inflections = line.strip().split("\t")
+                 stem, present_singular_3rd, present_participle, past, past_participle = inflections
+                 for inf in inflections:
+                     verb_map[inf] = {
+                         "Stem": stem,
+                         "PresentSingular3rd": present_singular_3rd,
+                         "PresentParticiple": present_participle,
+                         "Past": past,
+                         "PastParticiple": past_participle,
+                     }
+         self.verb_inflections = verb_map
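+         # Illustrative (hypothetical lexicon line): "say\tsays\tsaying\tsaid\tsaid" maps every
+         # form to {"Stem": "say", "PresentSingular3rd": "says", "PresentParticiple": "saying",
+         # "Past": "said", "PastParticiple": "said"}, e.g. verb_map["saying"]["Past"] == "said".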
+ 
+     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
+         """Returns SplitGenerators."""
+ 
+         # Prepare the Wiktionary verb-inflection lexicon into `self.verb_inflections`
+         self._prepare_wiktionary_verb_inflections(dl_manager)
+ 
+         # Download and prepare all files - keep the same structure as _URLs
+         corpora = {
+             data_type: {
+                 section: Path(dl_manager.download_and_extract(_URLs[data_type][section]))
+                 for section in _URLs[data_type]
+             }
+             for data_type in _URLs
+         }
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "qasrl_annotations_paths": [
+                         corpora["qasrl-annotations"]["wikinews.dev"],
+                         corpora["qasrl-annotations"]["wikipedia.dev"],
+                     ],
+                     "sentences_paths": [
+                         corpora["sentences"]["wikinews.dev"],
+                         corpora["sentences"]["wikipedia.dev"],
+                     ],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "qasrl_annotations_paths": [
+                         corpora["qasrl-annotations"]["wikinews.test"],
+                         corpora["qasrl-annotations"]["wikipedia.test"],
+                     ],
+                     "sentences_paths": [
+                         corpora["sentences"]["wikinews.test"],
+                         corpora["sentences"]["wikipedia.test"],
+                     ],
+                 },
+             ),
+         ]
+ 
+     @classmethod
+     def span_from_str(cls, s: str):
+         start, end = s.split(":")
+         return [int(start), int(end)]
+ 
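+     # e.g. span_from_str("3:5") -> [3, 5]; the "answer_range" CSV column stores such "start:end"
+     # strings, joined by "~!~" when an answer consists of multiple spans.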
+     def _generate_examples(self, qasrl_annotations_paths: List[str], sentences_paths: List[str]):
+         """Yields QASRL examples from CSV files in the QASRL-2020/QANom format."""
+ 
+         # Merge the sentence files and map each sentence-id to its raw sentence
+         sent_df = pd.concat([pd.read_csv(fn) for fn in sentences_paths])
+         qasrl_id2sent = {r["qasrl_id"]: r["sentence"] for _, r in sent_df.iterrows()}
+         # Merge the annotations from all sections
+         df = pd.concat([pd.read_csv(fn) for fn in qasrl_annotations_paths]).reset_index()
+         for counter, row in df.iterrows():
+             # Each CSV record (row) is either a QA pair, or marks a predicate/non-predicate with no QAs
+             sentence = qasrl_id2sent[row.qasrl_id]
+             # Prepare the question (slots)
+             na_to_underscore = lambda s: "_" if pd.isna(s) else str(s)
+             question = [] if pd.isna(row.question) else list(map(na_to_underscore, [
+                 row.wh, row.aux, row.subj, row.verb_slot_inflection, row.obj, row.prep, row.obj2
+             ])) + ['?']
+             # Fix the verb slot - replace the inflection name with the actual verb surface form, and prepend verb_prefix
+             if question:
+                 if row.verb in self.verb_inflections and not pd.isna(row.verb_slot_inflection):
+                     verb_surface = self.verb_inflections[row.verb][row.verb_slot_inflection]
+                 else:
+                     verb_surface = row.verb
+                 if not pd.isna(row.verb_prefix):
+                     verb_surface = row.verb_prefix + " " + verb_surface
+                 question[3] = verb_surface
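+             # `question` is now either [] or the 7 QASRL slots plus "?", with empty slots rendered as "_";
+             # illustrative example: ["Who", "did", "_", "say", "_", "to", "_", "?"]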
+             answers = [] if pd.isna(row.answer) else row.answer.split("~!~")
+             answer_ranges = [] if pd.isna(row.answer_range) else [QaSrl2020.span_from_str(s) for s in row.answer_range.split("~!~")]
+ 
+             yield counter, {
+                 "sentence": sentence,
+                 "sent_id": row.qasrl_id,
+                 "predicate_idx": row.verb_idx,
+                 "predicate": row.verb,
+                 "is_verbal": True,
+                 "verb_form": row.verb,
+                 "question": question,
+                 "answers": answers,
+                 "answer_ranges": answer_ranges,
+             }
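
For reference, a minimal usage sketch, assuming the script is hosted as `kleinay/qa_srl2020` on the Hub (the namespace is inferred from the class docstring and is not confirmed by this commit):

    from datasets import load_dataset

    # Hypothetical repo path; recent `datasets` versions may also require trust_remote_code=True.
    dataset = load_dataset("kleinay/qa_srl2020")

    # The script defines only VALIDATION and TEST splits (QASRL-GS has no train section).
    example = dataset["validation"][0]
    print(example["question"], example["answers"], example["answer_ranges"])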