kleinay commited on
Commit
256646e
1 Parent(s): f3db811

upload qamr.py script

Browse files
Files changed (1) hide show
  1. qamr.py +216 -0
qamr.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """A Dataset loading script for the QAMR (Question-Answer Meaning Representations) dataset (Michael et al., NAACL 2018)."""
16
+
17
+
18
+ import datasets
19
+ from pathlib import Path
20
+ import pandas as pd
21
+ from operator import itemgetter
22
+ from itertools import groupby
23
+
24
+ _CITATION = """\
25
+ @inproceedings{michael-etal-2018-crowdsourcing,
26
+ title = "Crowdsourcing Question-Answer Meaning Representations",
27
+ author = "Michael, Julian and
28
+ Stanovsky, Gabriel and
29
+ He, Luheng and
30
+ Dagan, Ido and
31
+ Zettlemoyer, Luke",
32
+ booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
33
+ month = jun,
34
+ year = "2018",
35
+ address = "New Orleans, Louisiana",
36
+ publisher = "Association for Computational Linguistics",
37
+ url = "https://aclanthology.org/N18-2089",
38
+ doi = "10.18653/v1/N18-2089",
39
+ pages = "560--568",
40
+ abstract = "We introduce Question-Answer Meaning Representations (QAMRs), which represent the predicate-argument structure of a sentence as a set of question-answer pairs. We develop a crowdsourcing scheme to show that QAMRs can be labeled with very little training, and gather a dataset with over 5,000 sentences and 100,000 questions. A qualitative analysis demonstrates that the crowd-generated question-answer pairs cover the vast majority of predicate-argument relationships in existing datasets (including PropBank, NomBank, and QA-SRL) along with many previously under-resourced ones, including implicit arguments and relations. We also report baseline models for question generation and answering, and summarize a recent approach for using QAMR labels to improve an Open IE system. These results suggest the freely available QAMR data and annotation scheme should support significant future work.",
41
+ }
42
+ """
43
+
44
+
45
+ _DESCRIPTION = """\
46
+ Question-Answer Meaning Representations (QAMR) are a new paradigm for representing predicate-argument structure, which makes use of free-form questions and their answers in order to represent a wide range of semantic phenomena.
47
+ The semantic expressivity of QAMR compares to (and in some cases exceeds) that of existing formalisms, while the representations can be annotated by non-experts (in particular, using crowdsourcing).
48
+ Formal Notes:
49
+ * The `answer_ranges` feature here has a different meaning from that of the `qanom` and `qa_srl` datasets, although both are structured the same way;
50
+ while in qasrl/qanom, each "answer range" (i.e. each span, represented as [begin-idx, end-idx]) stands for an independant answer which is read separately
51
+ (e.g., "John Vincen", "head of marketing"), in this `qamr` dataset each question has a single answer who might be conposed of non-consecutive spans;
52
+ that is, all given spans should be read successively.
53
+ * Another difference is that the meaning of `predicate` in QAMR is different and softer than in QASRL/QANom - here, the predicate is not necessarily within the question,
54
+ it can also be in the answer; it is generally what the annotator marked as the focus of the QA.
55
+ """
56
+
57
+ _HOMEPAGE = "https://github.com/uwnlp/qamr"
58
+
59
+ _LICENSE = """\
60
+ MIT License
61
+
62
+ Copyright (c) 2017 Julian Michael
63
+
64
+ Permission is hereby granted, free of charge, to any person obtaining a copy
65
+ of this software and associated documentation files (the "Software"), to deal
66
+ in the Software without restriction, including without limitation the rights
67
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
68
+ copies of the Software, and to permit persons to whom the Software is
69
+ furnished to do so, subject to the following conditions:
70
+
71
+ The above copyright notice and this permission notice shall be included in all
72
+ copies or substantial portions of the Software.
73
+
74
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
75
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
76
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
77
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
78
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
79
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
80
+ SOFTWARE."""
81
+
82
+
83
+ _URLs = {
84
+ "train": "https://github.com/uwnlp/qamr/raw/master/data/filtered/train.tsv",
85
+ "dev": "https://github.com/uwnlp/qamr/raw/master/data/filtered/dev.tsv",
86
+ "test": "https://github.com/uwnlp/qamr/raw/master/data/filtered/test.tsv",
87
+ "ptb": "https://github.com/uwnlp/qamr/raw/master/data/filtered/ptb.tsv",
88
+ "sentences": "https://github.com/uwnlp/qamr/raw/master/data/wiki-sentences.tsv",
89
+ }
90
+
91
+ TSV_COLUMNS = ["sentence_id", "target_words", "worker_id", "QA_id", "target_word_id", "question", "answer_indices", "validator_1_response", "validator_2_response"]
92

# Feature type for one token span: a [start, end) pair of token indices
# (end-exclusive, matching the QASRL-style datasets this script aligns with).
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
94
+
95
+ # helper func
96
+ def consecutive_groups(iterable, ordering=lambda x: x):
97
+ """ Adapted from the `more-itertools` package -
98
+ https://github.com/more-itertools/more-itertools/blob/ae32ef57502b9def6e2362cff43a453901fc1f4f/more_itertools/more.py#L2600
99
+ """
100
+ groups = []
101
+ for k, g in groupby(
102
+ enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
103
+ ):
104
+ groups.append(list(map(itemgetter(1), g)))
105
+ return groups
106
+
107
+
108
class Qamr(datasets.GeneratorBasedBuilder):
    """QAMR: Question-Answer Meaning Representations corpus"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text", version=VERSION, description="This provides the filtered crowdsourced dataset for QAMR"
        ),
    ]

    # A default config is optional; we use the single one defined above.
    DEFAULT_CONFIG_NAME = "plain_text"

    def _info(self):
        """Return the DatasetInfo (schema, description, citation, license)."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                "predicate_idx": datasets.Value("int32"),
                "predicate": datasets.Value("string"),
                "question": datasets.Sequence(datasets.Value("string")),
                "answers": datasets.Sequence(datasets.Value("string")),
                # NOTE: unlike qasrl/qanom, these spans jointly form ONE answer
                # and should be read successively (see _DESCRIPTION).
                "answer_ranges": datasets.Sequence(SpanFeatureType)
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
        """Returns SplitGenerators."""
        # Download every raw TSV up front; "sentences" is also needed later by
        # _generate_examples to resolve sentence ids into raw sentences.
        self.downloaded_files = {
            name: Path(dl_manager.download_and_extract(url))
            for name, url in _URLs.items()
        }

        split_to_file_key = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs are passed to _generate_examples
                gen_kwargs={"filepath": self.downloaded_files[file_key]},
            )
            for split_name, file_key in split_to_file_key
        ]

    def _generate_examples(self, filepath):
        """ Yields QAMR examples (QAs) from a '.tsv' file ."""
        # Build a sentence-id -> raw-sentence lookup from the shared sentence file.
        sentences_df = pd.read_csv(self.downloaded_files["sentences"], sep='\t', names=["sentence_id", "sentence"])
        id_to_sentence = dict(zip(sentences_df["sentence_id"], sentences_df["sentence"]))
        qa_df = pd.read_csv(filepath, sep='\t', names=TSV_COLUMNS)
        # Each TSV record is one QA pair.
        for example_id, qa in qa_df.iterrows():
            sentence = id_to_sentence[qa.sentence_id]
            tokens = sentence.split(" ")
            # TODO: split question to some slots? wh-question? question mark?
            question = [qa.question]
            answer_indices = [int(i) for i in qa.answer_indices.split(" ")]
            # Group the (possibly non-consecutive) answer token indices into runs,
            # then turn each run into an end-exclusive span to align with the
            # QASRL-family datasets.
            index_runs = consecutive_groups(answer_indices)
            answer_ranges = [[run[0], run[-1] + 1] for run in index_runs]
            answer = ' '.join(tokens[i] for i in answer_indices)

            yield example_id, {
                "sentence": sentence,
                "sent_id": qa.sentence_id,
                "predicate_idx": qa.target_word_id,
                "predicate": tokens[qa.target_word_id],
                "question": question,
                "answers": [answer],
                "answer_ranges": answer_ranges
            }
216
+