kleinay committed on
Commit
6419964
1 Parent(s): 42f0b08

adding 'is_verbal' and 'verb_form' fixed columns, to align with qanom and qa_srl2020

Files changed (1)
  1. qa_srl2018.py +186 -0
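
For context, a minimal sketch of how the new fixed columns surface when loading the dataset (the repo id "kleinay/qa_srl2018" is an assumption for illustration; the column names come from the features defined below):

    from datasets import load_dataset

    dataset = load_dataset("kleinay/qa_srl2018")  # repo id assumed for illustration
    example = dataset["train"][0]
    # 'is_verbal' is always True here (all QA-SRL predicates are verbal), and
    # 'verb_form' holds the predicate token, mirroring the qanom / qa_srl2020 schema.
    print(example["predicate"], example["is_verbal"], example["verb_form"])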
qa_srl2018.py ADDED
@@ -0,0 +1,186 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """QA-SRL 2018: Large-Scale Question-Answer Driven Semantic Role Labeling corpus."""
+
+
+ import datasets
+ from pathlib import Path
+ import gzip
+ import json
+
+
+ _CITATION = """\
+ @inproceedings{fitzgerald2018large,
+     title={Large-Scale QA-SRL Parsing},
+     author={FitzGerald, Nicholas and Michael, Julian and He, Luheng and Zettlemoyer, Luke},
+     booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+     pages={2051--2060},
+     year={2018}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ The dataset contains question-answer pairs to model verbal predicate-argument structure. The questions start with wh-words (Who, What, Where, When, etc.) and contain a verb predicate in the sentence; the answers are phrases in the sentence.
+ This dataset, a.k.a. "QASRL Bank", "QASRL-v2" or "QASRL-LS" (Large Scale), was constructed via crowdsourcing.
+ """
+
+ _HOMEPAGE = "https://qasrl.org"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+
+ _URLs = {
+     "qasrl_v2.0": "http://qasrl.org/data/qasrl-v2.tar",
+     "qasrl_v2.1": "https://qasrl.org/data/qasrl-v2_1.tar"
+ }
+
+ # An answer span is a pair of [start, end) token indices into the sentence.
+ SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
+
+ # The dataset class name matches the script name with CamelCase instead of snake_case.
+ class QaSrl2018(datasets.GeneratorBasedBuilder):
+     """QA-SRL2018: Large-Scale Question-Answer Driven Semantic Role Labeling corpus"""
+
+     VERSION = datasets.Version("1.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text", version=VERSION, description="This provides the QA-SRL 2018 corpus"
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = (
+         "plain_text"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+     )
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentence": datasets.Value("string"),
+                 "sent_id": datasets.Value("string"),
+                 "predicate_idx": datasets.Value("int32"),
+                 "predicate": datasets.Value("string"),
+                 "is_verbal": datasets.Value("bool"),
+                 "verb_form": datasets.Value("string"),
+                 "question": datasets.Sequence(datasets.Value("string")),
+                 "answers": datasets.Sequence(datasets.Value("string")),
+                 "answer_ranges": datasets.Sequence(SpanFeatureType)
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         """Returns SplitGenerators."""
+
+         # Download and extract the tar archive of the corpus.
+
+         # Older version of the corpus (has some format errors):
+         # corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qasrl_v2.0"]))
+         # corpus_orig = corpus_base_path / "qasrl-v2" / "orig"
+
+         corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qasrl_v2.1"]))
+         corpus_orig = corpus_base_path / "qasrl-v2_1" / "orig"
+
+         # TODO: add optional kwarg for genre (wikinews)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": corpus_orig / "train.jsonl.gz",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": corpus_orig / "dev.jsonl.gz",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": corpus_orig / "test.jsonl.gz",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples from a '.jsonl.gz' file."""
+
+         with gzip.open(filepath, "rt") as f:
+             qa_counter = 0
+             for line in f:
+                 # Each line holds one sentence, with its tokens and a 'verbEntries'
+                 # dict mapping predicate token indices to their QA annotations.
+                 sent_obj = json.loads(line.strip())
+                 tokens = sent_obj['sentenceTokens']
+                 sentence = ' '.join(tokens)
+                 for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
+                     verb_forms = verb_obj['verbInflectedForms']
+                     predicate = tokens[int(predicate_idx)]
+                     for question_obj in verb_obj['questionLabels'].values():
+                         question_slots = question_obj['questionSlots']
+                         # The 'verb' slot names an inflection (e.g. "stem" or "be pastParticiple");
+                         # its last token is the key into 'verbInflectedForms'.
+                         verb_form = question_slots['verb']
+                         verb_surface = verb_forms[verb_form.split(" ")[-1]]
+                         question_slots_in_order = [
+                             question_slots["wh"],
+                             question_slots["aux"],
+                             question_slots["subj"],
+                             verb_surface,
+                             question_slots["obj"],
+                             question_slots["prep"],
+                             question_slots["obj2"],
+                             '?'
+                         ]
+                         # Retrieve answers: pool the spans of all valid judgments, deduplicated.
+                         answer_spans = []
+                         for ans in question_obj['answerJudgments']:
+                             if ans['isValid']:
+                                 answer_spans.extend(ans['spans'])
+                         answer_spans = list(set(tuple(a) for a in answer_spans))
+                         # Spans are [start, end) token indices into the sentence.
+                         answer_strs = [' '.join([tokens[i] for i in range(*span)])
+                                        for span in answer_spans]
+
+                         yield qa_counter, {
+                             "sentence": sentence,
+                             "sent_id": sent_obj['sentenceId'],
+                             "predicate_idx": int(predicate_idx),
+                             "predicate": predicate,
+                             # Fixed columns, aligning the schema with qanom and qa_srl2020:
+                             # all QA-SRL predicates are verbal, and the predicate token
+                             # serves as the verb form.
+                             "is_verbal": True,
+                             "verb_form": predicate,
+                             "question": question_slots_in_order,
+                             "answers": answer_strs,
+                             "answer_ranges": answer_spans
+                         }
+                         qa_counter += 1
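
For reference, a minimal hand-constructed record in the shape _generate_examples expects, useful for tracing the parsing logic above (the field names are the ones accessed in the code; the values, and the "_" placeholder for empty question slots, are illustrative assumptions):

    # One line of a '.jsonl.gz' split file, after json.loads:
    sent_obj = {
        "sentenceId": "example:0",                       # illustrative id
        "sentenceTokens": ["The", "dog", "ran", "home"],
        "verbEntries": {
            "2": {                                       # keyed by predicate token index (a string)
                "verbInflectedForms": {"stem": "run", "past": "ran"},
                "questionLabels": {
                    "What ran?": {
                        "questionSlots": {
                            "wh": "what", "aux": "_", "subj": "_",
                            "verb": "past",              # last token indexes verbInflectedForms
                            "obj": "_", "prep": "_", "obj2": "_",
                        },
                        "answerJudgments": [
                            {"isValid": True, "spans": [[0, 2]]},  # [start, end) -> "The dog"
                        ],
                    }
                },
            }
        },
    }

The yielded "question" field is the ordered slot list; joining the non-placeholder slots, e.g. " ".join(s for s in question if s != "_"), reconstructs the surface question "what ran ?".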