kleinay committed on
Commit
b9d1f15
1 Parent(s): cce099f

Load from reformatted data (jsonl, hosted at qasrl.org) by default — aligns question slots with the qasrl-2018 data

Browse files
Files changed (1) hide show
  1. qa_srl2020.py +150 -49
qa_srl2020.py CHANGED
@@ -16,9 +16,13 @@
16
 
17
 
18
  import datasets
 
19
  from pathlib import Path
20
- from typing import List
21
  import pandas as pd
 
 
 
22
 
23
 
24
  _CITATION = """\
@@ -65,39 +69,57 @@ SOFTWARE."""
65
 
66
 
67
  _URLs = {
68
- "sentences": {
69
- "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
70
- "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
71
- "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
72
- "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
 
 
 
 
 
 
 
 
73
  },
74
- "qasrl-annotations": {
75
- "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
76
- "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
77
- "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
78
- "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
79
- },
80
  }
81
 
82
  SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
83
 
84
- # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 
 
 
 
 
85
  class QaSrl2020(datasets.GeneratorBasedBuilder):
86
  """QA-SRL2020: Question-Answer driven SRL gold-standard dataset.
87
  Notice: This dataset genrally follows the format of `qa_srl` and `kleinay\qa_srl2018` datasets.
88
  However, it extends Features to include "is_verbal" and "verb_form" fields, as in the `kleinay\qanom` dataset that accounts for nominalizations.
89
  Nevertheless these fields can be ignored, since for all data points in QASRL-2020, "is_verbal"==True and "verb_form" is equivalent to the "predicate" feature. """
90
 
91
- VERSION = datasets.Version("1.0.0")
 
 
92
 
93
  BUILDER_CONFIGS = [
94
- datasets.BuilderConfig(
95
- name="plain_text", version=VERSION, description="This provides the QASRL-2020 (QASRL-GS) dataset"
 
 
 
 
 
 
 
 
96
  ),
97
  ]
98
 
99
  DEFAULT_CONFIG_NAME = (
100
- "plain_text" # It's not mandatory to have a default configuration. Just use one if it make sense.
101
  )
102
 
103
  def _info(self):
@@ -145,45 +167,124 @@ class QaSrl2020(datasets.GeneratorBasedBuilder):
145
 
146
  def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
147
  """Returns SplitGenerators."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
- # prepare wiktionary for verb inflections inside 'self.verb_inflections'
150
- self._prepare_wiktionary_verb_inflections(dl_manager)
151
-
152
- # Download and prepare all files - keep same structure as _URLs
153
- corpora = {data_type: {
154
- section: Path(dl_manager.download_and_extract(_URLs[data_type][section]))
155
- for section in _URLs[data_type] }
156
- for data_type in _URLs
157
- }
158
- return [
159
- datasets.SplitGenerator(
160
- name=datasets.Split.VALIDATION,
161
- # These kwargs will be passed to _generate_examples
162
- gen_kwargs={
163
- "qasrl_annotations_paths": [corpora["qasrl-annotations"]["wikinews.dev"],
164
- corpora["qasrl-annotations"]["wikipedia.dev"]],
165
- "sentences_paths": [corpora["sentences"]["wikinews.dev"],
166
- corpora["sentences"]["wikipedia.dev"]],
167
- },
168
- ),
169
- datasets.SplitGenerator(
170
- name=datasets.Split.TEST,
171
- # These kwargs will be passed to _generate_examples
172
- gen_kwargs={
173
- "qasrl_annotations_paths": [corpora["qasrl-annotations"]["wikinews.test"],
174
- corpora["qasrl-annotations"]["wikipedia.test"]],
175
- "sentences_paths": [corpora["sentences"]["wikinews.test"],
176
- corpora["sentences"]["wikipedia.test"]],
177
- },
178
- ),
179
- ]
180
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
  @classmethod
182
  def span_from_str(cls, s:str):
183
  start, end = s.split(":")
184
  return [int(start), int(end)]
185
 
186
- def _generate_examples(self, qasrl_annotations_paths: List[str], sentences_paths: List[str]):
187
 
188
  """ Yields QASRL examples from a csv file in QASRL-2020/QANom format."""
189
 
16
 
17
 
18
  import datasets
19
+ from dataclasses import dataclass
20
  from pathlib import Path
21
+ from typing import List, Tuple
22
  import pandas as pd
23
+ import json
24
+ import gzip
25
+ import itertools
26
 
27
 
28
  _CITATION = """\
69
 
70
 
71
  _URLs = {
72
+ "csv": {
73
+ "sentences": {
74
+ "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
75
+ "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
76
+ "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
77
+ "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
78
+ },
79
+ "qasrl-annotations": {
80
+ "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
81
+ "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
82
+ "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
83
+ "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
84
+ },
85
  },
86
+ "jsonl": "https://qasrl.org/data/qasrl-gs.tar"
 
 
 
 
 
87
  }
88
 
89
  SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
90
 
91
@dataclass
class QASRL2020BuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for QASRL-2020 that selects the source data format.

    The original docstring ("re-distribute the original dev and test splits
    between train, dev and test") described functionality this config does
    not have; its only option is the format switch below.
    """
    # Which distribution to read: "csv" (original per-domain files from the
    # plroit/qasrl-gs GitHub repo) or "jsonl" (reformatted qasrl-v2-style
    # archive hosted at qasrl.org). Defaults to the reformatted jsonl data.
    load_from: str = "jsonl"
97
  class QaSrl2020(datasets.GeneratorBasedBuilder):
98
  """QA-SRL2020: Question-Answer driven SRL gold-standard dataset.
99
  Notice: This dataset genrally follows the format of `qa_srl` and `kleinay\qa_srl2018` datasets.
100
  However, it extends Features to include "is_verbal" and "verb_form" fields, as in the `kleinay\qanom` dataset that accounts for nominalizations.
101
  Nevertheless these fields can be ignored, since for all data points in QASRL-2020, "is_verbal"==True and "verb_form" is equivalent to the "predicate" feature. """
102
 
103
+ VERSION = datasets.Version("1.1.0")
104
+
105
+ BUILDER_CONFIG_CLASS = QASRL2020BuilderConfig
106
 
107
  BUILDER_CONFIGS = [
108
+ QASRL2020BuilderConfig(
109
+ name="default", version=VERSION, description="This provides the QASRL-2020 (QASRL-GS) dataset"
110
+ ),
111
+ QASRL2020BuilderConfig(
112
+ name="csv", version=VERSION, description="This provides the QASRL-2020 (QASRL-GS) dataset",
113
+ load_from="csv"
114
+ ),
115
+ QASRL2020BuilderConfig(
116
+ name="jsonl", version=VERSION, description="This provides the QASRL-2020 (QASRL-GS) dataset",
117
+ load_from="jsonl"
118
  ),
119
  ]
120
 
121
  DEFAULT_CONFIG_NAME = (
122
+ "default" # It's not mandatory to have a default configuration. Just use one if it make sense.
123
  )
124
 
125
  def _info(self):
167
 
168
  def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
169
  """Returns SplitGenerators."""
170
+ assert self.config.load_from in ("csv", "jsonl")
171
+
172
+ if self.config.load_from == "csv":
173
+ # prepare wiktionary for verb inflections inside 'self.verb_inflections'
174
+ self._prepare_wiktionary_verb_inflections(dl_manager)
175
+
176
+ # Download and prepare all files - keep same structure as _URLs
177
+ URLs = _URLs["csv"]
178
+ corpora = {data_type: {
179
+ section: Path(dl_manager.download_and_extract(URLs[data_type][section]))
180
+ for section in URLs[data_type] }
181
+ for data_type in URLs
182
+ }
183
+ return [
184
+ datasets.SplitGenerator(
185
+ name=datasets.Split.VALIDATION,
186
+ # These kwargs will be passed to _generate_examples
187
+ gen_kwargs={
188
+ "qasrl_annotations_paths": [corpora["qasrl-annotations"]["wikinews.dev"],
189
+ corpora["qasrl-annotations"]["wikipedia.dev"]],
190
+ "sentences_paths": [corpora["sentences"]["wikinews.dev"],
191
+ corpora["sentences"]["wikipedia.dev"]],
192
+ },
193
+ ),
194
+ datasets.SplitGenerator(
195
+ name=datasets.Split.TEST,
196
+ # These kwargs will be passed to _generate_examples
197
+ gen_kwargs={
198
+ "qasrl_annotations_paths": [corpora["qasrl-annotations"]["wikinews.test"],
199
+ corpora["qasrl-annotations"]["wikipedia.test"]],
200
+ "sentences_paths": [corpora["sentences"]["wikinews.test"],
201
+ corpora["sentences"]["wikipedia.test"]],
202
+ },
203
+ ),
204
+ ]
205
 
206
+ elif self.config.load_from == "jsonl":
207
+ self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs["jsonl"]))
208
+ return [
209
+ datasets.SplitGenerator(
210
+ name=datasets.Split.VALIDATION,
211
+ # These kwargs will be passed to _generate_examples
212
+ gen_kwargs={
213
+ "qasrl_annotations_paths": self.corpus_base_path / "qasrl-gs" / "dev.jsonl.gz",
214
+ },
215
+ ),
216
+ datasets.SplitGenerator(
217
+ name=datasets.Split.TEST,
218
+ # These kwargs will be passed to _generate_examples
219
+ gen_kwargs={
220
+ "qasrl_annotations_paths": self.corpus_base_path / "qasrl-gs" / "test.jsonl.gz",
221
+ },
222
+ ),
223
+ ]
224
+
 
 
 
 
 
 
 
 
 
 
 
 
225
 
226
+ def _generate_examples(self, qasrl_annotations_paths: List[str], sentences_paths: List[str] = None):
227
+ if self.config.load_from == "csv":
228
+ return self._generate_examples_from_csv(qasrl_annotations_paths, sentences_paths)
229
+ elif self.config.load_from == "jsonl":
230
+ return self._generate_examples_from_jsonl(qasrl_annotations_paths)
231
+
232
+ def _generate_examples_from_jsonl(self, qasrl_annotations_path):
233
+ """ Yields examples from a jsonl.gz file, in same format as qasrl-v2."""
234
+ empty_to_underscore = lambda s: "_" if s=="" else s
235
+ with gzip.open(qasrl_annotations_path, "rt") as f:
236
+ qa_counter = 0
237
+ for line in f:
238
+ sent_obj = json.loads(line.strip())
239
+ tokens = sent_obj['sentenceTokens']
240
+ sentence = ' '.join(tokens)
241
+ for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
242
+ verb_forms = verb_obj['verbInflectedForms']
243
+ predicate = tokens[int(predicate_idx)]
244
+ for question_obj in verb_obj['questionLabels'].values():
245
+ question_slots = question_obj['questionSlots']
246
+ verb_form = question_slots['verb']
247
+ verb_surface = verb_forms[verb_form.split(" ")[-1]] # if verb_form in verb_forms else verb_forms['stem']
248
+ question_slots_in_order = [
249
+ question_slots["wh"],
250
+ question_slots["aux"],
251
+ question_slots["subj"],
252
+ verb_surface,
253
+ question_slots["obj"],
254
+ empty_to_underscore(question_slots["prep"]), # fix bug in data
255
+ question_slots["obj2"],
256
+ '?'
257
+ ]
258
+ # retrieve answers
259
+ answer_spans = []
260
+ for ans in question_obj['answerJudgments']:
261
+ if ans['isValid']:
262
+ answer_spans.extend(ans['spans'])
263
+ answer_spans = list(set(tuple(a) for a in answer_spans))
264
+ # answer_spans = list(set(answer_spans))
265
+ answer_strs = [' '.join([tokens[i] for i in range(*span)])
266
+ for span in answer_spans]
267
+
268
+ yield qa_counter, {
269
+ "sentence": sentence,
270
+ "sent_id": sent_obj['sentenceId'],
271
+ "predicate_idx": predicate_idx,
272
+ "predicate": predicate,
273
+ "is_verbal": True,
274
+ "verb_form": verb_forms['stem'],
275
+ "question": question_slots_in_order,
276
+ "answers": answer_strs,
277
+ "answer_ranges": answer_spans
278
+ }
279
+ qa_counter += 1
280
+
281
+
282
  @classmethod
283
  def span_from_str(cls, s:str):
284
  start, end = s.split(":")
285
  return [int(start), int(end)]
286
 
287
+ def _generate_examples_from_csv(self, qasrl_annotations_paths: List[str], sentences_paths: List[str]):
288
 
289
  """ Yields QASRL examples from a csv file in QASRL-2020/QANom format."""
290