kleinay committed on
Commit a1a9f81
Parent: 758c895

Support 'domains' kwarg in loading
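
With this change, a `domains` keyword can be passed at load time. A minimal usage sketch, assuming the dataset is hosted on the Hub as `kleinay/qa_srl2018` (the repo id is an assumption here; `datasets.load_dataset` forwards unrecognized keyword arguments to the builder config):

    from datasets import load_dataset

    # Restrict loading to a single domain; `domains` accepts one name
    # or an iterable of names out of {"wikinews", "wikipedia", "TQA"}.
    wikinews_only = load_dataset("kleinay/qa_srl2018", domains="wikinews")

    # Select several domains at once; "all" (the default) keeps everything.
    wiki_only = load_dataset("kleinay/qa_srl2018", domains=["wikipedia", "wikinews"])

    # The config name still selects the corpus version, as before.
    v2_tqa = load_dataset("kleinay/qa_srl2018", "v2", domains="TQA")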

Files changed (1):
  1. qa_srl2018.py  +45 -9
qa_srl2018.py CHANGED
@@ -16,6 +16,8 @@
 
 
 import datasets
+from dataclasses import dataclass
+from typing import List, Tuple, Union, Set, Iterable
 from pathlib import Path
 import gzip
 import json
@@ -50,21 +52,37 @@ _URLs = {
 
 SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
 
+SUPPORTED_DOMAINS = {"wikinews", "wikipedia", "TQA"}
+
+@dataclass
+class QASRL2018BuilderConfig(datasets.BuilderConfig):
+    """ Allow the loader to select the dataset version and to restrict loading to a subset of domains. """
+    dataset_version: str = "v2_1"
+
+    domains: Union[str, Iterable[str]] = "all"  # can also be a subset of the acceptable domains.
+    # Acceptable domains are {"wikipedia", "wikinews"} for dev and test (qasrl-2020)
+    # and {"wikipedia", "wikinews", "TQA"} for train (qasrl-2018)
+
+
 # Name of the dataset usually match the script name with CamelCase instead of snake_case
 class QaSrl2018(datasets.GeneratorBasedBuilder):
     """QA-SRL2018: Large-Scale Question-Answer Driven Semantic Role Labeling corpus"""
 
-    VERSION = datasets.Version("1.0.3")
+    VERSION = datasets.Version("1.2.0")
+
+    BUILDER_CONFIG_CLASS = QASRL2018BuilderConfig
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="v2", version=VERSION, description="This provides the WIKIPEDIA dataset for the qa_srl corpus (original version from FitzGerald et al., 2018)"
+        QASRL2018BuilderConfig(
+            name="v2", dataset_version="v2", version=VERSION,
+            description="This provides the WIKIPEDIA dataset for the qa_srl corpus (original version from FitzGerald et al., 2018)"
         ),
-        datasets.BuilderConfig(
-            name="v2_1", version=VERSION, description="This provides the WIKIPEDIA dataset for the qa_srl corpus (version 2.1)"
+        QASRL2018BuilderConfig(
+            name="v2_1", dataset_version="v2_1", version=VERSION,
+            description="This provides the WIKIPEDIA dataset for the qa_srl corpus (version 2.1)"
         ),
     ]
-
+
     DEFAULT_CONFIG_NAME = (
         "v2_1"
     )
@@ -105,12 +123,25 @@ class QaSrl2018(datasets.GeneratorBasedBuilder):
 
         # iterate the tar file of the corpus
 
-        qasrl_dataset_version = self.config_id
+        qasrl_dataset_version = self.config.dataset_version
         corpus_base_path = Path(dl_manager.download_and_extract(_URLs[f"qasrl_{qasrl_dataset_version}"]))
         corpus_orig = corpus_base_path / f"qasrl-{qasrl_dataset_version}" / "orig"
 
+        # Handle domain selection
+        domains: Set[str] = set()
+        if self.config.domains == "all":
+            domains = SUPPORTED_DOMAINS
+        elif isinstance(self.config.domains, str):
+            if self.config.domains in SUPPORTED_DOMAINS:
+                domains = {self.config.domains}
+            else:
+                raise ValueError(f"Unrecognized domain '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
+        else:
+            domains = set(self.config.domains) & SUPPORTED_DOMAINS
+            if len(domains) == 0:
+                raise ValueError(f"Unrecognized domains '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
+        self.config.domains = domains
 
-        # TODO add optional kwarg for genre (wikinews)
         return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
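
The hunk above folds the caller's `domains` value into a validated set before any split generators are built. The same normalization, extracted as a standalone sketch with a couple of illustrative checks (the function name is ours, not part of the commit):

    from typing import Iterable, Set, Union

    SUPPORTED_DOMAINS = {"wikinews", "wikipedia", "TQA"}

    def normalize_domains(domains: Union[str, Iterable[str]]) -> Set[str]:
        """Mirror of the config normalization in _split_generators."""
        if domains == "all":
            # The default selects every supported domain.
            return set(SUPPORTED_DOMAINS)
        if isinstance(domains, str):
            # A single domain name becomes a one-element set.
            if domains in SUPPORTED_DOMAINS:
                return {domains}
            raise ValueError(f"Unrecognized domain '{domains}'; only {SUPPORTED_DOMAINS} are supported")
        # Any other iterable is intersected with the supported set;
        # unknown names are dropped silently unless nothing remains.
        selected = set(domains) & SUPPORTED_DOMAINS
        if not selected:
            raise ValueError(f"Unrecognized domains '{domains}'; only {SUPPORTED_DOMAINS} are supported")
        return selected

    assert normalize_domains("all") == SUPPORTED_DOMAINS
    assert normalize_domains("TQA") == {"TQA"}
    assert normalize_domains(["wikipedia", "bogus"]) == {"wikipedia"}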
@@ -145,6 +176,11 @@ class QaSrl2018(datasets.GeneratorBasedBuilder):
             sent_obj = json.loads(line.strip())
             tokens = sent_obj['sentenceTokens']
             sentence = ' '.join(tokens)
+            sent_id = sent_obj['sentenceId']
+            # consider only selected domains
+            sent_domain = "TQA" if sent_id.startswith("TQA") else sent_id.split(":")[1]
+            if sent_domain not in self.config.domains:
+                continue
             for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
                 verb_forms = verb_obj['verbInflectedForms']
                 predicate = tokens[int(predicate_idx)]
@@ -174,7 +210,7 @@ class QaSrl2018(datasets.GeneratorBasedBuilder):
 
             yield qa_counter, {
                 "sentence": sentence,
-                "sent_id": sent_obj['sentenceId'],
+                "sent_id": sent_id,
                 "predicate_idx": predicate_idx,
                 "predicate": predicate,
                 "is_verbal": True,