kleinay committed
Commit 3a1ebc9
1 Parent(s): 92f0cf8

Allow re-distribution of dev and test samples between splits

Files changed (1)
  1. qanom.py +51 -11
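
What the change enables: the loader gains a QANomBuilderConfig with `redistribute_dev` and `redistribute_test` tuples, each giving the fraction of the original dev (resp. test) split to route into train, dev, and test. A minimal usage sketch, assuming the script is loaded from the "biu-nlp/qanom" Hub repo (the repo id is an assumption here) and relying on `datasets.load_dataset` forwarding extra keyword arguments to the builder config:

import datasets

# Hypothetical call: move the first half of the original dev split into train,
# keep the second half as dev, and leave the original test split untouched.
qanom = datasets.load_dataset(
    "biu-nlp/qanom",                    # assumed repo id
    redistribute_dev=(0.5, 0.5, 0.0),   # (to train, to dev, to test)
    redistribute_test=(0.0, 0.0, 1.0),  # default: keep test as-is
)
print({split: ds.num_rows for split, ds in qanom.items()})
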
qanom.py CHANGED
@@ -15,6 +15,8 @@
 """A Dataset loading script for the QANom dataset (klein et. al., COLING 2000)."""
 
 
+from dataclasses import dataclass
+from typing import Optional, Tuple
 import datasets
 from pathlib import Path
 import pandas as pd
@@ -71,6 +73,14 @@ _URLs = {
 
 SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
 
+@dataclass
+class QANomBuilderConfig(datasets.BuilderConfig):
+    """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
+    redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
+    redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
+
+
+
 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 class Qanom(datasets.GeneratorBasedBuilder):
     """QANom: Question-Answer driven SRL for Nominalizations corpus.
@@ -80,16 +90,18 @@ class Qanom(datasets.GeneratorBasedBuilder):
     are for canidate nominalization which are judged to be non-predicates ("is_verbal"==False) or predicates with no QAs.
     In these cases, the qa fields (question, answers, answer_ranges) would be empty lists. """
 
-    VERSION = datasets.Version("1.0.1")
+    VERSION = datasets.Version("1.0.2")
+
+    BUILDER_CONFIG_CLASS = QANomBuilderConfig
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="plain_text", version=VERSION, description="This provides the QANom dataset"
+        QANomBuilderConfig(
+            name="default", version=VERSION, description="This provides the QANom dataset"#, redistribute_dev=(0,1,0)
         ),
     ]
 
     DEFAULT_CONFIG_NAME = (
-        "plain_text"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+        "default"  # It's not mandatory to have a default configuration. Just use one if it make sense.
     )
 
     def _info(self):
@@ -137,32 +149,54 @@ class Qanom(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
         """Returns SplitGenerators."""
-
         # prepare wiktionary for verb inflections inside 'self.verb_inflections'
         self._prepare_wiktionary_verb_inflections(dl_manager)
 
-        corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qanom_zip"]))
+        self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qanom_zip"]))
+
+        self.dataset_files = [
+            self.corpus_base_path / "annot.train.csv",
+            self.corpus_base_path / "annot.dev.csv",
+            self.corpus_base_path / "annot.test.csv"
+        ]
+
+        # proportional segment (start,end) to take from every original split to returned SplitGenerator
+        orig_dev_segments = ((0, self.config.redistribute_dev[0]),
+                             (self.config.redistribute_dev[0], sum(self.config.redistribute_dev[:2])),
+                             (sum(self.config.redistribute_dev[:2]), 1))
+        orig_tst_segments = ((0, self.config.redistribute_test[0]),
+                             (self.config.redistribute_test[0], sum(self.config.redistribute_test[:2])),
+                             (sum(self.config.redistribute_test[:2]), 1))
+        train_proportion = ((0,1),                 # from train
+                            orig_dev_segments[0],  # from dev
+                            orig_tst_segments[0])  # from test
+        dev_proportion = ((0,0),                   # from train
+                          orig_dev_segments[1],    # from dev
+                          orig_tst_segments[1])    # from test
+        test_proportion = ((0,0),                  # from train
+                           orig_dev_segments[2],   # from dev
+                           orig_tst_segments[2])   # from test
 
         return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
-                   "filepath": corpus_base_path / "annot.train.csv",
+                   "split_proportion": train_proportion
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
-                   "filepath": corpus_base_path / "annot.dev.csv",
+                   "split_proportion": dev_proportion
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
-                   "filepath": corpus_base_path / "annot.test.csv",
+                   "split_proportion": test_proportion
                },
            ),
        ]
@@ -172,11 +206,17 @@ class Qanom(datasets.GeneratorBasedBuilder):
        start, end = s.split(":")
        return [int(start), int(end)]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, split_proportion=None):
 
        """ Yields examples from a 'annot.?.csv' file in QANom's format."""
 
-        df = pd.read_csv(filepath)
+        # construct concatenated DataFrame from different source splits
+        orig_splits_dfs = [pd.read_csv(filepath)
+                           for filepath in self.dataset_files]  # train, dev, test
+        segment_df_from_orig_splits = [df.iloc[int(len(df)*start) : int(len(df)*end)]
+                                       for df, (start,end) in zip(orig_splits_dfs, split_proportion)]
+
+        df = pd.concat(segment_df_from_orig_splits, ignore_index=True)
        for counter, row in df.iterrows():
            # Each record (row) in csv is a QA or is stating a predicate/non-predicate with no QAs
 
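
Note on the split arithmetic: each `redistribute_*` tuple (to_train, to_dev, to_test) is converted into contiguous (start, end) fractions of the original split, and `_generate_examples` takes the matching `iloc` slices before concatenating them, so rows are re-assigned in file order without shuffling. A standalone sketch of that computation (the helper name here is illustrative, not part of the script):

def segments(redistribute):
    # Turn a (to_train, to_dev, to_test) proportion into contiguous
    # (start, end) fractions of the original split, mirroring _split_generators.
    to_train, to_dev, _ = redistribute
    return ((0, to_train), (to_train, to_train + to_dev), (to_train + to_dev, 1))

# With redistribute_dev=(0.5, 0.5, 0.0): the first half of the original dev rows
# goes to train, the second half stays in dev, and nothing moves to test.
print(segments((0.5, 0.5, 0.0)))  # ((0, 0.5), (0.5, 1.0), (1.0, 1))

Because the segments are contiguous slices of the original CSVs, the same proportions always produce the same deterministic re-split.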