parquet-converter committed on
Commit 4a90767
Parent: 5499db7

Update parquet files

.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,12 +0,0 @@
- # QANom
-
- This dataset contains question-answer pairs to model the predicate-argument structure of deverbal nominalizations.
- The questions start with wh-words (Who, What, Where, When, etc.) and contain the verbal form of a nominalization from the sentence;
- the answers are phrases in the sentence.
-
- See the paper for details: [QANom: Question-Answer driven SRL for Nominalizations (Klein et al., COLING 2020)](https://www.aclweb.org/anthology/2020.coling-main.274/)
-
- For previewing the QANom data along with the verbal annotations of QASRL, check out https://browse.qasrl.org/.
- Also check out our [GitHub repository](https://github.com/kleinay/QANom) to find code for nominalization identification, QANom annotation, evaluation, and models.
-
- The dataset was annotated by selected workers from Amazon Mechanical Turk.
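
For quick orientation, here is a minimal sketch of loading the dataset with the `datasets` library and inspecting one example. The repository id `kleinay/qanom` is an assumption inferred from the linked GitHub namespace; the field names come from the loading script deleted below.

```python
# Minimal sketch, not part of the original README.
# ASSUMPTION: the Hub dataset id is "kleinay/qanom" (inferred from the
# kleinay/QANom GitHub namespace); substitute the real id if it differs.
from datasets import load_dataset

qanom = load_dataset("kleinay/qanom")

ex = qanom["train"][0]
print(ex["sentence"])                    # the raw sentence
print(ex["predicate"], ex["is_verbal"])  # candidate nominalization + judgment
print(ex["question"], ex["answers"])     # QA pair (empty lists for non-predicates)
```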
default/qanom-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0246ce1421e1ffe61cc15cc081e3a1c7fa0796a1c207d7574ef06d6f6ac23e79
+ size 403766
default/qanom-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:947d9da5345af0da411ebeadf40ccaaef101d24dcb9a92e1900d6e7c81b0018b
+ size 1639240
default/qanom-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50cf3f4b4279aaba628935072a2fd91e445afd0f2f5cb133fa630e4c123e6ac8
+ size 436231
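
The three `ADDED` files above are Git LFS pointers (spec version, sha256 oid, byte size) rather than the parquet payloads themselves; the converter stores the actual data under the Hub's `refs/convert/parquet` revision. A hedged sketch of reading one split directly, assuming the same hypothetical `kleinay/qanom` repo id as above:

```python
# Minimal sketch; the repo id in the URL is an assumption, and the path
# mirrors the file added above ("default/qanom-test.parquet").
import pandas as pd

url = ("https://huggingface.co/datasets/kleinay/qanom/resolve/"
       "refs%2Fconvert%2Fparquet/default/qanom-test.parquet")
df = pd.read_parquet(url)  # reading over HTTP requires fsspec
print(df.shape, list(df.columns))
```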
qanom.py DELETED
@@ -1,373 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """A Dataset loading script for the QANom dataset (Klein et al., COLING 2020)."""
-
-
- from dataclasses import dataclass
- from typing import Optional, Tuple, Union, Iterable, Set
- import datasets
- from pathlib import Path
- import pandas as pd
- import gzip
- import json
- import itertools
-
- _CITATION = """\
- @inproceedings{klein2020qanom,
-   title={QANom: Question-Answer driven SRL for Nominalizations},
-   author={Klein, Ayal and Mamou, Jonathan and Pyatkin, Valentina and Stepanov, Daniela and He, Hangfeng and Roth, Dan and Zettlemoyer, Luke and Dagan, Ido},
-   booktitle={Proceedings of the 28th International Conference on Computational Linguistics},
-   pages={3069--3083},
-   year={2020}
- }
- """
-
-
- _DESCRIPTION = """\
- The dataset contains question-answer pairs to model the predicate-argument structure of deverbal nominalizations.
- The questions start with wh-words (Who, What, Where, When, etc.) and contain the verbal form of a nominalization from the sentence;
- the answers are phrases in the sentence.
- See the paper for details: QANom: Question-Answer driven SRL for Nominalizations (Klein et al., COLING 2020)
- For previewing the QANom data along with the verbal annotations of QASRL, check out "https://browse.qasrl.org/".
- This dataset was annotated by selected workers from Amazon Mechanical Turk.
- """
-
- _HOMEPAGE = "https://github.com/kleinay/QANom"
-
- _LICENSE = """MIT License
-
- Copyright (c) 2020 Ayal Klein (kleinay)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE."""
-
-
- _URLs = {
-     "qanom_csv": "https://github.com/kleinay/QANom/raw/master/qanom_dataset.zip",
-     "qanom_jsonl": "https://qasrl.org/data/qanom.tar"
- }
-
- SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
-
- SUPPORTED_DOMAINS = {"wikinews", "wikipedia"}
-
- @dataclass
- class QANomBuilderConfig(datasets.BuilderConfig):
-     """Allow the loader to re-distribute the original dev and test splits between train, dev and test."""
-     redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
-     redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
-     load_from: str = "jsonl"  # "csv" or "jsonl"
-     domains: Union[str, Iterable[str]] = "all"  # can also provide a subset of acceptable domains.
-     # Acceptable domains are {"wikipedia", "wikinews"} for dev and test (qasrl-2020)
-     # and {"wikipedia", "wikinews", "TQA"} for train (qasrl-2018)
-
- # TODO: The name of the dataset usually matches the script name with CamelCase instead of snake_case
- class Qanom(datasets.GeneratorBasedBuilder):
-     """QANom: Question-Answer driven SRL for Nominalizations corpus.
-     Notice: This dataset generally follows the format of the `qa_srl` and `kleinay/qa_srl2018` datasets.
-     However, it extends Features to include "is_verbal" and "verb_form" fields (required for nominalizations).
-     In addition, and most critically, unlike these verbal qasrl datasets, in the qanom dataset some examples
-     are for candidate nominalizations which are judged to be non-predicates ("is_verbal"==False) or predicates with no QAs.
-     In these cases, the qa fields (question, answers, answer_ranges) would be empty lists."""
-
-     VERSION = datasets.Version("1.2.0")
-
-     BUILDER_CONFIG_CLASS = QANomBuilderConfig
-
-     BUILDER_CONFIGS = [
-         QANomBuilderConfig(
-             name="default", version=VERSION, description="This provides the QANom dataset"  #, redistribute_dev=(0,1,0)
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = (
-         "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-     )
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "sentence": datasets.Value("string"),
-                 "sent_id": datasets.Value("string"),
-                 "predicate_idx": datasets.Value("int32"),
-                 "predicate": datasets.Value("string"),
-                 "is_verbal": datasets.Value("bool"),
-                 "verb_form": datasets.Value("string"),
-                 "question": datasets.Sequence(datasets.Value("string")),
-                 "answers": datasets.Sequence(datasets.Value("string")),
-                 "answer_ranges": datasets.Sequence(SpanFeatureType)
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _prepare_wiktionary_verb_inflections(self, dl_manager):
-         wiktionary_url = "https://raw.githubusercontent.com/nafitzgerald/nrl-qasrl/master/data/wiktionary/en_verb_inflections.txt"
-         wiktionary_path = dl_manager.download(wiktionary_url)
-         verb_map = {}
-         with open(wiktionary_path, 'r', encoding="utf-8") as f:
-             for l in f.readlines():
-                 inflections = l.strip().split('\t')
-                 stem, presentsingular3rd, presentparticiple, past, pastparticiple = inflections
-                 for inf in inflections:
-                     verb_map[inf] = {"Stem": stem, "PresentSingular3rd": presentsingular3rd, "PresentParticiple": presentparticiple, "Past": past, "PastParticiple": pastparticiple}
-         self.verb_inflections = verb_map
-
-     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
-         """Returns SplitGenerators."""
-
-         assert self.config.load_from in ("csv", "jsonl")
-
-         # Handle domain selection
-         domains: Set[str] = set()
-         if self.config.domains == "all":
-             domains = SUPPORTED_DOMAINS
-         elif isinstance(self.config.domains, str):
-             if self.config.domains in SUPPORTED_DOMAINS:
-                 domains = {self.config.domains}
-             else:
-                 raise ValueError(f"Unrecognized domain '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
-         else:
-             domains = set(self.config.domains) & SUPPORTED_DOMAINS
-             if len(domains) == 0:
-                 raise ValueError(f"Unrecognized domains '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
-         self.config.domains = domains
-
-         self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs[f"qanom_{self.config.load_from}"]))
-         if self.config.load_from == "csv":
-             # prepare wiktionary for verb inflections inside 'self.verb_inflections'
-             self._prepare_wiktionary_verb_inflections(dl_manager)
-             self.dataset_files = [
-                 self.corpus_base_path / "annot.train.csv",
-                 self.corpus_base_path / "annot.dev.csv",
-                 self.corpus_base_path / "annot.test.csv"
-             ]
-         elif self.config.load_from == "jsonl":
-             self.dataset_files = [
-                 self.corpus_base_path / "qanom" / "train.jsonl.gz",
-                 self.corpus_base_path / "qanom" / "dev.jsonl.gz",
-                 self.corpus_base_path / "qanom" / "test.jsonl.gz"
-             ]
-
-
-         # proportional segment (start,end) to take from every original split to returned SplitGenerator
-         orig_dev_segments = ((0, self.config.redistribute_dev[0]),
-                              (self.config.redistribute_dev[0], sum(self.config.redistribute_dev[:2])),
-                              (sum(self.config.redistribute_dev[:2]), 1))
-         orig_tst_segments = ((0, self.config.redistribute_test[0]),
-                              (self.config.redistribute_test[0], sum(self.config.redistribute_test[:2])),
-                              (sum(self.config.redistribute_test[:2]), 1))
-         train_proportion = ((0,1),                 # from train
-                             orig_dev_segments[0],  # from dev
-                             orig_tst_segments[0])  # from test
-         dev_proportion = ((0,0),                   # from train
-                           orig_dev_segments[1],    # from dev
-                           orig_tst_segments[1])    # from test
-         test_proportion = ((0,0),                  # from train
-                            orig_dev_segments[2],   # from dev
-                            orig_tst_segments[2])   # from test
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "split_proportion": train_proportion
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "split_proportion": dev_proportion
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "split_proportion": test_proportion
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, split_proportion):
-         if self.config.load_from == "csv":
-             return self._generate_examples_from_csv(split_proportion=split_proportion)
-         elif self.config.load_from == "jsonl":
-             return self._generate_examples_from_jsonl(split_proportion=split_proportion)
-
-     def _generate_examples_from_jsonl(self, split_proportion):
-         """Yields examples from a jsonl.gz file, in the same format as qasrl-v2."""
-         empty_to_underscore = lambda s: "_" if s=="" else s
-         def read_lines(filepath):
-             with gzip.open(filepath, "rt") as f:
-                 return [line.strip() for line in f]
-
-
-         orig_splits_jsons = [read_lines(filepath)
-                              for filepath in self.dataset_files]  # train, dev, test
-         # Each json-line stands for a sentence with several predicates and QAs; we will redistribute
-         # the new proportions of the splits on the sentence level for convenience
-         lines_from_orig_splits = [jsonlines[int(len(jsonlines)*start) : int(len(jsonlines)*end)]
-                                   for jsonlines, (start,end) in zip(orig_splits_jsons, split_proportion)]
-         this_split_lines = list(itertools.chain(*lines_from_orig_splits))
-         qa_counter = 0
-         for line in this_split_lines:
-             sent_obj = json.loads(line.strip())
-             tokens = sent_obj['sentenceTokens']
-             sentence = ' '.join(tokens)
-             sent_id = sent_obj['sentenceId']
-             # consider only selected domains
-             sent_domain = sent_id.split(":")[1]
-             if sent_domain not in self.config.domains:
-                 continue
-             for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
-                 verb_forms = verb_obj['verbInflectedForms']
-                 predicate = tokens[int(predicate_idx)]
-                 for question_obj in verb_obj['questionLabels'].values():
-                     question_slots = question_obj['questionSlots']
-                     verb_form = question_slots['verb']
-                     verb_surface = verb_forms[verb_form.split(" ")[-1]]  # if verb_form in verb_forms else verb_forms['stem']
-                     question_slots_in_order = [
-                         question_slots["wh"],
-                         question_slots["aux"],
-                         question_slots["subj"],
-                         verb_surface,
-                         question_slots["obj"],
-                         empty_to_underscore(question_slots["prep"]),  # fix bug in data
-                         question_slots["obj2"],
-                         '?'
-                     ]
-                     # retrieve answers
-                     answer_spans = []
-                     for ans in question_obj['answerJudgments']:
-                         if ans['isValid']:
-                             answer_spans.extend(ans['spans'])
-                     answer_spans = list(set(tuple(a) for a in answer_spans))
-                     # answer_spans = list(set(answer_spans))
-                     answer_strs = [' '.join([tokens[i] for i in range(*span)])
-                                    for span in answer_spans]
-
-                     yield qa_counter, {
-                         "sentence": sentence,
-                         "sent_id": sent_id,
-                         "predicate_idx": predicate_idx,
-                         "predicate": predicate,
-                         "is_verbal": True,
-                         "verb_form": verb_forms['stem'],
-                         "question": question_slots_in_order,
-                         "answers": answer_strs,
-                         "answer_ranges": answer_spans
-                     }
-                     qa_counter += 1
-             # also return non-predicates with empty data
-             for non_predicate_idx, non_predicate in sent_obj["nonPredicates"].items():
-                 yield qa_counter, {
-                     "sentence": sentence,
-                     "sent_id": sent_obj['sentenceId'],
-                     "predicate_idx": int(non_predicate_idx),
-                     "predicate": non_predicate,
-                     "is_verbal": False,
-                     "verb_form": "",
-                     "question": [],
-                     "answers": [],
-                     "answer_ranges": []
-                 }
-                 qa_counter += 1
-
-
-     @classmethod
-     def span_from_str(cls, s: str):
-         start, end = s.split(":")
-         return [int(start), int(end)]
-
-     def _generate_examples_from_csv(self, split_proportion):
-
-         """Yields examples from an 'annot.?.csv' file in QANom's format."""
-
-         # construct concatenated DataFrame from different source splits
-         orig_splits_dfs = [pd.read_csv(filepath)
-                            for filepath in self.dataset_files]  # train, dev, test
-         segment_df_from_orig_splits = [df.iloc[int(len(df)*start) : int(len(df)*end)]
-                                        for df, (start,end) in zip(orig_splits_dfs, split_proportion)]
-
-         df = pd.concat(segment_df_from_orig_splits, ignore_index=True)
-         for counter, row in df.iterrows():
-             # Each record (row) in the csv is a QA, or states a predicate/non-predicate with no QAs
-
-             # consider only selected domains
-             sent_domain = row.qasrl_id.split(":")[1]
-             if sent_domain not in self.config.domains:
-                 continue
-
-             # Prepare question (slots)
-             na_to_underscore = lambda s: "_" if pd.isna(s) else str(s)
-             question = [] if pd.isna(row.question) else list(map(na_to_underscore, [
-                 row.wh, row.aux, row.subj, row.verb_slot_inflection, row.obj, row.prep, row.obj2
-             ])) + ['?']
-             # fix verb slot - replace with actual verb inflection, and prepend verb_prefix
-             if question:
-                 if row.verb_form in self.verb_inflections and not pd.isna(row.verb_slot_inflection):
-                     verb_surface = self.verb_inflections[row.verb_form][row.verb_slot_inflection]
-                 else:
-                     verb_surface = row.verb_form
-                 if not pd.isna(row.verb_prefix):
-                     verb_surface = row.verb_prefix.replace("~!~", " ") + " " + verb_surface
-                 question[3] = verb_surface
-             answers = [] if pd.isna(row.answer) else row.answer.split("~!~")
-             answer_ranges = [] if pd.isna(row.answer_range) else [Qanom.span_from_str(s) for s in row.answer_range.split("~!~")]
-
-             yield counter, {
-                 "sentence": row.sentence,
-                 "sent_id": row.qasrl_id,
-                 "predicate_idx": row.target_idx,
-                 "predicate": row.noun,
-                 "is_verbal": row.is_verbal,
-                 "verb_form": row.verb_form,
-                 "question": question,
-                 "answers": answers,
-                 "answer_ranges": answer_ranges
-             }
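
Note that the deleted script exposed loader options via `QANomBuilderConfig` (`load_from`, `domains`, and the `redistribute_dev`/`redistribute_test` proportions) that the auto-converted parquet files no longer provide. A minimal sketch of how those knobs were passed through `load_dataset` before this commit, again assuming the hypothetical `kleinay/qanom` id:

```python
# Minimal sketch of the pre-conversion config knobs (assumed repo id).
from datasets import load_dataset

qanom = load_dataset(
    "kleinay/qanom",
    load_from="jsonl",    # or "csv"
    domains="wikinews",   # a subset of {"wikinews", "wikipedia"}
    # (train, dev, test) shares of the ORIGINAL dev split; here half of the
    # original dev data is folded into train and half kept as validation.
    redistribute_dev=(0.5, 0.5, 0.0),
)
```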