parquet-converter committed
Commit 702350c
1 Parent(s): ad43938

Update parquet files
.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
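
The rules deleted above are Git LFS track patterns: any file matching a pattern is committed as a small pointer while the payload goes to LFS storage. A minimal sketch of that matching, using `fnmatch` as a rough stand-in for gitattributes semantics (my approximation, not part of the commit):

```python
# Approximate which paths the deleted .gitattributes rules route through Git LFS.
# Real gitattributes matching is gitignore-style; fnmatch on the basename is only
# an approximation for illustration.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.parquet", "*.zip"]  # subset of the rules above

def tracked_by_lfs(path: str) -> bool:
    basename = path.rsplit("/", 1)[-1]
    return any(fnmatch(basename, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("QDMR/break_data-train.parquet"))  # True: *.parquet was LFS-tracked
print(tracked_by_lfs("README.md"))                      # False
```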
QDMR-high-level-lexicon/break_data-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a02f1d99805abcdd55e6e59763c4587ebc29ce3d613e3a1e417dd32fc333ba11
+ size 759217
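
Each file added in this commit is a Git LFS pointer: three lines recording the pointer spec version, the SHA-256 of the actual payload, and its size in bytes. A minimal sketch of parsing one, assuming only the standard library (Python 3.9+ for `str.removeprefix`):

```python
# Parse a Git LFS pointer file such as the three "+" lines above.
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split once on the first space.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:a02f1d99805abcdd55e6e59763c4587ebc29ce3d613e3a1e417dd32fc333ba11
size 759217
"""
print(parse_lfs_pointer(pointer))  # {'version': ..., 'sha256': 'a02f...', 'size': 759217}
```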
QDMR-high-level-lexicon/break_data-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc32046450381eda1c42ee7c7bf9c7315ad0e2a9ef30acf7307a363d83ea53f5
+ size 4159208
QDMR-high-level-lexicon/break_data-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26e7043dd9d072d2faef8feacecf0ee70e6091d285fafa8494bcf9500a31a177
+ size 745496
QDMR-high-level/break_data-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29eb7efbd2700004a8247a26c7134b3e01e09059b810ecbbb913549348910c4e
+ size 262747
QDMR-high-level/break_data-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19063c9ebbab2d13b548fbdd45f040872249bb25adf809233cb148455039f989
+ size 2412935
QDMR-high-level/break_data-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3378094626fefadb4e40f54a4f507ac7e5e9e0a8d43f6a882ac2c288038ee62e
+ size 437502
QDMR-lexicon/break_data-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e768280fcd53305b8e029adefe2b15c175c06cf1a899fb866ff00142bbbb1762
+ size 1459037
QDMR-lexicon/break_data-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1b4b59d92a56291e48dd212ef4b499eefaaa28e697ac8c68ab39fa6c5f042be
+ size 7950249
QDMR-lexicon/break_data-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2704c01f0163d866f8fd46990616eee36dbf101debe8e275bfac00863baff70f
+ size 1408977
QDMR/break_data-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b7c2addb7f136fcc94c6286c9695e8147dedaefa333600d47e4fd2c73248ea2
+ size 373458
QDMR/break_data-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e372e700088553fb61f15c4fb002a4a944107f1b3e77da14f1d34c6b1b0b49e2
+ size 4063483
QDMR/break_data-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14c2af092717711e0c26da2d690deec9688fdc2730e0e0692eb12a04dc8b1a1e
+ size 738564
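
With the pointers resolved to real files (for example via `git lfs pull` or `huggingface_hub`), the converted splits can be read directly, with no loading script. A sketch assuming the QDMR/ directory above sits in the working directory:

```python
# Load the converted parquet splits through the generic "parquet" builder.
from datasets import load_dataset

qdmr = load_dataset(
    "parquet",
    data_files={
        "train": "QDMR/break_data-train.parquet",
        "validation": "QDMR/break_data-validation.parquet",
        "test": "QDMR/break_data-test.parquet",
    },
)
print(qdmr["train"][0])  # {'question_id': ..., 'question_text': ..., 'decomposition': ..., ...}
```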
README.md DELETED
@@ -1,384 +0,0 @@
- ---
- annotations_creators:
- - crowdsourced
- language_creators:
- - crowdsourced
- language:
- - en
- license:
- - unknown
- multilinguality:
- - monolingual
- size_categories:
- - 10K<n<100K
- source_datasets:
- - original
- task_categories:
- - text2text-generation
- task_ids:
- - open-domain-abstractive-qa
- paperswithcode_id: break
- pretty_name: BREAK
- dataset_info:
- - config_name: QDMR-high-level
-   features:
-   - name: question_id
-     dtype: string
-   - name: question_text
-     dtype: string
-   - name: decomposition
-     dtype: string
-   - name: operators
-     dtype: string
-   - name: split
-     dtype: string
-   splits:
-   - name: test
-     num_bytes: 482339
-     num_examples: 3195
-   - name: train
-     num_bytes: 5148086
-     num_examples: 17503
-   - name: validation
-     num_bytes: 914780
-     num_examples: 3130
-   download_size: 15971078
-   dataset_size: 6545205
- - config_name: QDMR-high-level-lexicon
-   features:
-   - name: source
-     dtype: string
-   - name: allowed_tokens
-     dtype: string
-   splits:
-   - name: test
-     num_bytes: 4240755
-     num_examples: 3195
-   - name: train
-     num_bytes: 23234518
-     num_examples: 17503
-   - name: validation
-     num_bytes: 4158679
-     num_examples: 3130
-   download_size: 15971078
-   dataset_size: 31633952
- - config_name: QDMR
-   features:
-   - name: question_id
-     dtype: string
-   - name: question_text
-     dtype: string
-   - name: decomposition
-     dtype: string
-   - name: operators
-     dtype: string
-   - name: split
-     dtype: string
-   splits:
-   - name: test
-     num_bytes: 900632
-     num_examples: 8069
-   - name: train
-     num_bytes: 12790466
-     num_examples: 44321
-   - name: validation
-     num_bytes: 2237472
-     num_examples: 7760
-   download_size: 15971078
-   dataset_size: 15928570
- - config_name: QDMR-lexicon
-   features:
-   - name: source
-     dtype: string
-   - name: allowed_tokens
-     dtype: string
-   splits:
-   - name: test
-     num_bytes: 10331822
-     num_examples: 8069
-   - name: train
-     num_bytes: 56913064
-     num_examples: 44321
-   - name: validation
-     num_bytes: 9936933
-     num_examples: 7760
-   download_size: 15971078
-   dataset_size: 77181819
- - config_name: logical-forms
-   features:
-   - name: question_id
-     dtype: string
-   - name: question_text
-     dtype: string
-   - name: decomposition
-     dtype: string
-   - name: operators
-     dtype: string
-   - name: split
-     dtype: string
-   - name: program
-     dtype: string
-   splits:
-   - name: test
-     num_bytes: 927038
-     num_examples: 8006
-   - name: train
-     num_bytes: 19821676
-     num_examples: 44098
-   - name: validation
-     num_bytes: 3504893
-     num_examples: 7719
-   download_size: 15971078
-   dataset_size: 24253607
- ---
-
- # Dataset Card for "break_data"
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Homepage:** [https://github.com/allenai/Break](https://github.com/allenai/Break)
- - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- - **Size of downloaded dataset files:** 76.16 MB
- - **Size of the generated dataset:** 148.34 MB
- - **Total amount of disk used:** 224.49 MB
-
- ### Dataset Summary
-
- Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations
- (QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases.
- This repository contains the Break dataset along with information on the exact data format.
-
- ### Supported Tasks and Leaderboards
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Languages
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ## Dataset Structure
-
- ### Data Instances
-
- #### QDMR
-
- - **Size of downloaded dataset files:** 15.23 MB
- - **Size of the generated dataset:** 15.19 MB
- - **Total amount of disk used:** 30.42 MB
-
- An example of 'validation' looks as follows.
- ```
- {
-     "decomposition": "return flights ;return #1 from denver ;return #2 to philadelphia ;return #3 if available",
-     "operators": "['select', 'filter', 'filter', 'filter']",
-     "question_id": "ATIS_dev_0",
-     "question_text": "what flights are available tomorrow from denver to philadelphia ",
-     "split": "dev"
- }
- ```
-
- #### QDMR-high-level
-
- - **Size of downloaded dataset files:** 15.23 MB
- - **Size of the generated dataset:** 6.24 MB
- - **Total amount of disk used:** 21.47 MB
-
- An example of 'train' looks as follows.
- ```
- {
-     "decomposition": "return ground transportation ;return #1 which is available ;return #2 from the pittsburgh airport ;return #3 to downtown ;return the cost of #4",
-     "operators": "['select', 'filter', 'filter', 'filter', 'project']",
-     "question_id": "ATIS_dev_102",
-     "question_text": "what ground transportation is available from the pittsburgh airport to downtown and how much does it cost ",
-     "split": "dev"
- }
- ```
-
- #### QDMR-high-level-lexicon
-
- - **Size of downloaded dataset files:** 15.23 MB
- - **Size of the generated dataset:** 30.17 MB
- - **Total amount of disk used:** 45.40 MB
-
- An example of 'train' looks as follows.
- ```
- This example was too long and was cropped:
-
- {
-     "allowed_tokens": "\"['higher than', 'same as', 'what ', 'and ', 'than ', 'at most', 'he', 'distinct', 'House', 'two', 'at least', 'or ', 'date', 'o...",
-     "source": "What office, also held by a member of the Maine House of Representatives, did James K. Polk hold before he was president?"
- }
- ```
-
- #### QDMR-lexicon
-
- - **Size of downloaded dataset files:** 15.23 MB
- - **Size of the generated dataset:** 73.61 MB
- - **Total amount of disk used:** 88.84 MB
-
- An example of 'validation' looks as follows.
- ```
- This example was too long and was cropped:
-
- {
-     "allowed_tokens": "\"['higher than', 'same as', 'what ', 'and ', 'than ', 'at most', 'distinct', 'two', 'at least', 'or ', 'date', 'on ', '@@14@@', ...",
-     "source": "what flights are available tomorrow from denver to philadelphia "
- }
- ```
-
- #### logical-forms
-
- - **Size of downloaded dataset files:** 15.23 MB
- - **Size of the generated dataset:** 23.13 MB
- - **Total amount of disk used:** 38.36 MB
-
- An example of 'train' looks as follows.
- ```
- {
-     "decomposition": "return ground transportation ;return #1 which is available ;return #2 from the pittsburgh airport ;return #3 to downtown ;return the cost of #4",
-     "operators": "['select', 'filter', 'filter', 'filter', 'project']",
-     "program": "some program",
-     "question_id": "ATIS_dev_102",
-     "question_text": "what ground transportation is available from the pittsburgh airport to downtown and how much does it cost ",
-     "split": "dev"
- }
- ```
-
- ### Data Fields
-
- The data fields are the same among all splits.
-
- #### QDMR
- - `question_id`: a `string` feature.
- - `question_text`: a `string` feature.
- - `decomposition`: a `string` feature.
- - `operators`: a `string` feature.
- - `split`: a `string` feature.
-
- #### QDMR-high-level
- - `question_id`: a `string` feature.
- - `question_text`: a `string` feature.
- - `decomposition`: a `string` feature.
- - `operators`: a `string` feature.
- - `split`: a `string` feature.
-
- #### QDMR-high-level-lexicon
- - `source`: a `string` feature.
- - `allowed_tokens`: a `string` feature.
-
- #### QDMR-lexicon
- - `source`: a `string` feature.
- - `allowed_tokens`: a `string` feature.
-
- #### logical-forms
- - `question_id`: a `string` feature.
- - `question_text`: a `string` feature.
- - `decomposition`: a `string` feature.
- - `operators`: a `string` feature.
- - `split`: a `string` feature.
- - `program`: a `string` feature.
-
- ### Data Splits
-
- | name |train|validation|test|
- |-----------------------|----:|---------:|---:|
- |QDMR |44321| 7760|8069|
- |QDMR-high-level |17503| 3130|3195|
- |QDMR-high-level-lexicon|17503| 3130|3195|
- |QDMR-lexicon |44321| 7760|8069|
- |logical-forms |44098| 7719|8006|
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- #### Who are the source language producers?
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Annotations
-
- #### Annotation process
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- #### Who are the annotators?
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Personal and Sensitive Information
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Discussion of Biases
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Other Known Limitations
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ## Additional Information
-
- ### Dataset Curators
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Licensing Information
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Citation Information
-
- ```
- @article{Wolfson2020Break,
-   title={Break It Down: A Question Understanding Benchmark},
-   author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},
-   journal={Transactions of the Association for Computational Linguistics},
-   year={2020},
- }
- ```
-
- ### Contributions
-
- Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@lewtun](https://github.com/lewtun), [@thomwolf](https://github.com/thomwolf) for adding this dataset.
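
For reference, the five configs documented in the deleted card map one-to-one to the builder configs in break_data.py below; with a datasets version that still runs loading scripts, they were loaded like this (a sketch, not part of the commit):

```python
# Enumerate the configs from the deleted card and report their split sizes.
from datasets import load_dataset

for config in ["QDMR", "QDMR-lexicon", "QDMR-high-level", "QDMR-high-level-lexicon", "logical-forms"]:
    ds = load_dataset("break_data", config)
    print(config, {split: ds[split].num_rows for split in ds})
```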
break_data.py DELETED
@@ -1,261 +0,0 @@
- """TODO(break_data): Add a description here."""
-
-
- import csv
- import json
- import os
- import textwrap
-
- import datasets
-
-
- # TODO(break): BibTeX citation
- _CITATION = """\
- @article{Wolfson2020Break,
-   title={Break It Down: A Question Understanding Benchmark},
-   author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},
-   journal={Transactions of the Association for Computational Linguistics},
-   year={2020},
- }
- """
-
- # TODO(break):
- _DESCRIPTION = """\
- Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations
- (QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases.
- This repository contains the Break dataset along with information on the exact data format.
- """
- _URL = "https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip"
-
-
- class BreakDataConfig(datasets.BuilderConfig):
-
-     """BuilderConfig for Break"""
-
-     def __init__(self, text_features, lexicon_tokens, **kwargs):
-         """
-
-         Args:
-           text_features: `dict[string, string]`, map from the name of the feature
-             dict for each text field to the name of the column in the tsv file
-           lexicon_tokens: to define if we want to load the lexicon_tokens files or not
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(BreakDataConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.text_features = text_features
-         self.lexicon_tokens = lexicon_tokens
-
-
- class BreakData(datasets.GeneratorBasedBuilder):
-     """TODO(break_data): Short description of my dataset."""
-
-     # TODO(break_data): Set up version.
-     VERSION = datasets.Version("0.1.0")
-     BUILDER_CONFIGS = [
-         BreakDataConfig(
-             name="QDMR-high-level",
-             description=textwrap.dedent(
-                 """
-                 Contains questions annotated with the high-level variant of QDMR. These decomposition are exclusive to Reading
-                 Comprehension tasks (Section 2). lexicon_tokens files are also provided."""
-             ),
-             text_features={
-                 "question_id": "question_id",
-                 "question_text": "question_text",
-                 "decomposition": "decomposition",
-                 "operators": "operators",
-                 "split": "split",
-             },
-             lexicon_tokens=False,
-         ),
-         BreakDataConfig(
-             name="QDMR-high-level-lexicon",
-             description=textwrap.dedent(
-                 """
-                 Contains questions annotated with the high-level variant of QDMR. These decomposition are exclusive to Reading
-                 Comprehension tasks (Section 2). lexicon_tokens files are also provided."""
-             ),
-             text_features={
-                 "source": "source",
-                 "allowed_tokens": "allowed_tokens",
-             },
-             lexicon_tokens=True,
-         ),
-         BreakDataConfig(
-             name="QDMR",
-             description=textwrap.dedent(
-                 """
-                 Contains questions over text, images and databases annotated with their Question Decomposition Meaning
-                 Representation. In addition to the train, dev and (hidden) test sets we provide lexicon_tokens files. For
-                 each question, the lexicon file contains the set of valid tokens that could potentially appear in its
-                 decomposition """
-             ),
-             text_features={
-                 "question_id": "question_id",
-                 "question_text": "question_text",
-                 "decomposition": "decomposition",
-                 "operators": "operators",
-                 "split": "split",
-             },
-             lexicon_tokens=False,
-         ),
-         BreakDataConfig(
-             name="QDMR-lexicon",
-             description=textwrap.dedent(
-                 """
-                 Contains questions over text, images and databases annotated with their Question Decomposition Meaning
-                 Representation. In addition to the train, dev and (hidden) test sets we provide lexicon_tokens files. For
-                 each question, the lexicon file contains the set of valid tokens that could potentially appear in its
-                 decomposition """
-             ),
-             text_features={
-                 "source": "source",
-                 "allowed_tokens": "allowed_tokens",
-             },
-             lexicon_tokens=True,
-         ),
-         BreakDataConfig(
-             name="logical-forms",
-             description=textwrap.dedent(
-                 """
-                 Contains questions and QDMRs annotated with full logical-forms of QDMR operators + arguments. Full logical-forms
-                 were inferred by the annotation-consistency algorithm described in """
-             ),
-             lexicon_tokens=False,
-             text_features={
-                 "question_id": "question_id",
-                 "question_text": "question_text",
-                 "decomposition": "decomposition",
-                 "operators": "operators",
-                 "split": "split",
-                 "program": "program",
-             },
-         ),
-     ]
-
-     def _info(self):
-         # TODO(break_data): Specifies the datasets.DatasetInfo object
-         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 features
-                 # These are the features of your dataset like images, labels ...
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="https://github.com/allenai/Break",
-             citation=_CITATION,
-         )
-         # if
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO(break_data): Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_dir = dl_manager.download_and_extract(_URL)
-         data_dir = os.path.join(dl_dir, "Break-dataset")
-         qdmr_high_level = os.path.join(data_dir, "QDMR-high-level")
-         qdmr = os.path.join(data_dir, "QDMR")
-         logical = os.path.join(data_dir, "logical-forms")
-         if self.config.name == "QDMR" or self.config.name == "QDMR-lexicon":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr, "train.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr, "train_lexicon_tokens.json")
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr, "dev.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr, "dev_lexicon_tokens.json")
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr, "test.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr, "test_lexicon_tokens.json")
-                     },
-                 ),
-             ]
-         elif self.config.name == "QDMR-high-level" or self.config.name == "QDMR-high-level-lexicon":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr_high_level, "train.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr_high_level, "train_lexicon_tokens.json")
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr_high_level, "dev.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr_high_level, "dev_lexicon_tokens.json")
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(qdmr_high_level, "test.csv")
-                         if not self.config.lexicon_tokens
-                         else os.path.join(qdmr_high_level, "test_lexicon_tokens.json")
-                     },
-                 ),
-             ]
-         elif self.config.name == "logical-forms":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(logical, "train.csv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(logical, "dev.csv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(logical, "test.csv")},
-                 ),
-             ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         # TODO(break_data): Yields (key, example) tuples from the dataset
-         with open(filepath, encoding="utf-8") as f:
-             if (
-                 self.config.name == "QDMR-high-level"
-                 or self.config.name == "QDMR"
-                 or self.config.name == "logical-forms"
-             ):
-                 data = csv.DictReader(f)
-                 for id_, row in enumerate(data):
-                     yield id_, row
-             elif self.config.name == "QDMR-high-level-lexicon" or self.config.name == "QDMR-lexicon":
-                 for id_, row in enumerate(f):
-                     data = json.loads(row)
-                     yield id_, data
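
The commit message "Update parquet files" names the step that supersedes this script: each split the builder yielded row-by-row is materialized once as parquet. A rough sketch of that conversion with pandas (the real parquet-converter bot works through the datasets library; note the script above maps dev.csv to the validation split):

```python
# Convert the extracted Break-dataset CSV splits of one config to parquet,
# mirroring the files added in this commit (pandas + pyarrow assumed).
import pandas as pd

for csv_split, parquet_split in [("train", "train"), ("dev", "validation"), ("test", "test")]:
    df = pd.read_csv(f"Break-dataset/QDMR/{csv_split}.csv")
    df.to_parquet(f"QDMR/break_data-{parquet_split}.parquet", index=False)
```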
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"QDMR-high-level": {"description": "Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations\n(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases. \nThis repository contains the Break dataset along with information on the exact data format.\n", "citation": "@article{Wolfson2020Break,\n title={Break It Down: A Question Understanding Benchmark},\n author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},\n journal={Transactions of the Association for Computational Linguistics},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/Break", "license": "", "features": {"question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_text": {"dtype": "string", "id": null, "_type": "Value"}, "decomposition": {"dtype": "string", "id": null, "_type": "Value"}, "operators": {"dtype": "string", "id": null, "_type": "Value"}, "split": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "break_data", "config_name": "QDMR-high-level", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 482339, "num_examples": 3195, "dataset_name": "break_data"}, "train": {"name": "train", "num_bytes": 5148086, "num_examples": 17503, "dataset_name": "break_data"}, "validation": {"name": "validation", "num_bytes": 914780, "num_examples": 3130, "dataset_name": "break_data"}}, "download_checksums": {"https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip": {"num_bytes": 15971078, "checksum": "37efea4fa1b7774d077ff0452e5e199cecba8216c12da76781010f189d1cf259"}}, "download_size": 15971078, "dataset_size": 6545205, "size_in_bytes": 22516283}, "QDMR-high-level-lexicon": {"description": "Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations\n(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases. \nThis repository contains the Break dataset along with information on the exact data format.\n", "citation": "@article{Wolfson2020Break,\n title={Break It Down: A Question Understanding Benchmark},\n author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},\n journal={Transactions of the Association for Computational Linguistics},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/Break", "license": "", "features": {"source": {"dtype": "string", "id": null, "_type": "Value"}, "allowed_tokens": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "break_data", "config_name": "QDMR-high-level-lexicon", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4240755, "num_examples": 3195, "dataset_name": "break_data"}, "train": {"name": "train", "num_bytes": 23234518, "num_examples": 17503, "dataset_name": "break_data"}, "validation": {"name": "validation", "num_bytes": 4158679, "num_examples": 3130, "dataset_name": "break_data"}}, "download_checksums": {"https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip": {"num_bytes": 15971078, "checksum": "37efea4fa1b7774d077ff0452e5e199cecba8216c12da76781010f189d1cf259"}}, "download_size": 15971078, "dataset_size": 31633952, "size_in_bytes": 47605030}, "QDMR": {"description": "Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations\n(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases. \nThis repository contains the Break dataset along with information on the exact data format.\n", "citation": "@article{Wolfson2020Break,\n title={Break It Down: A Question Understanding Benchmark},\n author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},\n journal={Transactions of the Association for Computational Linguistics},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/Break", "license": "", "features": {"question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_text": {"dtype": "string", "id": null, "_type": "Value"}, "decomposition": {"dtype": "string", "id": null, "_type": "Value"}, "operators": {"dtype": "string", "id": null, "_type": "Value"}, "split": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "break_data", "config_name": "QDMR", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 900632, "num_examples": 8069, "dataset_name": "break_data"}, "train": {"name": "train", "num_bytes": 12790466, "num_examples": 44321, "dataset_name": "break_data"}, "validation": {"name": "validation", "num_bytes": 2237472, "num_examples": 7760, "dataset_name": "break_data"}}, "download_checksums": {"https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip": {"num_bytes": 15971078, "checksum": "37efea4fa1b7774d077ff0452e5e199cecba8216c12da76781010f189d1cf259"}}, "download_size": 15971078, "dataset_size": 15928570, "size_in_bytes": 31899648}, "QDMR-lexicon": {"description": "Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations\n(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases. \nThis repository contains the Break dataset along with information on the exact data format.\n", "citation": "@article{Wolfson2020Break,\n title={Break It Down: A Question Understanding Benchmark},\n author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},\n journal={Transactions of the Association for Computational Linguistics},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/Break", "license": "", "features": {"source": {"dtype": "string", "id": null, "_type": "Value"}, "allowed_tokens": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "break_data", "config_name": "QDMR-lexicon", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 10331822, "num_examples": 8069, "dataset_name": "break_data"}, "train": {"name": "train", "num_bytes": 56913064, "num_examples": 44321, "dataset_name": "break_data"}, "validation": {"name": "validation", "num_bytes": 9936933, "num_examples": 7760, "dataset_name": "break_data"}}, "download_checksums": {"https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip": {"num_bytes": 15971078, "checksum": "37efea4fa1b7774d077ff0452e5e199cecba8216c12da76781010f189d1cf259"}}, "download_size": 15971078, "dataset_size": 77181819, "size_in_bytes": 93152897}, "logical-forms": {"description": "Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations\n(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases. \nThis repository contains the Break dataset along with information on the exact data format.\n", "citation": "@article{Wolfson2020Break,\n title={Break It Down: A Question Understanding Benchmark},\n author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},\n journal={Transactions of the Association for Computational Linguistics},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/Break", "license": "", "features": {"question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_text": {"dtype": "string", "id": null, "_type": "Value"}, "decomposition": {"dtype": "string", "id": null, "_type": "Value"}, "operators": {"dtype": "string", "id": null, "_type": "Value"}, "split": {"dtype": "string", "id": null, "_type": "Value"}, "program": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "break_data", "config_name": "logical-forms", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 927038, "num_examples": 8006, "dataset_name": "break_data"}, "train": {"name": "train", "num_bytes": 19821676, "num_examples": 44098, "dataset_name": "break_data"}, "validation": {"name": "validation", "num_bytes": 3504893, "num_examples": 7719, "dataset_name": "break_data"}}, "download_checksums": {"https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip": {"num_bytes": 15971078, "checksum": "37efea4fa1b7774d077ff0452e5e199cecba8216c12da76781010f189d1cf259"}}, "download_size": 15971078, "dataset_size": 24253607, "size_in_bytes": 40224685}}
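
The deleted dataset_infos.json held the same metadata in machine-readable form; a minimal sketch of reading split sizes back out of such a file:

```python
# Print per-config split sizes from a dataset_infos.json file.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config, info in infos.items():
    print(config, {name: split["num_examples"] for name, split in info["splits"].items()})
```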
logical-forms/break_data-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cfc310cc7adac37a15b7f390ca2c2197bd6a1fa4b05e42f2c011b4460a3498d
+ size 373455
logical-forms/break_data-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd9bbc92b6c698fb1538ea52204eb027e4e26ce08c4ffd17f53766187465f6f1
+ size 6088625
logical-forms/break_data-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0e9768415e2ae6829ac2406cb6fb38fbbf4013def8a4df2c2bdbada60fdcdde
+ size 1110732
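
Finally, a sketch of checking a materialized file against its pointer: the bytes on disk should reproduce the oid and size recorded above (path illustrative).

```python
# Verify logical-forms/break_data-validation.parquet against its LFS pointer.
import hashlib
from pathlib import Path

data = Path("logical-forms/break_data-validation.parquet").read_bytes()
assert len(data) == 1110732  # "size" from the pointer
assert hashlib.sha256(data).hexdigest() == "b0e9768415e2ae6829ac2406cb6fb38fbbf4013def8a4df2c2bdbada60fdcdde"
print("pointer matches")
```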