Datasets: cfq

Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: expert-generated
Annotations Creators: no-annotation
Source Datasets: original
ArXiv: 1912.09713
Tags: compositionality
License: cc-by-4.0

albertvillanova committed on
Commit 6627f93
1 Parent(s): 4119d6a

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (63fd7944d5e89a9bb1b6d5576f8c4073688da0a6)
- Add mcd2 data files (0bf00d661039c696cac750a0b39778c4ce4c6c71)
- Add mcd3 data files (739c8ad5bc908b420652805ed0094608f210e6fd)
- Add query_complexity_split data files (b7e70403afeb85ab7e142b80176d0ed5e090bfce)
- Add query_pattern_split data files (e297eb9c4cfc9806459c9a3ed1bbb8267bba7682)
- Add question_complexity_split data files (f9d63f8b0c12c73e68bf91d8551fefc6c3c92014)
- Add question_pattern_split data files (cf409dc63b98ed3184f5c2f0c877988f57a5be3c)
- Add random_split data files (83737e52c2ac7fbc075d14209bbd8091303ed92a)
- Delete loading script (14dd78b1e7818255f3c65eda48d1b12fa5fa8a1e)
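
Note: with the loading script deleted, the configs listed above are served straight from the Parquet shards declared in the README's `configs:` mapping. A minimal loading sketch (assuming a recent `datasets` release and the dataset's Hub name `cfq`; `mcd1` is one of the eight configs):

```python
# Minimal sketch: load one CFQ config from the Parquet-backed repo.
# Assumes the dataset is addressed as "cfq" on the Hub and that the
# installed "datasets" library is recent enough to read Parquet data files.
from datasets import load_dataset

ds = load_dataset("cfq", "mcd1")   # other configs: mcd2, mcd3, *_split
print(ds)                          # DatasetDict with "train" and "test" splits
print(ds["train"][0])              # {"question": "...", "query": "..."}
```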

README.md CHANGED
@@ -9,7 +9,6 @@ license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: Compositional Freebase Questions
 size_categories:
 - 100K<n<1M
 source_datasets:
@@ -21,6 +20,7 @@ task_ids:
 - open-domain-qa
 - closed-domain-qa
 paperswithcode_id: cfq
+pretty_name: Compositional Freebase Questions
 tags:
 - compositionality
 dataset_info:
@@ -37,7 +37,7 @@ dataset_info:
   - name: test
     num_bytes: 5446503
     num_examples: 11968
-  download_size: 267599061
+  download_size: 8570962
   dataset_size: 42855309
 - config_name: mcd2
   features:
@@ -52,7 +52,7 @@ dataset_info:
   - name: test
     num_bytes: 5314019
     num_examples: 11968
-  download_size: 267599061
+  download_size: 8867866
   dataset_size: 44738676
 - config_name: mcd3
   features:
@@ -67,9 +67,9 @@ dataset_info:
   - name: test
     num_bytes: 5244503
     num_examples: 11968
-  download_size: 267599061
+  download_size: 8578142
   dataset_size: 43560848
-- config_name: question_complexity_split
+- config_name: query_complexity_split
   features:
   - name: question
     dtype: string
@@ -77,14 +77,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 39989433
-    num_examples: 98999
+    num_bytes: 40270175
+    num_examples: 100654
   - name: test
-    num_bytes: 5781561
-    num_examples: 10340
-  download_size: 267599061
-  dataset_size: 45770994
-- config_name: question_pattern_split
+    num_bytes: 5634924
+    num_examples: 9512
+  download_size: 9303588
+  dataset_size: 45905099
+- config_name: query_pattern_split
   features:
   - name: question
     dtype: string
@@ -92,14 +92,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 41217350
-    num_examples: 95654
+    num_bytes: 40811284
+    num_examples: 94600
   - name: test
-    num_bytes: 5179936
-    num_examples: 11909
-  download_size: 267599061
-  dataset_size: 46397286
-- config_name: query_complexity_split
+    num_bytes: 5268358
+    num_examples: 12589
+  download_size: 9387759
+  dataset_size: 46079642
+- config_name: question_complexity_split
   features:
   - name: question
     dtype: string
@@ -107,14 +107,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 40270175
-    num_examples: 100654
+    num_bytes: 39989433
+    num_examples: 98999
   - name: test
-    num_bytes: 5634924
-    num_examples: 9512
-  download_size: 267599061
-  dataset_size: 45905099
-- config_name: query_pattern_split
+    num_bytes: 5781561
+    num_examples: 10340
+  download_size: 9255771
+  dataset_size: 45770994
+- config_name: question_pattern_split
   features:
   - name: question
     dtype: string
@@ -122,13 +122,13 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 40811284
-    num_examples: 94600
+    num_bytes: 41217350
+    num_examples: 95654
   - name: test
-    num_bytes: 5268358
-    num_examples: 12589
-  download_size: 267599061
-  dataset_size: 46079642
+    num_bytes: 5179936
+    num_examples: 11909
+  download_size: 9482990
+  dataset_size: 46397286
 - config_name: random_split
   features:
   - name: question
@@ -142,8 +142,57 @@ dataset_info:
   - name: test
     num_bytes: 5164923
     num_examples: 11967
-  download_size: 267599061
+  download_size: 9533853
   dataset_size: 46444141
+configs:
+- config_name: mcd1
+  data_files:
+  - split: train
+    path: mcd1/train-*
+  - split: test
+    path: mcd1/test-*
+- config_name: mcd2
+  data_files:
+  - split: train
+    path: mcd2/train-*
+  - split: test
+    path: mcd2/test-*
+- config_name: mcd3
+  data_files:
+  - split: train
+    path: mcd3/train-*
+  - split: test
+    path: mcd3/test-*
+- config_name: query_complexity_split
+  data_files:
+  - split: train
+    path: query_complexity_split/train-*
+  - split: test
+    path: query_complexity_split/test-*
+- config_name: query_pattern_split
+  data_files:
+  - split: train
+    path: query_pattern_split/train-*
+  - split: test
+    path: query_pattern_split/test-*
+- config_name: question_complexity_split
+  data_files:
+  - split: train
+    path: question_complexity_split/train-*
+  - split: test
+    path: question_complexity_split/test-*
+- config_name: question_pattern_split
+  data_files:
+  - split: train
+    path: question_pattern_split/train-*
+  - split: test
+    path: question_pattern_split/test-*
+- config_name: random_split
+  data_files:
+  - split: train
+    path: random_split/train-*
+  - split: test
+    path: random_split/test-*
 ---
 
 # Dataset Card for "cfq"
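
The `configs:` block added above maps each config name to its Parquet shards, which is what lets the Hub load the dataset without a custom script. As a hedged illustration of what that mapping amounts to, the same split could be read through the generic `parquet` builder by passing the globs as `data_files` (the `hf://datasets/cfq/...` paths assume the canonical repo id and a recent `datasets`/`huggingface_hub` stack):

```python
# Sketch only: mirrors the configs/data_files mapping declared in the YAML above.
# "hf://datasets/cfq/..." assumes the canonical repo id; adjust if the repo differs.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={
        "train": "hf://datasets/cfq/random_split/train-*",
        "test": "hf://datasets/cfq/random_split/test-*",
    },
)
print(ds)
```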
cfq.py DELETED
@@ -1,167 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """CFQ (Compositional Freebase Questions) dataset."""
18
-
19
-
20
- import json
21
- import re
22
-
23
- import datasets
24
-
25
-
26
- logger = datasets.logging.get_logger(__name__)
27
-
28
-
29
- _HOMEPAGE = "https://github.com/google-research/google-research/tree/master/cfq"
30
-
31
- _LICENSE = "CC BY 4.0"
32
-
33
- _CITATION = """
34
- @inproceedings{Keysers2020,
35
- title={Measuring Compositional Generalization: A Comprehensive Method on
36
- Realistic Data},
37
- author={Daniel Keysers and Nathanael Sch\"{a}rli and Nathan Scales and
38
- Hylke Buisman and Daniel Furrer and Sergii Kashubin and
39
- Nikola Momchev and Danila Sinopalnikov and Lukasz Stafiniak and
40
- Tibor Tihon and Dmitry Tsarkov and Xiao Wang and Marc van Zee and
41
- Olivier Bousquet},
42
- booktitle={ICLR},
43
- year={2020},
44
- url={https://arxiv.org/abs/1912.09713.pdf},
45
- }
46
- """
47
-
48
- _DESCRIPTION = """
49
- The CFQ dataset (and it's splits) for measuring compositional generalization.
50
-
51
- See https://arxiv.org/abs/1912.09713.pdf for background.
52
-
53
- Example usage:
54
- data = datasets.load_dataset('cfq/mcd1')
55
- """
56
-
57
- _DATA_URL = "https://storage.googleapis.com/cfq_dataset/cfq.tar.gz"
58
-
59
-
60
- class CfqConfig(datasets.BuilderConfig):
61
- """BuilderConfig for CFQ splits."""
62
-
63
- def __init__(self, name, directory="splits", **kwargs):
64
- """BuilderConfig for CFQ.
65
-
66
- Args:
67
- name: Unique name of the split.
68
- directory: Which subdirectory to read the split from.
69
- **kwargs: keyword arguments forwarded to super.
70
- """
71
- # Version history:
72
- super(CfqConfig, self).__init__(
73
- name=name, version=datasets.Version("1.0.1"), description=_DESCRIPTION, **kwargs
74
- )
75
- self.splits_path = f"cfq/{directory}/{name}.json"
76
-
77
-
78
- _QUESTION = "question"
79
- _QUERY = "query"
80
- _QUESTION_FIELD = "questionPatternModEntities"
81
- _QUERY_FIELD = "sparqlPatternModEntities"
82
-
83
-
84
- class Cfq(datasets.GeneratorBasedBuilder):
85
- """CFQ task / splits."""
86
-
87
- BUILDER_CONFIGS = [
88
- CfqConfig(name="mcd1"),
89
- CfqConfig(name="mcd2"),
90
- CfqConfig(name="mcd3"),
91
- CfqConfig(name="question_complexity_split"),
92
- CfqConfig(name="question_pattern_split"),
93
- CfqConfig(name="query_complexity_split"),
94
- CfqConfig(name="query_pattern_split"),
95
- CfqConfig(name="random_split"),
96
- ]
97
-
98
- def _info(self):
99
- return datasets.DatasetInfo(
100
- description=_DESCRIPTION,
101
- features=datasets.Features(
102
- {
103
- _QUESTION: datasets.Value("string"),
104
- _QUERY: datasets.Value("string"),
105
- }
106
- ),
107
- supervised_keys=(_QUESTION, _QUERY),
108
- homepage=_HOMEPAGE,
109
- license=_LICENSE,
110
- citation=_CITATION,
111
- )
112
-
113
- def _split_generators(self, dl_manager):
114
- """Returns SplitGenerators."""
115
- archive_path = dl_manager.download(_DATA_URL)
116
- return [
117
- datasets.SplitGenerator(
118
- name=split,
119
- gen_kwargs={
120
- "data_files": dl_manager.iter_archive(archive_path),
121
- "split_id": f"{split}Idxs",
122
- },
123
- )
124
- for split in [datasets.Split.TRAIN, datasets.Split.TEST]
125
- ]
126
-
127
- def _scrub_json(self, content):
128
- """Reduce JSON by filtering out only the fields of interest."""
129
- # Loading of json data with the standard Python library is very inefficient:
130
- # For the 4GB dataset file it requires more than 40GB of RAM and takes 3min.
131
- # There are more efficient libraries but in order to avoid additional
132
- # dependencies we use a simple (perhaps somewhat brittle) regexp to reduce
133
- # the content to only what is needed.
134
- question_regex = re.compile(r'("%s":\s*"[^"]*")' % _QUESTION_FIELD)
135
- query_regex = re.compile(r'("%s":\s*"[^"]*")' % _QUERY_FIELD)
136
- question_match = None
137
- for line in content:
138
- line = line.decode("utf-8")
139
- if not question_match:
140
- question_match = question_regex.match(line)
141
- else:
142
- query_match = query_regex.match(line)
143
- if query_match:
144
- yield json.loads("{" + question_match.group(1) + "," + query_match.group(1) + "}")
145
- question_match = None
146
-
147
- def _generate_examples(self, data_files, split_id):
148
- """Yields examples."""
149
- samples_path = "cfq/dataset.json"
150
- for path, file in data_files:
151
- if path == self.config.splits_path:
152
- splits = json.load(file)[split_id]
153
- elif path == samples_path:
154
- # The samples_path is the last path inside the archive
155
- generator = enumerate(self._scrub_json(file))
156
- samples = {}
157
- splits_set = set(splits)
158
- for split_idx in splits:
159
- if split_idx in samples:
160
- sample = samples.pop(split_idx)
161
- else:
162
- for sample_idx, sample in generator:
163
- if sample_idx == split_idx:
164
- break
165
- elif sample_idx in splits_set:
166
- samples[sample_idx] = sample
167
- yield split_idx, {_QUESTION: sample[_QUESTION_FIELD], _QUERY: sample[_QUERY_FIELD]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
mcd1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f882ccc6518c5bb384bb6942a4c7abdb1ccbfb4c28e55b82f645d58e080ea4ea
+size 741845
mcd1/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb7fa309819a5328d0762e5ebdf1f7f0d80ce15f8e70907e85e16318dbbfe348
+size 7829117
mcd2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe802885e4acb5fa92a2cc9aba4c904221e77990a76b8e3f722146f3f37c281b
+size 714978
mcd2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a006a221e2cfa9f82bccbd35a3d1b054d363a21a586d125a57a8d9908dcff10
+size 8152888
mcd3/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c2467aae50712e0366daa694fc8640380eb3daed02074ec128e346f534ae69
+size 721379
mcd3/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bcf85fc29a4ca708452c76e97364bba7c6391fe08b0a7447f8daab773d30215
+size 7856763
query_complexity_split/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d24c611cf4a406ae6ba7c19ba7a5297366fb2165280c059f89fe37f1f25dd6c2
+size 891970
query_complexity_split/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4209ee60792c045f7171b367f10a04322ea90901e2e71094c104993c7ecd608f
+size 8411618
query_pattern_split/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da7e0e5494fbbb6efd3de34e1d0253cec52f7f80732503c8e145a1e3821512e0
+size 847600
query_pattern_split/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffe07d5fb8d35045d698795649cf711b69541ea0c40c1288a4ee7cdae3a13eaf
+size 8540159
question_complexity_split/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c517db9740127111a9a8961b3186aa5ad9433f71c735ba2441f066b8b49a468b
+size 914960
question_complexity_split/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6415c8a8f3035450058254b264e25d7a80ed5be3b0d0f8c0242e7496628a33a1
+size 8340811
question_pattern_split/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:113fac993dca05e7bf656e57d9be017ce15666ccd4a158260079d882434c197c
+size 851220
question_pattern_split/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31a872b702eb1c5456b05f4001fae7d70dbfecfa92e2e254537e905cf8b5b9d4
+size 8631770
random_split/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a937a1dd39a4b9a59ed43ccbfd238987f58782bf715b128da54e61f85cb9a867
+size 897045
random_split/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fbb2cb309f4791efceeea4b3063c5be8e7ea4487d381e604692fda91d7e1dab
+size 8636808
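
The entries above are Git LFS pointer files (version, oid, size), not the Parquet data itself; the shards are resolved through LFS when the repo is cloned or downloaded. A hedged sketch of fetching and inspecting a single shard directly (assumes the canonical repo id `cfq`, plus `huggingface_hub` and `pandas` with a Parquet engine installed):

```python
# Sketch: download one Parquet shard from the Hub and peek at its columns.
# Repo id "cfq" is assumed; the filename matches the LFS entry above.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="cfq",
    repo_type="dataset",
    filename="random_split/test-00000-of-00001.parquet",
)
df = pd.read_parquet(path)   # expected columns: "question", "query"
print(df.head())
```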