system (HF staff) committed
Commit 56fbf4c
0 Parent(s)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
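
These patterns route archives, model weights, and other binary files through Git LFS instead of storing them directly in Git. As a rough illustration only (Git's own .gitattributes matching has extra rules, e.g. for saved_model/**/*), the short Python sketch below checks which of the patterns above a given path name would match; the helper and the trimmed pattern list are hypothetical and not part of this repository:

import fnmatch

# Hypothetical helper: approximate which LFS patterns from .gitattributes
# apply to a file name. Real attribute matching is done by Git itself.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.bz2", "*.gz", "*.zip", "*tfevents*"]

def matching_lfs_patterns(path):
    name = path.rsplit("/", 1)[-1]
    return [p for p in LFS_PATTERNS if fnmatch.fnmatch(name, p)]

print(matching_lfs_patterns("dummy/algebra__linear_1d/1.0.0/dummy_data.zip"))  # ['*.zip']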
dataset_infos.json ADDED
The diff for this file is too large to render. See raw diff
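
Because the diff is not rendered here, a minimal sketch for inspecting the file from a local checkout (assuming dataset_infos.json sits in the working directory; the nested layout of each entry is produced by the datasets library):

import json

# Minimal sketch: list the configuration names recorded in dataset_infos.json.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name in sorted(infos):
    print(config_name)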
dummy/algebra__linear_1d/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af12a948b5d88ad0ceb435b1abd3ccc0d98ab41a3a9a7e4f982545b85b8ea782
+ size 2688
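
Only this LFS pointer is stored in the Git history; the zip itself lives in LFS storage. A minimal sketch for checking a downloaded copy against the pointer's oid and size (the local path is an assumption, adjust as needed):

import hashlib
import os

# Minimal sketch: verify a fetched dummy_data.zip against the LFS pointer above.
path = "dummy/algebra__linear_1d/1.0.0/dummy_data.zip"  # assumed local path

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(8192), b""):
        digest.update(chunk)

print("size matches:", os.path.getsize(path) == 2688)
print("oid matches:", digest.hexdigest() == "af12a948b5d88ad0ceb435b1abd3ccc0d98ab41a3a9a7e4f982545b85b8ea782")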
math_dataset.py ADDED
@@ -0,0 +1,282 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Mathematics database."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import logging
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{2019arXiv,
+   author = {Saxton, Grefenstette, Hill, Kohli},
+   title = {Analysing Mathematical Reasoning Abilities of Neural Models},
+   year = {2019},
+   journal = {arXiv:1904.01557}
+ }
+ """
+
+ _DESCRIPTION = """
+ Mathematics database.
+
+ This dataset code generates mathematical question and answer pairs,
+ from a range of question types at roughly school-level difficulty.
+ This is designed to test the mathematical learning and algebraic
+ reasoning skills of learning models.
+
+ Original paper: Analysing Mathematical Reasoning Abilities of Neural Models
+ (Saxton, Grefenstette, Hill, Kohli).
+
+ Example usage:
+ train_examples, val_examples = datasets.load_dataset(
+     'math_dataset/arithmetic__mul',
+     split=['train', 'test'],
+     as_supervised=True)
+ """
+
+ _DATA_URL = "https://storage.googleapis.com/mathematics-dataset/mathematics_dataset-v1.0.tar.gz"
+
+ _TRAIN_CATEGORY = [
+     "train-easy",
+     "train-medium",
+     "train-hard",
+ ]
+
+ _INTERPOLATE_CATEGORY = [
+     "interpolate",
+ ]
+
+ _MODULES = [
+     # extrapolate
+     "measurement__conversion",
+     # interpolate
+     "algebra__linear_1d",
+     "algebra__linear_1d_composed",
+     "algebra__linear_2d",
+     "algebra__linear_2d_composed",
+     "algebra__polynomial_roots",
+     "algebra__polynomial_roots_composed",
+     "algebra__sequence_next_term",
+     "algebra__sequence_nth_term",
+     "arithmetic__add_or_sub",
+     "arithmetic__add_or_sub_in_base",
+     "arithmetic__add_sub_multiple",
+     "arithmetic__div",
+     "arithmetic__mixed",
+     "arithmetic__mul",
+     "arithmetic__mul_div_multiple",
+     "arithmetic__nearest_integer_root",
+     "arithmetic__simplify_surd",
+     "calculus__differentiate",
+     "calculus__differentiate_composed",
+     "comparison__closest",
+     "comparison__closest_composed",
+     "comparison__kth_biggest",
+     "comparison__kth_biggest_composed",
+     "comparison__pair",
+     "comparison__pair_composed",
+     "comparison__sort",
+     "comparison__sort_composed",
+     "measurement__conversion",
+     "measurement__time",
+     "numbers__base_conversion",
+     "numbers__div_remainder",
+     "numbers__div_remainder_composed",
+     "numbers__gcd",
+     "numbers__gcd_composed",
+     "numbers__is_factor",
+     "numbers__is_factor_composed",
+     "numbers__is_prime",
+     "numbers__is_prime_composed",
+     "numbers__lcm",
+     "numbers__lcm_composed",
+     "numbers__list_prime_factors",
+     "numbers__list_prime_factors_composed",
+     "numbers__place_value",
+     "numbers__place_value_composed",
+     "numbers__round_number",
+     "numbers__round_number_composed",
+     "polynomials__add",
+     "polynomials__coefficient_named",
+     "polynomials__collect",
+     "polynomials__compose",
+     "polynomials__evaluate",
+     "polynomials__evaluate_composed",
+     "polynomials__expand",
+     "polynomials__simplify_power",
+     "probability__swr_p_level_set",
+     "probability__swr_p_sequence",
+     # train-easy train-medium train-hard
+     "algebra__linear_1d",
+     "algebra__linear_1d_composed",
+     "algebra__linear_2d",
+     "algebra__linear_2d_composed",
+     "algebra__polynomial_roots",
+     "algebra__polynomial_roots_composed",
+     "algebra__sequence_next_term",
+     "algebra__sequence_nth_term",
+     "arithmetic__add_or_sub",
+     "arithmetic__add_or_sub_in_base",
+     "arithmetic__add_sub_multiple",
+     "arithmetic__div",
+     "arithmetic__mixed",
+     "arithmetic__mul",
+     "arithmetic__mul_div_multiple",
+     "arithmetic__nearest_integer_root",
+     "arithmetic__simplify_surd",
+     "calculus__differentiate",
+     "calculus__differentiate_composed",
+     "comparison__closest",
+     "comparison__closest_composed",
+     "comparison__kth_biggest",
+     "comparison__kth_biggest_composed",
+     "comparison__pair",
+     "comparison__pair_composed",
+     "comparison__sort",
+     "comparison__sort_composed",
+     "measurement__conversion",
+     "measurement__time",
+     "numbers__base_conversion",
+     "numbers__div_remainder",
+     "numbers__div_remainder_composed",
+     "numbers__gcd",
+     "numbers__gcd_composed",
+     "numbers__is_factor",
+     "numbers__is_factor_composed",
+     "numbers__is_prime",
+     "numbers__is_prime_composed",
+     "numbers__lcm",
+     "numbers__lcm_composed",
+     "numbers__list_prime_factors",
+     "numbers__list_prime_factors_composed",
+     "numbers__place_value",
+     "numbers__place_value_composed",
+     "numbers__round_number",
+     "numbers__round_number_composed",
+     "polynomials__add",
+     "polynomials__coefficient_named",
+     "polynomials__collect",
+     "polynomials__compose",
+     "polynomials__evaluate",
+     "polynomials__evaluate_composed",
+     "polynomials__expand",
+     "polynomials__simplify_power",
+     "probability__swr_p_level_set",
+     "probability__swr_p_sequence",
+ ]
+
+ _QUESTION = "question"
+ _ANSWER = "answer"
+
+ _DATASET_VERSION = "mathematics_dataset-v1.0"
+
+
+ def _generate_builder_configs():
+     """Generate configs with different subsets of mathematics dataset."""
+     configs = []
+     for module in sorted(set(_MODULES)):
+         configs.append(
+             datasets.BuilderConfig(
+                 name=module,
+                 version=datasets.Version("1.0.0"),
+                 description=_DESCRIPTION,
+             )
+         )
+
+     return configs
+
+
+ class MathDataset(datasets.GeneratorBasedBuilder):
+     """Math Dataset."""
+
+     BUILDER_CONFIGS = _generate_builder_configs()
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _QUESTION: datasets.Value("string"),
+                     _ANSWER: datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=(_QUESTION, _ANSWER),
+             homepage="https://github.com/deepmind/mathematics_dataset",
+             citation=_CITATION,
+         )
+
+     def _read_data_from_all_categories(self, directory, config, categories):
+         lines = []
+         for category in categories:
+             data_file = os.path.join(directory, _DATASET_VERSION, category, config)
+             if os.path.exists(data_file):
+                 with open(data_file, encoding="utf-8") as f:
+                     ls = f.read().split("\n")
+
+                     for l in ls[::-1]:
+                         if not l:
+                             ls.remove(l)
+
+                     lines.extend(ls)
+
+         return lines
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+
+         directory = dl_manager.download_and_extract(_DATA_URL)
+         config = self.config.name + ".txt"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "directory": directory,
+                     "config": config,
+                     "categories": _TRAIN_CATEGORY,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "directory": directory,
+                     "config": config,
+                     "categories": _INTERPOLATE_CATEGORY,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, directory, config, categories):
+         """Yields examples based on directory, module file.."""
+
+         lines = self._read_data_from_all_categories(directory, config, categories)
+         logging.info("%s: %s contains total: %d", categories, config, len(lines))
+         questions = lines[::2]
+         answers = lines[1::2]
+
+         assert len(answers) == len(questions), "answers: %d do not match questions: %d" % (
+             len(answers),
+             len(questions),
+         )
+
+         for idx, (q, a) in enumerate(zip(questions, answers)):
+             result = {_QUESTION: q, _ANSWER: a}
+             if all(result.values()):
+                 yield idx, result
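
The builder exposes each entry of _MODULES as a configuration with string "question" and "answer" features, a train split built from the train-easy/medium/hard categories, and a test split built from the interpolate category. A minimal usage sketch, assuming the script is available on the Hub under the math_dataset name (the as_supervised example in the docstring mirrors the TensorFlow Datasets original API):

from datasets import load_dataset

# Minimal sketch: load one module configuration produced by MathDataset.
# "algebra__linear_1d" is one of the configuration names generated from _MODULES.
train = load_dataset("math_dataset", "algebra__linear_1d", split="train")
test = load_dataset("math_dataset", "algebra__linear_1d", split="test")

print(train[0]["question"], "->", train[0]["answer"])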