---
pretty_name: Mathematics Dataset
language:
- en
paperswithcode_id: mathematics
dataset_info:
- config_name: algebra__linear_1d
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 516405
num_examples: 10000
- name: train
num_bytes: 92086245
num_examples: 1999998
download_size: 2333082954
dataset_size: 92602650
- config_name: algebra__linear_1d_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1018090
num_examples: 10000
- name: train
num_bytes: 199566926
num_examples: 1999998
download_size: 2333082954
dataset_size: 200585016
- config_name: algebra__linear_2d
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 666095
num_examples: 10000
- name: train
num_bytes: 126743526
num_examples: 1999998
download_size: 2333082954
dataset_size: 127409621
- config_name: algebra__linear_2d_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1184664
num_examples: 10000
- name: train
num_bytes: 234405885
num_examples: 1999998
download_size: 2333082954
dataset_size: 235590549
- config_name: algebra__polynomial_roots
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 868630
num_examples: 10000
- name: train
num_bytes: 163134199
num_examples: 1999998
download_size: 2333082954
dataset_size: 164002829
- config_name: algebra__polynomial_roots_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1281321
num_examples: 10000
- name: train
num_bytes: 251435312
num_examples: 1999998
download_size: 2333082954
dataset_size: 252716633
- config_name: algebra__sequence_next_term
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 752459
num_examples: 10000
- name: train
num_bytes: 138735194
num_examples: 1999998
download_size: 2333082954
dataset_size: 139487653
- config_name: algebra__sequence_nth_term
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 947764
num_examples: 10000
- name: train
num_bytes: 175945643
num_examples: 1999998
download_size: 2333082954
dataset_size: 176893407
- config_name: arithmetic__add_or_sub
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 483725
num_examples: 10000
- name: train
num_bytes: 89690356
num_examples: 1999998
download_size: 2333082954
dataset_size: 90174081
- config_name: arithmetic__add_or_sub_in_base
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 502221
num_examples: 10000
- name: train
num_bytes: 93779137
num_examples: 1999998
download_size: 2333082954
dataset_size: 94281358
- config_name: arithmetic__add_sub_multiple
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 498421
num_examples: 10000
- name: train
num_bytes: 90962782
num_examples: 1999998
download_size: 2333082954
dataset_size: 91461203
- config_name: arithmetic__div
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 421520
num_examples: 10000
- name: train
num_bytes: 78417908
num_examples: 1999998
download_size: 2333082954
dataset_size: 78839428
- config_name: arithmetic__mixed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 513364
num_examples: 10000
- name: train
num_bytes: 93989009
num_examples: 1999998
download_size: 2333082954
dataset_size: 94502373
- config_name: arithmetic__mul
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 394004
num_examples: 10000
- name: train
num_bytes: 73499093
num_examples: 1999998
download_size: 2333082954
dataset_size: 73893097
- config_name: arithmetic__mul_div_multiple
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 497308
num_examples: 10000
- name: train
num_bytes: 91406689
num_examples: 1999998
download_size: 2333082954
dataset_size: 91903997
- config_name: arithmetic__nearest_integer_root
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 705630
num_examples: 10000
- name: train
num_bytes: 137771237
num_examples: 1999998
download_size: 2333082954
dataset_size: 138476867
- config_name: arithmetic__simplify_surd
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1261753
num_examples: 10000
- name: train
num_bytes: 207753790
num_examples: 1999998
download_size: 2333082954
dataset_size: 209015543
- config_name: calculus__differentiate
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1025947
num_examples: 10000
- name: train
num_bytes: 199013993
num_examples: 1999998
download_size: 2333082954
dataset_size: 200039940
- config_name: calculus__differentiate_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1343416
num_examples: 10000
- name: train
num_bytes: 263757570
num_examples: 1999998
download_size: 2333082954
dataset_size: 265100986
- config_name: comparison__closest
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 681229
num_examples: 10000
- name: train
num_bytes: 132274822
num_examples: 1999998
download_size: 2333082954
dataset_size: 132956051
- config_name: comparison__closest_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1071089
num_examples: 10000
- name: train
num_bytes: 210658152
num_examples: 1999998
download_size: 2333082954
dataset_size: 211729241
- config_name: comparison__kth_biggest
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 797185
num_examples: 10000
- name: train
num_bytes: 149077463
num_examples: 1999998
download_size: 2333082954
dataset_size: 149874648
- config_name: comparison__kth_biggest_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1144556
num_examples: 10000
- name: train
num_bytes: 221547532
num_examples: 1999998
download_size: 2333082954
dataset_size: 222692088
- config_name: comparison__pair
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 452528
num_examples: 10000
- name: train
num_bytes: 85707543
num_examples: 1999998
download_size: 2333082954
dataset_size: 86160071
- config_name: comparison__pair_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 946187
num_examples: 10000
- name: train
num_bytes: 184702998
num_examples: 1999998
download_size: 2333082954
dataset_size: 185649185
- config_name: comparison__sort
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 712498
num_examples: 10000
- name: train
num_bytes: 131752705
num_examples: 1999998
download_size: 2333082954
dataset_size: 132465203
- config_name: comparison__sort_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1114257
num_examples: 10000
- name: train
num_bytes: 213871896
num_examples: 1999998
download_size: 2333082954
dataset_size: 214986153
- config_name: measurement__conversion
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 592904
num_examples: 10000
- name: train
num_bytes: 118650852
num_examples: 1999998
download_size: 2333082954
dataset_size: 119243756
- config_name: measurement__time
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 584278
num_examples: 10000
- name: train
num_bytes: 116962599
num_examples: 1999998
download_size: 2333082954
dataset_size: 117546877
- config_name: numbers__base_conversion
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 490881
num_examples: 10000
- name: train
num_bytes: 90363333
num_examples: 1999998
download_size: 2333082954
dataset_size: 90854214
- config_name: numbers__div_remainder
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 644523
num_examples: 10000
- name: train
num_bytes: 125046212
num_examples: 1999998
download_size: 2333082954
dataset_size: 125690735
- config_name: numbers__div_remainder_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1151347
num_examples: 10000
- name: train
num_bytes: 226341870
num_examples: 1999998
download_size: 2333082954
dataset_size: 227493217
- config_name: numbers__gcd
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 659492
num_examples: 10000
- name: train
num_bytes: 127914889
num_examples: 1999998
download_size: 2333082954
dataset_size: 128574381
- config_name: numbers__gcd_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1206805
num_examples: 10000
- name: train
num_bytes: 237534189
num_examples: 1999998
download_size: 2333082954
dataset_size: 238740994
- config_name: numbers__is_factor
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 396129
num_examples: 10000
- name: train
num_bytes: 75875988
num_examples: 1999998
download_size: 2333082954
dataset_size: 76272117
- config_name: numbers__is_factor_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 949828
num_examples: 10000
- name: train
num_bytes: 185369842
num_examples: 1999998
download_size: 2333082954
dataset_size: 186319670
- config_name: numbers__is_prime
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 385749
num_examples: 10000
- name: train
num_bytes: 73983639
num_examples: 1999998
download_size: 2333082954
dataset_size: 74369388
- config_name: numbers__is_prime_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 947888
num_examples: 10000
- name: train
num_bytes: 184808483
num_examples: 1999998
download_size: 2333082954
dataset_size: 185756371
- config_name: numbers__lcm
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 717978
num_examples: 10000
- name: train
num_bytes: 136826050
num_examples: 1999998
download_size: 2333082954
dataset_size: 137544028
- config_name: numbers__lcm_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1127744
num_examples: 10000
- name: train
num_bytes: 221148668
num_examples: 1999998
download_size: 2333082954
dataset_size: 222276412
- config_name: numbers__list_prime_factors
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 585749
num_examples: 10000
- name: train
num_bytes: 109982816
num_examples: 1999998
download_size: 2333082954
dataset_size: 110568565
- config_name: numbers__list_prime_factors_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1053510
num_examples: 10000
- name: train
num_bytes: 205379513
num_examples: 1999998
download_size: 2333082954
dataset_size: 206433023
- config_name: numbers__place_value
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 496977
num_examples: 10000
- name: train
num_bytes: 95180091
num_examples: 1999998
download_size: 2333082954
dataset_size: 95677068
- config_name: numbers__place_value_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1011130
num_examples: 10000
- name: train
num_bytes: 197187918
num_examples: 1999998
download_size: 2333082954
dataset_size: 198199048
- config_name: numbers__round_number
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 570636
num_examples: 10000
- name: train
num_bytes: 111472483
num_examples: 1999998
download_size: 2333082954
dataset_size: 112043119
- config_name: numbers__round_number_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1016754
num_examples: 10000
- name: train
num_bytes: 201057283
num_examples: 1999998
download_size: 2333082954
dataset_size: 202074037
- config_name: polynomials__add
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1308455
num_examples: 10000
- name: train
num_bytes: 257576092
num_examples: 1999998
download_size: 2333082954
dataset_size: 258884547
- config_name: polynomials__coefficient_named
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1137226
num_examples: 10000
- name: train
num_bytes: 219716251
num_examples: 1999998
download_size: 2333082954
dataset_size: 220853477
- config_name: polynomials__collect
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 774709
num_examples: 10000
- name: train
num_bytes: 143743260
num_examples: 1999998
download_size: 2333082954
dataset_size: 144517969
- config_name: polynomials__compose
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1209763
num_examples: 10000
- name: train
num_bytes: 233651887
num_examples: 1999998
download_size: 2333082954
dataset_size: 234861650
- config_name: polynomials__evaluate
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 599446
num_examples: 10000
- name: train
num_bytes: 114538250
num_examples: 1999998
download_size: 2333082954
dataset_size: 115137696
- config_name: polynomials__evaluate_composed
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1148362
num_examples: 10000
- name: train
num_bytes: 226022455
num_examples: 1999998
download_size: 2333082954
dataset_size: 227170817
- config_name: polynomials__expand
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1057353
num_examples: 10000
- name: train
num_bytes: 202338235
num_examples: 1999998
download_size: 2333082954
dataset_size: 203395588
- config_name: polynomials__simplify_power
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1248040
num_examples: 10000
- name: train
num_bytes: 216407582
num_examples: 1999998
download_size: 2333082954
dataset_size: 217655622
- config_name: probability__swr_p_level_set
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1159050
num_examples: 10000
- name: train
num_bytes: 227540179
num_examples: 1999998
download_size: 2333082954
dataset_size: 228699229
- config_name: probability__swr_p_sequence
features:
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1097442
num_examples: 10000
- name: train
num_bytes: 215865725
num_examples: 1999998
download_size: 2333082954
dataset_size: 216963167
---
# Dataset Card for "math_dataset"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://github.com/deepmind/mathematics_dataset](https://github.com/deepmind/mathematics_dataset)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [Analysing Mathematical Reasoning Abilities of Neural Models](https://arxiv.org/abs/1904.01557)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 130.65 GB
- **Size of the generated dataset:** 9.08 GB
- **Total amount of disk used:** 139.73 GB
### Dataset Summary
This dataset consists of mathematical question and answer pairs covering a range of question types at roughly school-level difficulty.
It is designed to test the mathematical learning and algebraic reasoning skills of learning models.
Original paper: Analysing Mathematical Reasoning Abilities of Neural Models
(Saxton, Grefenstette, Hill, Kohli).
Example usage:
```
from datasets import load_dataset

train_examples, test_examples = load_dataset(
    "math_dataset",
    "arithmetic__mul",
    split=["train", "test"],
)
```
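Each returned split is a `datasets.Dataset` whose rows are dictionaries with `question` and `answer` string fields (see [Data Fields](#data-fields)). Every configuration lists the same download size (about 2.33 GB) because all question types are distributed together in a single source archive.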
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
The questions and answers are in English (`en`).
## Dataset Structure
### Data Instances
#### algebra__linear_1d
- **Size of downloaded dataset files:** 2.33 GB
- **Size of the generated dataset:** 92.60 MB
- **Total amount of disk used:** 2.43 GB
An example of 'train' looks as follows.
```
```
#### algebra__linear_1d_composed
- **Size of downloaded dataset files:** 2.33 GB
- **Size of the generated dataset:** 200.58 MB
- **Total amount of disk used:** 2.53 GB
An example of 'train' looks as follows.
```
```
#### algebra__linear_2d
- **Size of downloaded dataset files:** 2.33 GB
- **Size of the generated dataset:** 127.41 MB
- **Total amount of disk used:** 2.46 GB
An example of 'train' looks as follows.
```
```
#### algebra__linear_2d_composed
- **Size of downloaded dataset files:** 2.33 GB
- **Size of the generated dataset:** 235.59 MB
- **Total amount of disk used:** 2.57 GB
An example of 'train' looks as follows.
```
```
#### algebra__polynomial_roots
- **Size of downloaded dataset files:** 2.33 GB
- **Size of the generated dataset:** 164.01 MB
- **Total amount of disk used:** 2.50 GB
An example of 'train' looks as follows.
```
```
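To inspect a concrete instance, here is a minimal sketch using the `datasets` library; the `algebra__linear_1d` configuration is chosen only for illustration.
```
from datasets import load_dataset

# Load one configuration; every row is a dict with "question" and "answer" strings.
dataset = load_dataset("math_dataset", "algebra__linear_1d", split="train")

example = dataset[0]
print(example["question"])
print(example["answer"])
```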
### Data Fields
The data fields are the same among all splits.
#### algebra__linear_1d
- `question`: a `string` feature.
- `answer`: a `string` feature.
#### algebra__linear_1d_composed
- `question`: a `string` feature.
- `answer`: a `string` feature.
#### algebra__linear_2d
- `question`: a `string` feature.
- `answer`: a `string` feature.
#### algebra__linear_2d_composed
- `question`: a `string` feature.
- `answer`: a `string` feature.
#### algebra__polynomial_roots
- `question`: a `string` feature.
- `answer`: a `string` feature.
### Data Splits
| name | train |test |
|---------------------------|------:|----:|
|algebra__linear_1d |1999998|10000|
|algebra__linear_1d_composed|1999998|10000|
|algebra__linear_2d |1999998|10000|
|algebra__linear_2d_composed|1999998|10000|
|algebra__polynomial_roots |1999998|10000|
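Split sizes for any configuration can also be read from the exported dataset metadata. Below is a minimal sketch with `datasets.load_dataset_builder`, assuming the loader ships split information; the configuration name is only an example.
```
from datasets import load_dataset_builder

# Read split metadata for one configuration without preparing the data itself.
builder = load_dataset_builder("math_dataset", "arithmetic__mul")
for split_name, split_info in builder.info.splits.items():
    print(split_name, split_info.num_examples)
```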
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Citation Information
```
@article{saxton2019analysing,
  author  = {Saxton, David and Grefenstette, Edward and Hill, Felix and Kohli, Pushmeet},
  title   = {Analysing Mathematical Reasoning Abilities of Neural Models},
  journal = {arXiv preprint arXiv:1904.01557},
  year    = {2019}
}
```
### Contributions
Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@lewtun](https://github.com/lewtun), [@thomwolf](https://github.com/thomwolf) for adding this dataset.