---
dataset_info:
- config_name: all
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 360187653412.6177
    num_examples: 56194997
  download_size: 199030076349
  dataset_size: 360187653412.6177
- config_name: c4_realnews
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 31597106256.723488
    num_examples: 11427438
  download_size: 19889880484
  dataset_size: 31597106256.723488
- config_name: openwebtext
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 30974178275.039234
    num_examples: 6474479
  download_size: 19069709415
  dataset_size: 30974178275.039234
- config_name: peS2o
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 221900508006.5479
    num_examples: 32612199
  download_size: 116217303065
  dataset_size: 221900508006.5479
- config_name: redpajama_books
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 49246538575.26426
    num_examples: 107443
  download_size: 29612204926
  dataset_size: 49246538575.26426
- config_name: stackexchange
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 2034535930.2150385
    num_examples: 716532
  download_size: 1222605537
  dataset_size: 2034535930.2150385
- config_name: uspto
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 14755999149.910166
    num_examples: 3247716
  download_size: 7058272149
  dataset_size: 14755999149.910166
- config_name: wiki
  features:
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 7528525537.163156
    num_examples: 1609190
  download_size: 4593971902
  dataset_size: 7528525537.163156
configs:
- config_name: all
  data_files:
  - split: train
    path: all/train-*
- config_name: c4_realnews
  data_files:
  - split: train
    path: c4_realnews/train-*
- config_name: openwebtext
  data_files:
  - split: train
    path: openwebtext/train-*
- config_name: peS2o
  data_files:
  - split: train
    path: peS2o/train-*
- config_name: redpajama_books
  data_files:
  - split: train
    path: redpajama_books/train-*
- config_name: stackexchange
  data_files:
  - split: train
    path: stackexchange/train-*
- config_name: uspto
  data_files:
  - split: train
    path: uspto/train-*
- config_name: wiki
  data_files:
  - split: train
    path: wiki/train-*
task_categories:
- text-generation
language:
- en
size_categories:
- 10M<n<100M
---
|
|
|
A small, aggressively cleaned and de-duplicated pre-training corpus for academic settings. It aims to recreate something akin to [The Pile](https://huggingface.co/datasets/EleutherAI/pile), but prioritizes quality given the constrained token budgets academic researchers live with.
|
|
|
It has seven config subsets and an eighth `all` subset that combines them, for a total of ~91B tokens (GPT-2 tokenizer estimate). The subsets are as follows (a loading example follows the list):
|
|
|
1. `c4_realnews`: The RealNews domain subset of the C4 dataset containing news articles. |
|
2. `openwebtext`: The OpenWebText dataset containing the contents of the links mentioned in Reddit posts with at least 3 upvotes. |
|
3. `peS2o`: The peS2o dataset containing academic articles from Semantic Scholar.
|
4. `redpajama_books`: The books subset of RedPajama V1. |
|
5. `stackexchange`: The EN StackExchange non-code subset of the BigScience ROOTS dataset.
|
6. `uspto`: The EN USPTO patent-application contents subset of the BigScience ROOTS dataset.
|
7. `wiki`: The EN Wiki subset of the BigScience ROOTS dataset.
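
Each subset can be loaded on its own through the `datasets` library. A minimal sketch; the repository id below is a placeholder for this dataset's actual Hub path:

```python
from datasets import load_dataset

# Stream a single subset; replace "<repo-id>" with this dataset's Hub path.
wiki = load_dataset("<repo-id>", "wiki", split="train", streaming=True)

# Every config exposes a single `content` string field per document.
for doc in wiki.take(3):
    print(doc["content"][:200])
```

Streaming avoids downloading a full subset up front when only a sample is needed.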
|
|
|
The following processing and filtering steps have been applied (hedged sketches of steps 2–5 follow the list):
|
|
|
1. Removed citation text and bibliography information for academic texts. |
|
2. Ran a perplexity filter using a KenLM model trained on the EN OSCAR corpus, removing documents with a perplexity above 325 or below 7.
|
3. Removed samples in which repeating <=4-grams make up 15% or more of the text.
|
4. Removed samples with lower than 99% confidence of being EN according to the lingua language detector.
|
5. Performed an aggressive MinHash de-dupe using a shingle size of 8 and a deliberately low similarity threshold of 0.5.
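
The perplexity filter in step 2 can be approximated with the `kenlm` Python bindings. This is a sketch, not the exact pipeline used: the model filename and the newline handling are assumptions.

```python
import kenlm

# Hypothetical path to a KenLM model trained on the EN OSCAR corpus.
model = kenlm.Model("oscar_en.arpa.bin")

def keep_by_perplexity(text: str, low: float = 7.0, high: float = 325.0) -> bool:
    """Keep documents whose perplexity falls inside [low, high]."""
    ppl = model.perplexity(text.replace("\n", " "))
    return low <= ppl <= high
```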
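
Steps 3 and 4 are per-document predicates. One plausible reading of them, using `collections.Counter` for the repeated n-gram check and the `lingua` detector for the language check; the whitespace tokenization and the exact definition of the repetition ratio are assumptions:

```python
from collections import Counter

from lingua import Language, LanguageDetectorBuilder

detector = LanguageDetectorBuilder.from_all_languages().build()

def repeated_ngram_fraction(text: str, max_n: int = 4) -> float:
    """Fraction of n-gram occurrences (n <= max_n) that are repeats."""
    tokens = text.split()
    repeats, total = 0, 0
    for n in range(1, max_n + 1):
        counts = Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))
        total += sum(counts.values())
        repeats += sum(c - 1 for c in counts.values() if c > 1)
    return repeats / total if total else 0.0

def keep_document(text: str) -> bool:
    if repeated_ngram_fraction(text) >= 0.15:  # step 3
        return False
    # Step 4: confidence that the document is English.
    return detector.compute_language_confidence(text, Language.ENGLISH) >= 0.99
```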
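
Step 5 can be reproduced in spirit with `datasketch`. The shingle size of 8 and LSH threshold of 0.5 come from the card; `num_perm` and the token-level shingling are assumptions:

```python
from datasketch import MinHash, MinHashLSH

lsh = MinHashLSH(threshold=0.5, num_perm=128)

def minhash(text: str, shingle_size: int = 8) -> MinHash:
    """MinHash over 8-token shingles of a document."""
    tokens = text.split()
    m = MinHash(num_perm=128)
    for i in range(max(len(tokens) - shingle_size + 1, 1)):
        m.update(" ".join(tokens[i : i + shingle_size]).encode("utf-8"))
    return m

def is_duplicate(doc_id: str, text: str) -> bool:
    """Flag a document if an earlier near-duplicate was already indexed."""
    m = minhash(text)
    if lsh.query(m):  # any indexed document above the 0.5 threshold
        return True
    lsh.insert(doc_id, m)
    return False
```

A threshold of 0.5 is low for MinHash LSH, which is what makes the de-dupe aggressive: documents sharing only about half their shingles are already treated as duplicates.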