c4-en-10k / process.txt
# this is a small derivative from the 8M-big c4-en dataset, for testing

# how this build script and dataset_infos.json were generated
#
mkdir c4-en-10k
cd c4-en-10k
# data (the extraction was done elsewhere - the full dataset is 1TB+, so it's tough to rebuild from scratch)
```
from datasets import load_dataset
dataset_name = "c4"
ds = load_dataset(dataset_name, 'en', split='train[:10000]')
ds.to_json("c4-en-10k.jsonl", orient="records", lines=True)
```
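
# a quick sanity check that the dump is complete (my addition, assuming the filename above)
```
with open("c4-en-10k.jsonl") as f:
    assert sum(1 for _ in f) == 10000  # one json record per line
```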
mkdir c4-en-10k
mv c4-en-10k.jsonl c4-en-10k
tar cfJ c4-en-10k.tar.xz c4-en-10k
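
# optional double check of the archive before uploading (my addition)
```
import tarfile
# expect the c4-en-10k dir and the jsonl file inside it
with tarfile.open("c4-en-10k.tar.xz", "r:xz") as tar:
    print(tar.getnames())
```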
# the c4-en-10k subdir gets created on the fly
aws s3 cp c4-en-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/c4/
# script (adapted from stas/oscar-en-10k)
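
# (my addition) a rough sketch of the shape of such a loading script - the real
# c4-en-10k.py may differ; the features kept and the download URL are assumptions
```
import json
import os

import datasets

# assumed https mirror of the s3 destination used above
_URL = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz"


class C4En10k(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description="10K records from the c4-en dataset, for testing",
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager):
        # downloads the tarball and unpacks it, returning the local dir
        path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(path, "c4-en-10k", "c4-en-10k.jsonl")},
            )
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                yield id_, {"text": json.loads(line)["text"]}
```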
# manually check that the script is correct - edit the descriptions
# create a new dataset entry on the hub
https://huggingface.co/new-dataset
# once created, clone it
git clone https://huggingface.co/datasets/stas/c4-en-10k
cp c4-en-10k.py process.txt c4-en-10k
cd c4-en-10k
git add c4-en-10k.py process.txt README.md
git commit -m "build script" c4-en-10k.py process.txt README.md
git push
# test and generate config file
cd ..
datasets-cli test ./c4-en-10k --save_infos --all_configs
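
# optional: eyeball the generated stats (my addition; the "splits"/"num_examples"
# keys follow the dataset_infos.json layout that datasets-cli wrote at the time)
```
import json
with open("c4-en-10k/dataset_infos.json") as f:
    infos = json.load(f)
for config, info in infos.items():
    print(config, info["splits"]["train"]["num_examples"])  # expect 10000
```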
# add and push the generated config
cd c4-en-10k
git add dataset_infos.json
git commit -m "add dataset_infos.json" dataset_infos.json
git push
# test that the dataset is working
python -c "from datasets import load_dataset; ds=load_dataset('stas/c4-en-10k'); print(ds)"
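
# should print roughly the following - the features listed are an assumption,
# they depend on which c4 fields the script keeps
```
DatasetDict({
    train: Dataset({
        features: ['text'],
        num_rows: 10000
    })
})
```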