# this is a small derivative of the 8M-big c4-en dataset, for testing

# how this build script and dataset_infos.json were generated

mkdir c4-en-10k
cd c4-en-10k

# data (the subset was extracted elsewhere) - the full dataset is 1TB+, so it's tough to rebuild from scratch
```
from datasets import load_dataset

dataset_name = "c4"
# take the first 10,000 records of the English train split
ds = load_dataset(dataset_name, "en", split="train[:10000]")
# write them out under the name the steps below expect
ds.to_json("c4-en-10k.jsonl", orient="records", lines=True)
```
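
# optional: quick sanity check that the export really holds 10,000 records
# (a minimal sketch; assumes the jsonl was written to the current directory)
```
# count the lines of the jsonl dump; should match the train[:10000] slice above
n = sum(1 for _ in open("c4-en-10k.jsonl", encoding="utf-8"))
assert n == 10000, f"expected 10000 records, got {n}"
```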

mkdir c4-en-10k
mv c4-en-10k.jsonl c4-en-10k
tar cfJ c4-en-10k.tar.xz c4-en-10k

# the c4-en-10k subdir gets created on the fly
aws s3 cp c4-en-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/c4/

# script (adapted from stas/oscar-en-10k)
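
# for reference, a rough sketch of the kind of loading script this is - a standard
# datasets.GeneratorBasedBuilder; the URL, description and feature set below are
# illustrative placeholders, not the actual contents of c4-en-10k.py
```
import json

import datasets

# assumed location, mirroring the s3 upload step above
_URL = "https://datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz"


class C4En10k(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description="10K-record subset of the C4 English split, for testing",
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager):
        # download_and_extract returns the path of the unpacked archive
        extracted = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{extracted}/c4-en-10k/c4-en-10k.jsonl"},
            )
        ]

    def _generate_examples(self, filepath):
        # one json record per line; yield (key, example) pairs
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, {"text": json.loads(line)["text"]}
```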

# manually check that the script is correct - edit the descriptions

# create a new dataset entry on the hub
https://huggingface.co/new-dataset

# once created clone it
git clone https://huggingface.co/datasets/stas/c4-en-10k
cp c4-en-10k.py process.txt c4-en-10k
cd c4-en-10k

git add c4-en-10k.py process.txt README.md
git commit -m "build script" c4-en-10k.py process.txt README.md
git push

# test and generate config file
cd ..
datasets-cli test ./c4-en-10k --save_infos --all_configs
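
# optionally inspect the generated file before committing it (a sketch that assumes
# the usual dataset_infos.json layout: config name -> info dict with a "splits" section)
```
import json

with open("c4-en-10k/dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)
# print the recorded number of train examples for each config
for config_name, info in infos.items():
    print(config_name, info["splits"]["train"]["num_examples"])
```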

# add and push the generated config
cd c4-en-10k
git add dataset_infos.json
git commit -m "add dataset_infos.json" dataset_infos.json
git push

# test that the dataset is working
python -c "from datasets import load_dataset; ds=load_dataset('stas/c4-en-10k'); print(ds)"
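
# a slightly longer check (a sketch; the exact feature set depends on what the loading
# script declares)
```
from datasets import load_dataset

ds = load_dataset("stas/c4-en-10k")
print(ds["train"].features)  # schema as declared by the loading script
print(ds["train"][0])        # first record
```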