# this is a small derivative of the 8M-record oscar-en dataset, for testing

# how this build script and dataset_infos.json were generated:

mkdir oscar-en-10k
cd oscar-en-10k

# data: oscar-1GB.jsonl was extracted elsewhere - the full oscar-en dataset is 1.63TB, so it's impractical to rebuild from scratch here
head -10000 oscar-1GB.jsonl > oscar-en-10k.jsonl
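
# optional sanity check (not part of the original steps): confirm we really got
# 10k records and that the first line is valid json
wc -l oscar-en-10k.jsonl
head -1 oscar-en-10k.jsonl | python -m json.tool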

# nest the jsonl in an oscar-en-10k/ subdir, so that extracting the tarball
# creates that subdir instead of spilling files into the current dir
mkdir oscar-en-10k
mv oscar-en-10k.jsonl oscar-en-10k
tar cfJ oscar-en-10k.tar.xz oscar-en-10k
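
# optional (not in the original flow): verify the archive layout - all entries
# should be under the oscar-en-10k/ prefix
tar tfJ oscar-en-10k.tar.xz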

# upload the tarball (when it's later downloaded and extracted, the oscar-en-10k subdir gets created on the fly)
aws s3 cp oscar-en-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/oscar/
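
# optional: confirm the upload landed (requires list credentials for this bucket)
aws s3 ls s3://datasets.huggingface.co/nlp/datasets/oscar/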

# script (adapted from stas/openwebtext-10k)

# manually check that the script is correct - edit the descriptions
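
# e.g. roughly (hypothetical commands - the exact file names depend on that repo):
# git clone https://huggingface.co/datasets/stas/openwebtext-10k
# cp openwebtext-10k/openwebtext-10k.py oscar-en-10k.py
# then rename the builder class and point _DESCRIPTION, _CITATION and the
# download url at the oscar-en-10k.tar.xz uploaded above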

# create a new dataset entry on the hub
https://huggingface.co/new-dataset
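
# make sure you can push to the hub - e.g. (assuming the huggingface_hub cli is
# installed; a git credential helper with a hub token works too):
huggingface-cli login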

# once created clone it
git clone https://huggingface.co/datasets/stas/oscar-en-10k
cp oscar-en-10k.py process.txt oscar-en-10k
cd oscar-en-10k

git add oscar-en-10k.py process.txt
git commit -m "build script" oscar-en-10k.py process.txt
git push

# test and generate config file
cd ..
datasets-cli test ./oscar-en-10k --save_infos --all_configs
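
# optional quick check (not in the original flow): list the generated config names
python -c "import json; print(list(json.load(open('oscar-en-10k/dataset_infos.json'))))"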

# add and push the generated config
cd oscar-en-10k
git add dataset_infos.json
git commit -m "add dataset_infos.json" dataset_infos.json
git push

# test that the dataset is working
python -c "from datasets import load_dataset; ds=load_dataset('stas/oscar-en-10k'); print(ds)"