# this is a small derivative of the ~8M-document openwebtext dataset, meant for testing

# how this build script and dataset_infos.json were generated

#

mkdir openwebtext-10k
cd openwebtext-10k

# data
wget https://zenodo.org/record/3834942/files/openwebtext.tar.xz
tar xf openwebtext.tar.xz
cd openwebtext
rename.pl 's|-|-00|; s|-00(\d\d\d)|-$1|; s|-00(\d\d)|-0$1|;' *xz
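
# if rename.pl isn't available, roughly the same 3-digit zero-padding of the shard
# numbers can be done in plain bash (a sketch, assuming the original unpadded names,
# not what was actually run):
for f in urlsf_subset*-*_data.xz; do
    n=${f#*-}; n=${n%_data.xz}
    new="${f%%-*}-$(printf '%03d' "$n")_data.xz"
    [ "$f" = "$new" ] || mv "$f" "$new"
done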

# now extract just the first 30 archives
mkdir subset
cp urlsf_subset00-0[0-2]*_data.xz subset
cd subset
find . -name "*xz" -exec tar xf {} \;
mkdir 10k
find . -name "*txt" | sort | head -10000 | xargs mv -t 10k
tar cfJ 10k.xz -C 10k .
mkdir openwebtext-10k
mv 10k.xz openwebtext-10k
tar cfJ openwebtext-10k.tar.xz openwebtext-10k
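
# optional sanity checks (not part of the original run) before uploading:
ls 10k | wc -l                             # expect 10000
tar tfJ openwebtext-10k.tar.xz | head -3   # peek at the archive contents
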
# the openwebtext subdir gets created on the fly
aws s3 cp openwebtext-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/openwebtext/
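
# optional: verify the upload is reachable via the cdn host that the script rewrite
# below points to (assumes the s3 bucket is served from cdn-datasets.huggingface.co)
curl -sI https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz | head -1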

# script
wget https://raw.githubusercontent.com/huggingface/datasets/master/datasets/openwebtext/openwebtext.py
mv openwebtext.py openwebtext-10k.py
perl -pi -e 's|openwebtext|openwebtext-10k|g' openwebtext-10k.py
perl -pi -e 's|https://zenodo.org/record/3834942/files/|https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/|g' openwebtext-10k.py
perl -pi -e 's|Openwebtext|Openwebtext10k|g' openwebtext-10k.py
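
# quick check, not part of the original run, that the rewrites took - the greps
# should show only the cdn-datasets URL and the Openwebtext10k class
grep -En 'zenodo|cdn-datasets' openwebtext-10k.py
grep -En 'class Openwebtext' openwebtext-10k.py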



# manually check that the script is correct and edit the descriptions accordingly

# create a new dataset entry on the hub
https://huggingface.co/new-dataset
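
# alternatively - an assumption, not how it was actually done here - the entry can be
# created from the command line (after huggingface-cli login):
huggingface-cli repo create openwebtext-10k --type dataset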

# once created, clone it
git clone https://huggingface.co/datasets/stas/openwebtext-10k
cp openwebtext-10k.py process.txt openwebtext-10k
cd openwebtext-10k

git add openwebtext-10k.py process.txt
git commit -m "build script" openwebtext-10k.py process.txt
git push

# test the script and generate the dataset_infos.json config file
cd ..
datasets-cli test ./openwebtext-10k --save_infos --all_configs
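
# optionally eyeball the generated file before committing (not part of the original run)
python -m json.tool openwebtext-10k/dataset_infos.json | head -20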

# add and push the generated config
cd openwebtext-10k
git add dataset_infos.json
git commit -m "add dataset_infos.json" dataset_infos.json
git push

# test that the dataset is working
python -c "from datasets import load_dataset; ds=load_dataset('stas/openwebtext-10k'); print(ds)"