stas committed
Commit 13c338d
1 Parent(s): 59476ba
Files changed (4)
  1. README.md +39 -0
  2. c4-en-10k.py +82 -0
  3. dataset_infos.json +1 -0
  4. process.txt +54 -0
README.md ADDED
@@ -0,0 +1,39 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ datasets:
+ - c4
+ ---
+
+ # C4 EN 10K for testing
+
+ This is a small subset representing the first 10K records of the "en" subset of the original C4 dataset, created for testing. The records were extracted after the dataset had been shuffled.
+
+ The full 1TB+ dataset is at https://huggingface.co/datasets/c4.
+
+ ```
+ $ python -c "from datasets import load_dataset; ds=load_dataset('stas/c4-en-10k'); print(ds)"
+ DatasetDict({
+     train: Dataset({
+         features: ['text'],
+         num_rows: 10000
+     })
+ })
+ ```
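+
+ The subset is small enough to load whole, but for even faster tests you can grab just a slice using the standard `datasets` split syntax (the record count below is only an example):
+
+ ```
+ from datasets import load_dataset
+ ds = load_dataset("stas/c4-en-10k", split="train[:100]")
+ ```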
+
+ * records: 10,000
+ * compressed size: 6.4M
+ * uncompressed size: 22M
+
+ To convert to jsonlines:
+
+ ```
+ from datasets import load_dataset
+ dataset_name = "stas/c4-en-10k"
+ name = dataset_name.split('/')[-1]
+ ds = load_dataset(dataset_name, split='train')
+ ds.to_json(f"{name}.jsonl", orient="records", lines=True)
+ ```
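+
+ And to read the jsonl file back (a minimal sketch, assuming the `c4-en-10k.jsonl` file produced above):
+
+ ```
+ import json
+
+ with open("c4-en-10k.jsonl") as f:
+     records = [json.loads(line) for line in f]
+ print(len(records), records[0]["text"][:50])
+ ```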
+
+ To see how this subset was created, here is the [instructions file](./process.txt).
c4-en-10k.py ADDED
@@ -0,0 +1,82 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The Open WebText Corpus"""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{2019t5,
+     author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+     title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+     journal = {arXiv e-prints},
+     year = {2019},
+     archivePrefix = {arXiv},
+     eprint = {1910.10683},
+ }
+ """
+
+ _DESCRIPTION = """\
+ This is a small subset representing 10K records from the original C4 dataset, "en" subset - created for testing. The records were extracted after the dataset had been shuffled.
+
+ The full 1TB+ dataset is at https://huggingface.co/datasets/c4.
+ """
+
+ _URL = "https://cdn-datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz"
+
+
+ class C4En10k(datasets.GeneratorBasedBuilder):
+     """The C4 EN 10K dataset."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text",
+             description="Plain text",
+             version=datasets.Version("1.0.0"),
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({"text": datasets.Value("string")}),
+             homepage="https://huggingface.co/datasets/c4",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_URL)
+         jsonl_file = os.path.join(dl_dir, "c4-en-10k", "c4-en-10k.jsonl")
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"jsonl_file": jsonl_file}),
+         ]
+
+     def _generate_examples(self, jsonl_file):
+         """Yields examples."""
+         with open(jsonl_file, encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 rec = json.loads(line)
+                 yield idx, {"text": rec["text"]}
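+
+
+ # A quick local smoke test before pushing (a sketch; it assumes `datasets` is
+ # installed and this file is saved as ./c4-en-10k.py):
+ #
+ #   from datasets import load_dataset
+ #   ds = load_dataset("./c4-en-10k.py", split="train")
+ #   print(ds[0]["text"][:100])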
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"plain_text": {"description": "This is a small subset representing 10K records from the original C4 dataset, \"en\" subset - created for testing. The records were extracted after the dataset had been shuffled.\n\nThe full 1TB+ dataset is at https://huggingface.co/datasets/c4.\n", "citation": "@article{2019t5,\n    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},\n    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},\n    journal = {arXiv e-prints},\n    year = {2019},\n    archivePrefix = {arXiv},\n    eprint = {1910.10683},\n}\n", "homepage": "https://huggingface.co/datasets/c4", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "c4_en10k", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21498951, "num_examples": 10000, "dataset_name": "c4_en10k"}}, "download_checksums": {"https://cdn-datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz": {"num_bytes": 6627716, "checksum": "d743c7a76595877c5810427bb6254c7af73211a483810b32016026225231fdd3"}}, "download_size": 6627716, "post_processing_size": null, "dataset_size": 21498951, "size_in_bytes": 28126667}}
process.txt ADDED
@@ -0,0 +1,54 @@
+
+ # this is a small derivative of the huge c4-en dataset, for testing
+
+ # how this build script and dataset_infos.json were generated
+
+ #
+
+ mkdir c4-en-10k
+ cd c4-en-10k
+
+ # data (the subset was extracted on a machine that already has the full dataset - at 1TB+ it is tough to rebuild from scratch)
+ ```
+ from datasets import load_dataset
+ dataset_name = "c4"
+ ds = load_dataset(dataset_name, 'en', split='train[:10000]')
+ ds.to_json("c4-en-10k.jsonl", orient="records", lines=True)
+ ```
+
+ mkdir c4-en-10k
+ mv c4-en-10k.jsonl c4-en-10k
+ tar cfJ c4-en-10k.tar.xz c4-en-10k
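+
+ # sanity-check the record count before uploading (a hypothetical extra step, not in the original flow) - expect 10000
+ python -c "print(sum(1 for _ in open('c4-en-10k/c4-en-10k.jsonl')))"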
+
+ # the c4-en-10k subdir gets created on the fly
+ aws s3 cp c4-en-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/c4/
+
+ # script
+ # (adapted from stas/oscar-en-10k)
+
+ # manually check that the script is correct - edit the descriptions
+
+ # create a new dataset entry on the hub
+ https://huggingface.co/new-dataset
+
+ # once created, clone it
+ git clone https://huggingface.co/datasets/stas/c4-en-10k
+ cp c4-en-10k.py process.txt c4-en-10k
+ cd c4-en-10k
+
+ git add c4-en-10k.py process.txt README.md
+ git commit -m "build script" c4-en-10k.py process.txt README.md
+ git push
41
+ git push
42
+
43
+ # test and generate config file
44
+ cd ..
45
+ datasets-cli test ./c4-en-10k --save_infos --all_configs
46
+
47
+ # add and push the generated config
48
+ cd c4-en-10k
49
+ git add dataset_infos.json
50
+ git commit -m "add dataset_infos.json" dataset_infos.json
51
+ git push
52
+
53
+ # test that the dataset is working
54
+ python -c "from datasets import load_dataset; ds=load_dataset('stas/c4-en-10k'); print(ds)"