Languages: English
Multilinguality: multilingual
Size Categories: 100M<n<1B
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
Peihao committed
Commit: 07df790
Parent: 1f72599

Upload c4.py

Files changed (1):
  1. c4.py +93 -0

c4.py ADDED
@@ -0,0 +1,93 @@
"""C4 dataset based on Common Crawl."""


import gzip
import json

import datasets


logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.

Based on Common Crawl dataset: "https://commoncrawl.org".

This is the processed version of Google's C4 dataset by AllenAI.
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""

_URL = "https://github.com/allenai/allennlp/discussions/5056"

_VARIANTS = ["en", "realnewslike", "en.noblocklist", "en.noclean"]

_N_SHARDS_PER_SPLIT = {
    "en": {"train": 1024, "validation": 8},
    "realnewslike": {"train": 512, "validation": 1},
    "en.noblocklist": {"train": 1024, "validation": 8},
    "en.noclean": {"train": 7168, "validation": 64},
}

_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"


class C4(datasets.GeneratorBasedBuilder):
    """C4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train", "validation"]:
            n_shards = _N_SHARDS_PER_SPLIT[self.config.name][split]
            data_urls[split] = [
                _DATA_URL.format(name=self.config.name, split=split, index=index, n_shards=n_shards)
                for index in range(n_shards)
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
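
A minimal usage sketch for reference: it assumes the script above is saved locally as c4.py and picks the "en" config purely for illustration (any name from _VARIANTS works the same way).

import datasets

# Stream the English training split through the loading script above.
# streaming=True avoids materializing all 1024 train shards on disk.
c4_en = datasets.load_dataset("c4.py", "en", split="train", streaming=True)

# Each example carries the "text", "timestamp", and "url" fields declared in _info().
first_example = next(iter(c4_en))
print(first_example["url"])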