nthngdy commited on
Commit
0ef64f6
1 Parent(s): f3a0ed2

Upload openwebtext_split.py

Browse files
Files changed (1) hide show
  1. openwebtext_split.py +90 -0
openwebtext_split.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """The Open WebText Corpus"""
16
+
17
+
18
+ import os
19
+ import re
20
+ from itertools import chain
21
+
22
+ import datasets
23
+
24
+
25
+ _CITATION = """\
26
+ @misc{Gokaslan2019OpenWeb,
27
+ title={OpenWebText Corpus},
28
+ author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
29
+ howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
30
+ year={2019}
31
+ }
32
+ """
33
+
34
+ _DESCRIPTION = """\
35
+ An open-source replication of the WebText dataset from OpenAI.
36
+ """
37
+
38
+ _URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
39
+
40
+
41
class Openwebtext(datasets.GeneratorBasedBuilder):
    """The Open WebText dataset, partitioned 90/5/5 into train/validation/test."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        """Return dataset metadata: a single string feature named ``text``."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://skylion007.github.io/OpenWebTextCorpus/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the corpus and split its .txt files 90/5/5.

        The outer tar.xz contains an ``openwebtext`` directory of nested .xz
        subset archives; each of those extracts to a directory of .txt files.
        Files are sorted before splitting so the partition is deterministic.
        """
        dl_dir = dl_manager.download_and_extract(_URL)
        owt_dir = os.path.join(dl_dir, "openwebtext")
        subset_xzs = [
            os.path.join(owt_dir, file_name)
            for file_name in sorted(os.listdir(owt_dir))
            if file_name.endswith("xz")  # filter out ...xz.lock
        ]
        # Extract the nested archives in parallel, capped at ~75% of cores.
        ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
        nested_txt_files = [
            [
                os.path.join(ex_dir, txt_file_name)
                for txt_file_name in sorted(os.listdir(ex_dir))
                if txt_file_name.endswith("txt")
            ]
            for ex_dir in ex_dirs
        ]
        # BUG FIX: chain(...) yields a one-shot iterator that supports neither
        # len() nor slicing, so the split arithmetic below would raise
        # TypeError. Materialize the flattened file list first.
        txt_files = list(chain.from_iterable(nested_txt_files))
        train_end_idx = int(0.9 * len(txt_files))
        val_end_idx = train_end_idx + int(0.05 * len(txt_files))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"txt_files": txt_files[:train_end_idx], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"txt_files": txt_files[train_end_idx:val_end_idx], "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"txt_files": txt_files[val_end_idx:], "split": "test"},
            ),
        ]

    def _generate_examples(self, txt_files, split=None):
        """Yield ``(key, {"text": ...})`` examples, one per .txt file.

        Runs of three or more newlines are collapsed to a blank line and the
        text is stripped of surrounding whitespace.

        BUG FIX: ``split`` is accepted (and unused) because the gen_kwargs in
        ``_split_generators`` include it; the original signature would have
        raised ``TypeError: unexpected keyword argument 'split'``.
        """
        for idx, filepath in enumerate(txt_files):
            with open(filepath, encoding="utf-8") as f:
                yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}