Dataset: openwebtext
Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
lhoestq (HF staff) committed
Commit: 8cfd553
Parent: 5bb45d5

update script

Files changed (2):
  1. README.md (+3 -3)
  2. openwebtext.py (+36 -25)
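
This commit swaps the loading script's single Zenodo tarball for 21 tar data files hosted in the repository and streamed with dl_manager.iter_archive (see the diffs below). To reproduce the behaviour of exactly this version, the dataset can be pinned to the commit; a minimal sketch, assuming a recent release of the datasets library and access to the Hub:

    from datasets import load_dataset

    # Pin the loading script and data files to this exact commit.
    ds = load_dataset("openwebtext", revision="8cfd553", split="train")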
README.md CHANGED
@@ -28,10 +28,10 @@ dataset_info:
   config_name: plain_text
   splits:
   - name: train
-    num_bytes: 39769494896
+    num_bytes: 39769491688
     num_examples: 8013769
-  download_size: 12880027468
-  dataset_size: 39769494896
+  download_size: 12880189440
+  dataset_size: 39769491688
 ---
 
 # Dataset Card for "openwebtext"
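
The three updated numbers are the train split's size in bytes, the total download size, and the overall dataset size. Once built, they can be sanity-checked against the loaded dataset's metadata; a quick sketch, assuming the build completes:

    from datasets import load_dataset

    ds = load_dataset("openwebtext", split="train")
    # These should match the values in the updated card.
    print(ds.info.splits["train"].num_bytes)  # 39769491688
    print(ds.info.download_size)              # 12880189440
    print(ds.info.dataset_size)               # 39769491688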
openwebtext.py CHANGED
@@ -14,10 +14,9 @@
 # limitations under the License.
 """The Open WebText Corpus"""
 
-
 import os
 import re
-from itertools import chain
+import tarfile
 
 import datasets
 
@@ -35,7 +34,25 @@ _DESCRIPTION = """\
 An open-source replication of the WebText dataset from OpenAI.
 """
 
-_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
+_N_DATA_FILES = 21
+_DATA_FILES = ["subsets/urlsf_subset{:02d}.tar".format(i) for i in range(_N_DATA_FILES)]
+
+
+def _iter_tar(f):
+    stream = tarfile.open(fileobj=f, mode="r|*")
+    for tarinfo in stream:
+        file_path = tarinfo.name
+        if not tarinfo.isreg():
+            continue
+        if file_path is None:
+            continue
+        if os.path.basename(file_path).startswith((".", "__")):
+            # skipping hidden files
+            continue
+        file_obj = stream.extractfile(tarinfo)
+        yield file_path, file_obj
+    stream.members = []
+    del stream
 
 
 class Openwebtext(datasets.GeneratorBasedBuilder):
@@ -58,29 +75,23 @@ class Openwebtext(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_URL)
-        owt_dir = os.path.join(dl_dir, "openwebtext")
-        subset_xzs = [
-            os.path.join(owt_dir, file_name)
-            for file_name in sorted(os.listdir(owt_dir))
-            if file_name.endswith("xz")  # filter out ...xz.lock
-        ]
-        ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
-        nested_txt_files = [
-            [
-                os.path.join(ex_dir, txt_file_name)
-                for txt_file_name in sorted(os.listdir(ex_dir))
-                if txt_file_name.endswith("txt")
-            ]
-            for ex_dir in ex_dirs
-        ]
-        txt_files = chain(*nested_txt_files)
+        archives = dl_manager.download(_DATA_FILES)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
+                "archive_iterators": [
+                    dl_manager.iter_archive(archive) for archive in archives
+                ]
+            }),
         ]
 
-    def _generate_examples(self, txt_files):
+    def _generate_examples(self, archive_iterators):
         """Yields examples."""
-        for idx, filepath in enumerate(txt_files):
-            with open(filepath, encoding="utf-8") as f:
-                yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
+        for archive_iterator in archive_iterators:
+            for xz_filepath, xz_f in archive_iterator:
+                if not xz_filepath.endswith(".xz"):
+                    continue
+                for txt_filepath, txt_f in _iter_tar(xz_f):
+                    if not txt_filepath.endswith(".txt"):
+                        continue
+                    idx = f"{xz_filepath}/{txt_filepath}"
+                    yield idx, {"text": re.sub("\n\n\n+", "\n\n", txt_f.read().decode("utf-8")).strip()}
 
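
Each of the 21 urlsf_subset*.tar archives contains xz-compressed inner tars of .txt documents. The new _iter_tar helper reads one such inner member as a stream: tarfile's "r|*" mode auto-detects the compression and never seeks, which is why it works on the plain file objects handed out by dl_manager.iter_archive. A standalone sketch of the same pattern (the archive filename is hypothetical, for illustration only):

    import tarfile

    def iter_regular_members(fileobj):
        # "r|*" reads the tar as a forward-only stream, auto-detecting
        # compression (plain, gz, bz2 or xz), so no seekable file is needed.
        with tarfile.open(fileobj=fileobj, mode="r|*") as stream:
            for tarinfo in stream:
                if tarinfo.isreg():
                    # Each member must be consumed before the stream advances.
                    yield tarinfo.name, stream.extractfile(tarinfo)

    # Hypothetical inner archive name, used only for illustration:
    with open("urlsf_subset00-1_data.xz", "rb") as f:
        for name, member in iter_regular_members(f):
            if name.endswith(".txt"):
                print(name, len(member.read()))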
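
Because _generate_examples now consumes (path, file object) pairs instead of paths to extracted files, nothing has to be unpacked to disk, and the same code path can also serve streaming mode. A usage sketch, assuming Hub access:

    from datasets import load_dataset

    # Iterate over examples without first fetching the full ~12.9 GB download.
    ds = load_dataset("openwebtext", split="train", streaming=True)
    print(next(iter(ds))["text"][:200])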
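
Two behavioural details of the new _generate_examples are worth noting: the example key changes from a running integer to the stable "outer-archive/inner-file" path, and the text normalization is unchanged; runs of three or more newlines still collapse to a single blank line, and the result is stripped. A quick check of that expression:

    import re

    raw = "Paragraph one.\n\n\n\n\nParagraph two.\n"
    clean = re.sub("\n\n\n+", "\n\n", raw).strip()
    assert clean == "Paragraph one.\n\nParagraph two."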