nthngdy committed
Commit db5cf33
1 Parent(s): 5fae75e

Update openwebtext_split.py

Files changed (1)
  1. openwebtext_split.py +36 -22
openwebtext_split.py CHANGED
@@ -37,6 +37,34 @@ An open-source replication of the WebText dataset from OpenAI.
 
 _URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
 
+def custom_iter_archive(path_or_buf, _filter=lambda x: True):
+    def _iter_archive(f):
+        stream = tarfile.open(fileobj=f, mode="r|*")
+        for i, tarinfo in enumerate(stream):
+            # keep only the members selected for this split
+            if not _filter(i):
+                continue
+            file_path = tarinfo.name
+            if not tarinfo.isreg():
+                continue
+            if file_path is None:
+                continue
+            if os.path.basename(file_path).startswith(".") or os.path.basename(file_path).startswith("__"):
+                # skipping hidden files
+                continue
+            if not file_path.endswith("xz"):
+                continue
+            file_obj = stream.extractfile(tarinfo)
+            if file_obj is not None:
+                yield file_path, file_obj
+            # drop references to already-read members to keep memory bounded
+            stream.members = []
+        del stream
+
+    if hasattr(path_or_buf, "read"):
+        yield from _iter_archive(path_or_buf)
+    else:
+        with open(path_or_buf, "rb") as f:
+            yield from _iter_archive(f)
+
 
 class Openwebtext(datasets.GeneratorBasedBuilder):
     """The Open WebText dataset."""
@@ -58,29 +86,15 @@ class Openwebtext(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_URL)
-        owt_dir = os.path.join(dl_dir, "openwebtext")
-        subset_xzs = [
-            os.path.join(owt_dir, file_name)
-            for file_name in sorted(os.listdir(owt_dir))
-            if file_name.endswith("xz")  # filter out ...xz.lock
-        ]
-        ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
-        nested_txt_files = [
-            [
-                os.path.join(ex_dir, txt_file_name)
-                for txt_file_name in sorted(os.listdir(ex_dir))
-                if txt_file_name.endswith("txt")
-            ]
-            for ex_dir in ex_dirs
-        ]
-        txt_files = chain(*nested_txt_files)
-        train_end_idx = int(0.9 * len(txt_files))
-        val_end_idx = train_end_idx + int(0.05 * len(txt_files))
+        archive = dl_manager.download(_URL)
+
+        # deterministic 80/10/10 split by member index in the tar stream
+        train_filter = lambda x: (x % 10) < 8
+        val_filter = lambda x: (x % 10) == 8
+        test_filter = lambda x: (x % 10) == 9
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files[:train_end_idx], "split": "train"}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"txt_files": txt_files[train_end_idx:val_end_idx], "split": "validation"}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"txt_files": txt_files[val_end_idx:], "split": "test"})
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": custom_iter_archive(archive, train_filter)}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"txt_files": custom_iter_archive(archive, val_filter)}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"txt_files": custom_iter_archive(archive, test_filter)}),
         ]
 
     def _generate_examples(self, txt_files):
 
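Note on the new split logic: within each consecutive block of ten archive members, indices 0 through 7 go to train, index 8 to validation, and index 9 to test, a deterministic 80/10/10 split computed without ever materializing the file list (the removed code called len() on an itertools.chain object, which would raise a TypeError). A minimal, self-contained sketch, not part of the commit and using hypothetical member names, that checks the partition:

    train_filter = lambda x: (x % 10) < 8
    val_filter = lambda x: (x % 10) == 8
    test_filter = lambda x: (x % 10) == 9

    members = ["subset_%02d.xz" % i for i in range(30)]  # hypothetical names

    train = [m for i, m in enumerate(members) if train_filter(i)]
    val = [m for i, m in enumerate(members) if val_filter(i)]
    test = [m for i, m in enumerate(members) if test_filter(i)]

    assert (len(train), len(val), len(test)) == (24, 3, 3)  # 80/10/10
    assert sorted(train + val + test) == sorted(members)    # disjoint, complete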
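Since the diff leaves the _generate_examples signature unchanged, that method now receives a lazy stream of (file_path, file_obj) pairs instead of a list of extracted .txt paths. The commit does not show its updated body; the sketch below is one plausible consumer, under the assumption (implied by the removed extraction code) that each inner .xz member is itself a tar archive of .txt documents:

    import tarfile

    def generate_examples_sketch(txt_files):
        # Hypothetical consumer for the pairs yielded by custom_iter_archive;
        # the key scheme and nested-archive assumption are not from the commit.
        key = 0
        for file_path, file_obj in txt_files:
            # each member of openwebtext.tar.xz is assumed to be a tar.xz of .txt files
            with tarfile.open(fileobj=file_obj, mode="r|xz") as inner:
                for info in inner:
                    if not info.isreg() or not info.name.endswith("txt"):
                        continue
                    text = inner.extractfile(info).read().decode("utf-8")
                    yield key, {"text": text}
                    key += 1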