albertvillanova committed
Commit a3de9de
Parent: 6e9b2df

Support streaming bookcorpus dataset (#4564)


Commit from https://github.com/huggingface/datasets/commit/bd8fd273000a02bae960a32a92d543ba3eab1bed

Files changed (1)
  1. bookcorpus.py +9 -16
bookcorpus.py CHANGED
@@ -16,9 +16,6 @@
 # Lint as: python3
 """The BookCorpus dataset."""

-
-import os
-
 import datasets


@@ -83,20 +80,16 @@ class Bookcorpus(datasets.GeneratorBasedBuilder):
             yield ex["text"]

     def _split_generators(self, dl_manager):
-        arch_path = dl_manager.download_and_extract(URL)
-
+        arch_path = dl_manager.download(URL)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"directory": arch_path}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(arch_path)}
+            ),
         ]

-    def _generate_examples(self, directory):
-        files = [
-            os.path.join(directory, "books_large_p1.txt"),
-            os.path.join(directory, "books_large_p2.txt"),
-        ]
+    def _generate_examples(self, files):
         _id = 0
-        for txt_file in files:
-            with open(txt_file, mode="r", encoding="utf-8") as f:
-                for line in f:
-                    yield _id, {"text": line.strip()}
-                    _id += 1
+        for path, file in files:
+            for line in file:
+                yield _id, {"text": line.decode("utf-8").strip()}
+                _id += 1
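
The change replaces dl_manager.download_and_extract with dl_manager.download plus dl_manager.iter_archive, which yields (path, file object) pairs lazily from the archive; that is why _generate_examples now decodes each line from bytes. Because the archive is read incrementally, the dataset can be loaded in streaming mode, as the commit title says. A minimal usage sketch (the example count and printing are illustrative only):

from datasets import load_dataset

# Stream BookCorpus: examples are yielded as the archive is read,
# without downloading and extracting the whole dataset up front.
ds = load_dataset("bookcorpus", split="train", streaming=True)

# Peek at the first few examples.
for i, example in enumerate(ds):
    print(example["text"])
    if i >= 2:
        break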