vblagoje committed on
Commit
821a824
1 Parent(s): 164178a

Another fix

Files changed (1)
  1. wikipedia_snippets_streamed.py +11 -13
wikipedia_snippets_streamed.py CHANGED
@@ -32,8 +32,9 @@ class WikipediaSnippetsStreamed(datasets.ArrowBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "title": datasets.Value("string"),
+                    "wikidata_id": datasets.Value("string"),
                     "text": datasets.Value("string"),
+                    "version_id": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -43,18 +44,15 @@ class WikipediaSnippetsStreamed(datasets.ArrowBasedBuilder):
 
     def _split_generators(self, dl_manager):
         url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wiki40b/en/1.1.0/wiki40b-train.parquet"
-        downloaded_files = dl_manager.download(url)
+        downloaded_file = dl_manager.download(url)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
         ]
 
-    def _generate_tables(self, filepaths):
-        """This function returns the examples in the raw (text) form."""
-        for filepath in filepaths:
-            logger.info("generating examples from = %s", filepath)
-            filepath_id = os.path.basename(filepath)
-            with open(filepath, "rb") as f:
-                pf = pq.ParquetFile(f)
-                for i in range(pf.num_row_groups):
-                    id_ = f"{filepath_id}_{i}"
-                    yield id_, pf.read_row_group(i)
+    def _generate_tables(self, filepath):
+        logger.info("generating examples from = %s", filepath)
+        with open(filepath, "rb") as f:
+            pf = pq.ParquetFile(f)
+            for i in range(pf.num_row_groups):
+                id_ = f"{filepath}_{i}"
+                yield id_, pf.read_row_group(i)
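
For reference, a minimal standalone sketch of the row-group iteration that the updated _generate_tables relies on; the local path below is a placeholder for the file dl_manager.download(url) would return, not part of this commit:

import pyarrow.parquet as pq

# Placeholder path; in the builder this comes from dl_manager.download(url).
filepath = "wiki40b-train.parquet"

with open(filepath, "rb") as f:
    pf = pq.ParquetFile(f)
    # Each Parquet row group is read as one Arrow table and keyed by
    # "<filepath>_<row group index>", mirroring what the builder yields.
    for i in range(pf.num_row_groups):
        table = pf.read_row_group(i)
        print(f"{filepath}_{i}", table.num_rows)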