singletongue committed
Commit: 9c6cfa2
Parent: 2ef7386

Change to use ArrowBasedBuilder

Files changed (2):
1. README.md (+1, -1)
2. wikipedia-utils.py (+33, -14)
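
For context on the commit message: the two builder base classes in `datasets` differ in what their generator methods yield. `GeneratorBasedBuilder._generate_examples` yields one `(key, example_dict)` pair per record, which the library converts to Arrow row by row, while `ArrowBasedBuilder._generate_tables` yields `(key, pyarrow.Table)` pairs, writing whole batches at once and avoiding per-example Python overhead. A minimal sketch of the two contracts (illustrative skeletons only; a complete builder also defines `_info` and `_split_generators`, as this script does):

```python
import datasets
import pyarrow as pa


class ExampleBasedSketch(datasets.GeneratorBasedBuilder):
    # Old contract: yield one Python dict per record.
    def _generate_examples(self, filepath):
        with open(filepath) as f:
            for idx, line in enumerate(f):
                yield idx, {"text": line.rstrip("\n")}


class TableBasedSketch(datasets.ArrowBasedBuilder):
    # New contract: yield one pyarrow.Table per batch of records.
    def _generate_tables(self, filepath):
        with open(filepath) as f:
            lines = [line.rstrip("\n") for line in f]
        yield 0, pa.Table.from_arrays([pa.array(lines)], names=["text"])
```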
README.md CHANGED
@@ -127,4 +127,4 @@ size_categories:
 
 Preprocessed Wikipedia texts generated with the scripts in [singletongue/wikipedia-utils](https://github.com/singletongue/wikipedia-utils) repo.
 
-For detailed information on how the texts are processed, please refer to the repo.
+For detailed information on how the texts are processed, please refer to the repo.
wikipedia-utils.py CHANGED
@@ -1,4 +1,5 @@
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+# Copyright 2023 Masatoshi Suzuki (@singletongue)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,11 +14,11 @@
 # limitations under the License.
 """Wikipedia-Utils: Preprocessed Wikipedia Texts for NLP"""
 
-
-import json
-from typing import Dict, Iterator, List, Tuple, Union
+import io
+from typing import Iterator, List, Tuple
 
 import datasets
+import pyarrow as pa
 
 
 _DESCRIPTION = "Preprocessed Wikipedia texts generated with scripts in singletongue/wikipedia-utils repo."
@@ -40,7 +41,7 @@ _URLS = {
 _VERSION = datasets.Version("1.0.0")
 
 
-class WikipediaUtils(datasets.GeneratorBasedBuilder):
+class WikipediaUtils(datasets.ArrowBasedBuilder):
     """Wikipedia-Utils dataset."""
 
     BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=_VERSION) for name in _URLS.keys()]
@@ -87,18 +88,36 @@ class WikipediaUtils(datasets.GeneratorBasedBuilder):
         filepath = dl_manager.download_and_extract(url)
         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]
 
-    def _generate_examples(self, filepath: str) -> Iterator[Tuple[int, Dict[str, Union[int, str]]]]:
+    def _generate_tables(self, filepath: str, chunksize: int = 10 << 20) -> Iterator[Tuple[int, pa.Table]]:
         if self.config.name.startswith("corpus"):
             with open(filepath) as f:
-                for id_, line in enumerate(f):
-                    line = line.rstrip("\n")
-                    yield id_, {"text": line}
-
+                batch_idx = 0
+                while True:
+                    batch = f.read(chunksize)
+                    if not batch:
+                        break
+
+                    batch += f.readline()
+                    batch = [line.rstrip("\n") for line in io.StringIO(batch).readlines()]
+                    pa_table = pa.Table.from_arrays([pa.array(batch)], names=["text"])
+
+                    yield batch_idx, pa_table
+                    batch_idx += 1
         elif self.config.name.startswith(("paragraphs", "passages")):
-            with open(filepath) as f:
-                for line in f:
-                    item = json.loads(line)
-                    yield item["id"], item
-
+            with open(filepath, "rb") as f:
+                batch_idx = 0
+                block_size = max(chunksize // 32, 16 << 10)
+                while True:
+                    batch = f.read(chunksize)
+                    if not batch:
+                        break
+
+                    batch += f.readline()
+                    pa_table = pa.json.read_json(
+                        io.BytesIO(batch), read_options=pa.json.ReadOptions(block_size=block_size)
+                    )
+
+                    yield batch_idx, pa_table
+                    batch_idx += 1
         else:
             raise ValueError("Invalid dataset config name is specified.")
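
The key technique in the new `_generate_tables` is aligning each read with a line boundary: `f.read(chunksize)` usually stops mid-line, so the following `f.readline()` extends the batch to the end of the current line, ensuring no text line or JSON record is split across two tables. A standalone sketch of that pattern (the file name and chunk size below are illustrative):

```python
def iter_line_aligned_chunks(path: str, chunksize: int = 10 << 20):
    """Yield successive ~chunksize pieces of a text file, each ending on a line boundary."""
    with open(path) as f:
        while True:
            batch = f.read(chunksize)  # may stop in the middle of a line
            if not batch:
                break  # end of file reached
            batch += f.readline()  # complete the current line ("" at EOF)
            yield batch


# Hypothetical usage: report how many lines land in each ~10 MiB batch.
for batch_idx, chunk in enumerate(iter_line_aligned_chunks("corpus.txt")):
    n_lines = chunk.count("\n")
    print(f"batch {batch_idx}: {n_lines} lines")
```

For the JSON Lines configs (`paragraphs`, `passages`), each line-aligned batch is handed to `pyarrow.json.read_json`, where `ReadOptions(block_size=...)` controls how many bytes the parser processes per internal record batch; the commit floors it at 16 KiB and otherwise uses 1/32 of the read chunk (about 320 KiB for the default 10 MiB `chunksize`), presumably to bound per-batch parsing memory.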