singletongue committed on
Commit 27bb3cc
1 Parent(s): cc1be2d

Remove the dataset loading script
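(With the loading script removed, the dataset is presumably served straight from data files hosted on the Hub, so recent versions of the datasets library can load it without executing repository code. A minimal sketch, assuming the Hub repo id singletongue/wikipedia-utils and a config name taken from the removed script's _URLS keys:)

    from datasets import load_dataset

    # Config names match the _URLS keys of the removed script,
    # e.g. "passages-c300-jawiki-20240401" or "corpus-jawiki-20230403".
    ds = load_dataset("singletongue/wikipedia-utils", "passages-c300-jawiki-20240401", split="train")
    print(ds[0]["text"])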

Files changed (1)
  1. wikipedia-utils.py +0 -130
wikipedia-utils.py DELETED
@@ -1,130 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- # Copyright 2023 Masatoshi Suzuki (@singletongue)
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Wikipedia-Utils: Preprocessed Wikipedia Texts for NLP"""
-
- import io
- from typing import Iterator, List, Tuple
-
- import datasets
- import pyarrow as pa
-
-
- _DESCRIPTION = "Preprocessed Wikipedia texts generated with scripts in singletongue/wikipedia-utils repo."
-
- _HOMEPAGE = "https://github.com/singletongue/wikipedia-utils"
-
- _LICENSE = "The content of Wikipedia is licensed under the CC-BY-SA 3.0 and GFDL licenses."
-
- _URL_BASE = "https://github.com/singletongue/wikipedia-utils/releases/download"
- _URLS = {
-     "corpus-jawiki-20240401": f"{_URL_BASE}/2024-04-01/corpus-jawiki-20240401.txt.gz",
-     "corpus-jawiki-20240401-cirrus": f"{_URL_BASE}/2024-04-01/corpus-jawiki-20240401-cirrus.txt.gz",
-     "corpus-jawiki-20240401-filtered-large": f"{_URL_BASE}/2024-04-01/corpus-jawiki-20240401-filtered-large.txt.gz",
-     "paragraphs-jawiki-20240401": f"{_URL_BASE}/2024-04-01/paragraphs-jawiki-20240401.json.gz",
-     "passages-c300-jawiki-20240401": f"{_URL_BASE}/2024-04-01/passages-c300-jawiki-20240401.json.gz",
-     "passages-c400-jawiki-20240401": f"{_URL_BASE}/2024-04-01/passages-c400-jawiki-20240401.json.gz",
-     "passages-para-jawiki-20240401": f"{_URL_BASE}/2024-04-01/passages-para-jawiki-20240401.json.gz",
-     "corpus-jawiki-20230403": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403.txt.gz",
-     "corpus-jawiki-20230403-cirrus": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-cirrus.txt.gz",
-     "corpus-jawiki-20230403-filtered-large": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-filtered-large.txt.gz",
-     "paragraphs-jawiki-20230403": f"{_URL_BASE}/2023-04-03/paragraphs-jawiki-20230403.json.gz",
-     "passages-c300-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c300-jawiki-20230403.json.gz",
-     "passages-c400-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c400-jawiki-20230403.json.gz",
-     "passages-para-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-para-jawiki-20230403.json.gz",
- }
-
- _VERSION = datasets.Version("1.0.0")
-
-
- class WikipediaUtils(datasets.ArrowBasedBuilder):
-     """Wikipedia-Utils dataset."""
-
-     BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=_VERSION) for name in _URLS.keys()]
-
-     def _info(self) -> datasets.DatasetInfo:
-         if self.config.name.startswith("corpus"):
-             features = datasets.Features({"text": datasets.Value("string")})
-         elif self.config.name.startswith("paragraphs"):
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "pageid": datasets.Value("int64"),
-                     "revid": datasets.Value("int64"),
-                     "paragraph_index": datasets.Value("int64"),
-                     "title": datasets.Value("string"),
-                     "section": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                     "html_tag": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name.startswith("passages"):
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("int64"),
-                     "pageid": datasets.Value("int64"),
-                     "revid": datasets.Value("int64"),
-                     "title": datasets.Value("string"),
-                     "section": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                 }
-             )
-         else:
-             raise ValueError("Invalid dataset config name is specified.")
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         url = _URLS[self.config.name]
-         filepath = dl_manager.download_and_extract(url)
-         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]
-
-     def _generate_tables(self, filepath: str, chunksize: int = 10 << 20) -> Iterator[Tuple[int, pa.Table]]:
-         if self.config.name.startswith("corpus"):
-             with open(filepath) as f:
-                 batch_idx = 0
-                 while True:
-                     batch = f.read(chunksize)
-                     if not batch:
-                         break
-
-                     batch += f.readline()
-                     batch = [line.rstrip("\n") for line in io.StringIO(batch).readlines()]
-                     pa_table = pa.Table.from_arrays([pa.array(batch)], names=["text"])
-
-                     yield batch_idx, pa_table
-                     batch_idx += 1
-         elif self.config.name.startswith(("paragraphs", "passages")):
-             with open(filepath, "rb") as f:
-                 batch_idx = 0
-                 block_size = max(chunksize // 32, 16 << 10)
-                 while True:
-                     batch = f.read(chunksize)
-                     if not batch:
-                         break
-
-                     batch += f.readline()
-                     pa_table = pa.json.read_json(
-                         io.BytesIO(batch), read_options=pa.json.ReadOptions(block_size=block_size)
-                     )
-
-                     yield batch_idx, pa_table
-                     batch_idx += 1
-         else:
-             raise ValueError("Invalid dataset config name is specified.")
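
(For reference, the removed _generate_tables avoids loading whole files into memory: it reads a fixed-size chunk, appends f.readline() so every batch ends on a line boundary, and only then parses the batch into an Arrow table. A self-contained sketch of the same chunking technique for the JSON configs follows; the file path and chunk size are illustrative, and pyarrow.json is imported explicitly, since the script's bare pa.json presumably resolved only because the datasets library imports that submodule internally:)

    import io

    import pyarrow as pa
    import pyarrow.json  # not pulled in by "import pyarrow" alone

    def iter_jsonl_tables(path: str, chunksize: int = 10 << 20):
        """Yield Arrow tables from a large JSON Lines file, one chunk at a time."""
        block_size = max(chunksize // 32, 16 << 10)
        with open(path, "rb") as f:
            while True:
                batch = f.read(chunksize)
                if not batch:
                    break
                # Extend the chunk to the next newline so no JSON record is split.
                batch += f.readline()
                yield pa.json.read_json(
                    io.BytesIO(batch),
                    read_options=pa.json.ReadOptions(block_size=block_size),
                )

    # Illustrative usage on a decompressed passages file:
    # for table in iter_jsonl_tables("passages-c300-jawiki-20240401.json"):
    #     print(table.num_rows)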