singletongue committed
Commit 4770bd8
1 Parent(s): 63d1d8a

Add a dataset loading script

Files changed (2)
  1. README.md +115 -1
  2. wikipedia-utils.py +104 -0
README.md CHANGED
@@ -1,3 +1,117 @@
  ---
- license: cc-by-sa-3.0
+ dataset_info:
+ - config_name: corpus-jawiki-20230403
+   features:
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3569619848
+     num_examples: 24387500
+   download_size: 1297833377
+   dataset_size: 3569619848
+ - config_name: corpus-jawiki-20230403-cirrus
+   features:
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 4779055224
+     num_examples: 28018607
+   download_size: 1730081783
+   dataset_size: 4779055224
+ - config_name: corpus-jawiki-20230403-filtered-large
+   features:
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3027074884
+     num_examples: 20133720
+   download_size: 1092808039
+   dataset_size: 3027074884
+ - config_name: paragraphs-jawiki-20230403
+   features:
+   - name: id
+     dtype: string
+   - name: pageid
+     dtype: int64
+   - name: revid
+     dtype: int64
+   - name: paragraph_index
+     dtype: int64
+   - name: title
+     dtype: string
+   - name: section
+     dtype: string
+   - name: text
+     dtype: string
+   - name: html_tag
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 4417130987
+     num_examples: 9668476
+   download_size: 1489512230
+   dataset_size: 4417130987
+ - config_name: passages-c300-jawiki-20230403
+   features:
+   - name: id
+     dtype: int64
+   - name: pageid
+     dtype: int64
+   - name: revid
+     dtype: int64
+   - name: title
+     dtype: string
+   - name: section
+     dtype: string
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3939431360
+     num_examples: 6639833
+   download_size: 1402596784
+   dataset_size: 3939431360
+ - config_name: passages-c400-jawiki-20230403
+   features:
+   - name: id
+     dtype: int64
+   - name: pageid
+     dtype: int64
+   - name: revid
+     dtype: int64
+   - name: title
+     dtype: string
+   - name: section
+     dtype: string
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3868482519
+     num_examples: 5555583
+   download_size: 1393661115
+   dataset_size: 3868482519
+ - config_name: passages-para-jawiki-20230403
+   features:
+   - name: id
+     dtype: int64
+   - name: pageid
+     dtype: int64
+   - name: revid
+     dtype: int64
+   - name: title
+     dtype: string
+   - name: section
+     dtype: string
+   - name: text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3751418134
+     num_examples: 9397066
+   download_size: 1296071247
+   dataset_size: 3751418134
  ---
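
The dataset_info block above is the Hub's per-config metadata: one entry per loadable config, each with its feature schema, split sizes, and download size. A quick way to confirm that the script below exposes exactly these seven configs is to query the Hub. A minimal sketch, assuming the dataset is published as singletongue/wikipedia-utils (the repo id is inferred from the committer and file names, not stated in the diff):

from datasets import get_dataset_config_names

# Should list the seven configs declared in the YAML above
# (the repo id is an assumption; adjust if the dataset lives elsewhere).
configs = get_dataset_config_names("singletongue/wikipedia-utils")
print(configs)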
wikipedia-utils.py ADDED
@@ -0,0 +1,104 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Wikipedia-Utils: Preprocessed Wikipedia Texts for NLP"""
+
+
+ import json
+ from typing import Dict, Iterator, List, Tuple, Union
+
+ import datasets
+
+
+ _DESCRIPTION = "Preprocessed Wikipedia texts generated with scripts in singletongue/wikipedia-utils repo."
+
+ _HOMEPAGE = "https://github.com/singletongue/wikipedia-utils"
+
+ _LICENSE = "The content of Wikipedia is licensed under the CC-BY-SA 3.0 and GFDL licenses."
+
+ _URL_BASE = "https://github.com/singletongue/wikipedia-utils/releases/download"
+ _URLS = {
+     "corpus-jawiki-20230403": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403.txt.gz",
+     "corpus-jawiki-20230403-cirrus": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-cirrus.txt.gz",
+     "corpus-jawiki-20230403-filtered-large": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-filtered-large.txt.gz",
+     "paragraphs-jawiki-20230403": f"{_URL_BASE}/2023-04-03/paragraphs-jawiki-20230403.json.gz",
+     "passages-c300-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c300-jawiki-20230403.json.gz",
+     "passages-c400-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c400-jawiki-20230403.json.gz",
+     "passages-para-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-para-jawiki-20230403.json.gz",
+ }
+
+ _VERSION = datasets.Version("1.0.0")
+
+
+ class WikipediaUtils(datasets.GeneratorBasedBuilder):
+     """Wikipedia-Utils dataset."""
+
+     BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=_VERSION) for name in _URLS.keys()]
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.name.startswith("corpus"):
+             features = datasets.Features({"text": datasets.Value("string")})
+         elif self.config.name.startswith("paragraphs"):
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "pageid": datasets.Value("int64"),
+                     "revid": datasets.Value("int64"),
+                     "paragraph_index": datasets.Value("int64"),
+                     "title": datasets.Value("string"),
+                     "section": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "html_tag": datasets.Value("string"),
+                 }
+             )
+         elif self.config.name.startswith("passages"):
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int64"),
+                     "pageid": datasets.Value("int64"),
+                     "revid": datasets.Value("int64"),
+                     "title": datasets.Value("string"),
+                     "section": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         else:
+             raise ValueError("Invalid dataset config name is specified.")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         url = _URLS[self.config.name]
+         filepath = dl_manager.download_and_extract(url)
+         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]
+
+     def _generate_examples(self, filepath: str) -> Iterator[Tuple[int, Dict[str, Union[int, str]]]]:
+         if self.config.name.startswith("corpus"):
+             with open(filepath) as f:
+                 for id_, line in enumerate(f):
+                     line = line.rstrip("\n")
+                     yield id_, {"text": line}
+
+         elif self.config.name.startswith(("paragraphs", "passages")):
+             with open(filepath) as f:
+                 for line in f:
+                     item = json.loads(line)
+                     yield item["id"], item
+
+         else:
+             raise ValueError("Invalid dataset config name is specified.")
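
With the script in place, each config can be loaded by name: the corpus configs yield plain text records, while the paragraph and passage configs yield the structured records defined in _info. A minimal usage sketch, again assuming the singletongue/wikipedia-utils repo id (newer versions of the datasets library may also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Structured passages: id/pageid/revid/title/section/text per record.
passages = load_dataset(
    "singletongue/wikipedia-utils",  # assumed repo id
    "passages-c300-jawiki-20230403",
    split="train",
)
print(passages[0]["title"])

# Plain-text corpus, streamed to avoid downloading the full file up front.
corpus = load_dataset(
    "singletongue/wikipedia-utils",
    "corpus-jawiki-20230403",
    split="train",
    streaming=True,
)
for example in corpus.take(3):
    print(example["text"])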