Andreas Sünder committed on
Commit
2ded227
1 Parent(s): 91400fc

Upload first loading script

Files changed (1)
  1. simplebooks.py +95 -0
simplebooks.py ADDED
@@ -0,0 +1,95 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """SimpleBooks dataset."""
+
+
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @misc{nguyen2019simplebooks,
+     title={SimpleBooks: Long-term dependency book dataset with simplified English vocabulary for word-level language modeling},
+     author={Huyen Nguyen},
+     year={2019},
+     eprint={1911.12391},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ SimpleBooks is a small long-term dependency dataset with a FREQ value equivalent to that of the 1-billion-token dataset. Its small vocabulary size and small percentage of out-of-vocabulary words make it an ideal testbed and benchmark for word-level language modeling tasks and tutorials.
+ It was created from 1,573 Gutenberg books, selected out of 39,432 Gutenberg books using a hill-climbing algorithm to maximize FREQ.
+ """
+
+ _LICENSE = "CC BY-SA"
+
+ URL = "https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip"
+
+
+ class SimpleBooks(datasets.GeneratorBasedBuilder):
+     """SimpleBooks dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="simplebooks-2", version=VERSION, description="2.2M tokens with a vocab size of 11,492"),
+         datasets.BuilderConfig(name="simplebooks-2-raw", version=VERSION, description="2.2M tokens with a vocab size of 11,492 (raw)"),
+         datasets.BuilderConfig(name="simplebooks-92", version=VERSION, description="92M tokens with a vocab size of 98,304"),
+         datasets.BuilderConfig(name="simplebooks-92-raw", version=VERSION, description="92M tokens with a vocab size of 98,304 (raw)"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "simplebooks-2"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # The archive is downloaded and extracted once; the splits only differ
+         # in which text file of the selected config they read.
+         archive = dl_manager.download_and_extract(URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "path": os.path.join(archive, self.config.name, "train.txt"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "path": os.path.join(archive, self.config.name, "valid.txt"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "path": os.path.join(archive, self.config.name, "test.txt"),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, path):
+         # Yield one example per line of the split's text file, keyed by line index.
+         with open(path, encoding="utf-8") as f:
+             for _id, line in enumerate(f):
+                 yield _id, {"text": line.strip()}