# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SimpleBooks dataset."""


import os

import datasets


_CITATION = """\
@misc{nguyen2019simplebooks,
      title={SimpleBooks: Long-term dependency book dataset with simplified English vocabulary for word-level language modeling},
      author={Huyen Nguyen},
      year={2019},
      eprint={1911.12391},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
SimpleBooks is a small long-term-dependency dataset with a FREQ value equivalent to that of
the 1-billion-token dataset. Its small vocabulary and small percentage of out-of-vocabulary
words make it an ideal testbed and benchmark for word-level language modeling tasks and
tutorials. It was created from 1,573 Gutenberg books, selected out of 39,432 Gutenberg books
by a hill-climbing algorithm that maximizes FREQ.
"""

_LICENSE = "CC BY-SA"

URL = "https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip"


class SimpleBooks(datasets.GeneratorBasedBuilder):
    """SimpleBooks dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="simplebooks-2",
            version=VERSION,
            description="2.2M tokens with a vocabulary size of 11,492",
        ),
        datasets.BuilderConfig(
            name="simplebooks-2-raw",
            version=VERSION,
            description="2.2M tokens with a vocabulary size of 11,492 (raw)",
        ),
        datasets.BuilderConfig(
            name="simplebooks-92",
            version=VERSION,
            description="92M tokens with a vocabulary size of 98,304",
        ),
        datasets.BuilderConfig(
            name="simplebooks-92-raw",
            version=VERSION,
            description="92M tokens with a vocabulary size of 98,304 (raw)",
        ),
    ]

    DEFAULT_CONFIG_NAME = "simplebooks-2"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the zip once; each split then streams its file out of the
        # archive with `iter_archive`, so nothing is extracted to disk.
        archive = dl_manager.download(URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "subset": self.config.name,
                    "split": "train",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "subset": self.config.name,
                    "split": "valid",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "subset": self.config.name,
                    "split": "test",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, subset, split, files):
        _id = 0
        # `iter_archive` yields (path, file object) pairs for every member of
        # the zip; keep only the split file under the selected config's
        # directory, e.g. .../simplebooks-2/train.txt.
        for path, file in files:
            head, tail = os.path.split(path)
            if head.endswith(subset) and tail == f"{split}.txt":
                for line in file:
                    # Files yielded by `iter_archive` are opened in binary
                    # mode, so decode each line before stripping whitespace.
                    yield _id, {"text": line.decode("utf-8").strip()}
                    _id += 1
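

# A minimal usage sketch, not part of the loader itself: it assumes this file
# is run directly as a script and that the S3 archive above is still
# reachable. Depending on your `datasets` version, `load_dataset` may also
# require `trust_remote_code=True` to execute a local loading script.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "simplebooks-2")
    print(dataset["train"][0]["text"])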