# HuggingFace loading script for generic sprucfluo datasets
# This script was automatically generated by convert_hf_to_sprucfluo
import json
import pathlib

import datasets
import fsspec
from datasets import DatasetInfo, Features

logger = datasets.logging.get_logger(__name__)

_INFO = DatasetInfo(
            description='Automatically generated for wikitext (wikitext-103-raw-v1), split into 8 shards, detokenized.\n\nOriginal Description:\n The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n',
            citation='@misc{merity2016pointer,\n      title={Pointer Sentinel Mixture Models},\n      author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n      year={2016},\n      eprint={1609.07843},\n      archivePrefix={arXiv},\n      primaryClass={cs.CL}\n}\n',
            homepage='https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/',
            license='Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)',
            version="1.0.0",
            features=Features.from_dict({'text': {'dtype': 'string', 'id': None, '_type': 'Value'}}),
            supervised_keys=None)


class AutoDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [datasets.BuilderConfig()]

    def _info(self):
        return _INFO

    @property
    def dataset_dir(self):
        return pathlib.Path(__file__).parent

    def _split_generators(self, dl_manager):
        # metadata.json maps each split name to the list of data files that make it up.
        with open(dl_manager.download("metadata.json"), "rt") as f:
            metadata = json.load(f)
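        # A sketch of the expected metadata.json shape, inferred from how it is read
        # below; the exact fields written by convert_hf_to_sprucfluo may include more,
        # and the file names here are illustrative only:
        #
        #     {"splits": {"train": {"files": ["train/shard_00000.jsonl.gz", "..."]},
        #                 "validation": {"files": ["validation/shard_00000.jsonl.gz"]}}}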
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepaths": dl_manager.download(split_metadata["files"])},
            )
            for split, split_metadata in metadata["splits"].items()
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_: int = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            with fsspec.open(filepath, mode="rt", compression="infer", encoding="utf-8") as f:
                for line in f:
                    if line.strip():  # skip blank lines; json.loads would fail on them
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1


if __name__ == "__main__":
    AutoDataset().download_and_prepare()
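

# Example usage (a sketch, not part of the generated script): once this loader sits
# next to its metadata.json and data shards, it can be consumed through the standard
# `datasets` API, with a `datasets` version that still supports loading scripts.
# The path below is a placeholder for wherever the generated loader lives.
#
#     import datasets
#     ds = datasets.load_dataset("path/to/this/loading_script.py", split="train")
#     print(ds[0]["text"])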