import os

import datasets

_CITATION = ""

_DESCRIPTION = """\
The dataset is based on the Hutter Prize (http://prize.hutter1.net) and contains the first 10^8 bytes of the English Wikipedia XML dump from 2006.
"""

_HOMEPAGE = "http://mattmahoney.net/dc/textdata.html"

_LICENSE = ""

_URLS = {"source": "http://mattmahoney.net/dc/enwik8.zip"}


class Enwik8(datasets.GeneratorBasedBuilder):
    """enwik8: the first 10^8 bytes of English Wikipedia, with the standard 90M/5M/5M byte split."""

    VERSION = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="enwik8-standard",
            version=VERSION,
            description="This version of the dataset uses the standard split of 90M/5M/5M bytes and yields a single text blob per split.",
        )
    ]

    DEFAULT_CONFIG_NAME = "enwik8-standard"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["source"]
        data_dir = dl_manager.download_and_extract(urls)
        # Standard enwik8 split boundaries, expressed as byte offsets into
        # the 10^8-byte file: 90M train / 5M validation / 5M test.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "enwik8"),
                    "split": "train",
                    "start_index": 0,
                    "end_index": 90_000_000,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "enwik8"),
                    "split": "validation",
                    "start_index": 90_000_000,
                    "end_index": 95_000_000,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "enwik8"),
                    "split": "test",
                    "start_index": 95_000_000,
                    "end_index": 100_000_000,
                },
            ),
        ]

    def _generate_examples(self, filepath, split, start_index, end_index):
        # The split boundaries are byte offsets, so slice the raw bytes and
        # decode afterwards; slicing the decoded string would index by
        # character, which drifts from byte offsets in multi-byte UTF-8 text.
        with open(filepath, "rb") as f:
            f.seek(start_index)
            data = f.read(end_index - start_index)
        # A boundary may split a multi-byte character, so decode leniently.
        yield 0, {"text": data.decode("utf-8", errors="replace")}
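

# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this script is saved locally as "enwik8.py" and
# a `datasets` version that still supports script-based loaders is installed;
# newer releases require passing trust_remote_code=True for local scripts).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset("enwik8.py", name="enwik8-standard")
    # Each split holds exactly one example: the full text blob for that split.
    for split_name, split_ds in ds.items():
        print(split_name, len(split_ds[0]["text"]))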