# wikitext_103/wikitext_103.py
import json
import os

import datasets

_CITATION = ""
_DESCRIPTION = """
Wikitext-103 dataset from this paper:
https://arxiv.org/pdf/1609.07843.pdf

The Gopher authors concatenate all the articles, set the context length to n/2
(where n = max_seq_len), and use the "closed vocabulary" variant of the dataset
for evaluation. In contrast, we evaluate the model on each article independently,
use single-token contexts (except for the last sequence in each document), and
use the raw (open-vocabulary) variant of the dataset.
"""


class Wikitext103(datasets.GeneratorBasedBuilder):
    """Test-only builder for the raw Wikitext-103 dataset, one article per example."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # A single string feature: the raw text of one full Wikitext-103 article.
        features = datasets.Features(
{
"text": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage="",
license="",
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Resolve (and cache) the test file that ships with this repository.
        test_json = dl_manager.download(os.path.join("data", "test.jsonl"))
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"path": test_json},
)
]
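
    # Expected layout of `data/test.jsonl` (inferred from the `text` feature in
    # `_info` and from `_generate_examples` below; the article text shown is a
    # placeholder): one JSON object per line, e.g.
    #
    #   {"text": " = Article title = \n Full raw text of the article ..."}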
    # Method parameters are unpacked from the `gen_kwargs` given in `_split_generators`.
    def _generate_examples(self, path):
        with open(path, encoding="utf-8") as f:
            # One example per JSONL line; the line index serves as the example key.
            for key, row in enumerate(f):
                yield key, json.loads(row)
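

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script. It assumes the
    # `data/test.jsonl` file sits next to this file and that the installed
    # `datasets` version still supports loading from a local script.
    dataset = datasets.load_dataset(__file__, split="test")
    print(dataset[0]["text"])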