# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import tarfile
import lzma
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""

_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""

_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
def custom_iter_archive(path_or_buf, _filter=lambda x: True):
    def _iter_archive(f):
        stream = tarfile.open(fileobj=f, mode="r|*")
        for i, tarinfo in enumerate(stream):
            # The filter decides, by member index, whether this file belongs
            # to the split being generated.
            if not _filter(i):
                continue
            file_path = tarinfo.name
            if not tarinfo.isreg():
                continue
            if file_path is None:
                continue
            if os.path.basename(file_path).startswith((".", "__")):
                # Skip hidden and special files.
                continue
            if not file_path.endswith("xz"):
                continue
            file_obj = stream.extractfile(tarinfo)
            # Each ".xz" member is itself an xz-compressed tar of ".txt" documents.
            decompressed = tarfile.open(fileobj=file_obj, mode="r|xz")
            for xzinfo in decompressed:
                if not xzinfo.name.endswith("txt"):
                    continue
                txt_file = decompressed.extractfile(xzinfo)
                yield txt_file
            # Drop references to already-read members so memory stays bounded.
            stream.members = []
        del stream

    if hasattr(path_or_buf, "read"):
        yield from _iter_archive(path_or_buf)
    else:
        with open(path_or_buf, "rb") as f:
            yield from _iter_archive(f)
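
# Standalone usage sketch (an illustration, not part of the original script):
# with a local copy of the archive,
#
#     for txt_file in custom_iter_archive("openwebtext.tar.xz"):
#         first_line = txt_file.readline()
#
# each yielded object is a binary file handle over one ".txt" document.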


class Openwebtext(datasets.GeneratorBasedBuilder):
    """The Open WebText dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://skylion007.github.io/OpenWebTextCorpus/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_URL)
        # Deterministic 80/10/10 split over archive member indices:
        # indices 0-7 (mod 10) go to train, 8 to validation, 9 to test.
        train_filter = lambda x: (x % 10) < 8
        val_filter = lambda x: (x % 10) == 8
        test_filter = lambda x: (x % 10) == 9
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": custom_iter_archive(archive, train_filter)}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": custom_iter_archive(archive, val_filter)}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": custom_iter_archive(archive, test_filter)}),
        ]

    def _generate_examples(self, files):
        """Yields examples."""
        idx = 0
        for f in files:
            # Every non-empty line of every extracted ".txt" file becomes one example.
            for line in f.readlines():
                line_str = line.decode().strip()
                if line_str:
                    idx += 1
                    yield idx, {"text": line_str}