# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""


import os
import tarfile

import datasets


_CITATION = """\
@misc{Gokaslan2019OpenWeb,
  title={OpenWebText Corpus},
  author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
  year={2019}
}
"""

_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""

_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"

def custom_iter_archive(path_or_buf, _filter=lambda x: True):
    """Iterate over the text files nested inside the OpenWebText archive.

    The downloaded archive is a tar stream whose regular members are
    xz-compressed tar files; each of those in turn contains the plain-text
    documents. ``_filter`` selects which top-level members to keep, based on
    their index in the outer archive (used below to carve out the splits).
    """

    def _iter_archive(f):
        stream = tarfile.open(fileobj=f, mode="r|*")
        for i, tarinfo in enumerate(stream):
            if not _filter(i):
                continue
            file_path = tarinfo.name
            if not tarinfo.isreg():
                continue
            if file_path is None:
                continue
            if os.path.basename(file_path).startswith((".", "__")):
                # skip hidden and special files
                continue
            if not file_path.endswith("xz"):
                continue
            # Each kept member is itself an xz-compressed tar of .txt documents.
            file_obj = stream.extractfile(tarinfo)
            decompressed = tarfile.open(fileobj=file_obj, mode="r|xz")
            for xzinfo in decompressed:
                if not xzinfo.name.endswith("txt"):
                    continue
                txt_file = decompressed.extractfile(xzinfo)
                yield txt_file
            # Drop the member cache so memory use stays bounded while streaming.
            stream.members = []
        del stream

    if hasattr(path_or_buf, "read"):
        yield from _iter_archive(path_or_buf)
    else:
        with open(path_or_buf, "rb") as f:
            yield from _iter_archive(f)
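
# Minimal usage sketch for the helper above (the path is hypothetical; it
# assumes the archive really has the nested tar-inside-tar.xz layout that the
# iterator handles):
#
#     for txt_file in custom_iter_archive("openwebtext.tar.xz", lambda i: i % 10 < 8):
#         text = txt_file.read().decode("utf-8")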

class Openwebtext(datasets.GeneratorBasedBuilder):
    """The Open WebText dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://skylion007.github.io/OpenWebTextCorpus/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_URL)

        # Deterministic split of the top-level archive members into
        # train/validation/test by index modulo 10 (roughly 80/10/10).
        train_filter = lambda x: (x % 10) < 8
        val_filter = lambda x: (x % 10) == 8
        test_filter = lambda x: (x % 10) == 9
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": custom_iter_archive(archive, train_filter)}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": custom_iter_archive(archive, val_filter)}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": custom_iter_archive(archive, test_filter)}),
        ]

    def _generate_examples(self, files):
        """Yields (key, example) tuples; every non-empty line of each document becomes one example."""
        idx = 0
        for f in files:
            for line in f.readlines():
                line_str = line.decode().strip()
                if line_str:
                    idx += 1
                    yield idx, {"text": line_str}
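

if __name__ == "__main__":
    # Hedged usage sketch, not part of the loading script itself. It assumes a
    # `datasets` version that still supports loading a local script by path,
    # and running it triggers the full multi-gigabyte archive download.
    dataset = datasets.load_dataset(__file__, "plain_text", split="train")
    print(dataset[0]["text"][:200])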