# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The OpenWebText2 Corpus"""

import io
import json

import zstandard

import datasets
from datasets.exceptions import DefunctDatasetError


_CITATION = """\
@article{pile,
    title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
    author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
    journal={arXiv preprint arXiv:2101.00027},
    year={2020}
}
"""

_DESCRIPTION = """\
OpenWebText2 is part of the EleutherAI/The Pile dataset and is an enhanced version of the original OpenWebTextCorpus, \
covering all Reddit submissions from 2005 up until April 2020, \
with further months becoming available after the corresponding PushShift dump files are released.
"""

_HOST_URL = "https://the-eye.eu"
_URL = f"{_HOST_URL}/public/AI/pile_preliminary_components/openwebtext2.jsonl.zst.tar"


class Openwebtext2(datasets.GeneratorBasedBuilder):
    """The OpenWebText2 dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        raise DefunctDatasetError(
            "Dataset 'the_pile_openwebtext2' is defunct and no longer accessible due to unavailability of the source data"
        )
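        # Everything below the raise is unreachable; it is kept to document the
        # original schema and homepage of the dataset.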
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "reddit_scores": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            homepage="https://openwebtext2.readthedocs.io/en/latest/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
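        # download() fetches the tar once and caches it; iter_archive() then
        # streams (path, file) pairs for its members without extracting to disk.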
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dl_manager.iter_archive(archive)},
            ),
        ]

    def _generate_examples(self, files):
        """Yields examples."""
        _id = 0
        for path, file in files:
            if not path.endswith(".jsonl.zst"):
                continue
            reader = Reader()
            for document, metadata in reader.read_jsonl(file, get_meta=True):
                yield _id, {
                    "title": metadata["title"],
                    "text": document,
                    "reddit_scores": metadata["reddit_scores"],
                }
                _id += 1


# Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm.
class Reader:
    def __init__(self):
        pass

    def read_jsonl(self, fh, get_meta=False, autojoin_paragraphs=True, para_joiner="\n\n"):
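        """Yield documents from a zstd-compressed JSONL stream.

        With get_meta=True, yields (text, meta) tuples; otherwise yields the
        text alone. List-valued text fields are joined into a single string
        with para_joiner when autojoin_paragraphs is set.
        """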
        self.fh = fh
        # Stream-decompress the zstd data and iterate over it line by line.
        dctx = zstandard.ZstdDecompressor()
        reader = io.BufferedReader(dctx.stream_reader(fh))
        for line in reader:
            ob = json.loads(line)
            # Naive JSONL where each object is just the string itself, with no metadata; kept for legacy compatibility.
            if isinstance(ob, str):
                assert not get_meta
                yield ob
                continue

            text = ob["text"]

            if autojoin_paragraphs and isinstance(text, list):
                text = para_joiner.join(text)

            if get_meta:
                yield text, (ob["meta"] if "meta" in ob else {})
            else:
                yield text
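

# Usage sketch (hypothetical; loading this script now raises DefunctDatasetError
# in _info() because the source data is no longer hosted):
#
#     from datasets import load_dataset
#     ds = load_dataset("the_pile_openwebtext2", split="train")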