Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
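
Before the source data went offline, the dataset could be loaded through the datasets library in the usual way. A minimal sketch of that usage (the identifier matches the one in the script's defunct-dataset error message); with the current script, both calls raise DefunctDatasetError as soon as the builder is resolved:

from datasets import load_dataset

# With the defunct script below, this raises DefunctDatasetError from _info();
# it is shown only to document how the dataset used to be consumed.
dataset = load_dataset("the_pile_stack_exchange", split="train")

# Streaming mode iterated the tar archive directly instead of downloading
# and extracting it to disk first.
streamed = load_dataset("the_pile_stack_exchange", split="train", streaming=True)

# Each example has two string fields, "domain" and "text".
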
# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Stack Exchange Corpus"""
import io
import os
import zipfile
from pathlib import Path

import datasets
from datasets.exceptions import DefunctDatasetError


_CITATION = """\
@article{pile,
    title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
    author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
    journal={arXiv preprint arXiv:2101.00027},
    year={2020}
}
"""

_DESCRIPTION = """\
This dataset is part of EleutherAI/The Pile and provides language-modeling text \
derived from the Stack Exchange data dump, an anonymized dump of all \
user-contributed content on the Stack Exchange network.
"""

_HOST_URL = "https://the-eye.eu"
_URL = f"{_HOST_URL}/public/AI/pile_preliminary_components/stackexchange_dataset.tar"
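# The tar archive contains an "out/" directory of per-site zip files; each zip
# holds plain-text files named "<domain>.txt" (see _generate_examples below).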


class ThePileStackExchange(datasets.GeneratorBasedBuilder):
    """The StackExchange dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        raise DefunctDatasetError(
            "Dataset 'the_pile_stack_exchange' is defunct and no longer accessible due to unavailability of the source data"
        )
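        # NOTE: everything below this raise is unreachable; it is kept to
        # document the original schema ("domain" and "text" string fields).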
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"domain": datasets.Value("string"), "text": datasets.Value("string")}),
            homepage="https://github.com/EleutherAI/stackexchange-dataset",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if dl_manager.is_streaming:
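            # Streaming mode: lazily iterate over the tar archive members
            # instead of downloading and extracting everything up front.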
            archive = dl_manager.download(_URL)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": dl_manager.iter_archive(archive),
                        "is_streaming": dl_manager.is_streaming,
                    },
                ),
            ]
        else:
            dl_dir = dl_manager.download_and_extract(_URL)
            zips = [str(f) for f in (Path(dl_dir) / "out").iterdir()]
            extracted = dl_manager.extract(zips, num_proc=os.cpu_count())
            # Extraction results that are not directories are zero-size
            # artifacts; keep only the extracted directories.
            dirs = [path for path in extracted if os.path.isdir(path)]
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": dl_manager.iter_files(dirs),
                        "is_streaming": dl_manager.is_streaming,
                    },
                ),
            ]

    def _generate_examples(self, files, is_streaming):
        """Yields examples."""
        if is_streaming:
            _id = 0
            for path, file in files:
                if not path.startswith("out/"):
                    continue
                file_content = file.read()
                with zipfile.ZipFile(io.BytesIO(file_content)) as zip_file:
                    for name in zip_file.namelist():
                        if not name.endswith(".txt"):
                            continue
                        domain = name.split(".")[0]
                        with zip_file.open(name, mode="r") as f:
                            document = f.read().decode(encoding="utf-8")
                        yield _id, {"domain": domain, "text": document}
                        _id += 1
        else:
            _id = 0
            for file in files:
                path = Path(file)
                if not path.name.endswith(".txt"):
                    continue
                domain = path.name.split(".")[0]
                with path.open(mode="r", encoding="utf-8") as f:
                    document = f.read()
                yield _id, {"domain": domain, "text": document}
                _id += 1
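

# Since the official mirror at the-eye.eu no longer hosts the tarball, the
# extraction logic above is the main value of this script. The standalone
# sketch below mirrors the streaming branch of _generate_examples and can be
# run against a locally archived copy of stackexchange_dataset.tar, if you
# have one; the file path in the usage example is an assumption.
#
# import io
# import tarfile
# import zipfile
#
#
# def iter_stackexchange(tar_path):
#     """Yield (domain, text) pairs, mirroring the streaming branch above."""
#     with tarfile.open(tar_path) as tar:
#         for member in tar:
#             # Only the per-site zip files under out/ carry data.
#             if not member.isfile() or not member.name.startswith("out/"):
#                 continue
#             data = tar.extractfile(member).read()
#             with zipfile.ZipFile(io.BytesIO(data)) as zip_file:
#                 for name in zip_file.namelist():
#                     if not name.endswith(".txt"):
#                         continue
#                     domain = name.split(".")[0]
#                     yield domain, zip_file.read(name).decode("utf-8")
#
#
# # Example: print the start of the first document from a local copy.
# for domain, text in iter_stackexchange("stackexchange_dataset.tar"):
#     print(domain, text[:100])
#     break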