# Copyright 2024 Allen Institute for AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research"""

import gzip
import json
import os
from typing import List

import datasets

logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research
"""

_URL_LISTS = {
    "v1": "urls/v1.txt",
    "v1_5": "urls/v1_5.txt",
    "v1_5-sample": "urls/v1_5-sample.txt",
    "v1_6": "urls/v1_6.txt",
    "v1_6-sample": "urls/v1_6-sample.txt",
    "v1_7": "urls/v1_7.txt",
}
_VERSIONS = {
    "v1": "1.0.0",
    "v1_5": "1.5.0",
    "v1_5-sample": "1.5.0",
    "v1_6": "1.6.0",
    "v1_6-sample": "1.6.0",
    "v1_7": "1.7.0",
}
_DATES = {
    "v1": "(Aug 2023)",
    "v1_5": "(Oct 2023)",
    "v1_5-sample": "(Oct 2023)",
    "v1_6": "(Jan 2024)",
    "v1_6-sample": "(Jan 2024)",
    "v1_7": "(Apr 2024)",
}
_BASE_URL = "https://olmo-data.org"

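# Optional local mirror: when the DOLMA_DATA_DIR environment variable is set,
# shard URLs are remapped to paths under that directory instead of being
# downloaded from _BASE_URL (see _split_generators below).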
_DATA_DIR = os.environ.get("DOLMA_DATA_DIR", None)

_CITATION = """\
@article{dolma,
  title = {{Dolma: An Open Corpus of Three Trillion Tokens for Language Model Pretraining Research}},
  author = {
    Luca Soldaini and Rodney Kinney and Akshita Bhagia and Dustin Schwenk and David Atkinson and
    Russell Authur and Ben Bogin and Khyathi Chandu and Jennifer Dumas and Yanai Elazar and
    Valentin Hofmann and Ananya Harsh Jha and Sachin Kumar and Li Lucy and Xinxi Lyu and Ian Magnusson and
    Jacob Morrison and Niklas Muennighoff and Aakanksha Naik and Crystal Nam and Matthew E. Peters and
    Abhilasha Ravichander and Kyle Richardson and Zejiang Shen and Emma Strubell and Nishant Subramani and
    Oyvind Tafjord and Evan Pete Walsh and Hannaneh Hajishirzi and Noah A. Smith and Luke Zettlemoyer and
    Iz Beltagy and Dirk Groeneveld and Jesse Dodge and Kyle Lo
  },
  year = {2024},
  journal = {arXiv preprint},
}
"""


class Dolma(datasets.GeneratorBasedBuilder):
    """Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=_VERSIONS[name],
            description=f"{_DESCRIPTION} {_DATES[name]}",
        )
        for name in _URL_LISTS.keys()
    ]

    DEFAULT_CONFIG_NAME = "v1_7"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # "metadata": datasets.Value("string"),
                    "added": datasets.Value("string"),
                    "created": datasets.Value("string"),
                    "source": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        path = dl_manager.download(_URL_LISTS[self.config.name])

        with open(path, mode="rt", encoding="utf-8") as f:  # type: ignore[no-untyped-call]
            subset_urls = f.read().splitlines()

        if _DATA_DIR is not None:
            subset_files = [os.path.join(_DATA_DIR, url.replace(_BASE_URL, "").lstrip("/")) for url in subset_urls]
        else:
            subset_files = dl_manager.download(subset_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,  # type: ignore[assignment]
                gen_kwargs={"files": subset_files},
            )
        ]

    def _generate_examples(self, files: List[str]):
        """This function returns the examples in the raw (text) form."""
        for fn in files:
            logger.info("generating examples from = %s", fn)

            with gzip.open(fn, mode="rt", encoding="utf-8") as f:
                for line in f:
                    row = json.loads(line)
                    yield row["id"], {
                        "id": row["id"],
                        "text": row["text"],
                        "added": row.get("added", ""),
                        "created": row.get("created", ""),
                        "source": row.get("source", ""),
                    }
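

# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loading script itself). The
# "allenai/dolma" repository id below is an assumption; substitute the id of
# the repository that hosts this script. Recent `datasets` releases require
# trust_remote_code=True to run script-based builders such as this one.
#
#   import datasets
#
#   # Stream the sample config so the full corpus is not downloaded up front.
#   ds = datasets.load_dataset(
#       "allenai/dolma",
#       name="v1_6-sample",
#       split="train",
#       streaming=True,
#       trust_remote_code=True,
#   )
#   print(next(iter(ds))["text"][:200])
#
#   # With a local copy of the shards, skip downloading entirely:
#   #   export DOLMA_DATA_DIR=/path/to/dolma/shards
# -----------------------------------------------------------------------------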