pietrolesci committed
Commit 3a51d4e · verified · 1 Parent(s): 42ba946

Upload ./dataset.py with huggingface_hub

Files changed (1)
  dataset.py +303 -0
dataset.py ADDED
@@ -0,0 +1,303 @@
+# This is adapted from the GPT-Neox library
+import os
+import struct
+from functools import lru_cache
+from itertools import accumulate
+from pathlib import Path
+
+import numpy as np
+from torch.utils.data import Dataset
+
+dtypes = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float32, 7: np.float64, 8: np.uint16}
+
+
+def code(dtype):
+    for k in dtypes:
+        if dtypes[k] == dtype:
+            return k
+    raise ValueError(dtype)
+
+
+def index_file_path(prefix_path):
+    return prefix_path + ".idx"
+
+
+def data_file_path(prefix_path):
+    return prefix_path + ".bin"
+
+
+def _warmup_mmap_file(path):
+    with open(path, "rb") as stream:
+        while stream.read(100 * 1024 * 1024):
+            pass
+
+
+class MMapIndexedDataset(Dataset):
+    class Index:
+        _HDR_MAGIC = b"MMIDIDX\x00\x00"
+
+        @classmethod
+        def writer(cls, path, dtype):
+            class _Writer:
+                def __enter__(self):
+                    self._file = open(path, "wb")  # noqa: SIM115
+
+                    # Write the magic string so we can check the file format when opening it again.
+                    self._file.write(cls._HDR_MAGIC)
+                    # Write version number
+                    # Little endian unsigned 64 Bit integer
+                    self._file.write(struct.pack("<Q", 1))
+                    # Little endian unsigned 8 Bit integer
+                    self._file.write(struct.pack("<B", code(dtype)))
+
+                    return self
+
+                @staticmethod
+                def _get_pointers(sizes):
+                    pointers = np.zeros(len(sizes), dtype=np.int64)
+                    sizes = np.array(sizes, dtype=np.int64)
+
+                    np.cumsum(sizes[:-1], out=pointers[1:])
+                    pointers = pointers * dtype().itemsize
+                    return pointers
+
+                def write(self, sizes, doc_idx):
+                    pointers = self._get_pointers(sizes)
+
+                    # Little endian unsigned 64 Bit integer
+                    self._file.write(struct.pack("<Q", len(sizes)))
+                    # Little endian unsigned 64 Bit integer
+                    self._file.write(struct.pack("<Q", len(doc_idx)))
+
+                    sizes = np.array(sizes, dtype=np.int32)
+                    self._file.write(sizes.tobytes(order="C"))
+                    del sizes
+
+                    pointers = np.array(pointers, dtype=np.int64)
+                    self._file.write(pointers.tobytes(order="C"))
+                    del pointers
+
+                    doc_idx = np.array(doc_idx, dtype=np.int64)
+                    self._file.write(doc_idx.tobytes(order="C"))
+
+                def __exit__(self, exc_type, exc_val, exc_tb):
+                    self._file.close()
+
+            return _Writer()
+
+        def __init__(self, path, skip_warmup=False):
+            with open(path, "rb") as stream:
+                magic_test = stream.read(9)
+                assert magic_test == self._HDR_MAGIC, (
+                    "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly."
+                )
+                # Little endian unsigned 64 Bit integer
+                version = struct.unpack("<Q", stream.read(8))
+                assert version == (1,)
+
+                # Little endian unsigned 8 Bit integer
+                (dtype_code,) = struct.unpack("<B", stream.read(1))
+                self._dtype = dtypes[dtype_code]
+                self._dtype_size = self._dtype().itemsize
+
+                self._len = struct.unpack("<Q", stream.read(8))[0]
+                self._doc_count = struct.unpack("<Q", stream.read(8))[0]
+                offset = stream.tell()
+
+            if not skip_warmup:
+                print(" warming up index mmap file...")
+                _warmup_mmap_file(path)
+
+            self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
+            self._bin_buffer = memoryview(self._bin_buffer_mmap)
+            print(" reading sizes...")
+            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
+            print(" reading pointers...")
+            self._pointers = np.frombuffer(
+                self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes
+            )
+            print(" reading document index...")
+            self._doc_idx = np.frombuffer(
+                self._bin_buffer,
+                dtype=np.int64,
+                count=self._doc_count,
+                offset=offset + self._sizes.nbytes + self._pointers.nbytes,
+            )
+
+        def __del__(self):
+            self._bin_buffer_mmap._mmap.close()
+            del self._bin_buffer_mmap
+
+        @property
+        def dtype(self):
+            return self._dtype
+
+        @property
+        def sizes(self):
+            return self._sizes
+
+        @property
+        def doc_idx(self):
+            return self._doc_idx
+
+        @lru_cache(maxsize=8)  # noqa: B019
+        def __getitem__(self, i):
+            return self._pointers[i], self._sizes[i]
+
+        def __len__(self):
+            return self._len
+
+    def __init__(self, path, skip_warmup=False):
+        super().__init__()
+
+        self._path = None
+        self._index = None
+        self._bin_buffer = None
+
+        self._do_init(path, skip_warmup)
+
+    def __getstate__(self):
+        return self._path
+
+    def __setstate__(self, state):
+        self._do_init(state)
+
+    def _do_init(self, path, skip_warmup):
+        self._path = path
+        self._index = self.Index(index_file_path(self._path), skip_warmup)
+
+        if not skip_warmup:
+            print(" warming up data mmap file...")
+            _warmup_mmap_file(data_file_path(self._path))
+        print(" creating numpy buffer of mmap...")
+        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode="r", order="C")
+        print(" creating memory view of numpy buffer...")
+        self._bin_buffer = memoryview(self._bin_buffer_mmap)
+
+    def __del__(self):
+        self._bin_buffer_mmap._mmap.close()
+        del self._bin_buffer_mmap
+        del self._index
+
+    def __len__(self):
+        return len(self._index)
+
+    # @lru_cache(maxsize=8)
+    def __getitem__(self, idx):
+        if isinstance(idx, int):
+            ptr, size = self._index[idx]
+            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
+            return np_array
+        elif isinstance(idx, slice):
+            start, stop, step = idx.indices(len(self))
+            if step != 1:
+                raise ValueError("Slices into indexed_dataset must be contiguous")
+            ptr = self._index._pointers[start]
+            sizes = self._index._sizes[idx]
+            offsets = list(accumulate(sizes))
+            total_size = sum(sizes)
+            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
+            sents = np.split(np_array, offsets[:-1])
+            return sents
+
+    def get(self, idx, offset=0, length=None):
+        """Retrieves a single item from the dataset with the option to only
+        return a portion of the item.
+
+        get(idx) is the same as [idx] but get() does not support slicing.
+        """
+        ptr, size = self._index[idx]
+        if length is None:
+            length = size - offset
+        ptr += offset * np.dtype(self._index.dtype).itemsize
+        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
+        return np_array
+
+    @property
+    def sizes(self):
+        return self._index.sizes
+
+    @property
+    def doc_idx(self):
+        return self._index.doc_idx
+
+    def get_doc_idx(self):
+        return self._index._doc_idx
+
+    def set_doc_idx(self, doc_idx_):
+        self._index._doc_idx = doc_idx_
+
+    @property
+    def supports_prefetch(self):
+        return False
+
+    @staticmethod
+    def exists(path):
+        return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
+
+
+class GPT2Dataset(Dataset):
+    """Streamlined version of the GPT2Dataset in megatron."""
+
+    def __init__(
+        self, indexed_dataset: MMapIndexedDataset, doc_idx: np.memmap, sample_idx: np.memmap, shuffle_idx: np.memmap
+    ):
+        self.indexed_dataset = indexed_dataset
+        self.doc_idx = doc_idx
+        self.sample_idx = sample_idx
+        self.shuffle_idx = shuffle_idx
+
+        self.shuffle_idx_len = self.shuffle_idx.shape[0] - 1
+        self.sample_idx_len = self.sample_idx.shape[0] - 1
+
+        if self.shuffle_idx_len != self.sample_idx_len:
+            print(f"WARNING: {self.shuffle_idx_len=} != {self.sample_idx_len=}")
+
+    def __len__(self):
+        return min(self.shuffle_idx_len, self.sample_idx_len)
+
+    def __getitem__(self, idx: int) -> dict[str, np.ndarray]:
+        # Get the shuffled index.
+        idx = self.shuffle_idx[idx]
+        # Start and end documents and offsets.
+        doc_index_f = self.sample_idx[idx][0]
+        doc_index_l = self.sample_idx[idx + 1][0]
+        offset_f = self.sample_idx[idx][1]
+        offset_l = self.sample_idx[idx + 1][1]
+        # If we are within the same document, just extract the chunk.
+        if doc_index_f == doc_index_l:
+            sample = self.indexed_dataset.get(
+                self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
+            )
+        else:
+            # Otherwise, get the rest of the initial document.
+            sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
+            # Loop over all in-between documents and add each entire document.
+            for i in range(doc_index_f + 1, doc_index_l):
+                sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
+            # And finally add the relevant portion of the last document.
+            sample_list.append(self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1))
+            sample = np.concatenate(sample_list)
+
+        return {"text": np.array(sample, dtype=np.int64)}
+
+
+def read_dataset(file_path: str | Path, prefix: str, document_path: str | Path = ".") -> GPT2Dataset:
+    # e.g., pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s_doc_idx.npy
+    # prefix: pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s
+
+    file_path = Path(file_path)
+    document_path = Path(document_path)
+
+    doc_idx = np.load(file_path / f"{prefix}_doc_idx.npy", allow_pickle=True, mmap_mode="r")
+    sample_idx = np.load(file_path / f"{prefix}_sample_idx.npy", allow_pickle=True, mmap_mode="r")
+    shuffle_idx = np.load(file_path / f"{prefix}_shuffle_idx.npy", allow_pickle=True, mmap_mode="r")
+    indexed_dataset = MMapIndexedDataset(str(document_path / "pile_20B_tokenizer_text_document"), skip_warmup=True)
+
+    ds = GPT2Dataset(indexed_dataset=indexed_dataset, doc_idx=doc_idx, sample_idx=sample_idx, shuffle_idx=shuffle_idx)
+
+    # check seqlen is correct
+    print("Seq length ==", len(ds[0]["text"]))
+    print("Num batches ==", len(ds) / 1024, "(should be 143k)")
+
+    return ds
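
For reference, a minimal usage sketch of the uploaded module. The directory names and the prefix below are placeholders, not paths shipped with this commit: read_dataset expects the {prefix}_doc_idx.npy, {prefix}_sample_idx.npy, and {prefix}_shuffle_idx.npy files under file_path, and the pile_20B_tokenizer_text_document .bin/.idx pair under document_path.

    from dataset import read_dataset

    # Hypothetical local layout; adjust to wherever the index-map files and the
    # tokenized document (.bin/.idx) actually live.
    ds = read_dataset(
        file_path="indexmaps",
        prefix="pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s",
        document_path=".",
    )
    sample = ds[0]["text"]  # np.int64 token ids for the first (shuffled) sample
    print(sample.shape)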