danbraunai-apollo committed
Commit f02886b
1 Parent(s): e3fa78d

Add upload script

Files changed (1)
  1. upload_script.py +385 -0
upload_script.py ADDED
@@ -0,0 +1,385 @@
+"""
+Taken and adapted from Alan Cooney's
+https://github.com/ai-safety-foundation/sparse_autoencoder/tree/main/sparse_autoencoder.
+"""
+
+import subprocess
+from collections.abc import Mapping, Sequence
+from dataclasses import dataclass
+from typing import TypedDict
+
+from datasets import (
+    Dataset,
+    DatasetDict,
+    VerificationMode,
+    load_dataset,
+)
+from huggingface_hub import HfApi
+from jaxtyping import Int
+from pydantic import PositiveInt, validate_call
+from torch import Tensor
+from transformers import AutoTokenizer, PreTrainedTokenizerBase
+
+
+class GenericTextDataBatch(TypedDict):
+    """Generic Text Dataset Batch.
+
+    Assumes the dataset provides a 'text' field with a list of strings.
+    """
+
+    text: list[str]
+    meta: list[dict[str, dict[str, str]]]  # Optional, depending on the dataset structure.
+
+
+TokenizedPrompt = list[int]
+"""A tokenized prompt."""
+
+
+class TokenizedPrompts(TypedDict):
+    """Tokenized prompts."""
+
+    input_ids: list[TokenizedPrompt]
+
+
+class TorchTokenizedPrompts(TypedDict):
+    """Tokenized prompts prepared for PyTorch."""
+
+    input_ids: Int[Tensor, "batch pos"]
+
+
+class TextDataset:
+    """Generic Text Dataset for any text-based dataset from Hugging Face."""
+
+    tokenizer: PreTrainedTokenizerBase
+
+    def preprocess(
+        self,
+        source_batch: GenericTextDataBatch,
+        *,
+        context_size: int,
+    ) -> TokenizedPrompts:
+        """Preprocess a batch of prompts.
+
+        Tokenizes a batch of text data and packs it into samples of context_size tokens. An EOS
+        token is added to the end of each document after tokenization.
+
+        Args:
+            source_batch: A batch of source data, including 'text' with a list of strings.
+            context_size: Context size for tokenized prompts.
+
+        Returns:
+            Tokenized prompts.
+        """
+        prompts: list[str] = source_batch["text"]
+
+        tokenized_prompts = self.tokenizer(prompts, truncation=False, padding=False)
+
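+        # Concatenate every tokenized document into one long token stream, appending the EOS
+        # token after each document, then slice the stream into non-overlapping context_size chunks.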
+        all_tokens = []
+        for document_tokens in tokenized_prompts[self._dataset_column_name]:  # type: ignore
+            all_tokens.extend(document_tokens + [self.tokenizer.eos_token_id])
+        # Ignore incomplete chunks
+        chunks = [
+            all_tokens[i : i + context_size]
+            for i in range(0, len(all_tokens), context_size)
+            if len(all_tokens[i : i + context_size]) == context_size
+        ]
+
+        return {"input_ids": chunks}
+
+    @validate_call(config={"arbitrary_types_allowed": True})
+    def __init__(
+        self,
+        dataset_path: str,
+        tokenizer: PreTrainedTokenizerBase,
+        context_size: PositiveInt = 256,
+        load_revision: str = "main",
+        dataset_dir: str | None = None,
+        dataset_files: str | Sequence[str] | Mapping[str, str | Sequence[str]] | None = None,
+        dataset_split: str | None = None,
+        dataset_column_name: str = "input_ids",
+        n_processes_preprocessing: PositiveInt | None = None,
+        preprocess_batch_size: PositiveInt = 1000,
+    ):
+        """Initialize a generic text dataset from Hugging Face.
+
+        Args:
+            dataset_path: Path to the dataset on Hugging Face (e.g. `'monology/pile-uncopyrighted'`).
+            tokenizer: Tokenizer to process text data.
+            context_size: The context size to use when returning a list of tokenized prompts.
+                *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning*
+                used a context size of 250.
+            load_revision: The commit hash or branch name to download from the source dataset.
+            dataset_dir: The `data_dir` of the dataset configuration.
+            dataset_files: Path(s) to source data file(s).
+            dataset_split: Dataset split (e.g. 'train'). If None, process all splits.
+            dataset_column_name: The column name for the prompts.
+            n_processes_preprocessing: Number of processes to use for preprocessing.
+            preprocess_batch_size: Batch size for preprocessing (tokenizing prompts).
+        """
+        self.tokenizer = tokenizer
+
+        self.context_size = context_size
+        self._dataset_column_name = dataset_column_name
+
+        # Load the dataset
+        dataset = load_dataset(
+            dataset_path,
+            revision=load_revision,
+            streaming=False,  # We need to pre-download the dataset to upload it to the hub.
+            split=dataset_split,
+            data_dir=dataset_dir,
+            data_files=dataset_files,
+            verification_mode=VerificationMode.NO_CHECKS,  # As it fails when data_files is set
+        )
+        # If split is not None, will return a Dataset instance. Convert to DatasetDict.
+        if isinstance(dataset, Dataset):
+            assert dataset_split is not None
+            dataset = DatasetDict({dataset_split: dataset})
+        assert isinstance(dataset, DatasetDict)
+
+        for split in dataset:
+            print(f"Processing split: {split}")
+            # Setup preprocessing (we remove all columns except for input ids)
+            remove_columns: list[str] = list(next(iter(dataset[split])).keys())  # type: ignore
+            if "input_ids" in remove_columns:
+                remove_columns.remove("input_ids")
+
+            # Tokenize and chunk the prompts
+            mapped_dataset = dataset[split].map(
+                self.preprocess,
+                batched=True,
+                batch_size=preprocess_batch_size,
+                fn_kwargs={"context_size": context_size},
+                remove_columns=remove_columns,
+                num_proc=n_processes_preprocessing,
+            )
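+            # Shuffle the tokenized chunks so they are not stored in document order.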
+            dataset[split] = mapped_dataset.shuffle()
+
+        self.dataset = dataset
+
+    @validate_call
+    def push_to_hugging_face_hub(
+        self,
+        repo_id: str,
+        commit_message: str = "Upload preprocessed dataset using sparse_autoencoder.",
+        max_shard_size: str = "500MB",
+        revision: str = "main",
+        *,
+        private: bool = False,
+    ) -> None:
+        """Share preprocessed dataset to Hugging Face hub.
+
+        Motivation:
+            Pre-processing a dataset can be time-consuming, so it is useful to be able to share the
+            pre-processed dataset with others. This function allows you to do that by pushing the
+            pre-processed dataset to the Hugging Face hub.
+
+        Warning:
+            You must be logged into HuggingFace (e.g. with `huggingface-cli login` from the
+            terminal) to use this.
+
+        Warning:
+            This will only work if the dataset is not streamed (this class always loads with
+            `streaming=False`, so the full dataset is pre-downloaded).
+
+        Args:
+            repo_id: Hugging Face repo ID to save the dataset to (e.g. `username/dataset_name`).
+            commit_message: Commit message.
+            max_shard_size: Maximum shard size (e.g. `'500MB'`).
+            revision: Branch to push to.
+            private: Whether to save the dataset privately.
+        """
+        self.dataset.push_to_hub(
+            repo_id=repo_id,
+            commit_message=commit_message,
+            max_shard_size=max_shard_size,
+            private=private,
+            revision=revision,
+        )
+
+
+@dataclass
+class DatasetToPreprocess:
+    """Dataset to preprocess info."""
+
+    source_path: str
+    """Source path from HF (e.g. `roneneldan/TinyStories`)."""
+
+    tokenizer_name: str
+    """HF tokenizer name (e.g. `gpt2`)."""
+
+    load_revision: str = "main"
+    """Commit hash or branch name to download from the source dataset."""
+
+    data_dir: str | None = None
+    """Data directory to download from the source dataset."""
+
+    data_files: list[str] | None = None
+    """Data files to download from the source dataset."""
+
+    hugging_face_username: str = "apollo-research"
+    """HF username for the upload."""
+
+    private: bool = False
+    """Whether the HF dataset should be private or public."""
+
+    context_size: int = 2048
+    """Number of tokens in a single sample. gpt2 uses 1024, pythia uses 2048."""
+
+    split: str | None = None
+    """Dataset split to download from the source dataset. If None, process all splits."""
+
+    @property
+    def source_alias(self) -> str:
+        """Create a source alias for the destination dataset name.
+
+        Returns:
+            The modified source path as source alias.
+        """
+        return self.source_path.replace("/", "-")
+
+    @property
+    def tokenizer_alias(self) -> str:
+        """Create a tokenizer alias for the destination dataset name.
+
+        Returns:
+            The modified tokenizer name as tokenizer alias.
+        """
+        return self.tokenizer_name.replace("/", "-")
+
+    @property
+    def destination_repo_name(self) -> str:
+        """Destination repo name.
+
+        Returns:
+            The destination repo name.
+        """
+        split_str = f"{self.split}-" if self.split else ""
+        return f"{self.source_alias}-{split_str}tokenizer-{self.tokenizer_alias}"
+
+    @property
+    def destination_repo_id(self) -> str:
+        """Destination repo ID.
+
+        Returns:
+            The destination repo ID.
+        """
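+        # e.g. "apollo-research/Skylion007-openwebtext-tokenizer-gpt2" for the openwebtext/gpt2
+        # entry configured below.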
+        return f"{self.hugging_face_username}/{self.destination_repo_name}"
+
+
+def upload_datasets(datasets_to_preprocess: list[DatasetToPreprocess]) -> None:
+    """Upload datasets to HF.
+
+    Warning:
+        Assumes you have already created the corresponding repos on HF.
+
+    Args:
+        datasets_to_preprocess: List of datasets to preprocess.
+
+    Raises:
+        ValueError: If the repo doesn't exist.
+    """
+    repositories_updating = [dataset.destination_repo_id for dataset in datasets_to_preprocess]
+    print("Updating repositories:\n" + "\n".join(repositories_updating))
+
+    for dataset in datasets_to_preprocess:
+        print("Processing dataset: ", dataset.source_path)
+
+        # Preprocess
+        tokenizer = AutoTokenizer.from_pretrained(dataset.tokenizer_name)
+        text_dataset = TextDataset(
+            dataset_path=dataset.source_path,
+            tokenizer=tokenizer,
+            dataset_files=dataset.data_files,
+            dataset_dir=dataset.data_dir,
+            dataset_split=dataset.split,
+            context_size=dataset.context_size,
+            load_revision=dataset.load_revision,
+        )
+        # size_in_bytes and info give info about the whole dataset regardless of the split index,
+        # so we just get the first split.
+        split = next(iter(text_dataset.dataset))
+        print("Dataset info:")
+        print(f"Size: {text_dataset.dataset[split].size_in_bytes / 1e9:.2f} GB")  # type: ignore
+        print("Info: ", text_dataset.dataset[split].info)
+
+        # Upload
+        text_dataset.push_to_hugging_face_hub(
+            repo_id=dataset.destination_repo_id, private=dataset.private
+        )
+        # Also upload the current file to the repo for reproducibility and transparency
+        api = HfApi()
+        api.upload_file(
+            path_or_fileobj=__file__,
+            path_in_repo="upload_script.py",
+            repo_id=dataset.destination_repo_id,
+            repo_type="dataset",
+            commit_message="Add upload script",
+        )
+
+
+if __name__ == "__main__":
+    # Check that the user is signed in to huggingface-cli
+    try:
+        result = subprocess.run(
+            ["huggingface-cli", "whoami"], check=True, capture_output=True, text=True
+        )
+        if "Not logged in" in result.stdout:
+            print("Please sign in to huggingface-cli using `huggingface-cli login`.")
+            raise Exception("You are not logged in to huggingface-cli.")
+    except subprocess.CalledProcessError:
+        print("An error occurred while checking the login status.")
+        raise
+
+    datasets: list[DatasetToPreprocess] = [
+        DatasetToPreprocess(
+            source_path="roneneldan/TinyStories",
+            # Paper says gpt-neo tokenizer, and e.g. EleutherAI/gpt-neo-125M uses the same tokenizer
+            # as gpt2. They also suggest using gpt2 in (https://github.com/EleutherAI/gpt-neo).
+            tokenizer_name="gpt2",
+            hugging_face_username="apollo-research",
+            context_size=512,
+        ),
+        DatasetToPreprocess(
+            source_path="Skylion007/openwebtext",
+            tokenizer_name="gpt2",
+            hugging_face_username="apollo-research",
+            context_size=1024,
+        ),
+        DatasetToPreprocess(
+            source_path="Skylion007/openwebtext",
+            tokenizer_name="EleutherAI/gpt-neox-20b",
+            hugging_face_username="apollo-research",
+            context_size=2048,
+        ),
+        DatasetToPreprocess(
+            source_path="monology/pile-uncopyrighted",
+            tokenizer_name="gpt2",
+            hugging_face_username="apollo-research",
+            context_size=1024,
+            # Get just the first few (each file is 11GB so this should be enough for a large dataset)
+            data_files=[
+                "train/00.jsonl.zst",
+                "train/01.jsonl.zst",
+                "train/02.jsonl.zst",
+                "train/03.jsonl.zst",
+                "train/04.jsonl.zst",
+            ],
+        ),
+        DatasetToPreprocess(
+            source_path="monology/pile-uncopyrighted",
+            tokenizer_name="EleutherAI/gpt-neox-20b",
+            hugging_face_username="apollo-research",
+            private=False,
+            context_size=2048,
+            data_files=[
+                "train/00.jsonl.zst",
+                "train/01.jsonl.zst",
+                "train/02.jsonl.zst",
+                "train/03.jsonl.zst",
+                "train/04.jsonl.zst",
+            ],
+        ),
+    ]
+
+    upload_datasets(datasets)