alokmatta committed on
Commit
49a1166
1 Parent(s): 7303004

gpt2 swahili

config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "./",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.0,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "pad_token_id": 1,
+   "resid_pdrop": 0.0,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
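
The "task_specific_params" block above sets the default text-generation behaviour for this checkpoint (sampling enabled, at most 50 tokens). A minimal sketch of what that amounts to in PyTorch, assuming the repository has been cloned locally so that "./" resolves to this config, the tokenizer, and pytorch_model.bin; the Swahili prompt is only an illustrative placeholder:

    from transformers import GPT2LMHeadModel, GPT2TokenizerFast

    tokenizer = GPT2TokenizerFast.from_pretrained("./")
    model = GPT2LMHeadModel.from_pretrained("./")

    # Mirror the "text-generation" defaults from config.json: sampling on, 50 tokens max.
    inputs = tokenizer("Habari ya leo:", return_tensors="pt")
    outputs = model.generate(**inputs, do_sample=True, max_length=50, pad_token_id=model.config.pad_token_id)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))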
events.out.tfevents.1626480020.t1v-n-6a2ff29b-w-0.938827.3.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0989e1d3a73091a51ccf38770db13513d72f7e7b2c15f05f8866b6991c92bda
+ size 3697057
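
This LFS pointer tracks the TensorBoard event log written by the SummaryWriter in run_clm_flax.py below. A short sketch for inspecting it locally, assuming the tensorboard package is installed and the actual file has been fetched with git lfs pull (the scalar tag names follow the write_train_metric/write_eval_metric helpers in the training script):

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    acc = EventAccumulator("events.out.tfevents.1626480020.t1v-n-6a2ff29b-w-0.938827.3.v2")
    acc.Reload()
    print(acc.Tags()["scalars"])  # e.g. train_loss, train_learning_rate, eval_loss, ...
    for event in acc.Scalars("train_loss"):
        print(event.step, event.value)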
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:305a6b118b113b86408d867499f0d2a2e336f50afdd946ceeb22d0ea389ce1ce
+ size 497764120
flax_to_torch.py ADDED
@@ -0,0 +1,4 @@
+ from transformers import GPT2LMHeadModel
+
+ model = GPT2LMHeadModel.from_pretrained("./", from_flax=True)
+ model.save_pretrained("./")
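
This is the conversion that produced pytorch_model.bin from flax_model.msgpack above: from_flax=True loads the Flax weights, and save_pretrained then writes the PyTorch checkpoint next to them. A hedged sanity check one could run afterwards, assuming both checkpoint files are present in the working directory (the parameter path into the Flax tree is the usual one for FlaxGPT2LMHeadModel, but treat it as an assumption):

    import numpy as np
    from transformers import FlaxGPT2LMHeadModel, GPT2LMHeadModel

    flax_model = FlaxGPT2LMHeadModel.from_pretrained("./")  # reads flax_model.msgpack
    pt_model = GPT2LMHeadModel.from_pretrained("./")         # reads pytorch_model.bin

    # Compare the token-embedding matrices of the two checkpoints.
    flax_wte = np.asarray(flax_model.params["transformer"]["wte"]["embedding"])
    pt_wte = pt_model.transformer.wte.weight.detach().numpy()
    print("max abs diff:", np.abs(flax_wte - pt_wte).max())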
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0584adc95c9028da7c1f958782a1ef48981dda6f69780b22d2cec62c2d4f757b
+ size 510401385
run.sh ADDED
@@ -0,0 +1,19 @@
+ ./run_clm_flax.py \
+     --output_dir="./" \
+     --model_type="gpt2" \
+     --config_name="./" \
+     --tokenizer_name="./" \
+     --dataset_name="flax-community/swahili-safi" \
+     --do_train --do_eval \
+     --block_size="512" \
+     --per_device_train_batch_size="64" \
+     --per_device_eval_batch_size="64" \
+     --learning_rate="5e-3" --warmup_steps="1000" \
+     --adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.01" \
+     --overwrite_output_dir \
+     --num_train_epochs="20" \
+     --logging_steps="500" \
+     --save_steps="2500" \
+     --dtype float32 \
+     --eval_steps="2500" \
+     --preprocessing_num_workers="96"
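
For reference, a sketch of the optimizer and schedule these flags configure, mirroring create_learning_rate_fn and the optax.adamw branch in run_clm_flax.py below. The total step count depends on the size of flax-community/swahili-safi and on how many local devices are available, so the value here is only a placeholder:

    import optax

    learning_rate = 5e-3
    warmup_steps = 1000
    total_train_steps = 100_000  # placeholder: steps_per_epoch * 20 epochs in the real run

    schedule = optax.join_schedules(
        schedules=[
            optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=warmup_steps),
            optax.linear_schedule(init_value=learning_rate, end_value=0.0, transition_steps=total_train_steps - warmup_steps),
        ],
        boundaries=[warmup_steps],
    )
    optimizer = optax.adamw(learning_rate=schedule, b1=0.9, b2=0.98, weight_decay=0.01)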
run_clm_flax.py ADDED
@@ -0,0 +1,641 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Team All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Pre-training/Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
+
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+ https://huggingface.co/models?filter=causal-lm
+ """
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
+
+ import logging
+ import math
+ import os
+ import sys
+ import time
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Callable, Optional
+
+ import datasets
+ from datasets import Dataset, load_dataset
+ from tqdm import tqdm
+
+ import jax
+ import jax.numpy as jnp
+ import optax
+ import transformers
+ from flax import jax_utils, traverse_util
+ from flax.jax_utils import unreplicate
+ from flax.training import train_state
+ from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
+ from transformers import (
+     CONFIG_MAPPING,
+     FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+     AutoConfig,
+     AutoTokenizer,
+     FlaxAutoModelForCausalLM,
+     HfArgumentParser,
+     TrainingArguments,
+     is_tensorboard_available,
+ )
+ from transformers.testing_utils import CaptureLogger
+
+
+ logger = logging.getLogger(__name__)
+
+ MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+     """
+
+     model_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The model checkpoint for weights initialization."
+             "Don't set if you want to train a model from scratch."
+         },
+     )
+     model_type: Optional[str] = field(
+         default=None,
+         metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
+     )
+     dtype: Optional[str] = field(
+         default="float32",
+         metadata={
+             "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
+         },
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: Optional[str] = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+     validation_file: Optional[str] = field(
+         default=None,
+         metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+             "value if set."
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     validation_split_percentage: Optional[int] = field(
+         default=5,
+         metadata={
+             "help": "The percentage of the train set used as validation set in case there's no validation split"
+         },
+     )
+     block_size: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "Optional input sequence length after tokenization. "
+             "The training dataset will be truncated in block of this size for training. "
+             "Default to the model max input length for single sentence inputs (take into account special tokens)."
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+
+     def __post_init__(self):
+         if self.dataset_name is None and self.train_file is None and self.validation_file is None:
+             raise ValueError("Need either a dataset name or a training/validation file.")
+         else:
+             if self.train_file is not None:
+                 extension = self.train_file.split(".")[-1]
+                 assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
+             if self.validation_file is not None:
+                 extension = self.validation_file.split(".")[-1]
+                 assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
+
+
+ class TrainState(train_state.TrainState):
+     dropout_rng: jnp.ndarray
+
+     def replicate(self):
+         return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
+
+
+ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
+     """
+     Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
+     Shuffle batches if `shuffle` is `True`.
+     """
+     steps_per_epoch = len(dataset) // batch_size
+
+     if shuffle:
+         batch_idx = jax.random.permutation(rng, len(dataset))
+     else:
+         batch_idx = jnp.arange(len(dataset))
+
+     batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
+     batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
+
+     for idx in batch_idx:
+         batch = dataset[idx]
+         batch = {k: jnp.array(v) for k, v in batch.items()}
+
+         batch = shard(batch)
+
+         yield batch
+
+
+ def write_train_metric(summary_writer, train_metrics, train_time, step):
+     summary_writer.scalar("train_time", train_time, step)
+
+     train_metrics = get_metrics(train_metrics)
+     for key, vals in train_metrics.items():
+         tag = f"train_{key}"
+         for i, val in enumerate(vals):
+             summary_writer.scalar(tag, val, step - len(vals) + i + 1)
+
+
+ def write_eval_metric(summary_writer, eval_metrics, step):
+     for metric_name, value in eval_metrics.items():
+         summary_writer.scalar(f"eval_{metric_name}", value, step)
+
+
+ def create_learning_rate_fn(
+     train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
+ ) -> Callable[[int], jnp.array]:
+     """Returns a linear warmup, linear_decay learning rate function."""
+     steps_per_epoch = train_ds_size // train_batch_size
+     num_train_steps = steps_per_epoch * num_train_epochs
+     warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
+     decay_fn = optax.linear_schedule(
+         init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
+     )
+     schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
+     return schedule_fn
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     if (
+         os.path.exists(training_args.output_dir)
+         and os.listdir(training_args.output_dir)
+         and training_args.do_train
+         and not training_args.overwrite_output_dir
+     ):
+         raise ValueError(
+             f"Output directory ({training_args.output_dir}) already exists and is not empty."
+             "Use --overwrite_output_dir to overcome."
+         )
+
+     # Make one log on every process with the configuration for debugging.
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         level=logging.INFO,
+     )
+     # Setup logging, we only want one process per machine to log things on the screen.
+     logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
+     if jax.process_index() == 0:
+         datasets.utils.logging.set_verbosity_warning()
+         transformers.utils.logging.set_verbosity_info()
+     else:
+         datasets.utils.logging.set_verbosity_error()
+         transformers.utils.logging.set_verbosity_error()
+
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+     # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+     # (the dataset will be downloaded automatically from the datasets Hub).
+     #
+     # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
+     # 'text' is found. You can easily tweak this behavior (see below).
+     #
+     # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+     # download the dataset.
+     if data_args.dataset_name is not None:
+         # Downloading and loading a dataset from the hub.
+         dataset = load_dataset(
+             data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False
+         )
+
+         if "validation" not in dataset.keys():
+             dataset["validation"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[:{data_args.validation_split_percentage}%]",
+                 cache_dir=model_args.cache_dir,
+             )
+             dataset["train"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[{data_args.validation_split_percentage}%:]",
+                 cache_dir=model_args.cache_dir,
+             )
+     else:
+         data_files = {}
+         if data_args.train_file is not None:
+             data_files["train"] = data_args.train_file
+         if data_args.validation_file is not None:
+             data_files["validation"] = data_args.validation_file
+         extension = data_args.train_file.split(".")[-1]
+         if extension == "txt":
+             extension = "text"
+         dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+     # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+     # Load pretrained model and tokenizer
+
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     # download model & vocab.
+     if model_args.config_name:
+         config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
+     elif model_args.model_name_or_path:
+         config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
+     else:
+         config = CONFIG_MAPPING[model_args.model_type]()
+         logger.warning("You are instantiating a new config instance from scratch.")
+
+     if model_args.tokenizer_name:
+         tokenizer = AutoTokenizer.from_pretrained(
+             model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+         )
+     elif model_args.model_name_or_path:
+         tokenizer = AutoTokenizer.from_pretrained(
+             model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+         )
+     else:
+         raise ValueError(
+             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+         )
+
+     if model_args.model_name_or_path:
+         model = FlaxAutoModelForCausalLM.from_pretrained(
+             model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+         )
+     else:
+         model = FlaxAutoModelForCausalLM.from_config(
+             config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+         )
+     model.config.pad_token_id = 1
+
+     # Preprocessing the datasets.
+     # First we tokenize all the texts.
+     if training_args.do_train:
+         column_names = dataset["train"].column_names
+     else:
+         column_names = dataset["validation"].column_names
+     text_column_name = "text" if "text" in column_names else column_names[0]
+
+     # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
+     tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
+
+     def tokenize_function(examples):
+         with CaptureLogger(tok_logger) as cl:
+             output = tokenizer(examples[text_column_name])
+         # clm input could be much much longer than block_size
+         if "Token indices sequence length is longer than the" in cl.out:
+             tok_logger.warning(
+                 "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
+             )
+         return output
+
+     tokenized_datasets = dataset.map(
+         tokenize_function,
+         batched=True,
+         num_proc=data_args.preprocessing_num_workers,
+         remove_columns=column_names,
+         load_from_cache_file=not data_args.overwrite_cache,
+     )
+
+     if data_args.block_size is None:
+         block_size = tokenizer.model_max_length
+         if block_size > config.max_position_embeddings:
+             logger.warning(
+                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
+                 "Picking 1024 instead. You can change that default value by passing --block_size xxx."
+             )
+             block_size = 1024
+     else:
+         if data_args.block_size > tokenizer.model_max_length:
+             logger.warning(
+                 f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
+                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
+             )
+         block_size = min(data_args.block_size, tokenizer.model_max_length)
+
+     # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
+     def group_texts(examples):
+         # Concatenate all texts.
+         concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
+         total_length = len(concatenated_examples[list(examples.keys())[0]])
+         # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
+         # customize this part to your needs.
+         if total_length >= block_size:
+             total_length = (total_length // block_size) * block_size
+         # Split by chunks of max_len.
+         result = {
+             k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
+             for k, t in concatenated_examples.items()
+         }
+         result["labels"] = result["input_ids"].copy()
+         return result
+
+     # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
+     # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
+     # to preprocess.
+     #
+     # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
+     # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
+
+     lm_datasets = tokenized_datasets.map(
+         group_texts,
+         batched=True,
+         num_proc=data_args.preprocessing_num_workers,
+         load_from_cache_file=not data_args.overwrite_cache,
+     )
+
+     if training_args.do_train:
+         if "train" not in tokenized_datasets:
+             raise ValueError("--do_train requires a train dataset")
+         train_dataset = lm_datasets["train"]
+         if data_args.max_train_samples is not None:
+             train_dataset = train_dataset.select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         if "validation" not in tokenized_datasets:
+             raise ValueError("--do_eval requires a validation dataset")
+         eval_dataset = lm_datasets["validation"]
+         if data_args.max_eval_samples is not None:
+             eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
+
+     # Enable tensorboard only on the master node
+     has_tensorboard = is_tensorboard_available()
+     if has_tensorboard and jax.process_index() == 0:
+         try:
+             from flax.metrics.tensorboard import SummaryWriter
+
+             summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
+         except ImportError as ie:
+             has_tensorboard = False
+             logger.warning(
+                 f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
+             )
+     else:
+         logger.warning(
+             "Unable to display metrics through TensorBoard because the package is not installed: "
+             "Please run pip install tensorboard to enable."
+         )
+
+     # Initialize our training
+     rng = jax.random.PRNGKey(training_args.seed)
+     rng, dropout_rng = jax.random.split(rng)
+
+     # Store some constant
+     num_epochs = int(training_args.num_train_epochs)
+     train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
+     eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
+     steps_per_epoch = len(train_dataset) // train_batch_size
+     total_train_steps = steps_per_epoch * num_epochs
+
+     # Create learning rate schedule
+     linear_decay_lr_schedule_fn = create_learning_rate_fn(
+         len(train_dataset),
+         train_batch_size,
+         training_args.num_train_epochs,
+         training_args.warmup_steps,
+         training_args.learning_rate,
+     )
+
+     # We use Optax's "masking" functionality to not apply weight decay
+     # to bias and LayerNorm scale parameters. decay_mask_fn returns a
+     # mask boolean with the same structure as the parameters.
+     # The mask is True for parameters that should be decayed.
+     # Note that this mask is specifically adapted for FlaxGPT2.
+     # For other models, one should correct the layer norm parameter naming
+     # accordingly.
+     def decay_mask_fn(params):
+         flat_params = traverse_util.flatten_dict(params)
+         flat_mask = {
+             path: (path[-1] != "bias" and path[-2:] not in [("ln_1", "scale"), ("ln_2", "scale"), ("ln_f", "scale")])
+             for path in flat_params
+         }
+         return traverse_util.unflatten_dict(flat_mask)
+
+     # create adam optimizer
+     if training_args.adafactor:
+         # We use the default parameters here to initialize adafactor,
+         # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
+         optimizer = optax.adafactor(
+             learning_rate=linear_decay_lr_schedule_fn,
+         )
+     else:
+         optimizer = optax.adamw(
+             learning_rate=linear_decay_lr_schedule_fn,
+             b1=training_args.adam_beta1,
+             b2=training_args.adam_beta2,
+             eps=training_args.adam_epsilon,
+             weight_decay=training_args.weight_decay,
+             mask=decay_mask_fn,
+         )
+
+     # Setup train state
+     state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer, dropout_rng=dropout_rng)
+
+     def loss_fn(logits, labels):
+         shift_logits = logits[..., :-1, :]
+         shift_labels = labels[..., 1:]
+         loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))
+         return loss.mean()
+
+     # Define gradient update step fn
+     def train_step(state, batch):
+         dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
+
+         def compute_loss(params):
+             labels = batch.pop("labels")
+             logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
+             loss = loss_fn(logits, labels)
+             return loss
+
+         grad_fn = jax.value_and_grad(compute_loss)
+         loss, grad = grad_fn(state.params)
+         grad = jax.lax.pmean(grad, "batch")
+
+         new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
+
+         metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
+         metrics = jax.lax.pmean(metrics, axis_name="batch")
+
+         return new_state, metrics
+
+     # Define eval fn
+     def eval_step(params, batch):
+         labels = batch.pop("labels")
+         logits = model(**batch, params=params, train=False)[0]
+         loss = loss_fn(logits, labels)
+
+         # summarize metrics
+         metrics = {"loss": loss}
+         metrics = jax.lax.pmean(metrics, axis_name="batch")
+         return metrics
+
+     # Create parallel version of the train and eval step
+     p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
+     p_eval_step = jax.pmap(eval_step, "batch")
+
+     # Replicate the train state on each device
+     state = state.replicate()
+
+     logger.info("***** Running training *****")
+     logger.info(f" Num examples = {len(train_dataset)}")
+     logger.info(f" Num Epochs = {num_epochs}")
+     logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
+     logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
+     logger.info(f" Total optimization steps = {total_train_steps}")
+
+     train_time = 0
+     train_metrics = []
+     epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
+     for epoch in epochs:
+         # ======================== Training ================================
+         train_start = time.time()
+
+         # Create sampling rng
+         rng, input_rng = jax.random.split(rng)
+
+         # Generate an epoch by shuffling sampling indices from the train dataset
+         train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)
+         steps_per_epoch = len(train_dataset) // train_batch_size
+         # train
+         for step in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
+             batch = next(train_loader)
+             state, train_metric = p_train_step(state, batch)
+             train_metrics.append(train_metric)
+
+             cur_step = epoch * (len(train_dataset) // train_batch_size) + step
+
+             if cur_step % training_args.logging_steps == 0 and cur_step > 0:
+                 # Save metrics
+                 train_metric = unreplicate(train_metric)
+                 train_time += time.time() - train_start
+                 if has_tensorboard and jax.process_index() == 0:
+                     write_train_metric(summary_writer, train_metrics, train_time, cur_step)
+
+                 epochs.write(
+                     f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
+                 )
+
+                 train_metrics = []
+
+             if cur_step % training_args.eval_steps == 0 and cur_step > 0:
+                 # ======================== Evaluating ==============================
+                 eval_metrics = []
+                 eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
+                 eval_steps = len(eval_dataset) // eval_batch_size
+                 for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
+                     # Model forward
+                     batch = next(eval_loader)
+                     metrics = p_eval_step(state.params, batch)
+                     eval_metrics.append(metrics)
+
+                 # normalize eval metrics
+                 eval_metrics = get_metrics(eval_metrics)
+                 eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
+
+                 try:
+                     eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
+                 except OverflowError:
+                     eval_metrics["perplexity"] = float("inf")
+
+                 # Print metrics and update progress bar
+                 desc = f"Step... ({cur_step} | Eval Loss: {eval_metrics['loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
+                 epochs.write(desc)
+                 epochs.desc = desc
+
+                 # Save metrics
+                 if has_tensorboard and jax.process_index() == 0:
+                     write_eval_metric(summary_writer, eval_metrics, cur_step)
+
+             if cur_step % training_args.save_steps == 0 and cur_step > 0:
+                 # save checkpoint after each epoch and push checkpoint to the hub
+                 if jax.process_index() == 0:
+                     params = jax.device_get(unreplicate(state.params))
+                     model.save_pretrained(
+                         training_args.output_dir,
+                         params=params,
+                         push_to_hub=training_args.push_to_hub,
+                         commit_message=f"Saving weights and logs of step {cur_step}",
+                     )
+
+
+ if __name__ == "__main__":
+     main()
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff