khalidsaifullaah committed
Commit d458774
1 Parent(s): 376a8d6

trainer script added

Files changed (5)
  1. config.json +36 -0
  2. create_config.py +6 -0
  3. run_clm_flax.py +640 -0
  4. tokenizer.json +0 -0
  5. train_tokenizer.py +42 -0
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.0,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.0,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.9.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
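
A minimal sketch (not part of this commit) of how this config is consumed: run_clm_flax.py below instantiates a randomly initialized Flax GPT-2 from it when --config_name points at this directory. The seed value here is illustrative.

import jax.numpy as jnp
from transformers import AutoConfig, FlaxAutoModelForCausalLM

config = AutoConfig.from_pretrained("./")  # reads the config.json above
model = FlaxAutoModelForCausalLM.from_config(config, seed=42, dtype=jnp.float32)  # fresh GPT-2 weights
print(model.config.n_layer, model.config.n_embd)  # 12, 768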
create_config.py ADDED
@@ -0,0 +1,6 @@
+ from transformers import GPT2Config
+
+ model_dir = "./"  # ${MODEL_DIR}
+
+ config = GPT2Config.from_pretrained("gpt2", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0)
+ config.save_pretrained(model_dir)
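
A quick sanity check (hypothetical, not part of the commit) that the saved config matches the dropout-free setup shown in config.json above:

from transformers import GPT2Config

cfg = GPT2Config.from_pretrained("./")  # loads the config.json written by this script
# the three dropout probabilities were overridden to 0.0 for from-scratch pre-training
assert cfg.resid_pdrop == cfg.embd_pdrop == cfg.attn_pdrop == 0.0
assert cfg.vocab_size == 50257 and cfg.n_positions == 1024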
run_clm_flax.py ADDED
@@ -0,0 +1,640 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Team All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Pre-training/Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
+
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+ https://huggingface.co/models?filter=causal-lm
+ """
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
+
+ import logging
+ import math
+ import os
+ import sys
+ import time
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Callable, Optional
+
+ import datasets
+ from datasets import Dataset, load_dataset
+ from tqdm import tqdm
+
+ import jax
+ import jax.numpy as jnp
+ import optax
+ import transformers
+ from flax import jax_utils, traverse_util
+ from flax.jax_utils import unreplicate
+ from flax.training import train_state
+ from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
+ from transformers import (
+     CONFIG_MAPPING,
+     FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+     AutoConfig,
+     AutoTokenizer,
+     FlaxAutoModelForCausalLM,
+     HfArgumentParser,
+     TrainingArguments,
+     is_tensorboard_available,
+ )
+ from transformers.testing_utils import CaptureLogger
+
+
+ logger = logging.getLogger(__name__)
+
+ MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+     """
+
+     model_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The model checkpoint for weights initialization."
+             "Don't set if you want to train a model from scratch."
+         },
+     )
+     model_type: Optional[str] = field(
+         default=None,
+         metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
+     )
+     dtype: Optional[str] = field(
+         default="float32",
+         metadata={
+             "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
+         },
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: Optional[str] = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+     validation_file: Optional[str] = field(
+         default=None,
+         metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+             "value if set."
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     validation_split_percentage: Optional[int] = field(
+         default=5,
+         metadata={
+             "help": "The percentage of the train set used as validation set in case there's no validation split"
+         },
+     )
+     block_size: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "Optional input sequence length after tokenization. "
+             "The training dataset will be truncated in block of this size for training. "
+             "Default to the model max input length for single sentence inputs (take into account special tokens)."
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+
+     def __post_init__(self):
+         if self.dataset_name is None and self.train_file is None and self.validation_file is None:
+             raise ValueError("Need either a dataset name or a training/validation file.")
+         else:
+             if self.train_file is not None:
+                 extension = self.train_file.split(".")[-1]
+                 assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
+             if self.validation_file is not None:
+                 extension = self.validation_file.split(".")[-1]
+                 assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
+
+
+ class TrainState(train_state.TrainState):
+     dropout_rng: jnp.ndarray
+
+     def replicate(self):
+         return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
+
+
+ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
+     """
+     Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
+     Shuffle batches if `shuffle` is `True`.
+     """
+     steps_per_epoch = len(dataset) // batch_size
+
+     if shuffle:
+         batch_idx = jax.random.permutation(rng, len(dataset))
+     else:
+         batch_idx = jnp.arange(len(dataset))
+
+     batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
+     batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
+
+     for idx in batch_idx:
+         batch = dataset[idx]
+         batch = {k: jnp.array(v) for k, v in batch.items()}
+
+         batch = shard(batch)
+
+         yield batch
+
+
+ def write_train_metric(summary_writer, train_metrics, train_time, step):
+     summary_writer.scalar("train_time", train_time, step)
+
+     train_metrics = get_metrics(train_metrics)
+     for key, vals in train_metrics.items():
+         tag = f"train_{key}"
+         for i, val in enumerate(vals):
+             summary_writer.scalar(tag, val, step - len(vals) + i + 1)
+
+
+ def write_eval_metric(summary_writer, eval_metrics, step):
+     for metric_name, value in eval_metrics.items():
+         summary_writer.scalar(f"eval_{metric_name}", value, step)
+
+
+ def create_learning_rate_fn(
+     train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
+ ) -> Callable[[int], jnp.array]:
+     """Returns a linear warmup, linear_decay learning rate function."""
+     steps_per_epoch = train_ds_size // train_batch_size
+     num_train_steps = steps_per_epoch * num_train_epochs
+     warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
+     decay_fn = optax.linear_schedule(
+         init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
+     )
+     schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
+     return schedule_fn
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     if (
+         os.path.exists(training_args.output_dir)
+         and os.listdir(training_args.output_dir)
+         and training_args.do_train
+         and not training_args.overwrite_output_dir
+     ):
+         raise ValueError(
+             f"Output directory ({training_args.output_dir}) already exists and is not empty."
+             "Use --overwrite_output_dir to overcome."
+         )
+
+     # Make one log on every process with the configuration for debugging.
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         level=logging.INFO,
+     )
+     # Setup logging, we only want one process per machine to log things on the screen.
+     logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
+     if jax.process_index() == 0:
+         datasets.utils.logging.set_verbosity_warning()
+         transformers.utils.logging.set_verbosity_info()
+     else:
+         datasets.utils.logging.set_verbosity_error()
+         transformers.utils.logging.set_verbosity_error()
+
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+     # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+     # (the dataset will be downloaded automatically from the datasets Hub).
+     #
+     # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
+     # 'text' is found. You can easily tweak this behavior (see below).
+     #
+     # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+     # download the dataset.
+     if data_args.dataset_name is not None:
+         # Downloading and loading a dataset from the hub.
+         dataset = load_dataset(
+             data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False
+         )
+
+         if "validation" not in dataset.keys():
+             dataset["validation"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[:{data_args.validation_split_percentage}%]",
+                 cache_dir=model_args.cache_dir,
+             )
+             dataset["train"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[{data_args.validation_split_percentage}%:]",
+                 cache_dir=model_args.cache_dir,
+             )
+     else:
+         data_files = {}
+         if data_args.train_file is not None:
+             data_files["train"] = data_args.train_file
+         if data_args.validation_file is not None:
+             data_files["validation"] = data_args.validation_file
+         extension = data_args.train_file.split(".")[-1]
+         if extension == "txt":
+             extension = "text"
+         dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+     # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+     # Load pretrained model and tokenizer
+
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     # download model & vocab.
+     if model_args.config_name:
+         config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
+     elif model_args.model_name_or_path:
+         config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
+     else:
+         config = CONFIG_MAPPING[model_args.model_type]()
+         logger.warning("You are instantiating a new config instance from scratch.")
+
+     if model_args.tokenizer_name:
+         tokenizer = AutoTokenizer.from_pretrained(
+             model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+         )
+     elif model_args.model_name_or_path:
+         tokenizer = AutoTokenizer.from_pretrained(
+             model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+         )
+     else:
+         raise ValueError(
+             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+         )
+
+     if model_args.model_name_or_path:
+         model = FlaxAutoModelForCausalLM.from_pretrained(
+             model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+         )
+     else:
+         model = FlaxAutoModelForCausalLM.from_config(
+             config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+         )
+
+     # Preprocessing the datasets.
+     # First we tokenize all the texts.
+     if training_args.do_train:
+         column_names = dataset["train"].column_names
+     else:
+         column_names = dataset["validation"].column_names
+     text_column_name = "text" if "text" in column_names else column_names[0]
+
+     # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
+     tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
+
+     def tokenize_function(examples):
+         with CaptureLogger(tok_logger) as cl:
+             output = tokenizer(examples[text_column_name])
+         # clm input could be much much longer than block_size
+         if "Token indices sequence length is longer than the" in cl.out:
+             tok_logger.warning(
+                 "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
+             )
+         return output
+
+     tokenized_datasets = dataset.map(
+         tokenize_function,
+         batched=True,
+         num_proc=data_args.preprocessing_num_workers,
+         remove_columns=column_names,
+         load_from_cache_file=not data_args.overwrite_cache,
+     )
+
+     if data_args.block_size is None:
+         block_size = tokenizer.model_max_length
+         if block_size > config.max_position_embeddings:
+             logger.warning(
+                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
+                 "Picking 1024 instead. You can change that default value by passing --block_size xxx."
+             )
+             block_size = 1024
+     else:
+         if data_args.block_size > tokenizer.model_max_length:
+             logger.warning(
+                 f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
+                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
+             )
+         block_size = min(data_args.block_size, tokenizer.model_max_length)
+
+     # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
+     def group_texts(examples):
+         # Concatenate all texts.
+         concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
+         total_length = len(concatenated_examples[list(examples.keys())[0]])
+         # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
+         # customize this part to your needs.
+         if total_length >= block_size:
+             total_length = (total_length // block_size) * block_size
+         # Split by chunks of max_len.
+         result = {
+             k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
+             for k, t in concatenated_examples.items()
+         }
+         result["labels"] = result["input_ids"].copy()
+         return result
+
+     # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
+     # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
+     # to preprocess.
+     #
+     # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
+     # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
+
+     lm_datasets = tokenized_datasets.map(
+         group_texts,
+         batched=True,
+         num_proc=data_args.preprocessing_num_workers,
+         load_from_cache_file=not data_args.overwrite_cache,
+     )
+
+     if training_args.do_train:
+         if "train" not in tokenized_datasets:
+             raise ValueError("--do_train requires a train dataset")
+         train_dataset = lm_datasets["train"]
+         if data_args.max_train_samples is not None:
+             train_dataset = train_dataset.select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         if "validation" not in tokenized_datasets:
+             raise ValueError("--do_eval requires a validation dataset")
+         eval_dataset = lm_datasets["validation"]
+         if data_args.max_eval_samples is not None:
+             eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
+
+     # Enable tensorboard only on the master node
+     has_tensorboard = is_tensorboard_available()
+     if has_tensorboard and jax.process_index() == 0:
+         try:
+             from flax.metrics.tensorboard import SummaryWriter
+
+             summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
+         except ImportError as ie:
+             has_tensorboard = False
+             logger.warning(
+                 f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
+             )
+     else:
+         logger.warning(
+             "Unable to display metrics through TensorBoard because the package is not installed: "
+             "Please run pip install tensorboard to enable."
+         )
+
+     # Initialize our training
+     rng = jax.random.PRNGKey(training_args.seed)
+     rng, dropout_rng = jax.random.split(rng)
+
+     # Store some constant
+     num_epochs = int(training_args.num_train_epochs)
+     train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
+     eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
+     steps_per_epoch = len(train_dataset) // train_batch_size
+     total_train_steps = steps_per_epoch * num_epochs
+
+     # Create learning rate schedule
+     linear_decay_lr_schedule_fn = create_learning_rate_fn(
+         len(train_dataset),
+         train_batch_size,
+         training_args.num_train_epochs,
+         training_args.warmup_steps,
+         training_args.learning_rate,
+     )
+
+     # We use Optax's "masking" functionality to not apply weight decay
+     # to bias and LayerNorm scale parameters. decay_mask_fn returns a
+     # mask boolean with the same structure as the parameters.
+     # The mask is True for parameters that should be decayed.
+     # Note that this mask is specifically adapted for FlaxGPT2.
+     # For other models, one should correct the layer norm parameter naming
+     # accordingly.
+     def decay_mask_fn(params):
+         flat_params = traverse_util.flatten_dict(params)
+         flat_mask = {
+             path: (path[-1] != "bias" and path[-2:] not in [("ln_1", "scale"), ("ln_2", "scale"), ("ln_f", "scale")])
+             for path in flat_params
+         }
+         return traverse_util.unflatten_dict(flat_mask)
+
+     # create adam optimizer
+     if training_args.adafactor:
+         # We use the default parameters here to initialize adafactor,
+         # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
+         optimizer = optax.adafactor(
+             learning_rate=linear_decay_lr_schedule_fn,
+         )
+     else:
+         optimizer = optax.adamw(
+             learning_rate=linear_decay_lr_schedule_fn,
+             b1=training_args.adam_beta1,
+             b2=training_args.adam_beta2,
+             eps=training_args.adam_epsilon,
+             weight_decay=training_args.weight_decay,
+             mask=decay_mask_fn,
+         )
+
+     # Setup train state
+     state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer, dropout_rng=dropout_rng)
+
+     def loss_fn(logits, labels):
+         shift_logits = logits[..., :-1, :]
+         shift_labels = labels[..., 1:]
+         loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))
+         return loss.mean()
+
+     # Define gradient update step fn
+     def train_step(state, batch):
+         dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
+
+         def compute_loss(params):
+             labels = batch.pop("labels")
+             logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
+             loss = loss_fn(logits, labels)
+             return loss
+
+         grad_fn = jax.value_and_grad(compute_loss)
+         loss, grad = grad_fn(state.params)
+         grad = jax.lax.pmean(grad, "batch")
+
+         new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
+
+         metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
+         metrics = jax.lax.pmean(metrics, axis_name="batch")
+
+         return new_state, metrics
+
+     # Define eval fn
+     def eval_step(params, batch):
+         labels = batch.pop("labels")
+         logits = model(**batch, params=params, train=False)[0]
+         loss = loss_fn(logits, labels)
+
+         # summarize metrics
+         metrics = {"loss": loss}
+         metrics = jax.lax.pmean(metrics, axis_name="batch")
+         return metrics
+
+     # Create parallel version of the train and eval step
+     p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
+     p_eval_step = jax.pmap(eval_step, "batch")
+
+     # Replicate the train state on each device
+     state = state.replicate()
+
+     logger.info("***** Running training *****")
+     logger.info(f"  Num examples = {len(train_dataset)}")
+     logger.info(f"  Num Epochs = {num_epochs}")
+     logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
+     logger.info(f"  Total train batch size (w. parallel & distributed) = {train_batch_size}")
+     logger.info(f"  Total optimization steps = {total_train_steps}")
+
+     train_time = 0
+     train_metrics = []
+     epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
+     for epoch in epochs:
+         # ======================== Training ================================
+         train_start = time.time()
+
+         # Create sampling rng
+         rng, input_rng = jax.random.split(rng)
+
+         # Generate an epoch by shuffling sampling indices from the train dataset
+         train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)
+         steps_per_epoch = len(train_dataset) // train_batch_size
+         # train
+         for step in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
+             batch = next(train_loader)
+             state, train_metric = p_train_step(state, batch)
+             train_metrics.append(train_metric)
+
+             cur_step = epoch * (len(train_dataset) // train_batch_size) + step
+
+             if cur_step % training_args.logging_steps == 0 and cur_step > 0:
+                 # Save metrics
+                 train_metric = unreplicate(train_metric)
+                 train_time += time.time() - train_start
+                 if has_tensorboard and jax.process_index() == 0:
+                     write_train_metric(summary_writer, train_metrics, train_time, cur_step)
+
+                 epochs.write(
+                     f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
+                 )
+
+                 train_metrics = []
+
+             if cur_step % training_args.eval_steps == 0 and cur_step > 0:
+                 # ======================== Evaluating ==============================
+                 eval_metrics = []
+                 eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
+                 eval_steps = len(eval_dataset) // eval_batch_size
+                 for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
+                     # Model forward
+                     batch = next(eval_loader)
+                     metrics = p_eval_step(state.params, batch)
+                     eval_metrics.append(metrics)
+
+                 # normalize eval metrics
+                 eval_metrics = get_metrics(eval_metrics)
+                 eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
+
+                 try:
+                     eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
+                 except OverflowError:
+                     eval_metrics["perplexity"] = float("inf")
+
+                 # Print metrics and update progress bar
+                 desc = f"Step... ({cur_step} | Eval Loss: {eval_metrics['loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
+                 epochs.write(desc)
+                 epochs.desc = desc
+
+                 # Save metrics
+                 if has_tensorboard and jax.process_index() == 0:
+                     write_eval_metric(summary_writer, eval_metrics, cur_step)
+
+             if cur_step % training_args.save_steps == 0 and cur_step > 0:
+                 # save checkpoint after each epoch and push checkpoint to the hub
+                 if jax.process_index() == 0:
+                     params = jax.device_get(unreplicate(state.params))
+                     model.save_pretrained(
+                         training_args.output_dir,
+                         params=params,
+                         push_to_hub=training_args.push_to_hub,
+                         commit_message=f"Saving weights and logs of step {cur_step}",
+                     )
+
+
+ if __name__ == "__main__":
+     main()
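
One hedged way (not part of this commit) to drive the script: main() accepts a single JSON file of arguments via parser.parse_json_file, so the flags defined in ModelArguments, DataTrainingArguments and TrainingArguments can be collected in one file. The file name and all values below are illustrative only.

import json

train_args = {
    "output_dir": "./",
    "config_name": "./",           # config.json from create_config.py above
    "tokenizer_name": "./",        # tokenizer.json from train_tokenizer.py below
    "dataset_name": "mc4",
    "dataset_config_name": "bn",
    "do_train": True,
    "do_eval": True,
    "block_size": 512,
    "per_device_train_batch_size": 64,
    "learning_rate": 5e-3,
    "warmup_steps": 1000,
    "num_train_epochs": 10,
    "logging_steps": 500,
    "eval_steps": 2500,
    "save_steps": 2500,
}
with open("train_config.json", "w") as f:  # hypothetical file name
    json.dump(train_args, f, indent=2)
# then: python run_clm_flax.py train_config.json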
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
train_tokenizer.py ADDED
@@ -0,0 +1,42 @@
+ from datasets import load_dataset
+ from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
+
+ model_dir = "./"  # ${MODEL_DIR}
+
+ # load dataset
+ dataset = load_dataset("mc4", "bn", split="train", streaming=True)
+
+ # Instantiate tokenizer
+ tokenizer = ByteLevelBPETokenizer()
+
+ # Instantiate normalizer
+ tokenizer.normalizer = normalizers.Sequence(
+     [
+         normalizers.Nmt(),
+         normalizers.NFKC(),
+         normalizers.Replace(Regex(" {2,}"), " "),
+         normalizers.Replace("\u09e4", "\u0964"),
+         normalizers.Replace("\u09e5", "\u0965"),
+         normalizers.Replace("\u007c", "\u0964"),
+         normalizers.Replace("\u09f7", "\u0964"),
+         normalizers.Replace(Regex(r"(?<=[\u0980-\u09ff]):"), "\u0983"),
+         normalizers.Lowercase(),
+     ]
+ )
+
+ def batch_iterator(batch_size=1000):
+     for i in range(0, len(dataset), batch_size):
+         yield dataset[i: i + batch_size]["text"]
+
+ # Customized training
+ tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[
+     "<|endoftext|>",
+ ])
+
+ # Save files to disk
+ tokenizer.save(f"{model_dir}/tokenizer.json")
+
+ # f = open("demofile3.txt", "w")
+ # f.write(next(iter(dataset))['text'])
+ # f.close()
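
Note that with streaming=True, load_dataset returns an IterableDataset, which supports neither len() nor slice indexing, so the batch_iterator above would fail once train_from_iterator starts consuming it. A minimal streaming-friendly variant (a sketch, not part of the commit) could look like this:

def streaming_batch_iterator(batch_size=1000):
    batch = []
    for example in dataset:  # iterate the stream instead of indexing into it
        batch.append(example["text"])
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:  # flush the final partial batch
        yield batch

# tokenizer.train_from_iterator(streaming_batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=["<|endoftext|>"])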