nipunsadvilkar committed
Commit e0398d9
1 Parent(s): 85f01f9

setting up training scripts

Files changed (5):
  1. run.sh +16 -14
  2. run_mlm_flax.py +0 -688
  3. run_mlm_flax.py +1 -0
  4. run_mlm_flax_old.py +688 -0
  5. tokenizer.json +0 -0
run.sh CHANGED
@@ -1,20 +1,22 @@
  HUB_TOKEN=`cat $HOME/.huggingface/token`
- python run_mlm_flax.py \
- --output_dir="./" \
  --model_type="roberta" \
- --config_name="./" \
- --tokenizer_name="./" \
- --train_file="dummy/dummy_mr_train.csv" \
- --validation_file="dummy/dummy_mr_validation.csv"\
  --max_seq_length="128" \
- --per_device_train_batch_size="4" \
- --per_device_eval_batch_size="4" \
  --learning_rate="3e-4" \
  --warmup_steps="1000" \
  --overwrite_output_dir \
- --num_train_epochs="8" \
- --report_to wandb \
- --run_name hf-flax-robert-base-mr \
- --push_to_hub_model_id="flax-community/roberta-base-mr" \
- --push_to_hub_token=$HUB_TOKEN \
- --push_to_hub 2>&1 | tee run.log

  HUB_TOKEN=`cat $HOME/.huggingface/token`
+ ./run_mlm_flax.py \
+ --output_dir="${MODEL_DIR}" \
  --model_type="roberta" \
+ --config_name="${MODEL_DIR}" \
+ --tokenizer_name="${MODEL_DIR}" \
+ --train_file="/home/nipunsadvilkar/mr_data/mr_train.csv" \
+ --validation_file="/home/nipunsadvilkar/mr_data/mr_valid.csv"\
  --max_seq_length="128" \
+ --weight_decay="0.01" \
+ --per_device_train_batch_size="128" \
+ --per_device_eval_batch_size="128" \
  --learning_rate="3e-4" \
  --warmup_steps="1000" \
  --overwrite_output_dir \
+ --num_train_epochs="18" \
+ --adam_beta1="0.9" \
+ --adam_beta2="0.98" \
+ --logging_steps="500" \
+ --save_steps="2500" \
+ --eval_steps="2500" \
+ --push_to_hub
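
Note: the updated run.sh reads HUB_TOKEN from $HOME/.huggingface/token and expects a MODEL_DIR environment variable pointing at the directory that holds the model config and tokenizer files. A minimal launch sketch, assuming MODEL_DIR is the repo root (the export value and the tee redirection are illustrative, not part of the commit):

    # hypothetical setup before launching training
    export MODEL_DIR="./"     # directory containing config.json and tokenizer.json
    bash run.sh 2>&1 | tee run.log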
run_mlm_flax.py DELETED
@@ -1,688 +0,0 @@
[688 removed lines not repeated here: the previous run_mlm_flax.py script, identical to the run_mlm_flax_old.py contents added below.]
run_mlm_flax.py ADDED
@@ -0,0 +1 @@
+ /home/nipunsadvilkar/transformers/examples/flax/language-modeling/run_mlm_flax.py
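
The new run_mlm_flax.py is a single line containing only a path, which is how a symlink renders in this diff view; it appears to point at the upstream Flax MLM example inside the local transformers checkout. Assuming that reading is correct, it could be recreated with:

    # assumption: run_mlm_flax.py is a symlink to the upstream example script
    ln -sf /home/nipunsadvilkar/transformers/examples/flax/language-modeling/run_mlm_flax.py run_mlm_flax.py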
run_mlm_flax_old.py ADDED
@@ -0,0 +1,688 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Team All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a
18
+ text file or a dataset.
19
+
20
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
21
+ https://huggingface.co/models?filter=masked-lm
22
+ """
23
+ import logging
24
+ import os
25
+ import sys
26
+ import time
27
+ from dataclasses import dataclass, field
28
+
29
+ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
30
+ from pathlib import Path
31
+ from typing import Dict, List, Optional, Tuple
32
+
33
+ import numpy as np
34
+ from datasets import load_dataset
35
+ from tqdm import tqdm
36
+
37
+ import flax
38
+ import jax
39
+ import jax.numpy as jnp
40
+ import optax
41
+ from flax import jax_utils, traverse_util
42
+ from flax.training import train_state
43
+ from flax.training.common_utils import get_metrics, onehot, shard
44
+ from transformers import (
45
+ CONFIG_MAPPING,
46
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING,
47
+ AutoConfig,
48
+ AutoTokenizer,
49
+ FlaxAutoModelForMaskedLM,
50
+ HfArgumentParser,
51
+ PreTrainedTokenizerBase,
52
+ TensorType,
53
+ TrainingArguments,
54
+ is_tensorboard_available,
55
+ set_seed,
56
+ )
57
+
58
+
59
+ MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
60
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
61
+
62
+
63
+ @dataclass
64
+ class ModelArguments:
65
+ """
66
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
67
+ """
68
+
69
+ model_name_or_path: Optional[str] = field(
70
+ default=None,
71
+ metadata={
72
+ "help": "The model checkpoint for weights initialization."
73
+ "Don't set if you want to train a model from scratch."
74
+ },
75
+ )
76
+ model_type: Optional[str] = field(
77
+ default=None,
78
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
79
+ )
80
+ config_name: Optional[str] = field(
81
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
82
+ )
83
+ tokenizer_name: Optional[str] = field(
84
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
85
+ )
86
+ cache_dir: Optional[str] = field(
87
+ default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
88
+ )
89
+ use_fast_tokenizer: bool = field(
90
+ default=True,
91
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
92
+ )
93
+ dtype: Optional[str] = field(
94
+ default="float32",
95
+ metadata={
96
+ "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
97
+ },
98
+ )
99
+
100
+
101
+ @dataclass
102
+ class DataTrainingArguments:
103
+ """
104
+ Arguments pertaining to what data we are going to input our model for training and eval.
105
+ """
106
+
107
+ dataset_name: Optional[str] = field(
108
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
109
+ )
110
+ dataset_config_name: Optional[str] = field(
111
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
112
+ )
113
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
114
+ validation_file: Optional[str] = field(
115
+ default=None,
116
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
117
+ )
118
+ train_ref_file: Optional[str] = field(
119
+ default=None,
120
+ metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
121
+ )
122
+ validation_ref_file: Optional[str] = field(
123
+ default=None,
124
+ metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
125
+ )
126
+ overwrite_cache: bool = field(
127
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
128
+ )
129
+ validation_split_percentage: Optional[int] = field(
130
+ default=5,
131
+ metadata={
132
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
133
+ },
134
+ )
135
+ max_seq_length: Optional[int] = field(
136
+ default=None,
137
+ metadata={
138
+ "help": "The maximum total input sequence length after tokenization. Sequences longer "
139
+ "than this will be truncated. Default to the max input length of the model."
140
+ },
141
+ )
142
+ preprocessing_num_workers: Optional[int] = field(
143
+ default=None,
144
+ metadata={"help": "The number of processes to use for the preprocessing."},
145
+ )
146
+ mlm_probability: float = field(
147
+ default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
148
+ )
149
+ pad_to_max_length: bool = field(
150
+ default=False,
151
+ metadata={
152
+ "help": "Whether to pad all samples to `max_seq_length`. "
153
+ "If False, will pad the samples dynamically when batching to the maximum length in the batch."
154
+ },
155
+ )
156
+ line_by_line: bool = field(
157
+ default=False,
158
+ metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
159
+ )
160
+
161
+ def __post_init__(self):
162
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
163
+ raise ValueError("Need either a dataset name or a training/validation file.")
164
+ else:
165
+ if self.train_file is not None:
166
+ extension = self.train_file.split(".")[-1]
167
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
168
+ if self.validation_file is not None:
169
+ extension = self.validation_file.split(".")[-1]
170
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
171
+
172
+
173
+ @flax.struct.dataclass
174
+ class FlaxDataCollatorForLanguageModeling:
175
+ """
176
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
177
+ are not all of the same length.
178
+
179
+ Args:
180
+ tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
181
+ The tokenizer used for encoding the data.
182
+ mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
183
+ The probability with which to (randomly) mask tokens in the input.
184
+
185
+ .. note::
186
+
187
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
188
+ BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
189
+ :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
190
+ argument :obj:`return_special_tokens_mask=True`.
191
+ """
192
+
193
+ tokenizer: PreTrainedTokenizerBase
194
+ mlm_probability: float = 0.15
195
+
196
+ def __post_init__(self):
197
+ if self.tokenizer.mask_token is None:
198
+ raise ValueError(
199
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
200
+ "You should pass `mlm=False` to train on causal language modeling instead."
201
+ )
202
+
203
+ def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]:
204
+ # Handle dict or lists with proper padding and conversion to tensor.
205
+ batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY)
206
+
207
+ # If special token mask has been preprocessed, pop it from the dict.
208
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
209
+
210
+ batch["input_ids"], batch["labels"] = self.mask_tokens(
211
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
212
+ )
213
+ return batch
214
+
215
+ def mask_tokens(
216
+ self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray]
217
+ ) -> Tuple[jnp.ndarray, jnp.ndarray]:
218
+ """
219
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
220
+ """
221
+ labels = inputs.copy()
222
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
223
+ probability_matrix = np.full(labels.shape, self.mlm_probability)
224
+ special_tokens_mask = special_tokens_mask.astype("bool")
225
+
226
+ probability_matrix[special_tokens_mask] = 0.0
227
+ masked_indices = np.random.binomial(1, probability_matrix).astype("bool")
228
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
229
+
230
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
231
+ indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices
232
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
233
+
234
+ # 10% of the time, we replace masked input tokens with random word
235
+ indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool")
236
+ indices_random &= masked_indices & ~indices_replaced
237
+
238
+ random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4")
239
+ inputs[indices_random] = random_words[indices_random]
240
+
241
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
242
+ return inputs, labels
243
+
244
+
245
+ def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
246
+ num_samples = len(samples_idx)
247
+ samples_to_remove = num_samples % batch_size
248
+
249
+ if samples_to_remove != 0:
250
+ samples_idx = samples_idx[:-samples_to_remove]
251
+ sections_split = num_samples // batch_size
252
+ batch_idx = np.split(samples_idx, sections_split)
253
+ return batch_idx
254
+
255
+
256
+ def write_train_metric(summary_writer, train_metrics, train_time, step):
257
+ summary_writer.scalar("train_time", train_time, step)
258
+
259
+ train_metrics = get_metrics(train_metrics)
260
+ for key, vals in train_metrics.items():
261
+ tag = f"train_{key}"
262
+ for i, val in enumerate(vals):
263
+ summary_writer.scalar(tag, val, step - len(vals) + i + 1)
264
+
265
+
266
+ def write_eval_metric(summary_writer, eval_metrics, step):
267
+ for metric_name, value in eval_metrics.items():
268
+ summary_writer.scalar(f"eval_{metric_name}", value, step)
269
+
270
+
271
+ if __name__ == "__main__":
272
+ # See all possible arguments in src/transformers/training_args.py
273
+ # or by passing the --help flag to this script.
274
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
275
+
276
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
277
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
278
+ # If we pass only one argument to the script and it's the path to a json file,
279
+ # let's parse it to get our arguments.
280
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
281
+ else:
282
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
283
+
284
+ if (
285
+ os.path.exists(training_args.output_dir)
286
+ and os.listdir(training_args.output_dir)
287
+ and training_args.do_train
288
+ and not training_args.overwrite_output_dir
289
+ ):
290
+ raise ValueError(
291
+ f"Output directory ({training_args.output_dir}) already exists and is not empty."
292
+ "Use --overwrite_output_dir to overcome."
293
+ )
294
+
295
+ # Setup logging
296
+ logging.basicConfig(
297
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
298
+ level="NOTSET",
299
+ datefmt="[%X]",
300
+ )
301
+
302
+ # Log on each process the small summary:
303
+ logger = logging.getLogger(__name__)
304
+
305
+ # Set the verbosity to info of the Transformers logger (on main process only):
306
+ logger.info(f"Training/evaluation parameters {training_args}")
307
+
308
+ # Set seed before initializing model.
309
+ set_seed(training_args.seed)
310
+
311
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
312
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
313
+ # (the dataset will be downloaded automatically from the datasets Hub).
314
+ #
315
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
316
+ # 'text' is found. You can easily tweak this behavior (see below).
317
+ #
318
+ # In distributed training, the load_dataset function guarantees that only one local process can concurrently
319
+ # download the dataset.
320
+ if data_args.dataset_name is not None:
321
+ # Downloading and loading a dataset from the hub.
322
+ datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
323
+
324
+ if "validation" not in datasets.keys():
325
+ datasets["validation"] = load_dataset(
326
+ data_args.dataset_name,
327
+ data_args.dataset_config_name,
328
+ split=f"train[:{data_args.validation_split_percentage}%]",
329
+ cache_dir=model_args.cache_dir,
330
+ )
331
+ datasets["train"] = load_dataset(
332
+ data_args.dataset_name,
333
+ data_args.dataset_config_name,
334
+ split=f"train[{data_args.validation_split_percentage}%:]",
335
+ cache_dir=model_args.cache_dir,
336
+ )
337
+ else:
338
+ data_files = {}
339
+ if data_args.train_file is not None:
340
+ data_files["train"] = data_args.train_file
341
+ if data_args.validation_file is not None:
342
+ data_files["validation"] = data_args.validation_file
343
+ extension = data_args.train_file.split(".")[-1]
344
+ if extension == "txt":
345
+ extension = "text"
346
+ datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
347
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
348
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
349
+
350
+ # Load pretrained model and tokenizer
351
+
352
+ # Distributed training:
353
+ # The .from_pretrained methods guarantee that only one local process can concurrently
354
+ # download model & vocab.
355
+ if model_args.config_name:
356
+ config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
357
+ elif model_args.model_name_or_path:
358
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
359
+ else:
360
+ config = CONFIG_MAPPING[model_args.model_type]()
361
+ logger.warning("You are instantiating a new config instance from scratch.")
362
+
363
+ if model_args.tokenizer_name:
364
+ tokenizer = AutoTokenizer.from_pretrained(
365
+ model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
366
+ )
367
+ elif model_args.model_name_or_path:
368
+ tokenizer = AutoTokenizer.from_pretrained(
369
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
370
+ )
371
+ else:
372
+ raise ValueError(
373
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
374
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
375
+ )
376
+
377
+ # Preprocessing the datasets.
378
+ # First we tokenize all the texts.
379
+ if training_args.do_train:
380
+ column_names = datasets["train"].column_names
381
+ else:
382
+ column_names = datasets["validation"].column_names
383
+ text_column_name = "text" if "text" in column_names else column_names[0]
384
+
385
+ max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
386
+
387
+ if data_args.line_by_line:
388
+ # When using line_by_line, we just tokenize each nonempty line.
389
+ padding = "max_length" if data_args.pad_to_max_length else False
390
+
391
+ def tokenize_function(examples):
392
+ # Remove empty lines
393
+ examples = [line for line in examples if len(line) > 0 and not line.isspace()]
394
+ return tokenizer(
395
+ examples,
396
+ return_special_tokens_mask=True,
397
+ padding=padding,
398
+ truncation=True,
399
+ max_length=max_seq_length,
400
+ )
401
+
402
+ tokenized_datasets = datasets.map(
403
+ tokenize_function,
404
+ input_columns=[text_column_name],
405
+ batched=True,
406
+ num_proc=data_args.preprocessing_num_workers,
407
+ remove_columns=column_names,
408
+ load_from_cache_file=not data_args.overwrite_cache,
409
+ )
410
+
411
+ else:
412
+ # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
413
+ # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
414
+ # efficient when it receives the `special_tokens_mask`.
415
+ def tokenize_function(examples):
416
+ return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
417
+
418
+ tokenized_datasets = datasets.map(
419
+ tokenize_function,
420
+ batched=True,
421
+ num_proc=data_args.preprocessing_num_workers,
422
+ remove_columns=column_names,
423
+ load_from_cache_file=not data_args.overwrite_cache,
424
+ )
425
+
426
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of
427
+ # max_seq_length.
428
+ def group_texts(examples):
429
+ # Concatenate all texts.
430
+ concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
431
+ total_length = len(concatenated_examples[list(examples.keys())[0]])
432
+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
433
+ # customize this part to your needs.
434
+ if total_length >= max_seq_length:
435
+ total_length = (total_length // max_seq_length) * max_seq_length
436
+ # Split by chunks of max_len.
437
+ result = {
438
+ k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
439
+ for k, t in concatenated_examples.items()
440
+ }
441
+ return result
442
+
443
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
444
+ # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
445
+ # might be slower to preprocess.
446
+ #
447
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
448
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
449
+ tokenized_datasets = tokenized_datasets.map(
450
+ group_texts,
451
+ batched=True,
452
+ num_proc=data_args.preprocessing_num_workers,
453
+ load_from_cache_file=not data_args.overwrite_cache,
454
+ )
455
+
456
+ # Enable tensorboard only on the master node
457
+ has_tensorboard = is_tensorboard_available()
458
+ if has_tensorboard and jax.process_index() == 0:
459
+ import wandb
460
+
461
+ wandb.init(
462
+ entity='nipunsadvilkar',
463
+ project='hf-flax-robert-base-mr',
464
+ sync_tensorboard=True
465
+ )
466
+
467
+ wandb.config.update(training_args) # optional, log your configs
468
+ wandb.config.update(model_args) # optional, log your configs
469
+ wandb.config.update(data_args) # optional, log your configs
470
+ try:
471
+ from flax.metrics.tensorboard import SummaryWriter
472
+
473
+ summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
474
+ except ImportError as ie:
475
+ has_tensorboard = False
476
+ logger.warning(
477
+ f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
478
+ )
479
+ else:
480
+ logger.warning(
481
+ "Unable to display metrics through TensorBoard because the package is not installed: "
482
+ "Please run pip install tensorboard to enable."
483
+ )
484
+
485
+ # Data collator
486
+ # This one will take care of randomly masking the tokens.
487
+ data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
488
+
489
+ # Initialize our training
490
+ rng = jax.random.PRNGKey(training_args.seed)
491
+ dropout_rngs = jax.random.split(rng, jax.local_device_count())
492
+
493
+ if model_args.model_name_or_path:
494
+ model = FlaxAutoModelForMaskedLM.from_pretrained(
495
+ model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
496
+ )
497
+ else:
498
+ model = FlaxAutoModelForMaskedLM.from_config(
499
+ config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
500
+ )
501
+
502
+ # Store some constant
503
+ num_epochs = int(training_args.num_train_epochs)
504
+ train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
505
+ eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
506
+
507
+ num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
508
+
509
+ # Create learning rate schedule
510
+ warmup_fn = optax.linear_schedule(
511
+ init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
512
+ )
513
+ decay_fn = optax.linear_schedule(
514
+ init_value=training_args.learning_rate,
515
+ end_value=0,
516
+ transition_steps=num_train_steps - training_args.warmup_steps,
517
+ )
518
+ linear_decay_lr_schedule_fn = optax.join_schedules(
519
+ schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
520
+ )
521
+
522
+ # We use Optax's "masking" functionality to not apply weight decay
523
+ # to bias and LayerNorm scale parameters. decay_mask_fn returns a
524
+ # mask boolean with the same structure as the parameters.
525
+ # The mask is True for parameters that should be decayed.
526
+ # Note that this mask is specifically adapted for FlaxBERT-like models.
527
+ # For other models, one should correct the layer norm parameter naming
528
+ # accordingly.
529
+ def decay_mask_fn(params):
530
+ flat_params = traverse_util.flatten_dict(params)
531
+ flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
532
+ return traverse_util.unflatten_dict(flat_mask)
533
+
534
+ # create adam optimizer
535
+ if training_args.adafactor:
536
+ # We use the default parameters here to initialize adafactor,
537
+ # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
538
+ optimizer = optax.adafactor(
539
+ learning_rate=linear_decay_lr_schedule_fn,
540
+ )
541
+ else:
542
+ optimizer = optax.adamw(
543
+ learning_rate=linear_decay_lr_schedule_fn,
544
+ b1=training_args.adam_beta1,
545
+ b2=training_args.adam_beta2,
546
+ eps=training_args.adam_epsilon,
547
+ weight_decay=training_args.weight_decay,
548
+ mask=decay_mask_fn,
549
+ )
550
+
551
+ # Setup train state
552
+ state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)
553
+
554
+ # Define gradient update step fn
555
+ def train_step(state, batch, dropout_rng):
556
+ dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
557
+
558
+ def loss_fn(params):
559
+ labels = batch.pop("labels")
560
+
561
+ logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
562
+
563
+ # compute loss, ignore padded input tokens
564
+ label_mask = jnp.where(labels > 0, 1.0, 0.0)
565
+ loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
566
+
567
+ # take average
568
+ loss = loss.sum() / label_mask.sum()
569
+
570
+ return loss
571
+
572
+ grad_fn = jax.value_and_grad(loss_fn)
573
+ loss, grad = grad_fn(state.params)
574
+ grad = jax.lax.pmean(grad, "batch")
575
+ new_state = state.apply_gradients(grads=grad)
576
+
577
+ metrics = jax.lax.pmean(
578
+ {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
579
+ )
580
+
581
+ return new_state, metrics, new_dropout_rng
582
+
583
+ # Create parallel version of the train step
584
+ p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
585
+
586
+ # Define eval fn
587
+ def eval_step(params, batch):
588
+ labels = batch.pop("labels")
589
+
590
+ logits = model(**batch, params=params, train=False)[0]
591
+
592
+ # compute loss, ignore padded input tokens
593
+ label_mask = jnp.where(labels > 0, 1.0, 0.0)
594
+ loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
595
+
596
+ # compute accuracy
597
+ accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask
598
+
599
+ # summarize metrics
600
+ metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()}
601
+ metrics = jax.lax.psum(metrics, axis_name="batch")
602
+
603
+ return metrics
604
+
605
+ p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))
606
+
607
+ # Replicate the train state on each device
608
+ state = jax_utils.replicate(state)
609
+
610
+ train_time = 0
611
+ epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
612
+ for epoch in epochs:
613
+ # ======================== Training ================================
614
+ train_start = time.time()
615
+ train_metrics = []
616
+
617
+ # Create sampling rng
618
+ rng, input_rng = jax.random.split(rng)
619
+
620
+ # Generate an epoch by shuffling sampling indices from the train dataset
621
+ num_train_samples = len(tokenized_datasets["train"])
622
+ train_samples_idx = jax.random.permutation(input_rng, jnp.arange(num_train_samples))
623
+ train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)
624
+
625
+ # Gather the indexes for creating the batch and do a training step
626
+ for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
627
+ samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
628
+ model_inputs = data_collator(samples, pad_to_multiple_of=16)
629
+
630
+ # Model forward
631
+ model_inputs = shard(model_inputs.data)
632
+ state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
633
+ train_metrics.append(train_metric)
634
+
635
+ cur_step = epoch * (num_train_samples // train_batch_size) + step
636
+
637
+ if cur_step % training_args.logging_steps == 0 and cur_step > 0:
638
+ # Save metrics
639
+ train_metric = jax_utils.unreplicate(train_metric)
640
+ train_time += time.time() - train_start
641
+ if has_tensorboard and jax.process_index() == 0:
642
+ write_train_metric(summary_writer, train_metrics, train_time, cur_step)
643
+
644
+ epochs.write(
645
+ f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})"
646
+ )
647
+
648
+ train_metrics = []
649
+
650
+ if cur_step % training_args.eval_steps == 0 and cur_step > 0:
651
+ # ======================== Evaluating ==============================
652
+ num_eval_samples = len(tokenized_datasets["validation"])
653
+ eval_samples_idx = jnp.arange(num_eval_samples)
654
+ eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
655
+
656
+ eval_metrics = []
657
+ for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
658
+ samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
659
+ model_inputs = data_collator(samples, pad_to_multiple_of=16)
660
+
661
+ # Model forward
662
+ model_inputs = shard(model_inputs.data)
663
+ metrics = p_eval_step(state.params, model_inputs)
664
+ eval_metrics.append(metrics)
665
+
666
+ # normalize eval metrics
667
+ eval_metrics = get_metrics(eval_metrics)
668
+ eval_metrics = jax.tree_map(jnp.sum, eval_metrics)
669
+ eval_normalizer = eval_metrics.pop("normalizer")
670
+ eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)
671
+
672
+ # Update progress bar
673
+ epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"
674
+
675
+ # Save metrics
676
+ if has_tensorboard and jax.process_index() == 0:
677
+ write_eval_metric(summary_writer, eval_metrics, cur_step)
678
+
679
+ if cur_step % training_args.save_steps == 0 and cur_step > 0:
680
+ # save checkpoint after each epoch and push checkpoint to the hub
681
+ if jax.process_index() == 0:
682
+ params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
683
+ model.save_pretrained(
684
+ training_args.output_dir,
685
+ params=params,
686
+ push_to_hub=training_args.push_to_hub,
687
+ commit_message=f"Saving weights and logs of step {cur_step}",
688
+ )
tokenizer.json CHANGED
The diff for this file is too large to render.