import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader

import evaluate
from accelerate import Accelerator, DistributedType
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

########################################################################
# This is a complete working example of fine-tuning BERT on the GLUE
# MRPC task with Hugging Face Accelerate. The same script runs on CPU,
# single GPU, multiple GPUs, or TPU, with optional mixed precision
# (fp16/bf16), checkpointing/resuming, and experiment tracking.
########################################################################

# Batches larger than this per-device cap are emulated with gradient
# accumulation (see `training_function` below)
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
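
# Example launches (the script filename here is illustrative; use whatever
# name this file is saved under):
#   python nlp_training_example.py --checkpointing_steps epoch --output_dir checkpoints
#   accelerate launch nlp_training_example.py --mixed_precision fp16 --with_tracking
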
def training_function(config, args):
    # Initialize the accelerator; with tracking enabled, attach every
    # tracker available in the environment
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Parse the checkpointing strategy: an integer means "every n steps",
    # the string "epoch" means once per epoch
    if args.checkpointing_steps is not None:
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # Pull the hyperparameters out of the config
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    # Initialize the trackers, storing the run configuration alongside
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Load the tokenizer, the raw GLUE MRPC dataset, and its metric
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    metric = evaluate.load("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None means the model's maximum input length is used
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Tokenize all splits, letting the main process run first so the other
    # processes can reuse its cached result
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename "label" to "labels", the argument name expected by
    # transformers models
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    # If the configured batch size is too large for a single device, emulate
    # it with gradient accumulation (TPUs take the full batch directly)
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
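    # Worked example: a configured batch_size of 64 becomes 64 // 16 = 4
    # micro-batches of 16, with one optimizer update per 4 batches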

    def collate_fn(examples):
        # On TPU, pad everything to a fixed length: dynamic shapes trigger
        # recompilation and make training very slow
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate the dataloaders
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    set_seed(seed)

    # Instantiate the model here, after seeding, so the seed also controls
    # the newly initialized classification head
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # The accelerator handles device placement by default, but when placing
    # tensors manually this transfer must happen before `accelerator.prepare`
    model = model.to(accelerator.device)

    # Instantiate the optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
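    # Note: this is `torch.optim.AdamW`; the AdamW implementation that used
    # to ship with `transformers` is deprecated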

    # Instantiate the learning-rate scheduler; the total step count must
    # account for gradient accumulation
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything. There is no specific order to remember: unpack the
    # objects in the same order they were passed to `prepare`
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
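    # After `prepare`, the dataloaders are sharded across processes, so
    # `len(train_dataloader)` below is the per-process number of batches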

    # Track the total number of steps taken across all epochs
    overall_step = 0
    # Track the starting epoch so checkpoint folders are named correctly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint is not None:
        if args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # An empty value means: resume from the most recent checkpoint
            # folder in the current working directory
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorting by creation time puts the most recent checkpoint last
            accelerator.print(f"Resumed from latest checkpoint: {path}")
            accelerator.load_state(path)
        # Extract `epoch_{i}` or `step_{i}` from the checkpoint folder name
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
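    # Worked example: with 200 batches per epoch, resuming from `step_450`
    # gives starting_epoch = 2 and resume_step = 50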

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # When resuming mid-epoch, skip batches until the saved step is reached
            if args.resume_from_checkpoint is not None and epoch == starting_epoch:
                if resume_step is not None and step < resume_step:
                    overall_step += 1
                    continue
            # The accelerator already placed the batch on the right device,
            # so this explicit transfer is a harmless no-op
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            # Accumulate the loss over the epoch for logging
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            # Only update once every `gradient_accumulation_steps` batches
            if (step + 1) % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

            # Save a checkpoint every `checkpointing_steps` batches
            if isinstance(checkpointing_steps, int):
                if overall_step % checkpointing_steps == 0:
                    output_dir = f"step_{overall_step}"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
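            # The folder names (`step_{n}` here, `epoch_{n}` at epoch end) are
            # exactly what the resume logic above parses to restart training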

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # As above, the batch is already on the right device
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # Gather predictions and labels across processes, dropping the
            # duplicated samples added to even out the distributed shards
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # `accelerator.print` prints only on the main process
        accelerator.print(f"epoch {epoch}:", eval_metric)
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

        # Save a checkpoint at the end of each epoch when requested
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    # Make sure all trackers are flushed and closed
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder. Pass an empty"
        " string to resume from the most recent checkpoint in the current working directory.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help="Location to store experiment tracking logs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()