better configuration for quadratic warmup
src/axolotl/utils/trainer.py  +27 -5
```diff
@@ -5,6 +5,7 @@ import logging
 import math
 import os
 import sys
+from dataclasses import field
 from pathlib import Path
 from typing import Optional
 
@@ -13,7 +14,7 @@ import torch.cuda
 import transformers
 from torch import nn
 from torch.optim.lr_scheduler import OneCycleLR
-from transformers import EarlyStoppingCallback, Trainer
+from transformers import EarlyStoppingCallback, Trainer, TrainingArguments
 from transformers.trainer_pt_utils import get_parameter_names
 
 from axolotl.utils.callbacks import SavePeftModelCallback
```
```diff
@@ -23,11 +24,24 @@ from axolotl.utils.schedulers import (
 )
 
 
+class AxolotlTrainingArguments(TrainingArguments):
+    """
+    Extend the base TrainingArguments for axolotl helpers
+    """
+
+    lr_quadratic_warmup: bool = field(
+        default=False,
+        metadata={"help": "Use quadratic warmup for cosine scheduling."},
+    )
+
+
 class AxolotlTrainer(Trainer):
     """
     Extend the base Trainer for axolotl helpers
     """
 
+    args = None  # type: AxolotlTrainingArguments
+
     def create_scheduler(
         self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
     ):
```
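Because `TrainingArguments` is a dataclass, a subclass has to carry its own `@dataclass` decorator for a `field()` assignment like `lr_quadratic_warmup` to be processed into a real field (one with a `False` default that `__init__` accepts); without it, the class attribute stays a bare `dataclasses.Field` object. A minimal, standalone sketch of the intended pattern, where `output_dir="./out"` is just a throwaway value for the demo:

```python
from dataclasses import dataclass, field

from transformers import TrainingArguments


@dataclass
class AxolotlTrainingArguments(TrainingArguments):
    """
    Extend the base TrainingArguments for axolotl helpers
    """

    # typed, documented knob; the help text also shows up via HfArgumentParser
    lr_quadratic_warmup: bool = field(
        default=False,
        metadata={"help": "Use quadratic warmup for cosine scheduling."},
    )


args = AxolotlTrainingArguments(output_dir="./out", lr_quadratic_warmup=True)
assert args.lr_quadratic_warmup is True
```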
```diff
@@ -37,11 +51,16 @@ class AxolotlTrainer(Trainer):
 
         Args:
             num_training_steps (int): The number of training steps to do.
+            optimizer (torch.optim.Optimizer): The training optimizer
         """
 
-
-
-
+        # fmt: off
+        if self.lr_scheduler is None:  # type: ignore # pylint: disable=access-member-before-definition
+            # fmt: on
+            if (
+                self.args.lr_scheduler_type == "cosine"
+                and self.args.lr_quadratic_warmup is True
+            ):
                 self.lr_scheduler = get_cosine_schedule_with_quadratic_warmup(  # pylint: disable=attribute-defined-outside-init
                     optimizer,
                     num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
```
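`get_cosine_schedule_with_quadratic_warmup` lives in `axolotl.utils.schedulers` and its body is not part of this diff. A plausible sketch of the idea, assuming it mirrors transformers' `get_cosine_schedule_with_warmup` with the linear warmup factor `step / warmup` swapped for its square:

```python
import math

from torch.optim.lr_scheduler import LambdaLR


def cosine_schedule_with_quadratic_warmup_sketch(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1
):
    """Hypothetical stand-in for axolotl's get_cosine_schedule_with_quadratic_warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # quadratic ramp: (step / warmup) ** 2 instead of linear step / warmup,
            # so the earliest steps see a much smaller learning rate
            return (float(current_step) / float(max(1, num_warmup_steps))) ** 2
        # after warmup: the standard cosine decay toward zero
        progress = float(current_step - num_warmup_steps) / float(
            max(1, num_training_steps - num_warmup_steps)
        )
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * num_cycles * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
```

Under this sketch, with `num_warmup_steps=100` the multiplier at step 10 is 0.01 versus 0.10 for linear warmup, and 0.25 versus 0.50 at step 50: the gentler early ramp is the point of the flag.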
```diff
@@ -132,6 +151,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.fsdp_config:
         training_arguments_kwargs["fsdp_config"] = dict(cfg.fsdp_config)
 
+    if cfg.lr_quadratic_warmup is not None:
+        training_arguments_kwargs["lr_quadratic_warmup"] = cfg.lr_quadratic_warmup
+
     # deepspeed
     if (
         os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true"
@@ -144,7 +166,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     # TODO search Path("./") for one
     training_arguments_kwargs["deepspeed"] = "./ds_config.json"
 
-    training_args = transformers.TrainingArguments(
+    training_args = AxolotlTrainingArguments(
         per_device_train_batch_size=cfg.micro_batch_size,
         per_device_eval_batch_size=cfg.eval_batch_size
         if cfg.eval_batch_size is not None
```
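The `is not None` guard keeps the plumbing tri-state: the kwarg is only forwarded when the user's config actually sets `lr_quadratic_warmup`, so the field default (`False`) applies otherwise, and swapping `transformers.TrainingArguments` for `AxolotlTrainingArguments` is what lets the extra kwarg through at all. A hypothetical end-to-end sketch, reusing the decorated `AxolotlTrainingArguments` from the first sketch above and a stub in place of axolotl's parsed config:

```python
from types import SimpleNamespace

# stand-in for axolotl's parsed YAML config; attribute names follow cfg above
cfg = SimpleNamespace(lr_quadratic_warmup=True, micro_batch_size=2)

training_arguments_kwargs = {}
# forward the flag only when the config sets it, as in setup_trainer above
if cfg.lr_quadratic_warmup is not None:
    training_arguments_kwargs["lr_quadratic_warmup"] = cfg.lr_quadratic_warmup

training_args = AxolotlTrainingArguments(  # the @dataclass-decorated sketch above
    output_dir="./out",  # throwaway value for the demo
    per_device_train_batch_size=cfg.micro_batch_size,
    lr_scheduler_type="cosine",  # create_scheduler only takes the quadratic path for cosine
    **training_arguments_kwargs,
)
assert training_args.lr_quadratic_warmup is True
```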
|