import dataclasses
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, NewType, Optional, Tuple

import transformers
from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, HfArgumentParser


MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

DataClassType = NewType("DataClassType", Any)


class H4ArgumentParser(HfArgumentParser):
    def parse_yaml_and_args(self, yaml_arg: str, other_args: Optional[List[str]] = None) -> List[DataClassType]:
        """
        Parse a YAML file and overwrite the default/loaded values with the values provided to the command line.

        Args:
            yaml_arg (`str`):
                The path to the config file used.
            other_args (`List[str]`, *optional*):
                A list of strings to parse as command line arguments, e.g. ['--arg=val', '--arg2=val2'].

        Returns:
            [`List[dataclass]`]: a list of dataclasses with the values from the YAML file and the command line.
        """
        arg_list = self.parse_yaml_file(os.path.abspath(yaml_arg))

        outputs = []
        # Guard against being called without overrides, then strip the leading dashes
        # from each "--arg=val" string and split it into a key/value pair
        other_args = other_args or []
        other_args = {arg.split("=")[0].strip("-"): arg.split("=")[1] for arg in other_args}
        used_args = {}

        # Overwrite the default/loaded values with the values provided to the command line
        for data_yaml, data_class in zip(arg_list, self.dataclass_types):
            keys = {f.name for f in dataclasses.fields(data_yaml) if f.init}
            inputs = {k: v for k, v in vars(data_yaml).items() if k in keys}
            for arg, val in other_args.items():
                # Apply an override only if the argument is a field of this dataclass
                if arg in keys:
                    base_type = data_yaml.__dataclass_fields__[arg].type
                    inputs[arg] = val

                    # Cast to int or float (command line values arrive as strings)
                    if base_type in [int, float]:
                        inputs[arg] = base_type(val)

                    # Split comma-separated values into a list of strings
                    if base_type == List[str]:
                        inputs[arg] = [str(v) for v in val.split(",")]

                    # bool() of any non-empty string is True, so check for booleans explicitly
                    if base_type == bool:
                        if val in ["true", "True"]:
                            inputs[arg] = True
                        else:
                            inputs[arg] = False

                    # Track consumed args; the same arg matching fields of more than one dataclass is ambiguous
                    if arg not in used_args:
                        used_args[arg] = val
                    else:
                        raise ValueError(f"Duplicate argument provided: {arg}, may cause unexpected behavior")

            obj = data_class(**inputs)
            outputs.append(obj)

        return outputs

    def parse(self) -> DataClassType | Tuple[DataClassType]:
        if len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
            # If we pass only one argument to the script and it's the path to a YAML file,
            # parse it to get our arguments
            output = self.parse_yaml_file(os.path.abspath(sys.argv[1]))
        elif len(sys.argv) > 2 and sys.argv[1].endswith(".yaml"):
            # Parse the YAML file and overwrite its values with the command line arguments
            output = self.parse_yaml_and_args(os.path.abspath(sys.argv[1]), sys.argv[2:])
        else:
            # No YAML file given, so parse command line arguments only
            output = self.parse_args_into_dataclasses()

        # Never return a tuple of just one element
        if len(output) == 1:
            output = output[0]
        return output


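# A minimal usage sketch (the script name and override values are illustrative):
#
#   parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
#   model_args, data_args, training_args = parser.parse()
#
# which supports all three invocation styles handled by `parse`:
#
#   python train.py config.yaml                          # YAML only
#   python train.py config.yaml --learning_rate=1e-5     # YAML + command line overrides
#   python train.py --model_name_or_path=gpt2 ...        # command line only

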
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune.
    """

    base_model_revision: Optional[str] = field(
        default=None,
        metadata={"help": ("The base model checkpoint for weights initialization with PEFT adapters.")},
    )
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    model_code_revision: Optional[str] = field(default=None, metadata={"help": "The branch of the IFT model."})
    torch_dtype: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
                "dtype will be automatically derived from the model's weights."
            ),
            "choices": ["auto", "bfloat16", "float16", "float32"],
        },
    )
    trust_remote_code: bool = field(default=False, metadata={"help": "Trust remote code when loading a model."})
    use_flash_attention_2: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to use Flash Attention 2. You must install it manually by running `pip install flash-attn --no-build-isolation`."
            )
        },
    )
    use_peft: bool = field(
        default=False,
        metadata={"help": ("Whether to use PEFT or not for training.")},
    )
    lora_r: Optional[int] = field(
        default=16,
        metadata={"help": ("LoRA R value.")},
    )
    lora_alpha: Optional[int] = field(
        default=32,
        metadata={"help": ("LoRA alpha.")},
    )
    lora_dropout: Optional[float] = field(
        default=0.05,
        metadata={"help": ("LoRA dropout.")},
    )
    lora_target_modules: Optional[List[str]] = field(
        default=None,
        metadata={"help": ("LoRA target modules.")},
    )
    lora_modules_to_save: Optional[List[str]] = field(
        default=None,
        metadata={"help": ("Model layers to unfreeze & train.")},
    )
    load_in_8bit: bool = field(default=False, metadata={"help": "Use 8 bit precision."})
    load_in_4bit: bool = field(default=False, metadata={"help": "Use 4 bit precision."})

    bnb_4bit_quant_type: Optional[str] = field(
        default="nf4", metadata={"help": "Specify the quantization type (fp4 or nf4)."}
    )
    use_bnb_nested_quant: bool = field(default=False, metadata={"help": "Use nested quantization."})

    def __post_init__(self):
        if self.load_in_8bit and self.load_in_4bit:
            raise ValueError("You can't use 8 bit and 4 bit precision at the same time")


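# A sketch (not part of this module's API) of how the quantization fields above could
# be translated into a `transformers.BitsAndBytesConfig` when loading the model; the
# helper name `get_quantization_config` is illustrative:
#
#   from transformers import BitsAndBytesConfig
#
#   def get_quantization_config(model_args: ModelArguments) -> Optional[BitsAndBytesConfig]:
#       if model_args.load_in_4bit:
#           return BitsAndBytesConfig(
#               load_in_4bit=True,
#               bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
#               bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
#           )
#       if model_args.load_in_8bit:
#           return BitsAndBytesConfig(load_in_8bit=True)
#       return None

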
@dataclass
class DataArguments:
    """
    Arguments pertaining to the data we are going to use for training and evaluation.
    """

    chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."})
    dataset_mixer: Optional[Dict[str, float]] = field(
        default=None,
        metadata={"help": ("Datasets and their proportions to be used for training ift/rl.")},
    )
    dataset_splits: Optional[List[str]] = field(
        default_factory=lambda: ["train", "test"],
        metadata={"help": ("List of train and test splits to use in the dataset.")},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    truncation_side: Optional[str] = field(
        default=None, metadata={"help": "Truncation side to use for the tokenizer."}
    )


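# A hypothetical YAML config consumed by `H4ArgumentParser` (the model and dataset
# names are illustrative; field names correspond to the dataclasses in this module):
#
#   model_name_or_path: mistralai/Mistral-7B-v0.1
#   torch_dtype: bfloat16
#   use_peft: true
#   lora_r: 16
#   lora_alpha: 32
#   dataset_mixer:
#     HuggingFaceH4/ultrachat_200k: 1.0
#   dataset_splits:
#     - train_sft
#     - test_sft

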
@dataclass
class SFTConfig(transformers.TrainingArguments):
    """
    Arguments related to the training process itself. For all parameters, see: https://huggingface.co/docs/transformers/v4.26.1/en/main_classes/trainer#transformers.TrainingArguments
    """

    max_seq_length: Optional[int] = field(
        default=None,
        metadata={"help": ("Used by TRL for reward model training, which tries to read this parameter in init.")},
    )
    logging_first_step: bool = field(
        default=True,
        metadata={"help": ("Whether to log and evaluate the first global_step or not.")},
    )
    optim: Optional[str] = field(default="adamw_torch")


@dataclass
class DPOConfig(transformers.TrainingArguments):
    """
    Arguments related to the DPO training process itself. For all parameters, see: https://huggingface.co/docs/transformers/v4.26.1/en/main_classes/trainer#transformers.TrainingArguments
    """

    beta: Optional[float] = field(
        default=0.1,
        metadata={"help": "The beta factor in DPO loss. Higher beta means less divergence from the initial policy."},
    )
    hub_model_revision: Optional[str] = field(
        default="main",
        metadata={"help": ("The Hub model branch to push the model to.")},
    )
    logging_first_step: bool = field(
        default=True,
        metadata={"help": ("Whether to log and evaluate the first global_step or not.")},
    )
    max_prompt_length: Optional[int] = field(
        default=None,
        metadata={"help": ("For DPO, the maximum length of the prompt to use for conditioning the model.")},
    )
    max_length: Optional[int] = field(
        default=None,
        metadata={"help": ("Used by TRL for reward model training, which tries to read this parameter in init.")},
    )
    optim: Optional[str] = field(default="rmsprop")
    remove_unused_columns: bool = field(default=False)
    loss_type: Optional[str] = field(default="sigmoid", metadata={"help": ("The loss type for DPO.")})
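

# A hypothetical launch of a DPO run via this parser (the script name and config path
# are illustrative), showing command line overrides of the fields above:
#
#   python run_dpo.py dpo_config.yaml --beta=0.05 --max_length=1024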