# coding=utf-8 # coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses from dataclasses import dataclass, field from typing import Any, Dict, List, NewType, Optional, Tuple @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune. """ base_model_revision: Optional[str] = field( default=None, metadata={"help": ("The base model checkpoint for weights initialization with PEFT adatpers.")}, ) model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) model_code_revision: str = field(default=None, metadata={"help": "The branch of the IFT model"}) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) trust_remote_code: bool = field(default=False, metadata={"help": "Trust remote code when loading a model."}) use_flash_attention_2: bool = field( default=False, metadata={ "help": ( "Whether to use flash attention 2. You must install this manually by running `pip install flash-attn --no-build-isolation`" ) }, ) use_peft: bool = field( default=False, metadata={"help": ("Whether to use PEFT or not for training.")}, ) lora_r: Optional[int] = field( default=16, metadata={"help": ("LoRA R value.")}, ) lora_alpha: Optional[int] = field( default=32, metadata={"help": ("LoRA alpha.")}, ) lora_dropout: Optional[float] = field( default=0.05, metadata={"help": ("LoRA dropout.")}, ) lora_target_modules: Optional[List[str]] = field( default=None, metadata={"help": ("LoRA target modules.")}, ) lora_modules_to_save: Optional[List[str]] = field( default=None, metadata={"help": ("Model layers to unfreeze & train")}, ) load_in_8bit: bool = field(default=False, metadata={"help": "use 8 bit precision"}) load_in_4bit: bool = field(default=False, metadata={"help": "use 4 bit precision"}) bnb_4bit_quant_type: Optional[str] = field( default="nf4", metadata={"help": "precise the quantization type (fp4 or nf4)"} ) use_bnb_nested_quant: bool = field(default=False, metadata={"help": "use nested quantization"}) def __post_init__(self): if self.load_in_8bit and self.load_in_4bit: raise ValueError("You can't use 8 bit and 4 bit precision at the same time") @dataclass class DataArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."}) dataset_mixer: Optional[Dict[str, float]] = field( default=None, metadata={"help": ("Datasets and their proportions to be used for training ift/rl.")}, ) dataset_splits: Optional[List[str]] = field( default_factory=lambda: ["train", "test"], metadata={"help": ("List of train test splits to use in the dataset")}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) truncation_side: Optional[str] = field( default=None, metadata={"help": "Truncation side to use for the tokenizer."} )