import sys
import logging
from typing import Dict

import datasets
from datasets import load_dataset
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer

logger = logging.getLogger(__name__)

"""
# multi-gpu training
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 accelerate launch --gradient_clipping=1.0 --multi_gpu --num_processes=8 --num_machines=1 --mixed_precision=bf16 --zero_stage=3 sft_fast.py

# single-gpu training
CUDA_VISIBLE_DEVICES=0 accelerate launch --gradient_clipping=1.0 --mixed_precision=bf16 --zero_stage=3 sft.py

# use tmux to train it in the background
tmux new -d -s training "CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 accelerate launch --gradient_clipping=1.0 --multi_gpu --num_processes=8 --num_machines=1 --mixed_precision=bf16 --zero_stage=3 sft.py"
"""

# Hugging Face TrainingArguments for full-parameter SFT.
training_config = {
    "bf16": True,
    "do_eval": False,
    "learning_rate": 5e-06,
    "log_level": "info",
    "logging_steps": 20,
    "logging_strategy": "steps",
    "lr_scheduler_type": "cosine",
    "num_train_epochs": 3.0,
    "max_steps": -1,
    "output_dir": "./share_gpt_sft",
    "overwrite_output_dir": True,
    "per_device_eval_batch_size": 1,
    "per_device_train_batch_size": 1,
    "remove_unused_columns": True,
    "save_steps": 1000,
    "save_total_limit": 1,
    "seed": 0,
    "gradient_checkpointing": True,
    "gradient_checkpointing_kwargs": {"use_reentrant": False},
    "gradient_accumulation_steps": 4,
    "warmup_ratio": 0.03,
    "ddp_find_unused_parameters": True,
}
train_conf = TrainingArguments(**training_config)
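# With the 8-GPU launch from the header comment, the effective global batch size is
# per_device_train_batch_size (1) x gradient_accumulation_steps (4) x 8 processes = 32
# sequences per optimizer step (4 per step on a single GPU).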

# Logging setup: route everything to stdout and match the per-process log level
# chosen by TrainingArguments.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = train_conf.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()

logger.warning(
    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
    + f", distributed training: {bool(train_conf.local_rank != -1)}, 16-bit training: {train_conf.fp16}"
)
logger.info(f"Training/evaluation parameters {train_conf}")

# Model and tokenizer. The base checkpoint is loaded from the current directory in bf16
# with FlashAttention-2; use_cache is disabled because gradient checkpointing is on.
checkpoint_path = "./"
model_kwargs = dict(
    use_cache=False,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
    device_map=None,
)
model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)

tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
tokenizer.model_max_length = 2048
# Reuse the EOS token for padding (many base checkpoints ship without a dedicated pad
# token) and pad on the right for training; left padding is only needed for batched generation.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token)
tokenizer.padding_side = "right"

def load_sharegpt_dataset(file_path: str):
    """Load a ShareGPT-style JSON file as a Hugging Face dataset."""
    dataset = load_dataset("json", data_files=file_path)
    return dataset
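
# Illustrative record shape this script assumes (example values are made up, not taken
# from the actual ShareGPT_40k.json file): a "conversations" list of {"from", "value"}
# turns, whose "human"/"gpt" roles apply_chat_template() below maps to "user"/"assistant".
#
# {
#   "conversations": [
#     {"from": "human", "value": "What is supervised fine-tuning?"},
#     {"from": "gpt", "value": "It adapts a pretrained model to instruction data."}
#   ]
# }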

def apply_chat_template(
    example: Dict,
    tokenizer: AutoTokenizer,
    max_length: int = None,  # currently unused; truncation is handled by the trainer via max_seq_length
) -> Dict:
    """Convert a ShareGPT record into a single chat-formatted training string."""
    messages = example["conversations"]
    converted_messages = []

    # ShareGPT role names -> chat-template role names.
    role_mapping = {
        "human": "user",
        "gpt": "assistant",
    }

    for message in messages:
        role = message["from"]
        content = message["value"]

        # Fall back to the original role name for anything other than human/gpt.
        role = role_mapping.get(role, role)

        converted_messages.append({
            "content": content,
            "role": role,
        })

    # Render the conversation with the tokenizer's chat template; no generation prompt
    # is appended, since the last turn already contains the assistant answer.
    example["text"] = tokenizer.apply_chat_template(
        converted_messages,
        tokenize=False,
        add_generation_prompt=False,
    )

    return example
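
# For a record like the illustrative one above, converted_messages would be
# [{"role": "user", "content": "What is supervised fine-tuning?"},
#  {"role": "assistant", "content": "It adapts a pretrained model to instruction data."}],
# and example["text"] holds that conversation rendered with the checkpoint's own chat template.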

def process_dataset(
    dataset_path: str,
    tokenizer: AutoTokenizer,
    num_proc: int = 64,
    max_length: int = None,
):
    """Load the ShareGPT file and map every record to a chat-formatted "text" column."""
    dataset = load_sharegpt_dataset(dataset_path)
    column_names = list(dataset["train"].features)
    processed_dataset = dataset["train"].map(
        apply_chat_template,
        fn_kwargs={
            "tokenizer": tokenizer,
            "max_length": max_length,
        },
        num_proc=num_proc,
        remove_columns=column_names,
        desc="Applying chat template",
    )

    return processed_dataset

processed_dataset = process_dataset(
    dataset_path="./ShareGPT_40k.json",
    tokenizer=tokenizer,
    num_proc=64,
)
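
# Optional sanity check (uncomment to inspect one rendered sample before training starts):
# logger.info(processed_dataset[0]["text"][:500])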

# TRL SFTTrainer over the pre-rendered "text" column; packing is disabled, so each
# conversation is tokenized and truncated to max_seq_length independently.
# Note: this keyword layout matches older TRL releases; newer ones move
# max_seq_length / packing / dataset_text_field onto SFTConfig.
trainer = SFTTrainer(
    model=model,
    args=train_conf,
    peft_config=None,
    train_dataset=processed_dataset,
    eval_dataset=None,
    max_seq_length=2048,
    dataset_text_field="text",
    tokenizer=tokenizer,
    packing=False,
)

# Train, log/save the training metrics, and persist the trainer state.
train_result = trainer.train()
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()

# Save the final model (and the tokenizer passed to the trainer) to the output directory.
trainer.save_model(train_conf.output_dir)
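
# Quick post-training smoke test (illustrative; run separately, not under the accelerate launch):
# from transformers import pipeline
# pipe = pipeline(
#     "text-generation",
#     model=train_conf.output_dir,
#     torch_dtype=torch.bfloat16,
#     device_map="auto",
# )
# print(pipe("Hello! Who are you?", max_new_tokens=64)[0]["generated_text"])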