#!/usr/bin/env bash
# chatlawv1/scripts/.ipynb_checkpoints/finetune-checkpoint.sh
# (Provenance from the original hosting page, kept as comments so the
#  script is executable: uploaded by teachyourselfcoding, "Upload 245
#  files", commit fa6856c, 641 bytes.)
# --- Training configuration -------------------------------------------------
# GPUs to use, as a comma-separated device list (becomes CUDA_VISIBLE_DEVICES).
TOT_CUDA="0,1"
# Split the comma-separated list into an array. Using `read -a` with IFS=','
# instead of an unquoted ${TOT_CUDA//,/ } expansion avoids accidental glob
# expansion and is the ShellCheck-recommended idiom (SC2206).
IFS=',' read -r -a CUDAs <<< "$TOT_CUDA"
CUDA_NUM=${#CUDAs[@]}        # number of GPUs -> torchrun --nproc_per_node
PORT="12345"                 # torchrun --master_port for rendezvous
# Instruction-tuning dataset (alternatives kept from the original script):
DATA_PATH="./sample/instruct/data_sample.json" #"../dataset/instruction/guanaco_non_chat_mini_52K-utf8.json" #"./sample/merge_sample.json"
OUTPUT_PATH="../lora-Vicuna"                   # where LoRA checkpoints are written
MODEL_PATH="../llama-13b-hf"                   # base model to finetune
lora_checkpoint="../Chinese-Vicuna-lora-13b-belle-and-guanaco"  # remote LoRA to resume from
TEST_SIZE=1000               # held-out examples for evaluation
# Launch one finetune.py worker per GPU via torchrun.
# All expansions are quoted so paths containing spaces or glob characters
# survive word-splitting (ShellCheck SC2086); behavior is otherwise identical.
CUDA_VISIBLE_DEVICES="$TOT_CUDA" torchrun --nproc_per_node="$CUDA_NUM" --master_port="$PORT" finetune.py \
    --data_path "$DATA_PATH" \
    --output_path "$OUTPUT_PATH" \
    --model_path "$MODEL_PATH" \
    --eval_steps 200 \
    --save_steps 200 \
    --test_size "$TEST_SIZE" \
    --lora_remote_checkpoint "$lora_checkpoint"