#!/usr/bin/env bash
# Launch multi-GPU LoRA fine-tuning of a LLaMA-7B chat model via torchrun,
# resuming from an existing LoRA checkpoint.
#
# Inputs (edit these paths/devices as needed):
#   DATA_PATH        instruction-tuning dataset (JSON)
#   lora_checkpoint  LoRA checkpoint to resume from
#   MODEL_PATH       base HF LLaMA-7B weights
#   OUTPUT_PATH      where fine-tuned adapters are written
#   TOT_CUDA         comma-separated GPU ids; nproc_per_node is derived from it
set -euo pipefail

DATA_PATH="./sample/instruct/chat_data.json"
lora_checkpoint="../Chinese-Vicuna-lora-7b-chatv1/train4800"
MODEL_PATH="../llama-7b-hf"
OUTPUT_PATH="../7b-legal-from-chatv1-new"

TOT_CUDA="0,1"
# Split "0,1" into an array safely (no glob/word-splitting pitfalls) so the
# number of torchrun workers always matches the number of visible GPUs.
IFS=',' read -r -a CUDAs <<< "$TOT_CUDA"
CUDA_NUM=${#CUDAs[@]}
PORT="12345"

CUDA_VISIBLE_DEVICES=${TOT_CUDA} torchrun \
  --nproc_per_node="$CUDA_NUM" \
  --master_port="$PORT" \
  finetune_chat.py \
  --data_path "$DATA_PATH" \
  --model_path "$MODEL_PATH" \
  --output_path "$OUTPUT_PATH" \
  --micro_batch 3 \
  --total_batch 32 \
  --log_steps 100 \
  --eval_steps 0 \
  --warmup_ratio 0.01 \
  --save_steps 200 \
  --test_size 500 \
  --prompt_type "chat" \
  --resume_from_checkpoint "$lora_checkpoint" \
  --ignore_data_skip True \
  --num_epoch 6