#!/usr/bin/env bash
# Fine-tuning launcher for a Chinese LLaMA-2 13B model via finetune.py.
# (NOTE(review): the original first lines — "Spaces:" / "Runtime error" —
# appear to be copy/paste residue, not shell; replaced with this header.)
# --- Configuration ---------------------------------------------------------
# (Fixed: every line previously ended in a stray " | |" table artifact,
# which shell parses as an empty pipeline — a hard syntax error.)

# Comma-separated GPU device IDs for the (currently disabled) multi-GPU run.
TOT_CUDA="0,1,2,3"
# Split the comma list into an array and count devices; both are consumed
# only by the commented-out torchrun launch below. Unquoted expansion is
# intentional here: we rely on word-splitting of the space-joined list.
CUDAs=(${TOT_CUDA//,/ })
CUDA_NUM=${#CUDAs[@]}
# Rendezvous port for torchrun's distributed master.
PORT="12345"

# Training data. Earlier dataset choices kept for reference:
# ../Chinese-Vicuna/sample/instruct/chat_data.jsonl
#DATA_PATH="sample/instruct/legislation2.json" #"../dataset/instruction/guanaco_non_chat_mini_52K-utf8.json" #"./sample/merge_sample.json"
#DATA_PATH="./sample/instruct/chat_data.jsonl"
DATA_PATH="../Chinese-Vicuna/sample/legislation30k.jsonl"
#DATA_PATH="../Chinese-Vicuna/sample/instructchat_data.jsonl" #working
# Where checkpoints and logs are written.
OUTPUT_PATH="../llama2-30kjudgement-21sept"
# Base model directory.
MODEL_PATH="../chinese-llama-2-13b"
# lora_checkpoint="../Llama2-Chinese-13b-Chat-LoRA"
# NOTE(review): from_data_beginning is assigned but never passed to
# finetune.py below — confirm whether a matching CLI flag is missing.
from_data_beginning=True
# Number of examples held out for evaluation.
TEST_SIZE=300
# TORCH_DISTRIBUTED_DEBUG=DETAIL
# --- Launch ----------------------------------------------------------------
# Single-GPU run. Re-enable the torchrun line below (and comment out the
# single-GPU line) to launch one process per GPU for distributed training.
#CUDA_VISIBLE_DEVICES=${TOT_CUDA} torchrun --nproc_per_node=$CUDA_NUM --master_port=$PORT finetune.py \
CUDA_VISIBLE_DEVICES=0 python finetune.py \
  --data_path "$DATA_PATH" \
  --output_path "$OUTPUT_PATH" \
  --model_path "$MODEL_PATH" \
  --eval_steps 200 \
  --save_steps 200 \
  --test_size "$TEST_SIZE"
# (Fixed: the original left a trailing backslash on the --test_size line,
# line-continuing the command into the commented-out option below — a
# fragile backslash-before-comment hazard. Paths are now quoted so values
# containing spaces cannot word-split. To resume from a LoRA checkpoint,
# uncomment lora_checkpoint above and append:
#   --resume_from_checkpoint "$lora_checkpoint"
# )