File size: 815 Bytes
fa6856c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Launch fp16 fine-tuning of LLaMA-7B on the GPUs listed in TOT_CUDA.
# use_zero_offload=1 -> run under DeepSpeed (passes --deepspeed to the script);
# otherwise          -> run under torchrun with one process per visible GPU.
set -euo pipefail

TOT_CUDA="0,1,2,3"                 # comma-separated CUDA device ids
# shellcheck disable=SC2206 — word-splitting is intentional here: split ids into an array
CUDAs=(${TOT_CUDA//,/ })
CUDA_NUM=${#CUDAs[@]}              # GPU count -> torchrun --nproc_per_node
PORT="11451"                       # master/rendezvous port for the launcher

DATA_PATH="sample/instruct/data_sample.jsonl"
OUTPUT_PATH="outs"
MODEL_PATH="decapoda-research/llama-7b-hf"
TEST_SIZE=1                        # held-out eval split size passed to finetune_fp16.py
use_zero_offload=1

if [[ "${use_zero_offload}" == "1" ]]; then
    CUDA_VISIBLE_DEVICES="${TOT_CUDA}" deepspeed --master_port="${PORT}" finetune_fp16.py \
        --data_path "${DATA_PATH}" \
        --output_path "${OUTPUT_PATH}" \
        --model_path "${MODEL_PATH}" \
        --eval_steps 200 \
        --save_steps 200 \
        --test_size "${TEST_SIZE}" \
        --deepspeed
else
    CUDA_VISIBLE_DEVICES="${TOT_CUDA}" torchrun --nproc_per_node="${CUDA_NUM}" --master_port="${PORT}" finetune_fp16.py \
        --data_path "${DATA_PATH}" \
        --output_path "${OUTPUT_PATH}" \
        --model_path "${MODEL_PATH}" \
        --eval_steps 200 \
        --save_steps 200 \
        --test_size "${TEST_SIZE}"
fi