#!/usr/bin/env bash
# Launch P-Tuning v2 fine-tuning of ChatGLM2-6B (mode glm2) with DeepSpeed,
# using ZeRO stage 2 without offload, on local GPUs 5 and 6.
#
# Requirements: `deepspeed` and `shuf` on PATH; train.py, example.json and
# ds_zero2_no_offload.json in the working directory; model weights at
# /data/transformers/chatglm2-6b. Output is written to ./output_ptuning.
set -euo pipefail

# Pick a random high port for the DeepSpeed rendezvous so that concurrent
# launches on the same machine do not collide on a fixed master port.
MASTER_PORT=$(shuf -n 1 -i 10000-65535)
readonly MASTER_PORT

deepspeed --include localhost:5,6 --master_port "$MASTER_PORT" train.py \
  --train_path example.json \
  --model_name_or_path /data/transformers/chatglm2-6b \
  --per_device_train_batch_size 1 \
  --max_len 1560 \
  --max_src_len 1024 \
  --learning_rate 1e-4 \
  --weight_decay 0.1 \
  --num_train_epochs 2 \
  --gradient_accumulation_steps 4 \
  --warmup_ratio 0.1 \
  --mode glm2 \
  --train_type ptuning \
  --seed 1234 \
  --ds_file ds_zero2_no_offload.json \
  --gradient_checkpointing \
  --show_loss_step 10 \
  --pre_seq_len 16 \
  --prefix_projection True \
  --output_dir ./output_ptuning