#!/bin/bash
# Pick a random master port so concurrent DeepSpeed jobs on this host do not collide.
MASTER_PORT=$(shuf -n 1 -i 10000-65535)
# Date tag used to select the tokenized training data and to name the output directory.
date=0531

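# Launch LoRA fine-tuning with DeepSpeed (fp16, ZeRO stage 2 per the config below) on
# GPUs 1-6 of this host, using the randomly chosen MASTER_PORT set above.
# Effective global batch size: 6 GPUs x 8 per-device x 1 gradient-accumulation step = 48.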
deepspeed --include='localhost:1,2,3,4,5,6' --master_port $MASTER_PORT /workspace/psycho_trainning/lora_train.py \
  --deepspeed=/workspace/psycho_trainning/ds_configs/deepspeed_fp16_zero2.json \
  --train_file_path=/workspace/psycho_trainning/data/tokenized_data/train_data_only_modified_qwen_$date \
  --output_dir=/workspace/psycho_trainning/ckpts/psycho/lora/only_modified_with_states_$date \
  --lora_rank=8 \
  --per_device_train_batch_size=8 \
  --gradient_accumulation_steps=1 \
  --num_train_epochs=3 \
  --save_steps=100 \
  --save_total_limit=2 \
  --learning_rate=1e-4 \
  --fp16 \
  --remove_unused_columns=false \
  --logging_steps=50
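
# Note: the training flags above follow Hugging Face Trainer conventions (assumed here),
# so --save_steps=100 with --save_total_limit=2 keeps only the two most recent
# checkpoints, and --fp16 matches the fp16 ZeRO-2 DeepSpeed config passed via --deepspeed.
#
# Example invocation, assuming this file is saved as run_lora_0531.sh (hypothetical name):
#   nohup bash run_lora_0531.sh > train_0531.log 2>&1 &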