#!/bin/bash
# SFT training launcher: runs verl's FSDP SFT trainer on the merged RAGEN
# dataset for Qwen3-4B-Instruct-2507 on 2 local GPUs.
set -euo pipefail  # fail fast on command errors, unset variables, and pipeline failures
set -x             # trace every command (debugging aid)

# Model configuration
MODEL_NAME="Qwen3-4B-Instruct-2507"
# Lower-cased, underscore-separated variant, used for checkpoint dirs and experiment names.
MODEL_NAME_LOWER=$(echo "$MODEL_NAME" | tr '[:upper:]' '[:lower:]' | tr '-' '_')

# Fixed configuration parameters
NPROC_PER_NODE=2  # use 2 GPUs
TRAIN_PATH="/home/yangcx24/Jayx/RAGEN/data/sft/merged/train.parquet"
TEST_PATH="/home/yangcx24/Jayx/RAGEN/data/sft/merged/test.parquet"
SAVE_PATH="/home/yangcx24/Jayx/RAGEN/checkpoints/${MODEL_NAME_LOWER}_sft"
MODEL_PATH="/home/yangcx24/Jayx/Models/${MODEL_NAME}"

# Restrict the job to GPUs 0 and 1
export CUDA_VISIBLE_DEVICES=0,1

# Memory-related environment variables — tuned for long sequences
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512
export CUDA_LAUNCH_BLOCKING=1  # for debugging; remove once the issue is located (serializes kernel launches)
export OMP_NUM_THREADS=8
export TOKENIZERS_PARALLELISM=false
# Run VERL SFT training — optimized configuration.
# All Hydra-style overrides containing variables or brackets are quoted so that
# paths are passed intact and "trainer.logger=['swanlab']" is not treated as a
# glob pattern by the shell.
# NOTE(review): trainer.total_training_steps=1 looks like a smoke-test setting —
# presumably raise it for a real training run.
torchrun --standalone --nnodes=1 --nproc_per_node="$NPROC_PER_NODE" \
    -m verl.trainer.fsdp_sft_trainer \
    data.train_batch_size=8 \
    data.micro_batch_size_per_gpu=1 \
    data.train_files="$TRAIN_PATH" \
    data.val_files="$TEST_PATH" \
    data.multiturn.enable=true \
    data.multiturn.messages_key=messages \
    data.max_length=40960 \
    model.partial_pretrain="$MODEL_PATH" \
    ulysses_sequence_parallel_size=2 \
    use_remove_padding=true \
    trainer.default_local_dir="$SAVE_PATH" \
    trainer.project_name=RAGEN-v10 \
    trainer.experiment_name="${MODEL_NAME_LOWER}-sft" \
    trainer.total_training_steps=1 \
    "trainer.logger=['swanlab']"