#!/bin/bash
# Evaluate a sweep of RL checkpoints with val-only trainer runs.
set -x

export NCCL_TIMEOUT=36000

# Parse the known "--key value" flags. The first unrecognized token stops
# parsing, so everything after it is forwarded untouched to the trainer
# command line further down.
while [[ $# -gt 0 ]]; do
    case "$1" in
        --model)      MODEL_PATH="$2" ;;
        --exp_name)   EXP_NAME="$2" ;;
        --val_data)   VAL_DATA="$2" ;;
        --ckpt_start) CKPT_START="$2" ;;
        --ckpt_end)   CKPT_END="$2" ;;
        --ckpt_step)  CKPT_STEP="$2" ;;
        --nnodes)     NNODES="$2" ;;
        --ngpus)      NGPUS="$2" ;;
        --override)   OVERRIDE="$2" ;;
        *)            break ;;
    esac
    shift 2
done

# --override defaults to true: re-run evaluation even when a results file
# already exists (assigned only when OVERRIDE is unset or empty).
: "${OVERRIDE:=true}"

# Build the list of checkpoint steps to evaluate: CKPT_START..CKPT_END
# inclusive, advancing by CKPT_STEP. CKPT_STEP defaults to 1 — without the
# default, a missing --ckpt_step made the increment 0 and this loop spun
# forever.
if [ -n "$CKPT_START" ] && [ -n "$CKPT_END" ]; then
    CKPT_STEP="${CKPT_STEP:-1}"
    CKPTS=()
    for ((i = CKPT_START; i <= CKPT_END; i += CKPT_STEP)); do
        CKPTS+=("$i")
    done
    echo "Generated checkpoints: ${CKPTS[*]}"
fi

# Runtime environment for the evaluation runs.
export WANDB_INIT_TIMEOUT=600
export HF_HOME=/tmp/huggingface/
export TOKENIZERS_PARALLELISM=true

# W&B project/credentials — placeholders to fill in if logging is re-enabled.
export WANDB_PROJECT='YOUR_PROJECT_NAME'
export WANDB_API_KEY='YOUR_WANDB_API_KEY'

# W&B logging is switched off entirely for these val-only runs.
export WANDB_MODE=disabled
export WANDB_DISABLED=true

# Only the global rank-0 process drives the evaluation sweep. The rank var is
# defaulted to 0 so the script also works outside mpirun — previously the
# unquoted, unset variable made `[` error out and the whole loop was skipped.
if [ "${OMPI_COMM_WORLD_RANK:-0}" -eq 0 ]; then
    for CKPT in "${CKPTS[@]}"; do
        echo "Starting processing checkpoint: $CKPT"

        ckpt_dir="/tmp/${EXP_NAME}/global_step_${CKPT}"
        if [ "$CKPT" -ne 0 ]; then
            model_path="${ckpt_dir}/actor/huggingface"
            # Skip conversion when a non-empty HF export already exists.
            if [ -d "$model_path" ] && [ "$(ls -A "$model_path" 2>/dev/null)" ]; then
                echo "Huggingface directory already exists and has files, skipping model preparation: $model_path"
            else
                mkdir -p "$model_path"
                # Copy config/tokenizer files (everything except *.pt weight
                # shards) next to where the merged model will be written.
                find "${ckpt_dir}/actor/" -maxdepth 1 -type f ! -name "*.pt" -exec cp {} "$model_path/" \;
                # Merge the sharded checkpoint into a single HF-format model.
                python tools/merge_model2hf.py --local_dir "${ckpt_dir}/actor"
            fi
        else
            # Checkpoint 0 means "evaluate the base model" given via --model.
            model_path="$MODEL_PATH"
        fi

        # Skip checkpoints whose results already exist unless --override true.
        RESULTS_FILE="${ckpt_dir}/${VAL_DATA}_generation_results.jsonl"
        if [ "$OVERRIDE" = "false" ] && [ -f "$RESULTS_FILE" ]; then
            echo "Results file already exists and override is disabled, skipping: $RESULTS_FILE"
            continue
        fi

        # Val-only trainer run: generates on the validation set and writes
        # results under trainer.validation_data_dir. Leftover CLI args ("$@")
        # are forwarded so callers can override any hydra option. Args that
        # expand variables or contain glob characters (e.g. [console]) are
        # quoted so the shell cannot split or glob them.
        python3 -m verl.trainer.main_ppo \
            algorithm.adv_estimator=grpo \
            data.prompt_key=content \
            data.train_files=/tmp/lmsys_gpt5_chat_4k_filtered_train.parquet \
            "data.val_files=/tmp/${VAL_DATA}_gpt5_chat_4k_filtered_test.parquet" \
            data.train_batch_size=256 \
            data.val_batch_size=600 \
            data.max_prompt_length=2048 \
            data.max_response_length=1536 \
            data.truncation=right \
            "actor_rollout_ref.model.path=${model_path}" \
            actor_rollout_ref.actor.optim.lr=1e-6 \
            actor_rollout_ref.actor.grad_clip=0.2 \
            actor_rollout_ref.model.use_remove_padding=True \
            actor_rollout_ref.actor.ppo_mini_batch_size=256 \
            actor_rollout_ref.actor.use_dynamic_bsz=True \
            actor_rollout_ref.actor.ppo_max_token_len_per_gpu=32768 \
            actor_rollout_ref.actor.use_kl_loss=False \
            actor_rollout_ref.actor.entropy_coeff=0.0 \
            actor_rollout_ref.actor.kl_loss_coef=0.0 \
            actor_rollout_ref.actor.kl_loss_type=low_var_kl \
            actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
            actor_rollout_ref.model.enable_gradient_checkpointing=True \
            actor_rollout_ref.actor.fsdp_config.param_offload=False \
            actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
            actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
            actor_rollout_ref.rollout.name=vllm \
            actor_rollout_ref.rollout.temperature=0.8 \
            actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
            actor_rollout_ref.rollout.n=8 \
            actor_rollout_ref.ref.fsdp_config.param_offload=True \
            algorithm.kl_ctrl.kl_coef=0.0 \
            "+trainer.val_data=${VAL_DATA}" \
            trainer.val_only=True \
            trainer.val_before_train=True \
            trainer.critic_warmup=0 \
            "trainer.logger=[console]" \
            "trainer.experiment_name=${EXP_NAME}" \
            "trainer.n_gpus_per_node=${NGPUS}" \
            "trainer.nnodes=${NNODES}" \
            trainer.save_freq=200 \
            trainer.test_freq=50 \
            trainer.default_hdfs_dir=null \
            trainer.total_epochs=200 "$@" \
            actor_rollout_ref.rollout.enforce_eager=False \
            actor_rollout_ref.rollout.free_cache_engine=False \
            trainer.resume_mode=disable \
            "trainer.validation_data_dir=${ckpt_dir}"
    done
fi