#!/usr/bin/env bash
# Score sampled responses with a reward model (distributed inference via
# torchrun), then run rejection sampling over the scores and clean up the
# temporary per-rank output files.
#
# Fail fast: without strict mode, a failed inference run would still fall
# through to rejection sampling and the final cleanup `rm`.
set -euo pipefail

REPO_DIR=/home/boai/APO
DATA_DIR="${REPO_DIR}/data/hh-split/llm_data/base_model"
# Candidate responses to be scored by the reward model.
DATA_PATH="${DATA_DIR}/alpaca_sampling_hh_llm_sample_v1.json"

MODEL_PATH="${REPO_DIR}/outputs/rm_base_model"
MODEL_NAME="base_rm" # "apo_rm" or "base_rm"

NUM_GPUS=2
MICRO_BATCH_SIZE=64

# Distributed reward-model inference over DATA_PATH; the scores are consumed
# by the rejection-sampling step below.
# To pin specific GPUs, prepend e.g.: CUDA_VISIBLE_DEVICES=0,1
torchrun --nproc_per_node="${NUM_GPUS}" --master_port=6001 "${REPO_DIR}/train.py" \
    --task_type inference \
    --do_train False \
    --eval_at_start True \
    --model_type reward \
    --model_name_or_path "${MODEL_PATH}" \
    --data_type "reject_sample" \
    --eval_data_path "${DATA_PATH}" \
    --rm_calibration False \
    --data_suffix "${MODEL_NAME}" \
    --add_sep_token True \
    --remove_unused_columns false \
    --output_dir "${REPO_DIR}/outputs/inference/" \
    --per_device_eval_batch_size "${MICRO_BATCH_SIZE}" \
    --evaluation_strategy steps \
    --padding_side right \
    --truncation_side left \
    --pooling_type last \
    --max_length 512 \
    --deepspeed configs/default_offload_opt_param.json \
    --bf16 true --tf32 true --report_to "none"


# Rejection sampling over the scored responses (tools/rejection_sampling.py).
# SCORE_PATH follows the naming used by the inference step above: reward
# scores for DATA_PATH, suffixed with the scorer (MODEL_NAME).
SCORE_PATH="${DATA_PATH}_pred_${MODEL_NAME}_results.json"
OUTPUT_FILE_NAME="${DATA_DIR}/rjs_v1.json"

python3 "${REPO_DIR}/tools/rejection_sampling.py" \
    --data_path "${DATA_PATH}" \
    --score_path "${SCORE_PATH}" \
    --output_dir "${DATA_DIR}" \
    --rm_scorer "${MODEL_NAME}" \
    --output_file_name "${OUTPUT_FILE_NAME}"

# Remove temporary per-rank inference files produced by the distributed run.
# -f: succeed even when the glob matches nothing (plain rm would fail on the
#     literal pattern); -- guards against option-like filenames;
# ${DATA_DIR:?} aborts instead of globbing from "/" if DATA_DIR is unset.
rm -f -- "${DATA_DIR:?}"/*rank*.jsonl