#!/usr/bin/env bash
# Train a TLPO reward model on hh-split data, score sampled LLM responses with
# it, then run rejection sampling over the scored samples.
# Abort the pipeline on the first failed stage instead of running inference /
# rejection sampling against stale or missing outputs.
set -euo pipefail

REPO_DIR=/home/boai/APO
DATA_DIR="${REPO_DIR}/data/hh-split"
# Alternative training sets, kept for reference:
# TRAIN_DATA_LIST="${DATA_DIR}/rm_data/hh_split_rm.train.json \
# 		 ${DATA_DIR}/apo_data/rm_apo_data_v0_text_scores.json"
TRAIN_DATA_LIST="${DATA_DIR}/tlpo_data/rm_tlpo_data_v1_text_scores.json"
LLM_DATA_DIR="${REPO_DIR}/data/hh-split/llm_data/tlpo_model"
LLM_DATA_PATH="${LLM_DATA_DIR}/hh_split_llm_alpaca_v0.sample.json"
NUM_APO_SAMPLES=4

# Two eval sets in one string; expanded UNQUOTED later so word-splitting turns
# them into two separate CLI arguments.
TEST_DATA_LIST="${DATA_DIR}/eval_data/hh_cleaned_origin.test.json \
		${DATA_DIR}/eval_data/hh_split_llm.valid.json"

model_path=/home/boai/LLM_models/llama-2-7b-hf
model_output_path="${REPO_DIR}/outputs/rm_tlpo_model_v1/"

NUM_GPUS=2
BATCH_SIZE=48
MICRO_BATCH_SIZE=1
LEARNING_RATE=1e-6
TLPO_COEFF=0.1
TRIPLET_MARGIN=0.2
# Accumulate gradients so that the effective global batch equals BATCH_SIZE.
GRADIENT_ACCUMULATION_STEP=$((BATCH_SIZE / NUM_GPUS / MICRO_BATCH_SIZE))
# Stage 1: train the TLPO reward model.
# NOTE: ${TRAIN_DATA_LIST} and ${TEST_DATA_LIST} are intentionally UNQUOTED so
# a space-separated list word-splits into multiple file arguments; every other
# expansion is quoted defensively.
torchrun --nproc_per_node="${NUM_GPUS}" --master_port=6000 "${REPO_DIR}/train.py" \
    --task_type tlpo \
    --do_train True \
    --eval_at_start False \
    --model_type reward \
    --model_name_or_path "${model_path}" \
    --data_type "comparison_pair" \
    --train_data_path ${TRAIN_DATA_LIST} \
    --eval_data_path ${TEST_DATA_LIST} \
    --rm_calibration True \
    --data_suffix rm_apo_v1 \
    --add_sep_token True \
    --remove_unused_columns false \
    --output_dir "${model_output_path}" \
    --num_train_epochs 1 \
    --tlpo_loss_coeff "${TLPO_COEFF}" \
    --triplet_margin "${TRIPLET_MARGIN}" \
    --apo_sample_num "${NUM_APO_SAMPLES}" \
    --per_device_train_batch_size "${MICRO_BATCH_SIZE}" \
    --per_device_eval_batch_size "${MICRO_BATCH_SIZE}" \
    --gradient_accumulation_steps "${GRADIENT_ACCUMULATION_STEP}" \
    --evaluation_strategy steps \
    --padding_side right \
    --truncation_side left \
    --pooling_type last \
    --max_length 512 \
    --save_strategy steps \
    --save_total_limit 10 \
    --learning_rate "${LEARNING_RATE}" \
    --warmup_steps 100 \
    --deepspeed configs/default_offload_opt_param.json \
    --bf16 true --tf32 true

# Stage 2 settings: score the sampled LLM responses with the reward model.
# Tag for the scorer's output files: "apo_rm" or "base_rm".
MODEL_NAME="base_rm"

# Same GPU count as training, but a much larger per-device eval batch.
NUM_GPUS=2
MICRO_BATCH_SIZE=64

# CUDA_VISIBLE_DEVICES=1 
# Stage 2: run reward-model inference over the sampled LLM responses, writing
# per-example scores next to ${LLM_DATA_PATH}. All expansions quoted.
torchrun --nproc_per_node="${NUM_GPUS}" --master_port=6001 "${REPO_DIR}/train.py" \
    --task_type inference \
    --do_train False \
    --eval_at_start True \
    --model_type reward \
    --model_name_or_path "${model_output_path}" \
    --data_type "reject_sample" \
    --eval_data_path "${LLM_DATA_PATH}" \
    --rm_calibration False \
    --data_suffix "${MODEL_NAME}" \
    --add_sep_token True \
    --remove_unused_columns false \
    --output_dir "${REPO_DIR}/outputs/inference/" \
    --per_device_eval_batch_size "${MICRO_BATCH_SIZE}" \
    --evaluation_strategy steps \
    --padding_side right \
    --truncation_side left \
    --pooling_type last \
    --max_length 512 \
    --deepspeed configs/default_offload_opt_param.json \
    --bf16 true --tf32 true --report_to "none"


# Stage 3: rejection sampling — select responses using the scores emitted by
# stage 2 (the inference step appends "_pred_<scorer>_results.json" to the
# input path; SCORE_PATH must match that convention).
SCORE_PATH="${LLM_DATA_PATH}_pred_${MODEL_NAME}_results.json"
OUTPUT_FILE_NAME="${LLM_DATA_DIR}/rjs_v0.json"

python3 "${REPO_DIR}/tools/rejection_sampling.py" \
	--data_path "${LLM_DATA_PATH}" \
	--score_path "${SCORE_PATH}" \
	--output_dir "${LLM_DATA_DIR}" \
	--rm_scorer "${MODEL_NAME}" \
	--output_file_name "${OUTPUT_FILE_NAME}"

# Remove per-rank temporary inference shards. -f tolerates an empty glob match,
# -- stops option parsing, and ${LLM_DATA_DIR:?} aborts (instead of globbing
# from /) if the variable is somehow unset or empty.
rm -f -- "${LLM_DATA_DIR:?}"/*rank*.jsonl