
# ----- paths ------------------------------------------------------------
REPO_DIR=/home/boai/APO
export PYTHONPATH=${REPO_DIR}

DATA_DIR=${REPO_DIR}/data/hh-split

# Training data: base RM comparison pairs plus the v1 APO/TPO scored samples.
# Kept as a single space-separated string; it is expanded UNQUOTED at the
# call site so each path becomes its own argument.
TRAIN_DATA_LIST="${DATA_DIR}/rm_data/hh_split_rm.train.json"
TRAIN_DATA_LIST+=" ${DATA_DIR}/apo_data/rm_apo_data_v1_text_scores.json"
TRAIN_DATA_LIST+=" ${DATA_DIR}/tpo_data/rm_tpo_data_v1_text_scores.json"
NUM_APO_SAMPLES=4

# Held-out evaluation sets (same space-separated convention as above).
TEST_DATA_LIST="${DATA_DIR}/eval_data/hh_cleaned_origin.test.json"
TEST_DATA_LIST+=" ${DATA_DIR}/eval_data/hh_split_llm.valid.json"

model_path=/home/boai/LLM_models/llama-2-7b-hf
model_output_path=${REPO_DIR}/outputs/llm_tpo_model_v1
SAMPLE_OUTPUT_DIR=${DATA_DIR}/llm_data/tpo_model

# ----- training hyper-parameters ----------------------------------------
NUM_GPUS=2
BATCH_SIZE=48
MICRO_BATCH_SIZE=3
LEARNING_RATE=1e-6
APO_COEFF=0.1
TPO_COEFF=0.05
# Effective global batch = NUM_GPUS * MICRO_BATCH_SIZE * accumulation steps.
GRADIENT_ACCUMULATION_STEP=$((BATCH_SIZE / NUM_GPUS / MICRO_BATCH_SIZE))

# ----- stage 1: train the TPO reward model ------------------------------
# Arguments are collected in an array so each flag/value pair is one line;
# TRAIN_DATA_LIST / TEST_DATA_LIST are expanded unquoted on purpose so that
# each file path is passed as a separate argument.
TRAIN_ARGS=(
    --task_type tpo
    --do_train True
    --eval_at_start False
    --model_type reward
    --model_name_or_path ${model_path}
    --data_type "comparison_pair"
    --train_data_path ${TRAIN_DATA_LIST}
    --eval_data_path ${TEST_DATA_LIST}
    --rm_calibration True
    --data_suffix rm_apo_v1
    --add_sep_token True
    --remove_unused_columns false
    --output_dir ${model_output_path}
    --num_train_epochs 1
    --apo_loss_coeff ${APO_COEFF}
    --tpo_loss_coeff ${TPO_COEFF}
    --apo_sample_num ${NUM_APO_SAMPLES}
    --per_device_train_batch_size ${MICRO_BATCH_SIZE}
    --per_device_eval_batch_size ${MICRO_BATCH_SIZE}
    --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEP}
    --evaluation_strategy steps
    --padding_side right
    --truncation_side left
    --pooling_type last
    --max_length 512
    --save_strategy steps
    --save_total_limit 10
    --learning_rate ${LEARNING_RATE}
    --warmup_steps 100
    --deepspeed configs/default_offload_opt_param.json
    --bf16 true
    --tf32 true
)
torchrun --nproc_per_node=${NUM_GPUS} --master_port=6000 \
    ${REPO_DIR}/train.py "${TRAIN_ARGS[@]}"

# ----- stage 2 configuration: pick the corpus to sample/score ------------
MODEL_NAME="alpaca"
TASK_TYPE="sampling"   # one of: "sampling" | "testing"
DATA_DIR=${REPO_DIR}/data/hh-split
if [[ "${TASK_TYPE}" == "testing" ]]; then
    # Score the held-out HH test split (paired comparisons).
    DATA_PATH=${DATA_DIR}/eval_data/hh_cleaned_origin.test.json
    DATA_NAME="hh_test"
    DATA_TYPE="comparison_pair"
else
    # Default: generate new samples for the LLM prompt set (unpaired data).
    # (A redundant re-assignment of DATA_DIR to the same value was removed.)
    DATA_PATH=${DATA_DIR}/llm_data/hh_split_llm_alpaca_v0.sample.json
    DATA_NAME="hh_llm_sample"
    DATA_TYPE="non_comparison_pair"
fi

EVAL_MICRO_BATCH_SIZE=25
MAX_INPUT_LENGTH=512

# ----- stage 2: sample/score with the trained model ----------------------
# NOTE(review): for multi-GPU inference, swap the `python` line back to:
#   torchrun --nproc_per_node 2 --master_port 6000 ${REPO_DIR}/tools/inference_llm.py ...
# BUG FIX: the output dir previously used ${OUTPUT_DIR}, which is never
# defined anywhere in this script (would expand to empty). SAMPLE_OUTPUT_DIR
# is set above and otherwise unused — it is the intended destination.
# Also normalized the mixed tab/space continuations and removed the dangling
# trailing backslash after the final argument.
python ${REPO_DIR}/tools/inference_llm.py \
    --model_name_or_path ${model_output_path} \
    --model_prefix ${MODEL_NAME} \
    --data_path ${DATA_PATH} \
    --output_dir ${SAMPLE_OUTPUT_DIR} \
    --per_device_eval_batch_size ${EVAL_MICRO_BATCH_SIZE} \
    --task_type ${TASK_TYPE} \
    --data_suffix ${DATA_NAME} \
    --max_length ${MAX_INPUT_LENGTH} \
    --data_type ${DATA_TYPE}

# evaluate the