#!/bin/bash

# Print command-line help for this script to stdout.
# Documents every option accepted by the getopts loop below.
usage()
{
    cat << EOF
usage: $0 options
OPTIONS:
        -h      Show the help and exit
        -t      training data path
        -e      evaluation data path
        -s      total steps
        -o      model size (e.g. small, large)
        -b      per-GPU batch size
EOF
}

# Parse command-line options.
# -h is a plain flag (no argument); previously it was declared as "h:",
# which made a bare "-h" fail with "option requires an argument".
# The remaining options each take a value:
#   -t train data path, -e eval data path, -s total steps,
#   -o model size, -b per-GPU batch size.
while getopts ":ht:e:s:o:b:" opt
do
    case "$opt" in
        h)
            usage
            exit 0          # help is a successful exit, not an error
            ;;
        t)
            train_data=$OPTARG
            ;;
        e)
            eval_data=$OPTARG
            ;;
        s)
            total_step=$OPTARG
            ;;
        o)
            model_size=$OPTARG
            ;;
        b)
            batch_size=$OPTARG
            ;;
        :)
            echo "error: option -$OPTARG requires an argument" >&2
            usage
            exit 1
            ;;
        \?)
            echo "error: unknown option -$OPTARG" >&2
            usage
            exit 1
            ;;
    esac
done

# Fixed hyper-parameters for this run.
lr=5e-5
answer_maxlength=128
n_context=100
# model_size=small #"large"

# Name the run after the parent directory of the training file,
# e.g. .../webqsp/train.json -> webqsp_<model_size>_<n_context>.
model_base=$(awk -F'/' '{print $(NF-1)}' <<< "$train_data")
model_name="${model_base}_${model_size}_${n_context}"

# Pick a rendezvous port in [10000, 20000] so concurrent runs don't clash.
random_number=$((10000 + RANDOM % 10001))

N_GPU=2
export NGPU="$N_GPU"
# Launch distributed FiD reader training from inside DecAF/Reading.
# Relies on variables set above: train_data, eval_data, model_size,
# batch_size, total_step, model_name, random_number, lr,
# answer_maxlength, n_context, N_GPU.
# Abort if the directory is missing rather than launching from the wrong cwd.
cd DecAF/Reading || { echo "error: cannot cd to DecAF/Reading" >&2; exit 1; }

# Build the command as an array so values containing spaces survive intact;
# 'env' carries the per-command environment, removing the need for eval.
cmd=(env OMP_NUM_THREADS=2 CUDA_VISIBLE_DEVICES=0,1
    torchrun --nproc_per_node="${N_GPU}" --master_port="${random_number}"
    train_reader_new.py
    --train_data "${train_data}"
    --eval_data "${eval_data}"
    --model_size "${model_size}"
    --name "${model_name}"
    --checkpoint_dir ./save-FiD/
    --use_checkpoint
    --lr "${lr}"
    --optim adamw
    --scheduler linear
    --weight_decay 0.01
    --text_maxlength 200
    --answer_maxlength "${answer_maxlength}"
    --per_gpu_batch_size "${batch_size}"
    --total_batch_size 16
    --n_context "${n_context}"
    --total_step "${total_step}"
    --scheduler_steps "${total_step}"
    --warmup_step 100
    --eval_freq 150
    --save_freq "${total_step}")

# Log the exact command before running it.
printf '%s\n' "${cmd[*]}"
"${cmd[@]}"

cd -