#!/bin/bash
# Evaluate MoE-LLaVA (phi-2.7b, MoE SFT reproduction) on the VizWiz test set:
# shard inference across visible GPUs, merge per-chunk answers, then convert
# them into the VizWiz submission format.
#
# Requires: $PROJECTS and $OUTPUTS set (e.g. via ~/.bashrc) and conda env "lxb39".

# Quote expansions (paths may contain spaces) and abort immediately if the
# environment cannot be set up — otherwise the eval fails much later, mid-run.
source "$HOME/.bashrc" && conda activate lxb39 && cd "$PROJECTS/MoE-LLaVA" || exit 1

# GPUs to fan inference out over (one shard per GPU); defaults to GPU 0 when
# CUDA_VISIBLE_DEVICES is unset or empty.
visible_gpus="${CUDA_VISIBLE_DEVICES:-0}"
IFS=',' read -ra GPULIST <<< "$visible_gpus"
CHUNKS=${#GPULIST[@]}   # number of parallel inference shards

# Model / evaluation configuration.
CONV="phi"                                                     # conversation template
CKPT_NAME="moe-llava-clip-336-phi-2.7b-3rd-sft-moe-reproduce"  # checkpoint directory name
CKPT="$OUTPUTS/MoE-LLaVA/${CKPT_NAME}"                         # absolute checkpoint path
EVAL="eval_files"                                              # evaluation data root

# Launch one deepspeed worker per GPU, each handling a disjoint chunk of the
# question file; workers run in the background and are reaped by `wait` below.
# All path expansions are quoted so directories with spaces don't word-split.
mkdir -p "./${EVAL}/vizwiz/answers/${CKPT_NAME}"
for IDX in $(seq 0 $((CHUNKS-1))); do
    # Offset the master port by the GPU id so the concurrent launchers
    # (one per GPU) don't collide on the same rendezvous port.
    deepspeed --include "localhost:${GPULIST[$IDX]}" --master_port $((GPULIST[IDX] + 24333)) moellava/eval/model_vqa_loader.py \
        --model-path "${CKPT}" \
        --question-file "${EVAL}/vizwiz/llava_test.jsonl" \
        --image-folder "${EVAL}/vizwiz/test" \
        --answers-file "${EVAL}/vizwiz/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.jsonl" \
        --num-chunks "$CHUNKS" \
        --chunk-idx "$IDX" \
        --temperature 0 \
        --conv-mode "${CONV}" &
done

# Barrier: wait for all background inference workers to finish.
# NOTE(review): `wait` does not propagate worker exit codes — a failed shard
# only shows up as a missing chunk file, making the merged answer set
# silently incomplete. Consider `wait "$pid" || exit` per worker.
wait

# Merge the per-chunk answer files into a single jsonl (truncate first so
# reruns don't append onto stale results).
output_file="${EVAL}/vizwiz/answers/${CKPT_NAME}/${CKPT_NAME}.jsonl"
> "$output_file"

for IDX in $(seq 0 $((CHUNKS-1))); do
    cat "${EVAL}/vizwiz/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.jsonl" >> "$output_file"
done

# Convert the merged answers into the VizWiz leaderboard submission format.
# Ensure the upload directory exists (idempotent); the converter writes into it.
mkdir -p "${EVAL}/vizwiz/answers_upload"
python3 scripts/convert_vizwiz_for_submission.py \
    --annotation-file "${EVAL}/vizwiz/llava_test.jsonl" \
    --result-file "${EVAL}/vizwiz/answers/${CKPT_NAME}/${CKPT_NAME}.jsonl" \
    --result-upload-file "${EVAL}/vizwiz/answers_upload/${CKPT_NAME}.json"

