#!/bin/bash
# Run the full LLaVA image-evaluation suite for one checkpoint: shard each
# benchmark's inference across all visible GPUs, merge the per-chunk answers,
# then score (or print the upload URL for server-scored benchmarks).
#
# Usage: <script> <checkpoint-name>
# Required env: CODE_ROOT, REPO, DATA_ROOT. Optional: CUDA_VISIBLE_DEVICES.

# Fail fast with a clear message instead of silently building broken paths
# like "//checkpoints/" when the environment is incomplete.
: "${CODE_ROOT:?CODE_ROOT must be set}"
: "${REPO:?REPO must be set}"
: "${DATA_ROOT:?DATA_ROOT must be set}"
if [ -z "${1:-}" ]; then
    echo "Usage: $0 <checkpoint-name>" >&2
    exit 1
fi

MODEL_PATH=$CODE_ROOT/$REPO/checkpoints/$1
CKPT=$1-cwkuo

LLAVA_ROOT=$CODE_ROOT/$REPO
EVAL_ROOT=$DATA_ROOT/llava-image-eval

# One inference chunk per visible GPU (default: a single chunk on GPU 0).
gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
IFS=',' read -ra GPULIST <<< "$gpu_list"
CHUNKS=${#GPULIST[@]}





echo "SQA inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_science \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/scienceqa/llava_test_CQM-A.json \
        --image-folder $EVAL_ROOT/scienceqa/ScienceQA/test \
        --answers-file $EVAL_ROOT/scienceqa/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --single-pred-prompt \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/scienceqa/answers/$CKPT.jsonl
# Clear out the output file if it exists.
> "$output_file"
# Loop through the indices and concatenate each file.
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/scienceqa/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
echo "------------------------------------------------------------"

echo "MME inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/MME/eval_tool/llava_mme.jsonl \
        --image-folder $EVAL_ROOT/MME/MME_Benchmark_release_version \
        --answers-file $EVAL_ROOT/MME/eval_tool/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/MME/eval_tool/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/MME/eval_tool/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
cd $EVAL_ROOT/MME
python3 eval_tool/convert_answer_to_mme.py --experiment $CKPT
cd $LLAVA_ROOT
echo "------------------------------------------------------------"

echo "GQA inference"
SPLIT="llava_gqa_testdev_balanced"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/gqa/$SPLIT.jsonl \
        --image-folder $EVAL_ROOT/gqa/data/images \
        --answers-file $EVAL_ROOT/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/gqa/answers/$SPLIT/$CKPT/merge.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
python3 scripts/convert_gqa_for_eval.py --src $output_file --dst $EVAL_ROOT/gqa/data/$CKPT.json
echo "------------------------------------------------------------"

echo "POPE inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH  \
        --question-file $EVAL_ROOT/pope/llava_pope_test.jsonl \
        --image-folder $EVAL_ROOT/pope/val2014 \
        --answers-file $EVAL_ROOT/pope/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/pope/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/pope/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
echo "------------------------------------------------------------"

echo "TextVQA inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/textvqa/llava_textvqa_val_v051_ocr.jsonl \
        --image-folder $EVAL_ROOT/textvqa/train_images \
        --answers-file $EVAL_ROOT/textvqa/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/textvqa/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/textvqa/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

echo "------------------------------------------------------------"

echo "SEED inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/seed_bench/llava-seed-bench.jsonl \
        --image-folder $EVAL_ROOT/seed_bench \
        --answers-file $EVAL_ROOT/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/seed_bench/answers/$CKPT/merge.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
echo "------------------------------------------------------------"

echo "MMStar inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_mmstar \
        --model-path $MODEL_PATH \
        --answers-file $EVAL_ROOT/mmstar/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --single-pred-prompt \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/mmstar/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/mmstar/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done
echo "------------------------------------------------------------"

echo "LLaVA Bench inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/llava-bench-in-the-wild/questions.jsonl \
        --image-folder $EVAL_ROOT/llava-bench-in-the-wild/images \
        --answers-file $EVAL_ROOT/llava-bench-in-the-wild/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/llava-bench-in-the-wild/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/llava-bench-in-the-wild/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

mkdir -p $EVAL_ROOT/llava-bench-in-the-wild/reviews
python3 llava/eval/eval_gpt_review_bench.py \
    --question $EVAL_ROOT/llava-bench-in-the-wild/questions.jsonl \
    --context $EVAL_ROOT/llava-bench-in-the-wild/context.jsonl \
    --rule $EVAL_ROOT/llava-bench-in-the-wild/rule.json \
    --answer-list \
        $EVAL_ROOT/llava-bench-in-the-wild/answers_gpt4.jsonl \
        $EVAL_ROOT/llava-bench-in-the-wild/answers/$CKPT.jsonl \
    --output \
        $EVAL_ROOT/llava-bench-in-the-wild/reviews/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "MM-VET inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/mm-vet/llava-mm-vet.jsonl \
        --image-folder $EVAL_ROOT/mm-vet/images \
        --answers-file $EVAL_ROOT/mm-vet/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/mm-vet/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/mm-vet/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

mkdir -p $EVAL_ROOT/mm-vet/results
python3 scripts/convert_mmvet_for_eval.py \
    --src $EVAL_ROOT/mm-vet/answers/$CKPT.jsonl \
    --dst $EVAL_ROOT/mm-vet/results/$CKPT.json

mkdir -p $LLAVA_ROOT/mm-vet
cp $EVAL_ROOT/mm-vet/results/$CKPT.json $LLAVA_ROOT/mm-vet/
echo "------------------------------------------------------------"

echo "VQA-V2 inference"
SPLIT="llava_vqav2_mscoco_test-dev2015"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/vqav2/$SPLIT.jsonl \
        --image-folder $EVAL_ROOT/vqav2/test2015 \
        --answers-file $EVAL_ROOT/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/vqav2/answers/$SPLIT/$CKPT/merge.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

python3 scripts/convert_vqav2_for_submission.py --dir $EVAL_ROOT/vqav2 --split $SPLIT --ckpt $CKPT
mkdir -p $LLAVA_ROOT/vqav2
rm -f $LLAVA_ROOT/vqav2/$CKPT.json
cp $EVAL_ROOT/vqav2/answers_upload/$SPLIT/$CKPT.json $LLAVA_ROOT/vqav2/
echo "------------------------------------------------------------"

echo "MMBench inference"
SPLIT="mmbench_dev_20230712"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_mmbench \
        --model-path $MODEL_PATH \
        --question-file $EVAL_ROOT/mmbench/$SPLIT.tsv \
        --answers-file $EVAL_ROOT/mmbench/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --single-pred-prompt \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/mmbench/answers/$SPLIT/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/mmbench/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

mkdir -p $EVAL_ROOT/mmbench/answers_upload/$SPLIT
python3 scripts/convert_mmbench_for_submission.py \
    --annotation-file $EVAL_ROOT/mmbench/$SPLIT.tsv \
    --result-dir $EVAL_ROOT/mmbench/answers/$SPLIT \
    --upload-dir $EVAL_ROOT/mmbench/answers_upload/$SPLIT \
    --experiment $CKPT
mkdir -p $LLAVA_ROOT/mmbench
rm -f $LLAVA_ROOT/mmbench/$CKPT.xlsx
cp $EVAL_ROOT/mmbench/answers_upload/$SPLIT/$CKPT.xlsx $LLAVA_ROOT/mmbench/
echo "------------------------------------------------------------"

echo "VizWiz inference"
for IDX in $(seq 0 $((CHUNKS-1))); do
    CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python3 -m llava.eval.model_vqa_loader \
        --model-path $MODEL_PATH  \
        --question-file $EVAL_ROOT/vizwiz/llava_test.jsonl \
        --image-folder $EVAL_ROOT/vizwiz/test \
        --answers-file $EVAL_ROOT/vizwiz/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \
        --num-chunks $CHUNKS \
        --chunk-idx $IDX \
        --temperature 0 &
done
wait

output_file=$EVAL_ROOT/vizwiz/answers/$CKPT.jsonl
> "$output_file"
for IDX in $(seq 0 $((CHUNKS-1))); do
    cat $EVAL_ROOT/vizwiz/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
done

python3 scripts/convert_vizwiz_for_submission.py \
    --annotation-file $EVAL_ROOT/vizwiz/llava_test.jsonl \
    --result-file $EVAL_ROOT/vizwiz/answers/$CKPT.jsonl \
    --result-upload-file $EVAL_ROOT/vizwiz/answers_upload/$CKPT.json
mkdir -p $LLAVA_ROOT/vizwiz
rm -f $LLAVA_ROOT/vizwiz/$CKPT.json
cp $EVAL_ROOT/vizwiz/answers_upload/$CKPT.json $LLAVA_ROOT/vizwiz/
echo "------------------------------------------------------------"










echo "SQA evaluation"
python3 llava/eval/eval_science_qa.py \
    --base-dir $EVAL_ROOT/scienceqa/ScienceQA/data/scienceqa \
    --result-file $EVAL_ROOT/scienceqa/answers/$CKPT.jsonl \
    --output-file $EVAL_ROOT/scienceqa/answers/${CKPT}_output.jsonl \
    --output-result $EVAL_ROOT/scienceqa/answers/${CKPT}_result.json
echo "------------------------------------------------------------"

echo "MME evaluation"
cd $EVAL_ROOT/MME
python3 eval_tool/eval_tool/calculation.py --results_dir eval_tool/answers/$CKPT
cd $LLAVA_ROOT
echo "------------------------------------------------------------"

echo "GQA evaluation"
cd $EVAL_ROOT/gqa/data/
python3 eval.py --tier testdev_balanced --predictions $CKPT.json
cd $LLAVA_ROOT
echo "------------------------------------------------------------"

echo "POPE evaluation"
python3 llava/eval/eval_pope.py \
    --annotation-dir $EVAL_ROOT/pope/coco \
    --question-file $EVAL_ROOT/pope/llava_pope_test.jsonl \
    --result-file $EVAL_ROOT/pope/answers/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "TextVQA evaluation"
python3 llava/eval/eval_textvqa.py \
    --annotation-file $EVAL_ROOT/textvqa/TextVQA_0.5.1_val.json \
    --result-file $EVAL_ROOT/textvqa/answers/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "SEED evaluation"
python3 scripts/convert_seed_for_submission.py \
    --annotation-file $EVAL_ROOT/seed_bench/SEED-Bench.json \
    --result-file $EVAL_ROOT/seed_bench/answers/$CKPT/merge.jsonl \
    --result-upload-file $EVAL_ROOT/seed_bench/answers_upload/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "MMStar evaluation"
python3 llava/eval/eval_mmstar.py --result-file $EVAL_ROOT/mmstar/answers/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "LLaVA Bench evaluation"
python3 llava/eval/summarize_gpt_review.py -f $EVAL_ROOT/llava-bench-in-the-wild/reviews/$CKPT.jsonl
echo "------------------------------------------------------------"

echo "MM-VET evaluation"
echo "submit to https://huggingface.co/spaces/whyu/MM-Vet_Evaluator"
echo "------------------------------------------------------------"

echo "VQA-V2 evaluation"
echo "submit to https://eval.ai/web/challenges/challenge-page/830/submission"
echo "------------------------------------------------------------"

echo "MMBench evaluation"
echo "submit to https://mmbench.opencompass.org.cn/mmbench-submission"
echo "------------------------------------------------------------"

echo "VizWiz evaluation"
echo "submit to https://eval.ai/web/challenges/challenge-page/2185/submission"
echo "------------------------------------------------------------"