#!/bin/bash
# llavaguard/scripts/run_instructblip_baseline.sh
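#
# Runs the InstructBLIP baseline (blur and compress image preprocessing) over
# the unconstrained, constrained, and QnA attack settings, then scores each
# run with get_metric.py and cal_metrics.py.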

set -x

MODEL=instructblip
MODEL_PATH=/workingdir/models_hf/lmsys/vicuna-13b-v1.1
GPU_ID=3  # GPU used for inference and metric computation
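
# Assumption: the output directory may not exist yet, so create it up front.
mkdir -p "outputs/${MODEL}"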

for TASK in unconstrained constrained qna; do
  for BASELINE_ATTACK_MODE in blur compress; do
    echo "Running ${TASK} with ${BASELINE_ATTACK_MODE}"

    INFERENCE_FILE=outputs/${MODEL}/inference_${MODEL}_baseline_${TASK}_${BASELINE_ATTACK_MODE}
    METRICS_FILE=outputs/${MODEL}/metric_${MODEL}_baseline_${TASK}_${BASELINE_ATTACK_MODE}
    SUMMARY_FILE=outputs/${MODEL}/summary_${MODEL}_baseline_${TASK}_${BASELINE_ATTACK_MODE}
if [ "${TASK}" = "constrained" ]; then
echo "Running constrained attack"
python instructblip_constrained_inference.py --output_file ${INFERENCE_FILE} \
--model_path ${MODEL_PATH} \
--gpu-id 3 \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
elif [ "${TASK}" = "unconstrained" ]; then
echo "Running unconstrained attack"
python instructblip_unconstrained_inference.py --output_file ${INFERENCE_FILE} \
--model_path ${MODEL_PATH} \
--gpu-id 3 \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
elif [ "${TASK}" = "qna" ]; then
echo "Running QNA"
python instructblip_qna.py \
--image_path ${TASK}_attack_images/adversarial_ \
--output_file ${INFERENCE_FILE} \
--gpu-id ${GPU_ID} \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
    else
      echo "Unknown TASK: ${TASK}"
      exit 1
    fi
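
    # Score the inference outputs: get_metric.py writes per-example metrics
    # (plus perplexity), and cal_metrics.py aggregates them into the summary file.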
    CUDA_VISIBLE_DEVICES=${GPU_ID} python get_metric.py \
      --input "${INFERENCE_FILE}" \
      --output "${METRICS_FILE}" \
      --perplexity "${SUMMARY_FILE}" \
      --device cuda

    python cal_metrics.py \
      --input "${METRICS_FILE}" \
      --output "${SUMMARY_FILE}"
  done
done