#!/bin/bash
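
# open_eval_template.sh
# Interactively select one video generation model, then evaluate it with
# eval_agent_for_vbench_open.py against a fixed set of open-ended quality
# questions, repeating the full set for a configurable number of rounds.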
# Pin the run to a single GPU
export CUDA_VISIBLE_DEVICES=3
# Define the evaluation queries, one per quality dimension
queries=(
  "How does the model perform in terms of aesthetics?"
  "How well does the model ensure that the subject maintains a consistent appearance throughout the video?"
  "How effectively does the model maintain a consistent background scene throughout the video?"
  "How well does the model produce smooth and natural motion that follows the physical laws of the real world?"
  "To what extent are distortions like over-exposure, noise, and blur present in the generated frames?"
  "How consistently does the visual style (e.g., oil painting, black and white, watercolor) align with the specified look throughout the video?"
  "How consistent are the time-based effects and camera motions throughout the video?"
  "How well does the generated video demonstrate overall consistency with the input prompt?"
  "How effectively does the model generate multiple distinct objects in a single scene?"
  "How accurately does the model generate specific object classes as described in the text prompt?"
  "To what extent does the video exhibit dynamic movement rather than being overly static?"
  "How accurately do human subjects in the video perform the actions described in the text prompt?"
  "How accurately do the colors of the generated objects match the specifications in the text prompt?"
  "How accurately does the spatial arrangement of objects reflect the positioning and relationships described in the text prompt?"
  "How accurately does the generated video represent the scene described in the text prompt?"
)
# For a quick smoke test, uncomment to run a single query:
# queries=("How does the model perform in terms of aesthetics?")
# List available models
available_models=("latte1" "modelscope" "vc10-large" "vc09" "show1" "cogvideox-2b" "cogvideox-5b" "animatediff")
echo "Available models:"
for i in "${!available_models[@]}"; do
  echo "$((i+1)). ${available_models[$i]}"
done
read -p "Please enter the number corresponding to the model you want to evaluate: " model_choice
# Validate input
if ! [[ "$model_choice" =~ ^[1-9][0-9]*$ ]] || [ "$model_choice" -lt 1 ] || [ "$model_choice" -gt "${#available_models[@]}" ]; then
echo "Invalid selection. Exiting."
exit 1
fi
# Map the 1-based menu choice back to the selected model
models=("${available_models[$((model_choice-1))]}")
export rounds=10 # number of evaluation rounds
# One timestamp per invocation, so every round shares the same run prefix
timestamp=$(date +%Y-%m-%d-%H:%M:%S)
for ind in $(seq 1 "$rounds"); do
  for model in "${models[@]}"; do
    # Ensure the per-model log directory exists
    mkdir -p "./logs/$model/"
    for query in "${queries[@]}"; do
      echo "===ind: $ind, model: $model, query: $query===" | tee -a "./logs/$model/$ind.log"
      # Per-run output folder: round index, shared timestamp, and a sanitized query
      export FOLDER_NAME="$ind/$timestamp-$(echo "$query" | tr ' ' '_' | tr -d '?')"
      # Run the evaluation script (output to both terminal and log)
      python eval_agent_for_vbench_open.py --user_query "$query" --model "$model" --recommend 2>&1 | tee -a "./logs/$model/$ind.log"
      unset FOLDER_NAME
    done
  done
done
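
# Usage: bash open_eval_template.sh
# Choose a model at the interactive prompt; per-round logs accumulate in
# ./logs/<model>/<round>.log. FOLDER_NAME is presumably consumed by
# eval_agent_for_vbench_open.py to place per-run outputs (an assumption;
# this script only sets and unsets the variable).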