import os
# constants.py: shared constants for the MVBench leaderboard.
MODEL_INFO = ["Type", "Model", "Language Model"]
TASK_INFO = ["Avg","Action Antonym","Action Count","Action Localization","Action Prediction","Action Sequence","Character Order","Counterfactual Inference","Egocentric Navigation","Episodic Reasoning","Fine-grained Action","Fine-grained Pose","Moving Attribute","Moving Count","Moving Direction","Object Existence","Object Interaction","Object Shuffle","Scene Transition","State Change","Unexpected Action"]
AVG_INFO = ["Avg"]
DATA_TITILE_TYPE = ['markdown'] * len(MODEL_INFO) + ['number'] * len(TASK_INFO)
SUBMISSION_NAME = "MVBench_submission"
SUBMISSION_URL = os.path.join("https://huggingface.co/datasets/OpenGVLab/", SUBMISSION_NAME)
CSV_DIR = "./MVBench_submission/result.csv"
COLUMN_NAMES = MODEL_INFO + TASK_INFO
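
# A minimal usage sketch (not part of the original constants): how the column
# constants above might be used to load and sanity-check the leaderboard CSV.
# pandas and a result.csv whose header matches COLUMN_NAMES are assumptions,
# not guarantees made by this repo.
def load_leaderboard(csv_path: str = CSV_DIR):
    """Read the leaderboard CSV and verify the expected column layout."""
    import pandas as pd  # imported lazily so this module stays dependency-free

    df = pd.read_csv(csv_path)
    missing = [col for col in COLUMN_NAMES if col not in df.columns]
    if missing:
        raise ValueError(f"result.csv is missing expected columns: {missing}")
    # Per-task scores are every TASK_INFO column except the aggregated "Avg".
    task_cols = [col for col in TASK_INFO if col not in AVG_INFO]
    # Recompute the average as a sanity check against the stored "Avg" column.
    df["Avg_recomputed"] = df[task_cols].mean(axis=1).round(1)
    return df
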
LEADERBORAD_INTRODUCTION = """# MVBench Leaderboard
Welcome to the MVBench leaderboard! 🏆
Please follow the instructions in [video_chat2/mvbench.ipynb](https://github.com/OpenGVLab/Ask-Anything/blob/main/video_chat2/mvbench.ipynb) to generate the `upload_leaderboard.json` file, then upload it here. After clicking the `Submit Eval` button, click the `Refresh` button to see the updated results.
"""
SUBMIT_INTRODUCTION = """# Submit to the MVBench Benchmark
"""
TABLE_INTRODUCTION = """
"""
LEADERBORAD_INFO = """
With the rapid development of Multi-modal Large Language Models (MLLMs), a number of diagnostic benchmarks have recently emerged to evaluate the comprehension capabilities of these models. However, most benchmarks predominantly assess spatial understanding in static image tasks, while overlooking temporal understanding in dynamic video tasks. To alleviate this issue, we introduce a comprehensive Multi-modal Video understanding Benchmark, namely MVBench, which covers 20 challenging video tasks that cannot be effectively solved with a single frame. Specifically, we first introduce a novel static-to-dynamic method to define these temporal-related tasks. By transforming various static tasks into dynamic ones, we enable the systematic generation of video tasks that require a broad spectrum of temporal skills, ranging from perception to cognition. Then, guided by the task definition, we automatically convert public video annotations into multiple-choice QA to evaluate each task. On the one hand, such a distinct paradigm allows us to build MVBench efficiently, without much manual intervention. On the other hand, it guarantees evaluation fairness with ground-truth video annotations, avoiding the biased scoring of LLMs. Moreover, we further develop a robust video MLLM baseline, i.e., VideoChat2, by progressive multi-modal training with diverse instruction-tuning data. The extensive results on MVBench reveal that existing MLLMs are far from satisfactory in temporal understanding, while our VideoChat2 surpasses these leading models by over 15% on MVBench. All models and data are available in the [Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) repository.
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@article{li2023mvbench,
title={MVBench: A Comprehensive Multi-modal Video Understanding Benchmark},
author={Li, Kunchang and Wang, Yali and He, Yinan and Li, Yizhuo and Wang, Yi and Liu, Yi and Wang, Zun and Xu, Jilan and Chen, Guo and Luo, Ping and others},
journal={arXiv preprint arXiv:2311.17005},
year={2023}
}"""