import os

# Constants for the MotionBench leaderboard.

# Leaderboard column groups.
MODEL_INFO = ["Model", "Language Model", "Date"]
TASK_INFO = ["Dev Avg", "Test Avg", "MR", "LM", "CM", "MO", "AO", "RC"]
AVG_INFO = ["Dev Avg", "Test Avg", "MR", "LM", "CM", "MO", "AO", "RC"]

# Mapping from column abbreviations to full task names.
key_map = {
    "Dev Avg": "dev avg",
    "Test Avg": "test avg",
    "MR": "Motion Recognition",
    "LM": "Location-related Motion",
    "CM": "Camera Motion",
    "MO": "Motion-related Objects",
    "AO": "Action Order",
    "RC": "Repetition Count",
}

# Rendering type for each leaderboard column (3 markdown + 8 number).
DATA_TITILE_TYPE = [
    'markdown', 'markdown', 'markdown',
    'number', 'number', 'number', 'number',
    'number', 'number', 'number', 'number',
]

# Submission dataset and local result/answer files.
SUBMISSION_NAME = "MotionBench_submission"
SUBMISSION_URL = os.path.join("https://huggingface.co/datasets/THUDM/", SUBMISSION_NAME)
CSV_DIR = "./MotionBench_submission/result.csv"
test_answer_file = "./MotionBench_submission/test_ans_video_info.meta.jsonl"
dev_answer_file = "./MotionBench_submission/dev_ans_video_info.meta.jsonl"

COLUMN_NAMES = MODEL_INFO + TASK_INFO

LEADERBORAD_INTRODUCTION = """# MotionBench Leaderboard

Welcome to the MotionBench leaderboard! 🏆

You can prepare your submission by following the [instructions](https://github.com/THUDM/MotionBench?tab=readme-ov-file#get-evaluation-results-and-submit-to-leaderboard).
"""

SUBMIT_INTRODUCTION = """# Submit to the MotionBench Benchmark
"""

TABLE_INTRODUCTION = """
"""

LEADERBORAD_INFO = """
In recent years, vision language models (VLMs) have made significant advancements in video understanding. However, a crucial capability, fine-grained motion comprehension, remains under-explored in current benchmarks. To address this gap, we propose MotionBench, a comprehensive evaluation benchmark designed to assess the fine-grained motion comprehension of video understanding models. MotionBench evaluates models' motion-level perception through six primary categories of motion-oriented question types and includes data collected from diverse sources, ensuring a broad representation of real-world video content. Experimental results reveal that existing VLMs perform poorly at understanding fine-grained motions. To enhance VLMs' ability to perceive fine-grained motion within the limited sequence length of the LLM, we conduct extensive experiments reviewing VLM architectures optimized for video feature compression and propose a novel and efficient Through-Encoder (TE) Fusion method. Experiments show that higher-frame-rate inputs and TE Fusion yield improvements in motion understanding, yet substantial room for enhancement remains. Our benchmark aims to guide and motivate the development of more capable video understanding models, emphasizing the importance of fine-grained motion comprehension.
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@misc{xxx,
      title={MotionBench: Benchmarking and Improving Fine-grained Video Motion Understanding for Vision Language Models},
      author={xxx},
      year={2024},
      eprint={xx.xx},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}"""
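

# --- Illustrative usage sketch (not part of the original constants) ---
# A minimal example of how these constants might be consumed by the
# leaderboard app, assuming `pandas` is installed and that result.csv
# uses the short column names listed in COLUMN_NAMES. The helper name
# `load_leaderboard` is hypothetical, not an API of this repository.
if __name__ == "__main__":
    import pandas as pd

    def load_leaderboard(csv_path: str = CSV_DIR) -> pd.DataFrame:
        """Load submitted results and keep only the displayed columns."""
        df = pd.read_csv(csv_path)
        # Keep only the columns the leaderboard shows, in display order.
        df = df[[c for c in COLUMN_NAMES if c in df.columns]]
        # Rank submissions by their test-set average when available.
        if "Test Avg" in df.columns:
            df = df.sort_values(by="Test Avg", ascending=False)
        return df

    print(load_leaderboard().head())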