yinanhe committed
Commit
2ae3b27
1 Parent(s): ab968bc
Files changed (3)
  1. app.py +213 -0
  2. constants.py +55 -0
  3. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,213 @@
+ __all__ = ['block', 'make_clickable_model', 'make_clickable_user', 'get_submissions']
+ import os
+
+ import gradio as gr
+ import pandas as pd
+ import json
+ import shutil  # used below to copy files into the submission repo
+ import tempfile
+
+ from constants import *
+ from huggingface_hub import Repository
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+
+ global data_component, filter_component
+
+
+ def upload_file(files):
+     file_paths = [file.name for file in files]
+     return file_paths
+
+ def add_new_eval(
+     input_file,
+     model_name_textbox: str,
+     revision_name_textbox: str,
+     model_link: str,
+ ):
+     if input_file is None:
+         return "Error! Empty file!"
+
+     upload_data = json.loads(input_file)
+     submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
+     submission_repo.git_pull()
+     shutil.copyfile(CSV_DIR, os.path.join(SUBMISSION_NAME, f"{input_file}"))
+
+     csv_data = pd.read_csv(CSV_DIR)
+
+     if revision_name_textbox == '':
+         col = csv_data.shape[0]
+         model_name = model_name_textbox
+     else:
+         model_name = revision_name_textbox
+         model_name_list = csv_data['name']
+         name_list = [name.split(']')[0][1:] for name in model_name_list]
+         if revision_name_textbox not in name_list:
+             col = csv_data.shape[0]
+         else:
+             col = name_list.index(revision_name_textbox)
+
+     if model_link == '':
+         model_name = model_name  # no url
+     else:
+         model_name = '[' + model_name + '](' + model_link + ')'
+
+     # add the new row: model name followed by one score per evaluation dimension
+     new_data = [
+         model_name
+     ]
+     for key in TASK_INFO:
+         if key in upload_data:
+             new_data.append(upload_data[key][0])
+         else:
+             new_data.append(0)
+     csv_data.loc[col] = new_data
+     csv_data.to_csv(CSV_DIR, index=False)
+     submission_repo.push_to_hub()
+     return 0
+
+ def get_final_score(df):
+     # score formula: sum of all per-dimension columns
+     final_score = df.drop('name', axis=1).sum(axis=1)
+     # place the total-score column second, right after the model name
+     df.insert(1, 'Final Score', final_score)
+     return df
+
+ def get_baseline_df():
+     submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
+     submission_repo.git_pull()
+     df = pd.read_csv(CSV_DIR)
+     # calculate the final score and sort by it
+     df = get_final_score(df)
+     df = df.sort_values(by="Final Score", ascending=False)
+     present_columns = MODEL_INFO + checkbox_group.value
+     df = df[present_columns]
+     return df
+
+ def get_all_df():
+     submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
+     submission_repo.git_pull()
+     df = pd.read_csv(CSV_DIR)
+     df = get_final_score(df)
+     df = df.sort_values(by="Final Score", ascending=False)
+     return df
+
+ def on_filter_model_size_method_change(selected_columns):
+     updated_data = get_all_df()
+
+     # keep the selected columns in TASK_INFO order
+     selected_columns = [item for item in TASK_INFO if item in selected_columns]
+     present_columns = MODEL_INFO + selected_columns
+     updated_data = updated_data[present_columns]
+     updated_data = updated_data.sort_values(by=selected_columns[0], ascending=False)
+     updated_headers = present_columns
+     update_datatype = [DATA_TITILE_TYPE[COLUMN_NAMES.index(x)] for x in updated_headers]
+     filter_component = gr.components.Dataframe(
+         value=updated_data,
+         headers=updated_headers,
+         type="pandas",
+         datatype=update_datatype,
+         interactive=False,
+         visible=True,
+     )
+
+     return filter_component
+
+ block = gr.Blocks()
+
+
+ with block:
+     gr.Markdown(
+         LEADERBORAD_INTRODUCTION
+     )
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("📊 VBench", elem_id="vbench-tab-table", id=1):
+             with gr.Row():
+                 with gr.Accordion("Citation", open=False):
+                     citation_button = gr.Textbox(
+                         value=CITATION_BUTTON_TEXT,
+                         label=CITATION_BUTTON_LABEL,
+                         elem_id="citation-button",
+                         lines=10,
+                     )
+
+             gr.Markdown(
+                 TABLE_INTRODUCTION
+             )
+
+             # selection for column part:
+             checkbox_group = gr.CheckboxGroup(
+                 choices=TASK_INFO,
+                 value=AVG_INFO,
+                 label="Evaluation Dimension",
+                 interactive=True,
+             )
+
+             data_component = gr.components.Dataframe(
+                 value=get_baseline_df,
+                 headers=COLUMN_NAMES,
+                 type="pandas",
+                 datatype=DATA_TITILE_TYPE,
+                 interactive=False,
+                 visible=True,
+             )
+
+             checkbox_group.change(fn=on_filter_model_size_method_change, inputs=[checkbox_group], outputs=data_component)
+
+         # table 2
+         with gr.TabItem("📝 About", elem_id="mvbench-tab-table", id=2):
+             gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")
+
+         # table 3
+         with gr.TabItem("🚀 Submit here! ", elem_id="mvbench-tab-table", id=3):
+             gr.Markdown(LEADERBORAD_INTRODUCTION, elem_classes="markdown-text")
+
+             with gr.Row():
+                 gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
+
+             with gr.Row():
+                 gr.Markdown("# ✉️✨ Submit your model evaluation json file here!", elem_classes="markdown-text")
+
+             with gr.Row():
+                 with gr.Column():
+                     model_name_textbox = gr.Textbox(
+                         label="Model name", placeholder="LaVie"
+                     )
+                     revision_name_textbox = gr.Textbox(
+                         label="Revision Model Name", placeholder="LaVie"
+                     )
+
+                 with gr.Column():
+                     model_link = gr.Textbox(
+                         label="Model Link", placeholder="https://huggingface.co/decapoda-research/llama-7b-hf"
+                     )
+
+             with gr.Column():
+                 input_file = gr.components.File(label="Click to Upload a json File", file_count="single", type='binary')
+                 submit_button = gr.Button("Submit Eval")
+
+                 submission_result = gr.Markdown()
+                 submit_button.click(
+                     add_new_eval,
+                     inputs=[
+                         input_file,
+                         model_name_textbox,
+                         revision_name_textbox,
+                         model_link,
+                     ],
+                 )
+
+     def refresh_data():
+         value1 = get_baseline_df()
+         return value1
+
+     with gr.Row():
+         data_run = gr.Button("Refresh")
+         data_run.click(on_filter_model_size_method_change, inputs=[checkbox_group], outputs=data_component)
+
+ block.launch()
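For context on what the submission flow above expects, here is a minimal sketch of a `result.json` file that `add_new_eval` could parse. The layout is inferred from the code (each `TASK_INFO` dimension maps to a list whose first element is read via `upload_data[key][0]`); the exact file produced by the VBench toolkit is not shown in this commit, so the keys included and the scores below are illustrative assumptions.

```python
import json

# Hypothetical submission file, shaped the way add_new_eval reads it:
# each evaluation dimension from TASK_INFO maps to a list whose first
# element is the score; dimensions missing from the file default to 0.
example_submission = {
    "subject consistency": [0.95],
    "background consistency": [0.96],
    "motion smoothness": [0.97],
    "dynamic degree": [0.50],
    # ...the remaining VBench dimensions would follow the same pattern
}

with open("result.json", "w") as f:
    json.dump(example_submission, f, indent=2)
```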
constants.py ADDED
@@ -0,0 +1,55 @@
+ import os
+ # constants for the VBench leaderboard app
+ MODEL_INFO = ["name"]
+ TASK_INFO = ["Final Score",
+              "subject consistency",
+              "background consistency",
+              "temporal flickering",
+              "motion smoothness",
+              "dynamic degree",
+              "aesthetic quality",
+              "imaging quality",
+              "object class",
+              "multiple objects",
+              "human action",
+              "color",
+              "spatial relationship",
+              "scene",
+              "appearance style",
+              "temporal style",
+              "overall consistency"]
+
+ AVG_INFO = ["Final Score"]
+
+ DATA_TITILE_TYPE = ['markdown', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number']
+
+ SUBMISSION_NAME = "vbench_leaderboard_submission"
+ SUBMISSION_URL = os.path.join("https://huggingface.co/datasets/Vchitect/", SUBMISSION_NAME)
+ CSV_DIR = "./vbench_leaderboard_submission/results.csv"
+
+ COLUMN_NAMES = MODEL_INFO + TASK_INFO
+
+ LEADERBORAD_INTRODUCTION = """# VBench Leaderboard
+
+ 🏆 Welcome to the leaderboard of VBench! 🎦
+
+ Please follow the instructions in [VBench](https://github.com/Vchitect/VBench?tab=readme-ov-file#usage) to generate the `result.json` file and upload it here. After clicking the `Submit Eval` button, click the `Refresh` button to see the updated leaderboard.
+ """
+
+ SUBMIT_INTRODUCTION = """# Submit to the VBench Benchmark
+ """
+
+ TABLE_INTRODUCTION = """
+ """
+
+ LEADERBORAD_INFO = """
+ VBench is a comprehensive benchmark suite for video generative models. We design a comprehensive and hierarchical Evaluation Dimension Suite to decompose "video generation quality" into multiple well-defined dimensions that facilitate fine-grained and objective evaluation. For each dimension and each content category, we carefully design a Prompt Suite as test cases and sample Generated Videos from a set of video generation models. For each evaluation dimension, we specifically design an Evaluation Method Suite, which uses a carefully crafted method or designated pipeline for automatic objective evaluation. We also conduct Human Preference Annotation for the generated videos in each dimension, and show that VBench evaluation results are well aligned with human perceptions. VBench can provide valuable insights from multiple perspectives.
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""@article{huang2023vbench,
+ title={{VBench}: Comprehensive Benchmark Suite for Video Generative Models},
+ author={Huang, Ziqi and He, Yinan and Yu, Jiashuo and Zhang, Fan and Si, Chenyang and Jiang, Yuming and Zhang, Yuanhan and Wu, Tianxing and Jin, Qingyang and Chanpaisit, Nattapol and Wang, Yaohui and Chen, Xinyuan and Wang, Limin and Lin, Dahua and Qiao, Yu and Liu, Ziwei},
+ journal={arXiv preprint arXiv:2311.17982},
+ year={2023}
+ }"""
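To see how these constants feed the leaderboard table, here is a small sketch of the `Final Score` computation that `get_final_score` in `app.py` performs, run on a made-up two-row DataFrame. The model names and scores are invented for illustration and only a couple of the `TASK_INFO` columns are included; the real `results.csv` in the submission repo may have a different layout.

```python
import pandas as pd

# Invented rows using a subset of the per-dimension columns from TASK_INFO.
df = pd.DataFrame({
    "name": ["[LaVie](https://example.com)", "another-model"],
    "subject consistency": [0.95, 0.90],
    "aesthetic quality": [0.60, 0.70],
})

# Same logic as get_final_score: sum every non-name column and insert
# the total as a second column, right after the model name.
final_score = df.drop("name", axis=1).sum(axis=1)
df.insert(1, "Final Score", final_score)
print(df)
```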
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio==3.23.0
+ pandas==2.0.0