Update app.py

app.py CHANGED
@@ -34,7 +34,11 @@ hf_api = HfApi(endpoint="https://huggingface.co", token=hf_token)
 hf_api = HfApi(token=hf_token)
 repo_id = "Vchitect/VBench_sampled_video"
 
-model_names=[...]
+model_names=[]
+for i in hf_api.list_repo_tree('Vchitect/VBench_sampled_video',repo_type='dataset'):
+    model_name = i.path
+    if '.git' not in model_name and '.md' not in model_name:
+        model_names.append(model_name)
 
 with open("videos_by_dimension.json") as f:
     dimension = json.load(f)['videos_by_dimension']
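The hunk above replaces a hardcoded model list with a listing of the dataset repo's top-level entries. A minimal standalone sketch of that logic, assuming a recent huggingface_hub (with list_repo_tree) and read access to the public dataset; the HF_TOKEN environment variable here is an assumption, not necessarily how the Space stores its token:

import os
from huggingface_hub import HfApi

# List the top-level files/folders of the dataset repo and keep only model folders.
hf_api = HfApi(token=os.environ.get("HF_TOKEN"))  # token is optional for a public dataset
model_names = []
for entry in hf_api.list_repo_tree("Vchitect/VBench_sampled_video", repo_type="dataset"):
    name = entry.path  # path of the top-level entry, e.g. a model folder name
    if ".git" not in name and ".md" not in name:
        model_names.append(name)
print(model_names)

Each entry is a RepoFile or RepoFolder object; filtering on '.git' and '.md' drops repo metadata such as README.md while keeping the per-model folders.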
@@ -44,20 +48,7 @@ with open("videos_by_dimension.json") as f:
 
 types = ['appearance_style', 'color', 'temporal_style', 'spatial_relationship', 'temporal_flickering', 'scene', 'multiple_objects', 'object_class', 'human_action', 'overall_consistency', 'subject_consistency']
 
-def get_random_video():
-    # randomly pick an index
-    random_index = random.randint(0, len(types) - 1)
-    type = types[random_index]
-    # randomly pick a prompt
-    random_index = random.randint(0, len(dimension[type]) - 1)
-    prompt = dimension[type][random_index]
-    prompt = os.path.basename(prompt)
-    # randomly pick a model
-    random_index = random.randint(0, len(model_names) - 1)
-    model_name = model_names[random_index]
-
-
-
+def get_video_path_local(model_name, type, prompt):
     video_path_subfolder = os.path.join(model_name, type)
     try:
         return hf_api.hf_hub_download(
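This hunk cuts off inside get_video_path_local at the opening of the hf_hub_download call; the remainder of the helper is unchanged and therefore not shown. For orientation, a hedged sketch of what the complete helper plausibly looks like; the keyword arguments below are assumptions inferred from the surrounding code (repo_id, the model/type subfolder, the prompt-derived filename), not the exact call in app.py:

def get_video_path_local(model_name, type, prompt):
    # Download a single sampled video from the dataset repo and return its local path.
    video_path_subfolder = os.path.join(model_name, type)
    try:
        return hf_api.hf_hub_download(
            repo_id=repo_id,                 # "Vchitect/VBench_sampled_video"
            filename=prompt,                 # video file name taken from videos_by_dimension.json
            subfolder=video_path_subfolder,  # "<model_name>/<dimension type>"
            repo_type="dataset",             # assumption: the videos live in a dataset repo
        )
    except Exception as e:
        print(e)
        print('error:', model_name, type, prompt)
        return None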
@@ -83,18 +74,41 @@ def get_random_video():
         print(e)
         # video_path = dataset['train'][random_index]['video_path']
         print('error:', model_name, type, prompt)
-        return
+        return None
 
-[old lines 88-99 removed; their contents are truncated in this diff view]
-interface.launch()
+def get_random_video():
+    # randomly pick an index
+    random_index = random.randint(0, len(types) - 1)
+    type = types[random_index]
+    # randomly pick a prompt
+    random_index = random.randint(0, len(dimension[type]) - 1)
+    prompt = dimension[type][random_index]
+    prompt = os.path.basename(prompt)
+    # randomly pick two different model names
+    random_model_names = random.sample(model_names, 2)
+    model_name_1, model_name_2 = random_model_names
+    video_path1 = get_video_path_local(model_name_1, type, prompt)
+    video_path2 = get_video_path_local(model_name_2, type, prompt)
+    return video_path1, video_path2, model_name_1, model_name_2, type, prompt
+
+
+
+with gr.Blocks() as interface:
+    with gr.Row():
+        with gr.Column():
+            model_name_1_output = gr.Textbox(label="Model Name 1")
+            video_output_1 = gr.Video(label="Video 1")
+        with gr.Column():
+            model_name_2_output = gr.Textbox(label="Model Name 2")
+            video_output_2 = gr.Video(label="Video 2")
+
+    type_output = gr.Textbox(label="Type")
+    prompt_output = gr.Textbox(label="Prompt")
 
+    display_button = gr.Button("Display Videos")
+    display_button.click(
+        fn=get_random_video,
+        outputs=[video_output_1, video_output_2, model_name_1_output, model_name_2_output, type_output, prompt_output]
+    )
 
+interface.launch()
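Taken together, get_random_video picks a random dimension and prompt, samples two distinct models with random.sample, downloads one video per model, and the click handler maps the six return values positionally onto the six output components. A hypothetical smoke test, assuming app.py's module-level setup has already run, that exercises the callback without launching the Gradio UI:

# Call the callback directly and inspect its outputs.
v1, v2, m1, m2, t, p = get_random_video()
print("models:", m1, "vs", m2)
print("dimension:", t, "| prompt:", p)
print("local files:", v1, v2)  # None means the download failed for that model

Because Gradio matches return values to the outputs list by position, the component order passed to display_button.click must mirror the tuple returned by get_random_video.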