Add Video

- app.py +10 -2
- sum_prompt.txt → coco_prompt.txt +0 -0
- get_webvid_prompt.py +32 -0
- model/matchmaker.py +1 -1
- model/matchmaker_video.py +103 -0
- model/model_manager.py +31 -2
- model/model_registry.py +23 -1
- model/models/__init__.py +12 -5
- model/models/huggingface_models.py +26 -3
- model/models/other_api_models.py +35 -7
- model/models/replicate_api_models.py +210 -88
- model/other_models.py +0 -0
- serve/Ksort.py +81 -36
- serve/gradio_web.py +17 -9
- serve/gradio_web_video.py +611 -0
- serve/leaderboard.py +66 -79
- serve/update_skill_video.py +107 -0
- serve/upload.py +32 -17
- serve/utils.py +34 -1
- serve/vote_utils.py +44 -2
- sorted_score_list.json +77 -76
- sorted_score_list_video.json +49 -0
- webvid_prompt.txt +100 -0
app.py
CHANGED
@@ -1,7 +1,8 @@
 import gradio as gr
 import os
 from serve.gradio_web import *
-from serve.
+from serve.gradio_web_video import build_side_by_side_video_ui_anony
+from serve.leaderboard import build_leaderboard_tab, build_leaderboard_video_tab
 from model.model_manager import ModelManager
 from pathlib import Path
 from serve.constants import SERVER_PORT, ROOT_PATH, ELO_RESULTS_DIR
@@ -26,7 +27,14 @@ def build_combine_demo(models, elo_results_file, leaderboard_table_file):
 
         with gr.Tab("Generation Arena (battle)", id=1):
             build_side_by_side_ui_anony(models)
-
+    with gr.Tab("Video Generation", id=1):
+        with gr.Tabs() as tabs_ig:
+            with gr.Tab("Generation Leaderboard", id=0):
+                # build_leaderboard_tab(elo_results_file['t2i_generation'], leaderboard_table_file['t2i_generation'])
+                build_leaderboard_video_tab()
+
+            with gr.Tab("Generation Arena (battle)", id=1):
+                build_side_by_side_video_ui_anony(models)
     return demo
 
 
sum_prompt.txt → coco_prompt.txt
RENAMED
File without changes
get_webvid_prompt.py
ADDED
@@ -0,0 +1,32 @@
+from datasets import load_dataset
+import pandas as pd
+import re
+# # Load the WebVid dataset
+# dataset = load_dataset('webvid', 'webvid-10m', split='train')
+# from datasets import load_dataset
+
+ds = load_dataset("TempoFunk/webvid-10M", cache_dir="/mnt/data/lizhikai/webvid/")
+v = ds['validation']['name']
+# Reasonable bounds on caption length
+MIN_LENGTH = 30
+MAX_LENGTH = 300
+pattern = re.compile(r'^[a-zA-Z\s]+$')
+
+# Filter out empty captions and captions that are too long or too short
+v = [s for s in v if len(s) >= MIN_LENGTH and len(s) <= MAX_LENGTH and pattern.match(s)]
+
+# Path of the output file
+file_path = 'webvid_prompt.txt'
+
+# Open the file in write mode
+with open(file_path, 'w', encoding='utf-8') as file:
+    # Write every string in the list to the file
+    for item in v:
+        if '\n' in item:
+            continue
+        else:
+            file.write(item + '\n')
+
+print("The string list has been saved to the file.")
+
+
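Note: the script above keeps only plain ASCII captions of moderate length as prompts. A tiny illustrative check of the same filter, using made-up captions rather than WebVid data:

    import re

    pattern = re.compile(r'^[a-zA-Z\s]+$')
    captions = ["Aerial view of a winding river through a green valley",
                "too short",
                "Sunset over the beach 4K #travel"]
    # Same rule as in get_webvid_prompt.py: ASCII letters/whitespace only, 30-300 characters
    kept = [c for c in captions if 30 <= len(c) <= 300 and pattern.match(c)]
    print(kept)  # only the first caption passes both the length and character checks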
model/matchmaker.py
CHANGED
@@ -79,7 +79,7 @@ def load_json_via_sftp():
 class RunningPivot(object):
     running_pivot = []
 
-not_run = []
+not_run = [20,21,22]
 
 def matchmaker(num_players, k_group=4):
     trueskill_env = TrueSkill()
model/matchmaker_video.py
ADDED
@@ -0,0 +1,103 @@
+import numpy as np
+import json
+from trueskill import TrueSkill
+import paramiko
+import io, os
+import sys
+sys.path.append('../')
+from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_VIDEO_SKILL
+trueskill_env = TrueSkill()
+
+ssh_matchmaker_client = None
+sftp_matchmaker_client = None
+
+def create_ssh_matchmaker_client(server, port, user, password):
+    global ssh_matchmaker_client, sftp_matchmaker_client
+    ssh_matchmaker_client = paramiko.SSHClient()
+    ssh_matchmaker_client.load_system_host_keys()
+    ssh_matchmaker_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh_matchmaker_client.connect(server, port, user, password)
+
+    transport = ssh_matchmaker_client.get_transport()
+    transport.set_keepalive(60)
+
+    sftp_matchmaker_client = ssh_matchmaker_client.open_sftp()
+def is_connected():
+    global ssh_matchmaker_client, sftp_matchmaker_client
+    if ssh_matchmaker_client is None or sftp_matchmaker_client is None:
+        return False
+    # Check whether the SSH connection is still alive
+    if not ssh_matchmaker_client.get_transport().is_active():
+        return False
+    # Check whether the SFTP connection is still alive
+    try:
+        sftp_matchmaker_client.listdir('.')  # try listing the remote directory
+    except Exception as e:
+        print(f"Error checking SFTP connection: {e}")
+        return False
+    return True
+def ucb_score(trueskill_diff, t, n):
+    exploration_term = np.sqrt((2 * np.log(t + 1e-5)) / (n + 1e-5))
+    ucb = -trueskill_diff + 1.0 * exploration_term
+    return ucb
+
+def update_trueskill(ratings, ranks):
+    new_ratings = trueskill_env.rate(ratings, ranks)
+    return new_ratings
+
+def serialize_rating(rating):
+    return {'mu': rating.mu, 'sigma': rating.sigma}
+
+def deserialize_rating(rating_dict):
+    return trueskill_env.Rating(mu=rating_dict['mu'], sigma=rating_dict['sigma'])
+
+def save_json_via_sftp(ratings, comparison_counts, total_comparisons):
+    global sftp_matchmaker_client
+    if not is_connected():
+        create_ssh_matchmaker_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
+    data = {
+        'ratings': [serialize_rating(r) for r in ratings],
+        'comparison_counts': comparison_counts.tolist(),
+        'total_comparisons': total_comparisons
+    }
+    json_data = json.dumps(data)
+    with sftp_matchmaker_client.open(SSH_VIDEO_SKILL, 'w') as f:
+        f.write(json_data)
+
+def load_json_via_sftp():
+    global sftp_matchmaker_client
+    if not is_connected():
+        create_ssh_matchmaker_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
+    with sftp_matchmaker_client.open(SSH_VIDEO_SKILL, 'r') as f:
+        data = json.load(f)
+    ratings = [deserialize_rating(r) for r in data['ratings']]
+    comparison_counts = np.array(data['comparison_counts'])
+    total_comparisons = data['total_comparisons']
+    return ratings, comparison_counts, total_comparisons
+
+
+def matchmaker_video(num_players, k_group=4):
+    trueskill_env = TrueSkill()
+
+    ratings, comparison_counts, total_comparisons = load_json_via_sftp()
+
+    ratings = ratings[:num_players]
+    comparison_counts = comparison_counts[:num_players, :num_players]
+
+    selected_player = np.argmin(comparison_counts.sum(axis=1))
+
+    selected_trueskill_score = trueskill_env.expose(ratings[selected_player])
+    trueskill_scores = np.array([trueskill_env.expose(p) for p in ratings])
+    trueskill_diff = np.abs(trueskill_scores - selected_trueskill_score)
+    n = comparison_counts[selected_player]
+    ucb_scores = ucb_score(trueskill_diff, total_comparisons, n)
+
+    # Exclude self, select opponent with highest UCB score
+    ucb_scores[selected_player] = -float('inf')  # minimize the score for the selected player to exclude it
+    opponents = np.argsort(ucb_scores)[-k_group + 1:].tolist()
+
+    # Group players
+    model_ids = [selected_player] + opponents
+
+    return model_ids
+
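Note: matchmaker_video pairs the least-compared model with the opponents whose UCB scores are highest. Below is a minimal, self-contained sketch of just that selection step, with the SFTP-backed skill file replaced by in-memory data and randomly seeded comparison counts (everything here is illustrative, not part of the commit):

    import numpy as np
    from trueskill import TrueSkill

    env = TrueSkill()
    num_players, k_group = 6, 4
    ratings = [env.create_rating() for _ in range(num_players)]        # default mu=25, sigma=25/3
    rng = np.random.default_rng(0)
    comparison_counts = rng.integers(0, 5, size=(num_players, num_players)).astype(float)
    total_comparisons = int(comparison_counts.sum())

    selected = int(np.argmin(comparison_counts.sum(axis=1)))           # least-compared model
    scores = np.array([env.expose(r) for r in ratings])
    diff = np.abs(scores - scores[selected])
    n = comparison_counts[selected]
    ucb = -diff + np.sqrt((2 * np.log(total_comparisons + 1e-5)) / (n + 1e-5))
    ucb[selected] = -np.inf                                            # exclude self
    group = [selected] + np.argsort(ucb)[-(k_group - 1):].tolist()
    print(group)                                                       # indices of the 4 models to battle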
model/model_manager.py
CHANGED
@@ -1,13 +1,13 @@
 import concurrent.futures
 import random
 import gradio as gr
-import requests
+import requests, os
 import io, base64, json
 import spaces
 import torch
 from PIL import Image
 from openai import OpenAI
-from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, load_pipeline
+from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
 from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum
 from serve.upload import get_random_mscoco_prompt
 
@@ -15,6 +15,7 @@ class ModelManager:
     def __init__(self):
         self.model_ig_list = IMAGE_GENERATION_MODELS
         self.model_ie_list = IMAGE_EDITION_MODELS
+        self.model_vg_list = VIDEO_GENERATION_MODELS
         self.loaded_models = {}
 
     def load_model_pipe(self, model_name):
@@ -92,6 +93,34 @@ class ModelManager:
         return results[0], results[1], results[2], results[3], \
             model_names[0], model_names[1], model_names[2], model_names[3]
 
+    def generate_video_ig_parallel_anony(self, prompt_num, model_A, model_B, model_C, model_D):
+        if model_A == "" and model_B == "" and model_C == "" and model_D == "":
+            # model_names = random.sample([model for model in self.model_vg_list], 4)
+
+            from .matchmaker_video import matchmaker_video
+            model_ids = matchmaker_video(num_players=len(self.model_vg_list))
+            print(model_ids)
+            model_names = [self.model_vg_list[i] for i in model_ids]
+            print(model_names)
+        else:
+            model_names = [model_A, model_B, model_C, model_D]
+
+        cache_dir = os.path.join("/mnt/data/lizhikai/ksort_video_cache/", f'cache_{prompt_num}')
+        results = []
+        for name in model_names:
+            model_source, model_name, model_type = name.split("_")
+            video_path = os.path.join(cache_dir, f'{model_name}.mp4')
+            print(video_path)
+            results.append(video_path)
+
+        # with concurrent.futures.ThreadPoolExecutor() as executor:
+        #     futures = [executor.submit(self.generate_image_ig, prompt, model) if model.startswith("huggingface")
+        #                else executor.submit(self.generate_image_ig_api, prompt, model) for model in model_names]
+        #     results = [future.result() for future in futures]
+
+        return results[0], results[1], results[2], results[3], \
+            model_names[0], model_names[1], model_names[2], model_names[3]
+
     def generate_image_ig_museum_parallel_anony(self, model_A, model_B, model_C, model_D):
         if model_A == "" and model_B == "" and model_C == "" and model_D == "":
             # model_names = random.sample([model for model in self.model_ig_list], 4)
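Note: unlike the image arena, the video arena does not call the generators live; it resolves each battle to pre-generated clips in a per-prompt cache directory. A short sketch of that path lookup (the cache root and prompt number below are illustrative, matching the layout used above):

    import os

    cache_root = "/mnt/data/lizhikai/ksort_video_cache/"
    prompt_num = 1
    model_names = ["replicate_LaVie_text2video", "replicate_OpenSora_text2video"]
    cache_dir = os.path.join(cache_root, f"cache_{prompt_num}")
    # name format is "<source>_<model>_<type>"; the middle token names the cached .mp4
    videos = [os.path.join(cache_dir, f"{name.split('_')[1]}.mp4") for name in model_names]
    print(videos)  # [...cache_1/LaVie.mp4, ...cache_1/OpenSora.mp4]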
model/model_registry.py
CHANGED
@@ -45,7 +45,29 @@ def get_model_description_md(model_list):
         ct += 1
     return model_description_md
 
-
+def get_video_model_description_md(model_list):
+    model_description_md = """
+| | | | | | |
+| ---- | ---- | ---- | ---- | ---- | ---- |
+"""
+    ct = 0
+    visited = set()
+    for i, name in enumerate(model_list):
+        model_source, model_name, model_type = name.split("_")
+        minfo = get_model_info(model_name)
+        if minfo.simple_name in visited:
+            continue
+        visited.add(minfo.simple_name)
+        # one_model_md = f"[{minfo.simple_name}]({minfo.link}): {minfo.description}"
+        one_model_md = f"{minfo.simple_name}"
+
+        if ct % 7 == 0:
+            model_description_md += "|"
+        model_description_md += f" {one_model_md} |"
+        if ct % 7 == 6:
+            model_description_md += "\n"
+        ct += 1
+    return model_description_md
 
 register_model_info(
     ["imagenhub_LCM_generation", "fal_LCM_text2image"],
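Note: get_video_model_description_md lays the video model names out seven per markdown table row. A toy run of the same row-building loop with made-up names, just to show the string it accumulates:

    names = [f"Model-{i}" for i in range(9)]   # placeholder names, not the arena registry
    md = "| | | | | | |\n| ---- | ---- | ---- | ---- | ---- | ---- |\n"
    for ct, name in enumerate(names):
        if ct % 7 == 0:
            md += "|"
        md += f" {name} |"
        if ct % 7 == 6:
            md += "\n"
    print(md)  # first seven names on one row, the remainder start a new row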
model/models/__init__.py
CHANGED
@@ -43,11 +43,18 @@ IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZe
                         'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition',
                         'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition',
                         'imagenhub_InfEdit_edition', 'imagenhub_CosXLEdit_edition']
-VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
-
-
-
-
+# VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
+#                            'fal_AnimateDiffTurbo_text2video',
+#                            'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
+#                            'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
+VIDEO_GENERATION_MODELS = ['replicate_Zeroscope-v2-xl_text2video',
+                           # 'replicate_Damo-Text-to-Video_text2video',
+                           'replicate_Animate-Diff_text2video',
+                           'replicate_OpenSora_text2video',
+                           'replicate_LaVie_text2video',
+                           'replicate_VideoCrafter2_text2video',
+                           'replicate_Stable-Video-Diffusion_text2video',
+                           ]
 
 def load_pipeline(model_name):
     """
model/models/huggingface_models.py
CHANGED
@@ -28,9 +28,32 @@ def load_huggingface_model(model_name, model_type):
 
 
 if __name__ == "__main__":
-    for name in ["SD-turbo", "SDXL-turbo"]: #"SD-turbo", "SDXL-turbo"
-
+    # for name in ["SD-turbo", "SDXL-turbo"]: #"SD-turbo", "SDXL-turbo"
+    #     pipe = load_huggingface_model(name, "text2image")
 
     # for name in ["IF-I-XL-v1.0"]:
     #     pipe = load_huggingface_model(name, 'text2image')
-    #     pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
+    # pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
+
+    prompt = 'draw a tiger'
+    pipe = load_huggingface_model('Stable-cascade', "text2image")
+    prior, decoder = pipe
+    prior.enable_model_cpu_offload()
+    prior_output = prior(
+        prompt=prompt,
+        height=512,
+        width=512,
+        negative_prompt='',
+        guidance_scale=4.0,
+        num_images_per_prompt=1,
+        num_inference_steps=20
+    )
+    decoder.enable_model_cpu_offload()
+    result = decoder(
+        image_embeddings=prior_output.image_embeddings.to(torch.float16),
+        prompt=prompt,
+        negative_prompt='',
+        guidance_scale=0.0,
+        output_type="pil",
+        num_inference_steps=10
+    ).images[0]
model/models/other_api_models.py
CHANGED
@@ -8,9 +8,9 @@ class OtherModel():
     def __init__(self, model_name, model_type):
         self.model_name = model_name
         self.model_type = model_type
-        self.
+        self.image_url = "https://www.xdai.online/mj/submit/imagine"
         self.key = os.environ.get('MIDJOURNEY_KEY')
-        self.
+        self.get_image_url = "https://www.xdai.online/mj/image/"
         self.repeat_num = 5
 
     def __call__(self, *args, **kwargs):
@@ -40,12 +40,12 @@ class OtherModel():
                 "Content-Type": "application/json"
             }
             while 1:
-                response = requests.post(self.
+                response = requests.post(self.image_url, data=json.dumps(data), headers=headers)
                 if response.status_code == 200:
                     print("Submit success!")
                     response_json = json.loads(response.content.decode('utf-8'))
                     img_id = response_json["result"]
-                    result_url = self.
+                    result_url = self.get_image_url + img_id
                     print(result_url)
                     self.repeat_num = 800
                     while 1:
@@ -70,6 +70,9 @@ class OtherModel():
                         if self.repeat_num == 0:
                             raise ValueError("API request failed.")
                         continue
+        if self.model_type == "text2video":
+            assert "prompt" in kwargs, "prompt is required for text2video model"
+
         else:
             raise ValueError("model_type must be text2image")
 def load_other_model(model_name, model_type):
@@ -77,9 +80,34 @@ def load_other_model(model_name, model_type):
 
 if __name__ == "__main__":
 
-    pipe = load_other_model("Midjourney-v5.0", "text2image")
-    result = pipe(prompt="a good girl")
-    print(result)
+    # pipe = load_other_model("Midjourney-v5.0", "text2image")
+    # result = pipe(prompt="a good girl")
+    # print(result)
 
+    import http.client
+    import json
+    key = os.environ.get('MIDJOURNEY_KEY')
+    prompt = "a good girl"
 
+    conn = http.client.HTTPSConnection("xdai.online")
+    payload = json.dumps({
+        "messages": [
+            {
+                "role": "user",
+                "content": "{}".format(prompt)
+            }
+        ],
+        "stream": True,
+        "model": "luma-video",
+        # "model": "pika-text-to-video",
+    })
+    headers = {
+        'Authorization': "Bearer {}".format(key),
+        'Content-Type': 'application/json'
+    }
+    conn.request("POST", "/v1/chat/completions", payload, headers)
+    res = conn.getresponse()
+    data = res.read()
+    info = data.decode("utf-8")
+    print(data.decode("utf-8"))
 
model/models/replicate_api_models.py
CHANGED
@@ -27,6 +27,14 @@ Replicate_MODEl_NAME_MAP = {
     "SSD-1b": "lucataco/ssd-1b:b19e3639452c59ce8295b82aba70a231404cb062f2eb580ea894b31e8ce5bbb6",
     "Open-Dalle-v1.1": "lucataco/open-dalle-v1.1:1c7d4c8dec39c7306df7794b28419078cb9d18b9213ab1c21fdc46a1deca0144",
     "Deepfloyd-IF": "andreasjansson/deepfloyd-if:fb84d659df149f4515c351e394d22222a94144aa1403870c36025c8b28846c8d",
+
+    "Zeroscope-v2-xl": "anotherjesse/zeroscope-v2-xl:9f747673945c62801b13b84701c783929c0ee784e4748ec062204894dda1a351",
+    # "Damo-Text-to-Video": "cjwbw/damo-text-to-video:1e205ea73084bd17a0a3b43396e49ba0d6bc2e754e9283b2df49fad2dcf95755",
+    "Animate-Diff": "lucataco/animate-diff:beecf59c4aee8d81bf04f0381033dfa10dc16e845b4ae00d281e2fa377e48a9f",
+    "OpenSora": "camenduru/open-sora:8099e5722ba3d5f408cd3e696e6df058137056268939337a3fbe3912e86e72ad",
+    "LaVie": "cjwbw/lavie:0bca850c4928b6c30052541fa002f24cbb4b677259c461dd041d271ba9d3c517",
+    "VideoCrafter2": "lucataco/video-crafter:7757c5775e962c618053e7df4343052a21075676d6234e8ede5fa67c9e43bce0",
+    "Stable-Video-Diffusion": "sunfjun/stable-video-diffusion:d68b6e09eedbac7a49e3d8644999d93579c386a083768235cabca88796d70d82",
 }
 
 class ReplicateModel():
@@ -36,20 +44,6 @@ class ReplicateModel():
         # os.environ['FAL_KEY'] = os.environ['FalAPI']
 
     def __call__(self, *args, **kwargs):
-        # def decode_data_url(data_url):
-        #     # Find the start of the Base64 encoded data
-        #     base64_start = data_url.find(",") + 1
-        #     if base64_start == 0:
-        #         raise ValueError("Invalid data URL provided")
-
-        #     # Extract the Base64 encoded data
-        #     base64_string = data_url[base64_start:]
-
-        #     # Decode the Base64 string
-        #     decoded_bytes = base64.b64decode(base64_string)
-
-        #     return decoded_bytes
-
         if self.model_type == "text2image":
             assert "prompt" in kwargs, "prompt is required for text2image model"
             output = replicate.run(
@@ -71,60 +65,101 @@ class ReplicateModel():
             print(result_url)
             response = requests.get(result_url)
             result = Image.open(io.BytesIO(response.content))
-            # fal_client.submit(
-            #     f"fal-ai/{FAL_MODEl_NAME_MAP[self.model_name]}",
-            #     arguments={
-            #         "prompt": kwargs["prompt"]
-            #     },
-            # )
-            # for event in handler.iter_events(with_logs=True):
-            #     if isinstance(event, fal_client.InProgress):
-            #         print('Request in progress')
-            #         print(event.logs)
-            # result = handler.get()
-            # print(result)
-            # result_url = result['images'][0]['url']
-            # if self.model_name in ["SDXLTurbo", "LCM(v1.5/XL)"]:
-            #     result_url = io.BytesIO(decode_data_url(result_url))
-            #     result = Image.open(result_url)
-            # else:
-            #     response = requests.get(result_url)
-            #     result = Image.open(io.BytesIO(response.content))
             return result
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        elif self.model_type == "text2video":
+            assert "prompt" in kwargs, "prompt is required for text2image model"
+            if self.model_name == "Zeroscope-v2-xl":
+                input = {
+                    "fps": 24,
+                    "width": 512,
+                    "height": 512,
+                    "prompt": kwargs["prompt"],
+                    "guidance_scale": 17.5,
+                    # "negative_prompt": "very blue, dust, noisy, washed out, ugly, distorted, broken",
+                    "num_frames": 48,
+                }
+            elif self.model_name == "Damo-Text-to-Video":
+                input={
+                    "fps": 8,
+                    "prompt": kwargs["prompt"],
+                    "num_frames": 16,
+                    "num_inference_steps": 50
+                }
+            elif self.model_name == "Animate-Diff":
+                input={
+                    "path": "toonyou_beta3.safetensors",
+                    "seed": 255224557,
+                    "steps": 25,
+                    "prompt": kwargs["prompt"],
+                    "n_prompt": "badhandv4, easynegative, ng_deepnegative_v1_75t, verybadimagenegative_v1.3, bad-artist, bad_prompt_version2-neg, teeth",
+                    "motion_module": "mm_sd_v14",
+                    "guidance_scale": 7.5
+                }
+            elif self.model_name == "OpenSora":
+                input={
+                    "seed": 1234,
+                    "prompt": kwargs["prompt"],
+                }
+            elif self.model_name == "LaVie":
+                input={
+                    "width": 512,
+                    "height": 512,
+                    "prompt": kwargs["prompt"],
+                    "quality": 9,
+                    "video_fps": 8,
+                    "interpolation": False,
+                    "sample_method": "ddpm",
+                    "guidance_scale": 7,
+                    "super_resolution": False,
+                    "num_inference_steps": 50
+                }
+            elif self.model_name == "VideoCrafter2":
+                input={
+                    "fps": 24,
+                    "seed": 64045,
+                    "steps": 40,
+                    "width": 512,
+                    "height": 512,
+                    "prompt": kwargs["prompt"],
+                }
+            elif self.model_name == "Stable-Video-Diffusion":
+                text2image_name = "SD-v2.1"
+                output = replicate.run(
+                    f"{Replicate_MODEl_NAME_MAP[text2image_name]}",
+                    input={
+                        "width": 512,
+                        "height": 512,
+                        "prompt": kwargs["prompt"]
+                    },
+                )
+                if isinstance(output, list):
+                    image_url = output[0]
+                else:
+                    image_url = output
+                print(image_url)
+
+                input={
+                    "cond_aug": 0.02,
+                    "decoding_t": 14,
+                    "input_image": "{}".format(image_url),
+                    "video_length": "14_frames_with_svd",
+                    "sizing_strategy": "maintain_aspect_ratio",
+                    "motion_bucket_id": 127,
+                    "frames_per_second": 6
+                }
+
+            output = replicate.run(
+                f"{Replicate_MODEl_NAME_MAP[self.model_name]}",
+                input=input,
+            )
+            if isinstance(output, list):
+                result_url = output[0]
+            else:
+                result_url = output
+            print(self.model_name)
+            print(result_url)
+            # response = requests.get(result_url)
+            # result = Image.open(io.BytesIO(response.content))
 
             # for event in handler.iter_events(with_logs=True):
             #     if isinstance(event, fal_client.InProgress):
@@ -136,6 +171,7 @@ class ReplicateModel():
             # print(result)
             # result_url = result['video']['url']
             # return result_url
+            return result_url
         else:
             raise ValueError("model_type must be text2image or image2image")
 
@@ -146,30 +182,116 @@ def load_replicate_model(model_name, model_type):
 if __name__ == "__main__":
     import replicate
     import time
-
-
-
-
-
-
-
-
-
-
-    for name, address in Replicate_MODEl_NAME_MAP.items():
-
-
-
-
-
-
-
-
-
-
+    import concurrent.futures
+    import os
+    import requests
+    # model_name = 'replicate_zeroscope-v2-xl_text2video'
+    # model_name = 'replicate_Damo-Text-to-Video_text2video'
+    # model_name = 'replicate_Animate-Diff_text2video'
+    # model_name = 'replicate_open-sora_text2video'
+    # model_name = 'replicate_lavie_text2video'
+    # model_name = 'replicate_video-crafter_text2video'
+    # model_name = 'replicate_stable-video-diffusion_text2video'
+    # model_source, model_name, model_type = model_name.split("_")
+    # pipe = load_replicate_model(model_name, model_type)
+    # prompt = "Clown fish swimming in a coral reef, beautiful, 8k, perfect, award winning, national geographic"
+    # result = pipe(prompt=prompt)
+
+    # url = "https://replicate.delivery/yhqm/ucuUNHBetmQVJiogeNiDpxJoccKxPvxzTXQPZO53azfHfwvMB/out.mp4"
+    # response = requests.get(url, stream=True)
+    # if response.status_code == 200:
+    #     file_path = os.path.join("/mnt/data/lizhikai/ksort_video_cache/", '1.mp4')
+    #     with open(file_path, 'wb') as file:
+    #         for chunk in response.iter_content(chunk_size=8192):
+    #             file.write(chunk)
+    #     print(f"Video {j} saved to {file_path}")
+
+    file_path = '/home/lizhikai/webvid_prompt100.txt'
+    str_list = []
+    with open(file_path, 'r', encoding='utf-8') as file:
+        for line in file:
+            str_list.append(line.strip())
+            if len(str_list) == 100:
+                break
+
+    def generate_image_ig_api(prompt, model_name):
+        model_source, model_name, model_type = model_name.split("_")
+        pipe = load_replicate_model(model_name, model_type)
+        result = pipe(prompt=prompt)
+        return result
+    model_names = ['replicate_Zeroscope-v2-xl_text2video',
+                   # 'replicate_Damo-Text-to-Video_text2video',
+                   'replicate_Animate-Diff_text2video',
+                   'replicate_OpenSora_text2video',
+                   'replicate_LaVie_text2video',
+                   'replicate_VideoCrafter2_text2video',
+                   'replicate_Stable-Video-Diffusion_text2video',
+                   ]
+    save_names = []
+    for name in model_names:
+        model_source, model_name, model_type = name.split("_")
+        save_names.append(model_name)
+
+    for i, prompt in enumerate(str_list):
+        print("save the {} prompt".format(i+1))
+        # if i+1 < 97:
+        #     continue
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            futures = [executor.submit(generate_image_ig_api, prompt, model) for model in model_names]
+            results = [future.result() for future in futures]
+
+        root_dir = '/mnt/data/lizhikai/ksort_video_cache/'
+        save_dir = os.path.join(root_dir, f'cache_{i+1}')
+        if not os.path.exists(save_dir):
+            os.makedirs(save_dir, exist_ok=True)
+        with open(os.path.join(save_dir, "prompt.txt"), 'w', encoding='utf-8') as file:
+            file.write(prompt)
+
+        # Download the videos and save them
+        repeat_num = 5
+        for j, url in enumerate(results):
+            while 1:
+                time.sleep(1)
+                response = requests.get(url, stream=True)
+                if response.status_code == 200:
+                    file_path = os.path.join(save_dir, f'{save_names[j]}.mp4')
+                    with open(file_path, 'wb') as file:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            file.write(chunk)
+                    print(f"Video {j} saved to {file_path}")
+                    break
+                else:
+                    repeat_num = repeat_num - 1
+                    if repeat_num == 0:
+                        print(f"Failed to save video {j}")
+                        # raise ValueError("Video request failed.")
+                    continue
+
+
+    # input = {
+    #     "seed": 1,
+    #     "width": 512,
+    #     "height": 512,
+    #     "grid_size": 1,
+    #     "prompt": "anime astronaut riding a horse on mars"
+    # }
 
 
+    # for name, address in Replicate_MODEl_NAME_MAP.items():
+    #     print('*'*50)
+    #     print(name)
 
+    #     t1 = time.time()
+    #     output = replicate.run(
+    #         address,
+    #         input=input
+    #     )
+    #     # for item in output:
+    #     #     print(item)
+    #     print(output)
 
 
+    #     t2 = time.time()
+    #     print(t2-t1)
+    #     print('*'*50)
 
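Note: a minimal usage sketch of the new text2video path driven through the wrapper above (assumes a valid REPLICATE_API_TOKEN in the environment and the repository root on the import path; the prompt is arbitrary):

    from model.models.replicate_api_models import load_replicate_model

    # "Zeroscope-v2-xl" / "text2video" come from splitting "replicate_Zeroscope-v2-xl_text2video"
    pipe = load_replicate_model("Zeroscope-v2-xl", "text2video")
    video_url = pipe(prompt="A corgi surfing a small wave at sunset")
    print(video_url)  # URL of the generated clip returned by replicate.run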
model/other_models.py
DELETED
File without changes
serve/Ksort.py
CHANGED
@@ -5,7 +5,7 @@ from .constants import KSORT_IMAGE_DIR
 from .constants import COLOR1, COLOR2, COLOR3, COLOR4
 from .vote_utils import save_any_image
 from .utils import disable_btn, enable_btn, invisible_btn
-from .upload import create_remote_directory, upload_ssh_all
+from .upload import create_remote_directory, upload_ssh_all, upload_ssh_data
 import json
 
 def reset_level(Top_btn):
@@ -149,7 +149,7 @@ def vote_submit(states, textbox, rank, request: gr.Request):
         }
         fout.write(json.dumps(data) + "\n")
 
-def vote_ssh_submit(states, textbox, rank):
+def vote_ssh_submit(states, textbox, rank, user_name, user_institution):
     conv_id = states[0].conv_id
     output_dir = create_remote_directory(conv_id)
     # upload_image(states, output_dir)
@@ -158,6 +158,7 @@ def vote_ssh_submit(states, textbox, rank):
         "models_name": [x.model_name for x in states],
         "img_rank": [x for x in rank],
         "prompt": [textbox],
+        "user_info": {"name": [user_name], "institution": [user_institution]},
     }
     output_file = os.path.join(output_dir, "result.json")
     # upload_informance(data, output_file)
@@ -166,12 +167,48 @@ def vote_ssh_submit(states, textbox, rank):
     from .update_skill import update_skill
     update_skill(rank, [x.model_name for x in states])
 
+def vote_video_ssh_submit(states, textbox, prompt_num, rank, user_name, user_institution):
+    conv_id = states[0].conv_id
+    output_dir = create_remote_directory(conv_id, video=True)
+
+    data = {
+        "models_name": [x.model_name for x in states],
+        "video_rank": [x for x in rank],
+        "prompt": [textbox],
+        "prompt_num": [prompt_num],
+        "video_path": [x.output for x in states],
+        "user_info": {"name": [user_name], "institution": [user_institution]},
+    }
+    output_file = os.path.join(output_dir, "result.json")
+
+    upload_ssh_data(data, output_file)
+
+    from .update_skill_video import update_skill_video
+    update_skill_video(rank, [x.model_name for x in states])
 
 def submit_response_igm(
-    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, rank, request: gr.Request
+    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, rank, user_name, user_institution, request: gr.Request
 ):
     # vote_submit([state0, state1, state2, state3], textbox, rank, request)
-    vote_ssh_submit([state0, state1, state2, state3], textbox, rank)
+    vote_ssh_submit([state0, state1, state2, state3], textbox, rank, user_name, user_institution)
+    if model_selector0 == "":
+        return (disable_btn,) * 6 + (
+            gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
+            gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True),
+            gr.Markdown(f"### Model C: {state2.model_name.split('_')[1]}", visible=True),
+            gr.Markdown(f"### Model D: {state3.model_name.split('_')[1]}", visible=True)
+        ) + (disable_btn,)
+    else:
+        return (disable_btn,) * 6 + (
+            gr.Markdown(state0.model_name, visible=True),
+            gr.Markdown(state1.model_name, visible=True),
+            gr.Markdown(state2.model_name, visible=True),
+            gr.Markdown(state3.model_name, visible=True)
+        ) + (disable_btn,)
+def submit_response_vg(
+    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_num, rank, user_name, user_institution, request: gr.Request
+):
+    vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_num, rank, user_name, user_institution)
     if model_selector0 == "":
         return (disable_btn,) * 6 + (
             gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
@@ -186,14 +223,35 @@ def submit_response_igm(
             gr.Markdown(state2.model_name, visible=True),
             gr.Markdown(state3.model_name, visible=True)
         ) + (disable_btn,)
-
 def submit_response_rank_igm(
-    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, rank, right_vote_text, request: gr.Request
+    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, rank, right_vote_text, user_name, user_institution, request: gr.Request
 ):
     print(rank)
     if right_vote_text == "right":
         # vote_submit([state0, state1, state2, state3], textbox, rank, request)
-        vote_ssh_submit([state0, state1, state2, state3], textbox, rank)
+        vote_ssh_submit([state0, state1, state2, state3], textbox, rank, user_name, user_institution)
+        if model_selector0 == "":
+            return (disable_btn,) * 16 + (disable_btn,) * 3 + ("wrong",) + (
+                gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
+                gr.Markdown(f"### Model B: {state1.model_name.split('_')[1]}", visible=True),
+                gr.Markdown(f"### Model C: {state2.model_name.split('_')[1]}", visible=True),
+                gr.Markdown(f"### Model D: {state3.model_name.split('_')[1]}", visible=True)
+            )
+        else:
+            return (disable_btn,) * 16 + (disable_btn,) * 3 + ("wrong",) + (
+                gr.Markdown(state0.model_name, visible=True),
+                gr.Markdown(state1.model_name, visible=True),
+                gr.Markdown(state2.model_name, visible=True),
+                gr.Markdown(state3.model_name, visible=True)
+            )
+    else:
+        return (enable_btn,) * 16 + (enable_btn,) * 3 + ("wrong",) + (gr.Markdown("", visible=False),) * 4
+def submit_response_rank_vg(
+    state0, state1, state2, state3, model_selector0, model_selector1, model_selector2, model_selector3, textbox, prompt_num, rank, right_vote_text, user_name, user_institution, request: gr.Request
+):
+    print(rank)
+    if right_vote_text == "right":
+        vote_video_ssh_submit([state0, state1, state2, state3], textbox, prompt_num, rank, user_name, user_institution)
         if model_selector0 == "":
             return (disable_btn,) * 16 + (disable_btn,) * 3 + ("wrong",) + (
                 gr.Markdown(f"### Model A: {state0.model_name.split('_')[1]}", visible=True),
@@ -260,37 +318,24 @@ def text_response_rank_igm(generate_ig0, generate_ig1, generate_ig2, generate_ig
 
     return chatbot + [rank_str] + ["right"] + [rank]
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+def text_response_rank_vg(vote_textbox):
+    rank_list = [char for char in vote_textbox if char.isdigit()]
+    rank = [None, None, None, None]
+    if len(rank_list) != 4:
+        return ["error rank"] + ["wrong"] + [rank]
+    for num in range(len(rank_list)):
+        if rank_list[num] in ['1', '2', '3', '4']:
+            continue
+        else:
+            return ["error rank"] + ["wrong"] + [rank]
+    rank_str = ""
+    for str_num in rank_list:
+        rank_str = rank_str + str_num
+        rank_str = rank_str + " "
+    rank = [int(x) for x in rank_list]
 
-
-    # if vote_level == 0:
-    #     text_color = (0, 255, 0, 200)
-    #     draw.text(text_position, Top1_text, font=font, fill=text_color)
-    # elif vote_level == 1:
-    #     text_color = (0, 255, 255, 200)
-    #     draw.text(text_position, Top2_text, font=font, fill=text_color)
-    # elif vote_level == 2:
-    #     text_color = (255, 0, 255, 200)
-    #     draw.text(text_position, Top3_text, font=font, fill=text_color)
-    # elif vote_level == 3:
-    #     text_color = (255, 0, 0, 200)
-    #     draw.text(text_position, Top4_text, font=font, fill=text_color)
+    return [rank_str] + ["right"] + [rank]
 
-    # combined = Image.alpha_composite(base_image, txt_layer)
-    # combined = combined.convert("RGB")
-    # return combined
 def add_foreground(image, vote_level, Top1_text, Top2_text, Top3_text, Top4_text):
     base_image = Image.fromarray(image).convert("RGBA")
     base_image = base_image.resize((512, 512), Image.ANTIALIAS)
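Note: a quick, hypothetical check of the new rank parsing (assumes the repository root is on the import path). text_response_rank_vg accepts any string containing exactly four digits from 1-4 and otherwise reports an error:

    from serve.Ksort import text_response_rank_vg

    ok = text_response_rank_vg("3 1 2 4")
    bad = text_response_rank_vg("3 1 2 5")   # '5' is outside the allowed 1-4 range
    print(ok[1], ok[2])    # right [3, 1, 2, 4]
    print(bad[1], bad[2])  # wrong [None, None, None, None]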
serve/gradio_web.py
CHANGED
@@ -48,7 +48,7 @@ def set_ssh():
 
 def build_side_by_side_ui_anony(models):
     notice_markdown = """
-# ⚔️ K-Sort-Arena ⚔️
+# ⚔️ K-Sort-Arena (Text-to-Image) ⚔️
 ## 📜 Rules
 - Input prompt to Four anonymous models (e.g., SD, SDXL, OpenJourney in Text-guided Image Generation Model) and vote on the outputs!
 - Two voting mode: Rand Mode and Best Mode. Please feel free to switch them!
@@ -220,7 +220,15 @@ def build_side_by_side_ui_anony(models):
             clear_btn = gr.Button(value="🎲 New Round", interactive=False)
             # regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
             # share_btn = gr.Button(value="📷 Share")
-
+        with gr.Blocks():
+            with gr.Row(elem_id="centered-text"):
+                user_info = gr.Markdown("User information", visible=True, elem_id="centered-text")
+                # with gr.Blocks():
+                #     name = gr.Markdown("Name", visible=True)
+                user_name = gr.Textbox(show_label=False, placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
+                # with gr.Blocks():
+                #     institution = gr.Markdown("Institution", visible=True)
+                user_institution = gr.Textbox(show_label=False, placeholder="👉 Enter your affiliation (optional)", elem_classes="custom-width")
         #gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
         dummy_img_output = gr.Image(width=512, visible=False)
         gr.Examples(
@@ -382,7 +390,7 @@ def build_side_by_side_ui_anony(models):
         outputs=[chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, vote_textbox, right_vote_text, rank]
     ).then(
         submit_response_rank_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rank, right_vote_text],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rank, right_vote_text, user_name, user_institution],
        outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
            vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
@@ -399,7 +407,7 @@ def build_side_by_side_ui_anony(models):
         outputs=[chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, vote_textbox, right_vote_text, rank]
     ).then(
         submit_response_rank_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rank, right_vote_text],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rank, right_vote_text, user_name, user_institution],
        outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
            vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
@@ -462,35 +470,35 @@ def build_side_by_side_ui_anony(models):
 
     leftvote_btn.click(
         submit_response_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankA],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankA, user_name, user_institution],
        outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
            vote_mode_btn]
     )
     left1vote_btn.click(
         submit_response_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankB],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankB, user_name, user_institution],
        outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
            vote_mode_btn]
     )
     rightvote_btn.click(
         submit_response_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankC],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankC, user_name, user_institution],
        outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
            vote_mode_btn]
     )
     right1vote_btn.click(
         submit_response_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankD],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankD, user_name, user_institution],
        outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
            vote_mode_btn]
     )
     tie_btn.click(
         submit_response_igm,
-        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankTie],
+        inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, rankTie, user_name, user_institution],
        outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
            model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
            vote_mode_btn]
     )
serve/gradio_web_video.py
ADDED
@@ -0,0 +1,611 @@
|
1 |
+
from .utils import *
|
2 |
+
from .vote_utils import (
|
3 |
+
upvote_last_response_ig as upvote_last_response,
|
4 |
+
downvote_last_response_ig as downvote_last_response,
|
5 |
+
flag_last_response_ig as flag_last_response,
|
6 |
+
leftvote_last_response_igm as leftvote_last_response,
|
7 |
+
left1vote_last_response_igm as left1vote_last_response,
|
8 |
+
rightvote_last_response_igm as rightvote_last_response,
|
9 |
+
right1vote_last_response_igm as right1vote_last_response,
|
10 |
+
tievote_last_response_igm as tievote_last_response,
|
11 |
+
bothbad_vote_last_response_igm as bothbad_vote_last_response,
|
12 |
+
share_click_igm as share_click,
|
13 |
+
generate_igv_annoy,
|
14 |
+
share_js
|
15 |
+
)
|
16 |
+
from .Ksort import (
|
17 |
+
add_foreground,
|
18 |
+
reset_level,
|
19 |
+
reset_rank,
|
20 |
+
revote_windows,
|
21 |
+
submit_response_vg,
|
22 |
+
submit_response_rank_vg,
|
23 |
+
reset_submit,
|
24 |
+
clear_rank,
|
25 |
+
reset_mode,
|
26 |
+
reset_chatbot,
|
27 |
+
reset_btn_rank,
|
28 |
+
reset_vote_text,
|
29 |
+
text_response_rank_vg,
|
30 |
+
check_textbox,
|
31 |
+
)
|
32 |
+
|
33 |
+
from functools import partial
|
34 |
+
from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD
|
35 |
+
from serve.upload import get_random_webvid_prompt, create_ssh_client
|
36 |
+
from serve.update_skill import create_ssh_skill_client
|
37 |
+
from model.matchmaker import create_ssh_matchmaker_client
|
38 |
+
|
39 |
+
def build_side_by_side_video_ui_anony(models):
|
40 |
+
notice_markdown = """
|
41 |
+
# ⚔️ K-Sort-Arena (Text-to-Video) ⚔️
|
42 |
+
## 📜 Rules
|
43 |
+
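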
- Input a prompt for four anonymous models (e.g., OpenSora, StableVideoDiffusion among text-guided video generation models) and vote on their outputs!
|
44 |
+
- Two voting modes: Rank Mode and Best Mode. Please feel free to switch between them!
|
45 |
+
- Users are encouraged to make evaluations based on subjective preferences. As an aid, they can refer to the following criteria: Alignment (50%) + Aesthetics (50%).
|
46 |
+
- Alignment includes: Video Content Matching (30%) + Inter-frame Consistency (20%);
|
47 |
+
- Aesthetics includes: Photorealism (30%) + Physical Correctness (10%) + Absence of Artifacts (10%).
|
48 |
+
|
49 |
+
## 👇 Generating now!
|
50 |
+
|
51 |
+
"""
|
52 |
+
|
53 |
+
model_list = models.model_vg_list
|
54 |
+
|
55 |
+
state0 = gr.State()
|
56 |
+
state1 = gr.State()
|
57 |
+
state2 = gr.State()
|
58 |
+
state3 = gr.State()
|
59 |
+
|
60 |
+
gen_func = partial(generate_igv_annoy, models.generate_video_ig_parallel_anony)
|
61 |
+
# gen_func_random = partial(generate_igm_annoy_museum, models.generate_image_ig_museum_parallel_anony)
|
62 |
+
|
63 |
+
gr.Markdown(notice_markdown, elem_id="notice_markdown")
|
64 |
+
|
65 |
+
with gr.Group(elem_id="share-region-anony"):
|
66 |
+
with gr.Accordion("🔍 Expand to see all Arena players", open=False):
|
67 |
+
model_description_md = get_video_model_description_md(model_list)
|
68 |
+
gr.Markdown(model_description_md, elem_id="model_description_markdown")
|
69 |
+
with gr.Row():
|
70 |
+
with gr.Column():
|
71 |
+
chatbot_left = gr.Video(width=512, label = "Model A", autoplay=True)
|
72 |
+
with gr.Column():
|
73 |
+
chatbot_left1 = gr.Video(width=512, label = "Model B", autoplay=True)
|
74 |
+
with gr.Column():
|
75 |
+
chatbot_right = gr.Video(width=512, label = "Model C", autoplay=True)
|
76 |
+
with gr.Column():
|
77 |
+
chatbot_right1 = gr.Video(width=512, label = "Model D", autoplay=True)
|
78 |
+
|
79 |
+
with gr.Row():
|
80 |
+
with gr.Column():
|
81 |
+
model_selector_left = gr.Markdown("", visible=False)
|
82 |
+
with gr.Column():
|
83 |
+
model_selector_left1 = gr.Markdown("", visible=False)
|
84 |
+
with gr.Column():
|
85 |
+
model_selector_right = gr.Markdown("", visible=False)
|
86 |
+
with gr.Column():
|
87 |
+
model_selector_right1 = gr.Markdown("", visible=False)
|
88 |
+
with gr.Row():
|
89 |
+
slow_warning = gr.Markdown("", elem_id="notice_markdown")
|
90 |
+
|
91 |
+
with gr.Row(elem_classes="row"):
|
92 |
+
with gr.Column(scale=1, min_width=10):
|
93 |
+
leftvote_btn = gr.Button(
|
94 |
+
value="A is Best", visible=False, interactive=False, elem_id="btncolor1", elem_classes="best-button"
|
95 |
+
)
|
96 |
+
with gr.Column(scale=1, min_width=10):
|
97 |
+
left1vote_btn = gr.Button(
|
98 |
+
value="B is Best", visible=False, interactive=False, elem_id="btncolor1", elem_classes="best-button"
|
99 |
+
)
|
100 |
+
with gr.Column(scale=1, min_width=10):
|
101 |
+
rightvote_btn = gr.Button(
|
102 |
+
value="C is Best", visible=False, interactive=False, elem_id="btncolor1", elem_classes="best-button"
|
103 |
+
)
|
104 |
+
with gr.Column(scale=1, min_width=10):
|
105 |
+
right1vote_btn = gr.Button(
|
106 |
+
value="D is Best", visible=False, interactive=False, elem_id="btncolor1", elem_classes="best-button"
|
107 |
+
)
|
108 |
+
with gr.Column(scale=1, min_width=10):
|
109 |
+
tie_btn = gr.Button(
|
110 |
+
value="🤝 Tie", visible=False, interactive=False, elem_id="btncolor2", elem_classes="best-button"
|
111 |
+
)
|
112 |
+
|
113 |
+
with gr.Row():
|
114 |
+
with gr.Blocks():
|
115 |
+
with gr.Row():
|
116 |
+
with gr.Column(scale=1, min_width=10):
|
117 |
+
A1_btn = gr.Button(
|
118 |
+
value="1", visible=False, interactive=False, elem_id="btncolor1", elem_classes="custom-button"
|
119 |
+
)
|
120 |
+
with gr.Column(scale=1, min_width=10):
|
121 |
+
A2_btn = gr.Button(
|
122 |
+
value="2", visible=False, interactive=False, elem_id="btncolor2", elem_classes="custom-button"
|
123 |
+
)
|
124 |
+
with gr.Column(scale=1, min_width=10):
|
125 |
+
A3_btn = gr.Button(
|
126 |
+
value="3", visible=False, interactive=False, elem_id="btncolor3", elem_classes="custom-button"
|
127 |
+
)
|
128 |
+
with gr.Column(scale=1, min_width=10):
|
129 |
+
A4_btn = gr.Button(
|
130 |
+
value="4", visible=False, interactive=False, elem_id="btncolor4", elem_classes="custom-button"
|
131 |
+
)
|
132 |
+
with gr.Blocks():
|
133 |
+
with gr.Row():
|
134 |
+
with gr.Column(scale=1, min_width=10):
|
135 |
+
B1_btn = gr.Button(
|
136 |
+
value="1", visible=False, interactive=False, elem_id="btncolor1", elem_classes="custom-button"
|
137 |
+
)
|
138 |
+
with gr.Column(scale=1, min_width=10):
|
139 |
+
B2_btn = gr.Button(
|
140 |
+
value="2", visible=False, interactive=False, elem_id="btncolor2", elem_classes="custom-button"
|
141 |
+
)
|
142 |
+
with gr.Column(scale=1, min_width=10):
|
143 |
+
B3_btn = gr.Button(
|
144 |
+
value="3", visible=False, interactive=False, elem_id="btncolor3", elem_classes="custom-button"
|
145 |
+
)
|
146 |
+
with gr.Column(scale=1, min_width=10):
|
147 |
+
B4_btn = gr.Button(
|
148 |
+
value="4", visible=False, interactive=False, elem_id="btncolor4", elem_classes="custom-button"
|
149 |
+
)
|
150 |
+
with gr.Blocks():
|
151 |
+
with gr.Row():
|
152 |
+
with gr.Column(scale=1, min_width=10):
|
153 |
+
C1_btn = gr.Button(
|
154 |
+
value="1", visible=False, interactive=False, elem_id="btncolor1", elem_classes="custom-button"
|
155 |
+
)
|
156 |
+
with gr.Column(scale=1, min_width=10):
|
157 |
+
C2_btn = gr.Button(
|
158 |
+
value="2", visible=False, interactive=False, elem_id="btncolor2", elem_classes="custom-button"
|
159 |
+
)
|
160 |
+
with gr.Column(scale=1, min_width=10):
|
161 |
+
C3_btn = gr.Button(
|
162 |
+
value="3", visible=False, interactive=False, elem_id="btncolor3", elem_classes="custom-button"
|
163 |
+
)
|
164 |
+
with gr.Column(scale=1, min_width=10):
|
165 |
+
C4_btn = gr.Button(
|
166 |
+
value="4", visible=False, interactive=False, elem_id="btncolor4", elem_classes="custom-button"
|
167 |
+
)
|
168 |
+
with gr.Blocks():
|
169 |
+
with gr.Row():
|
170 |
+
with gr.Column(scale=1, min_width=10):
|
171 |
+
D1_btn = gr.Button(
|
172 |
+
value="1", visible=False, interactive=False, elem_id="btncolor1", elem_classes="custom-button"
|
173 |
+
)
|
174 |
+
with gr.Column(scale=1, min_width=10):
|
175 |
+
D2_btn = gr.Button(
|
176 |
+
value="2", visible=False, interactive=False, elem_id="btncolor2", elem_classes="custom-button"
|
177 |
+
)
|
178 |
+
with gr.Column(scale=1, min_width=10):
|
179 |
+
D3_btn = gr.Button(
|
180 |
+
value="3", visible=False, interactive=False, elem_id="btncolor3", elem_classes="custom-button"
|
181 |
+
)
|
182 |
+
with gr.Column(scale=1, min_width=10):
|
183 |
+
D4_btn = gr.Button(
|
184 |
+
value="4", visible=False, interactive=False, elem_id="btncolor4", elem_classes="custom-button"
|
185 |
+
)
|
186 |
+
|
187 |
+
with gr.Row():
|
188 |
+
vote_textbox = gr.Textbox(
|
189 |
+
show_label=False,
|
190 |
+
placeholder="👉 Enter your rank",
|
191 |
+
container=True,
|
192 |
+
elem_id="input_box",
|
193 |
+
visible=False,
|
194 |
+
)
|
195 |
+
vote_submit_btn = gr.Button(value="Submit", visible=False, interactive=False, variant="primary", scale=0, elem_id="btnpink", elem_classes="submit-button")
|
196 |
+
vote_mode_btn = gr.Button(value="🔄 Mode", visible=False, interactive=False, variant="primary", scale=0, elem_id="btnpink", elem_classes="submit-button")
|
197 |
+
|
198 |
+
with gr.Row():
|
199 |
+
textbox = gr.Textbox(
|
200 |
+
show_label=False,
|
201 |
+
placeholder="👉 Generate the rand prompt and Send",
|
202 |
+
container=True,
|
203 |
+
elem_id="input_box",
|
204 |
+
interactive=False,
|
205 |
+
)
|
206 |
+
|
207 |
+
send_btn = gr.Button(value="Send", variant="primary", scale=0, elem_id="btnblue")
|
208 |
+
draw_btn = gr.Button(value="🎲 Random Prompt", variant="primary", scale=0, elem_id="btnblue")
|
209 |
+
with gr.Row():
|
210 |
+
clear_btn = gr.Button(value="🎲 New Round", interactive=False)
|
211 |
+
with gr.Blocks():
|
212 |
+
with gr.Row(elem_id="centered-text"): #
|
213 |
+
user_info = gr.Markdown("User information", visible=True, elem_id="centered-text") #, elem_id="centered-text"
|
214 |
+
# with gr.Blocks():
|
215 |
+
# name = gr.Markdown("Name", visible=True)
|
216 |
+
user_name = gr.Textbox(show_label=False,placeholder="👉 Enter your name (optional)", elem_classes="custom-width")
|
217 |
+
# with gr.Blocks():
|
218 |
+
# institution = gr.Markdown("Institution", visible=True)
|
219 |
+
user_institution = gr.Textbox(show_label=False,placeholder="👉 Enter your affiliation (optional)", elem_classes="custom-width")
|
220 |
+
|
221 |
+
prompt_num = gr.Number(value=0, visible=False, interactive=False) # records the index of the current prompt
|
222 |
+
#gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
223 |
+
example1_text = gr.Textbox(value="Chef with recipe book watching young cook preparing dish in the kitchen.", visible=False, interactive=False)
|
224 |
+
example2_text = gr.Textbox(value="A baker turns freshly baked loaves of sourdough bread.", visible=False, interactive=False)
|
225 |
+
example3_text = gr.Textbox(value="Dense fog glows orange and covers hills at dawn.", visible=False, interactive=False)
|
226 |
+
example4_text = gr.Textbox(value="Happy extended family on the sofa at home.", visible=False, interactive=False)
|
227 |
+
dummy_video_output = gr.Video(width=512, visible=False)
|
228 |
+
gr.Examples(
|
229 |
+
examples=[["Chef with recipe book watching young cook preparing dish in the kitchen.", os.path.join("./examples", "video_example1.mp4")],
|
230 |
+
["A baker turns freshly baked loaves of sourdough bread.", os.path.join("./examples", "video_example2.mp4")],
|
231 |
+
["Dense fog glows orange and covers hills at dawn.",os.path.join("./examples", "video_example3.mp4")],
|
232 |
+
["Happy extended family on the sofa at home.", os.path.join("./examples", "video_example4.mp4")]],
|
233 |
+
inputs = [textbox, dummy_video_output])
|
234 |
+
|
235 |
+
order_btn_list = [textbox, send_btn, draw_btn, clear_btn]
|
236 |
+
vote_order_list = [leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
237 |
+
A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
|
238 |
+
vote_textbox, vote_submit_btn, vote_mode_btn]
|
239 |
+
|
240 |
+
generate_ig0 = gr.Video(width=512, label = "generate A", visible=False, interactive=False)
|
241 |
+
generate_ig1 = gr.Video(width=512, label = "generate B", visible=False, interactive=False)
|
242 |
+
generate_ig2 = gr.Video(width=512, label = "generate C", visible=False, interactive=False)
|
243 |
+
generate_ig3 = gr.Video(width=512, label = "generate D", visible=False, interactive=False)
|
244 |
+
dummy_left_model = gr.State("")
|
245 |
+
dummy_left1_model = gr.State("")
|
246 |
+
dummy_right_model = gr.State("")
|
247 |
+
dummy_right1_model = gr.State("")
|
248 |
+
|
249 |
+
ig_rank = [None, None, None, None]
|
250 |
+
bastA_rank = [0, 3, 3, 3]
|
251 |
+
bastB_rank = [3, 0, 3, 3]
|
252 |
+
bastC_rank = [3, 3, 0, 3]
|
253 |
+
bastD_rank = [3, 3, 3, 0]
|
254 |
+
tie_rank = [0, 0, 0, 0]
|
255 |
+
rank = gr.State(ig_rank)
|
256 |
+
rankA = gr.State(bastA_rank)
|
257 |
+
rankB = gr.State(bastB_rank)
|
258 |
+
rankC = gr.State(bastC_rank)
|
259 |
+
rankD = gr.State(bastD_rank)
|
260 |
+
rankTie = gr.State(tie_rank)
|
261 |
+
Top1_text = gr.Textbox(value="Top 1", visible=False, interactive=False)
|
262 |
+
Top2_text = gr.Textbox(value="Top 2", visible=False, interactive=False)
|
263 |
+
Top3_text = gr.Textbox(value="Top 3", visible=False, interactive=False)
|
264 |
+
Top4_text = gr.Textbox(value="Top 4", visible=False, interactive=False)
|
265 |
+
window1_text = gr.Textbox(value="Model A", visible=False, interactive=False)
|
266 |
+
window2_text = gr.Textbox(value="Model B", visible=False, interactive=False)
|
267 |
+
window3_text = gr.Textbox(value="Model C", visible=False, interactive=False)
|
268 |
+
window4_text = gr.Textbox(value="Model D", visible=False, interactive=False)
|
269 |
+
vote_level = gr.Number(value=0, visible=False, interactive=False) # record the rank level of the current selection
|
270 |
+
vote_mode = gr.Textbox(value="Rank", visible=False, interactive=False)
|
271 |
+
right_vote_text = gr.Textbox(value="wrong", visible=False, interactive=False)
|
272 |
+
|
273 |
+
send_btn.click(
|
274 |
+
disable_video_order_buttons,
|
275 |
+
inputs=[textbox, example1_text, example2_text, example3_text, example4_text],
|
276 |
+
outputs=[textbox, send_btn, draw_btn, clear_btn, prompt_num]
|
277 |
+
).then(
|
278 |
+
gen_func, # changed to a retrieval function rather than a generation function
|
279 |
+
inputs=[state0, state1, state2, state3, textbox, prompt_num, model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
|
280 |
+
outputs=[state0, state1, state2, state3, generate_ig0, generate_ig1, generate_ig2, generate_ig3, chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, \
|
281 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
|
282 |
+
api_name="send_btn_annony"
|
283 |
+
).then(
|
284 |
+
enable_vote_mode_buttons,
|
285 |
+
inputs=[vote_mode, textbox],
|
286 |
+
outputs=vote_order_list
|
287 |
+
)
|
288 |
+
|
289 |
+
draw_btn.click(
|
290 |
+
get_random_webvid_prompt,
|
291 |
+
inputs=None,
|
292 |
+
outputs=[prompt_num, textbox],
|
293 |
+
api_name="draw_btn_annony"
|
294 |
+
)
|
295 |
+
|
296 |
+
clear_btn.click(
|
297 |
+
clear_history_side_by_side_anony,
|
298 |
+
inputs=None,
|
299 |
+
outputs=[state0, state1, state2, state3, textbox, vote_textbox, chatbot_left, chatbot_left1, chatbot_right, chatbot_right1, \
|
300 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
|
301 |
+
api_name="clear_btn_annony"
|
302 |
+
).then(
|
303 |
+
enable_order_buttons,
|
304 |
+
inputs=None,
|
305 |
+
outputs=order_btn_list
|
306 |
+
).then(
|
307 |
+
clear_rank,
|
308 |
+
inputs=[rank, vote_level],
|
309 |
+
outputs=[rank, vote_level]
|
310 |
+
).then(
|
311 |
+
disable_vote_mode_buttons,
|
312 |
+
inputs=None,
|
313 |
+
outputs=vote_order_list
|
314 |
+
)
|
315 |
+
|
316 |
+
vote_mode_btn.click(
|
317 |
+
reset_chatbot,
|
318 |
+
inputs=[vote_mode, generate_ig0, generate_ig1, generate_ig2, generate_ig3],
|
319 |
+
outputs=[chatbot_left, chatbot_left1, chatbot_right, chatbot_right1]
|
320 |
+
).then(
|
321 |
+
reset_mode,
|
322 |
+
inputs=[vote_mode],
|
323 |
+
outputs=[leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
324 |
+
A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
|
325 |
+
vote_textbox, vote_submit_btn, vote_mode_btn, vote_mode]
|
326 |
+
)
|
327 |
+
|
328 |
+
vote_textbox.submit(
|
329 |
+
disable_vote,
|
330 |
+
inputs=None,
|
331 |
+
outputs=[vote_submit_btn, vote_mode_btn, \
|
332 |
+
A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn]
|
333 |
+
).then(
|
334 |
+
text_response_rank_vg,
|
335 |
+
inputs=[vote_textbox],
|
336 |
+
outputs=[vote_textbox, right_vote_text, rank]
|
337 |
+
).then(
|
338 |
+
submit_response_rank_vg,
|
339 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rank, right_vote_text, user_name, user_institution],
|
340 |
+
outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
|
341 |
+
vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
|
342 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
|
343 |
+
api_name="submit_btn_annony"
|
344 |
+
)
|
345 |
+
|
346 |
+
vote_submit_btn.click(
|
347 |
+
disable_vote,
|
348 |
+
inputs=None,
|
349 |
+
outputs=[vote_submit_btn, vote_mode_btn, \
|
350 |
+
A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn]
|
351 |
+
).then(
|
352 |
+
text_response_rank_vg,
|
353 |
+
inputs=[vote_textbox],
|
354 |
+
outputs=[vote_textbox, right_vote_text, rank]
|
355 |
+
).then(
|
356 |
+
submit_response_rank_vg,
|
357 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rank, right_vote_text, user_name, user_institution],
|
358 |
+
outputs=[A1_btn, A2_btn, A3_btn, A4_btn, B1_btn, B2_btn, B3_btn, B4_btn, C1_btn, C2_btn, C3_btn, C4_btn, D1_btn, D2_btn, D3_btn, D4_btn, \
|
359 |
+
vote_textbox, vote_submit_btn, vote_mode_btn, right_vote_text, \
|
360 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1],
|
361 |
+
api_name="submit_btn_annony"
|
362 |
+
)
|
363 |
+
|
364 |
+
|
365 |
+
leftvote_btn.click(
|
366 |
+
submit_response_vg,
|
367 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankA, user_name, user_institution],
|
368 |
+
outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
369 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
|
370 |
+
vote_mode_btn]
|
371 |
+
)
|
372 |
+
left1vote_btn.click(
|
373 |
+
submit_response_vg,
|
374 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankB, user_name, user_institution],
|
375 |
+
outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
376 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
|
377 |
+
vote_mode_btn]
|
378 |
+
)
|
379 |
+
rightvote_btn.click(
|
380 |
+
submit_response_vg,
|
381 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankC, user_name, user_institution],
|
382 |
+
outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
383 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
|
384 |
+
vote_mode_btn]
|
385 |
+
)
|
386 |
+
right1vote_btn.click(
|
387 |
+
submit_response_vg,
|
388 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankD, user_name, user_institution],
|
389 |
+
outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
390 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
|
391 |
+
vote_mode_btn]
|
392 |
+
)
|
393 |
+
tie_btn.click(
|
394 |
+
submit_response_vg,
|
395 |
+
inputs=[state0, state1, state2, state3, dummy_left_model, dummy_left1_model, dummy_right_model, dummy_right1_model, textbox, prompt_num, rankTie, user_name, user_institution],
|
396 |
+
outputs=[textbox, leftvote_btn, left1vote_btn, rightvote_btn, right1vote_btn, tie_btn, \
|
397 |
+
model_selector_left, model_selector_left1, model_selector_right, model_selector_right1, \
|
398 |
+
vote_mode_btn]
|
399 |
+
)
|
400 |
+
|
401 |
+
A1_btn.click(
|
402 |
+
reset_btn_rank,
|
403 |
+
inputs=[window1_text, rank, A1_btn, vote_level],
|
404 |
+
outputs=[rank, vote_level]
|
405 |
+
).then(
|
406 |
+
reset_submit,
|
407 |
+
inputs = [rank],
|
408 |
+
outputs = [vote_submit_btn]
|
409 |
+
).then(
|
410 |
+
reset_vote_text,
|
411 |
+
inputs = [rank],
|
412 |
+
outputs = [vote_textbox]
|
413 |
+
)
|
414 |
+
A2_btn.click(
|
415 |
+
reset_btn_rank,
|
416 |
+
inputs=[window1_text, rank, A2_btn, vote_level],
|
417 |
+
outputs=[rank, vote_level]
|
418 |
+
).then(
|
419 |
+
reset_submit,
|
420 |
+
inputs = [rank],
|
421 |
+
outputs = [vote_submit_btn]
|
422 |
+
).then(
|
423 |
+
reset_vote_text,
|
424 |
+
inputs = [rank],
|
425 |
+
outputs = [vote_textbox]
|
426 |
+
)
|
427 |
+
A3_btn.click(
|
428 |
+
reset_btn_rank,
|
429 |
+
inputs=[window1_text, rank, A3_btn, vote_level],
|
430 |
+
outputs=[rank, vote_level]
|
431 |
+
).then(
|
432 |
+
reset_submit,
|
433 |
+
inputs = [rank],
|
434 |
+
outputs = [vote_submit_btn]
|
435 |
+
).then(
|
436 |
+
reset_vote_text,
|
437 |
+
inputs = [rank],
|
438 |
+
outputs = [vote_textbox]
|
439 |
+
)
|
440 |
+
A4_btn.click(
|
441 |
+
reset_btn_rank,
|
442 |
+
inputs=[window1_text, rank, A4_btn, vote_level],
|
443 |
+
outputs=[rank, vote_level]
|
444 |
+
).then(
|
445 |
+
reset_submit,
|
446 |
+
inputs = [rank],
|
447 |
+
outputs = [vote_submit_btn]
|
448 |
+
).then(
|
449 |
+
reset_vote_text,
|
450 |
+
inputs = [rank],
|
451 |
+
outputs = [vote_textbox]
|
452 |
+
)
|
453 |
+
|
454 |
+
B1_btn.click(
|
455 |
+
reset_btn_rank,
|
456 |
+
inputs=[window2_text, rank, B1_btn, vote_level],
|
457 |
+
outputs=[rank, vote_level]
|
458 |
+
).then(
|
459 |
+
reset_submit,
|
460 |
+
inputs = [rank],
|
461 |
+
outputs = [vote_submit_btn]
|
462 |
+
).then(
|
463 |
+
reset_vote_text,
|
464 |
+
inputs = [rank],
|
465 |
+
outputs = [vote_textbox]
|
466 |
+
)
|
467 |
+
B2_btn.click(
|
468 |
+
reset_btn_rank,
|
469 |
+
inputs=[window2_text, rank, B2_btn, vote_level],
|
470 |
+
outputs=[rank, vote_level]
|
471 |
+
).then(
|
472 |
+
reset_submit,
|
473 |
+
inputs = [rank],
|
474 |
+
outputs = [vote_submit_btn]
|
475 |
+
).then(
|
476 |
+
reset_vote_text,
|
477 |
+
inputs = [rank],
|
478 |
+
outputs = [vote_textbox]
|
479 |
+
)
|
480 |
+
B3_btn.click(
|
481 |
+
reset_btn_rank,
|
482 |
+
inputs=[window2_text, rank, B3_btn, vote_level],
|
483 |
+
outputs=[rank, vote_level]
|
484 |
+
).then(
|
485 |
+
reset_submit,
|
486 |
+
inputs = [rank],
|
487 |
+
outputs = [vote_submit_btn]
|
488 |
+
).then(
|
489 |
+
reset_vote_text,
|
490 |
+
inputs = [rank],
|
491 |
+
outputs = [vote_textbox]
|
492 |
+
)
|
493 |
+
B4_btn.click(
|
494 |
+
reset_btn_rank,
|
495 |
+
inputs=[window2_text, rank, B4_btn, vote_level],
|
496 |
+
outputs=[rank, vote_level]
|
497 |
+
).then(
|
498 |
+
reset_submit,
|
499 |
+
inputs = [rank],
|
500 |
+
outputs = [vote_submit_btn]
|
501 |
+
).then(
|
502 |
+
reset_vote_text,
|
503 |
+
inputs = [rank],
|
504 |
+
outputs = [vote_textbox]
|
505 |
+
)
|
506 |
+
|
507 |
+
C1_btn.click(
|
508 |
+
reset_btn_rank,
|
509 |
+
inputs=[window3_text, rank, C1_btn, vote_level],
|
510 |
+
outputs=[rank, vote_level]
|
511 |
+
).then(
|
512 |
+
reset_submit,
|
513 |
+
inputs = [rank],
|
514 |
+
outputs = [vote_submit_btn]
|
515 |
+
).then(
|
516 |
+
reset_vote_text,
|
517 |
+
inputs = [rank],
|
518 |
+
outputs = [vote_textbox]
|
519 |
+
)
|
520 |
+
C2_btn.click(
|
521 |
+
reset_btn_rank,
|
522 |
+
inputs=[window3_text, rank, C2_btn, vote_level],
|
523 |
+
outputs=[rank, vote_level]
|
524 |
+
).then(
|
525 |
+
reset_submit,
|
526 |
+
inputs = [rank],
|
527 |
+
outputs = [vote_submit_btn]
|
528 |
+
).then(
|
529 |
+
reset_vote_text,
|
530 |
+
inputs = [rank],
|
531 |
+
outputs = [vote_textbox]
|
532 |
+
)
|
533 |
+
C3_btn.click(
|
534 |
+
reset_btn_rank,
|
535 |
+
inputs=[window3_text, rank, C3_btn, vote_level],
|
536 |
+
outputs=[rank, vote_level]
|
537 |
+
).then(
|
538 |
+
reset_submit,
|
539 |
+
inputs = [rank],
|
540 |
+
outputs = [vote_submit_btn]
|
541 |
+
).then(
|
542 |
+
reset_vote_text,
|
543 |
+
inputs = [rank],
|
544 |
+
outputs = [vote_textbox]
|
545 |
+
)
|
546 |
+
C4_btn.click(
|
547 |
+
reset_btn_rank,
|
548 |
+
inputs=[window3_text, rank, C4_btn, vote_level],
|
549 |
+
outputs=[rank, vote_level]
|
550 |
+
).then(
|
551 |
+
reset_submit,
|
552 |
+
inputs = [rank],
|
553 |
+
outputs = [vote_submit_btn]
|
554 |
+
).then(
|
555 |
+
reset_vote_text,
|
556 |
+
inputs = [rank],
|
557 |
+
outputs = [vote_textbox]
|
558 |
+
)
|
559 |
+
|
560 |
+
D1_btn.click(
|
561 |
+
reset_btn_rank,
|
562 |
+
inputs=[window4_text, rank, D1_btn, vote_level],
|
563 |
+
outputs=[rank, vote_level]
|
564 |
+
).then(
|
565 |
+
reset_submit,
|
566 |
+
inputs = [rank],
|
567 |
+
outputs = [vote_submit_btn]
|
568 |
+
).then(
|
569 |
+
reset_vote_text,
|
570 |
+
inputs = [rank],
|
571 |
+
outputs = [vote_textbox]
|
572 |
+
)
|
573 |
+
D2_btn.click(
|
574 |
+
reset_btn_rank,
|
575 |
+
inputs=[window4_text, rank, D2_btn, vote_level],
|
576 |
+
outputs=[rank, vote_level]
|
577 |
+
).then(
|
578 |
+
reset_submit,
|
579 |
+
inputs = [rank],
|
580 |
+
outputs = [vote_submit_btn]
|
581 |
+
).then(
|
582 |
+
reset_vote_text,
|
583 |
+
inputs = [rank],
|
584 |
+
outputs = [vote_textbox]
|
585 |
+
)
|
586 |
+
D3_btn.click(
|
587 |
+
reset_btn_rank,
|
588 |
+
inputs=[window4_text, rank, D3_btn, vote_level],
|
589 |
+
outputs=[rank, vote_level]
|
590 |
+
).then(
|
591 |
+
reset_submit,
|
592 |
+
inputs = [rank],
|
593 |
+
outputs = [vote_submit_btn]
|
594 |
+
).then(
|
595 |
+
reset_vote_text,
|
596 |
+
inputs = [rank],
|
597 |
+
outputs = [vote_textbox]
|
598 |
+
)
|
599 |
+
D4_btn.click(
|
600 |
+
reset_btn_rank,
|
601 |
+
inputs=[window4_text, rank, D4_btn, vote_level],
|
602 |
+
outputs=[rank, vote_level]
|
603 |
+
).then(
|
604 |
+
reset_submit,
|
605 |
+
inputs = [rank],
|
606 |
+
outputs = [vote_submit_btn]
|
607 |
+
).then(
|
608 |
+
reset_vote_text,
|
609 |
+
inputs = [rank],
|
610 |
+
outputs = [vote_textbox]
|
611 |
+
)
|
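For context on the wiring above: each "X is Best" button submits a fixed rank vector (bastA_rank = [0, 3, 3, 3] marks model A as Top 1 and the other three as Top 4), while the sixteen A1–D4 buttons let a user assign an explicit 1–4 rank per model. The sixteen near-identical .click chains could equally be generated in a loop; the sketch below is a hypothetical condensation that reuses the components and callbacks defined in this file, not the code the commit actually contains.

    # Hypothetical refactor: wire the 4x4 rank buttons in a loop instead of 16 hand-written chains.
    window_texts = [window1_text, window2_text, window3_text, window4_text]
    rank_buttons = [
        [A1_btn, A2_btn, A3_btn, A4_btn],
        [B1_btn, B2_btn, B3_btn, B4_btn],
        [C1_btn, C2_btn, C3_btn, C4_btn],
        [D1_btn, D2_btn, D3_btn, D4_btn],
    ]
    for window_text, buttons in zip(window_texts, rank_buttons):
        for btn in buttons:
            btn.click(
                reset_btn_rank,                       # record the clicked rank for this model window
                inputs=[window_text, rank, btn, vote_level],
                outputs=[rank, vote_level],
            ).then(
                reset_submit, inputs=[rank], outputs=[vote_submit_btn]
            ).then(
                reset_vote_text, inputs=[rank], outputs=[vote_textbox]
            )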
serve/leaderboard.py
CHANGED
@@ -36,103 +36,59 @@ from datetime import datetime
|
|
36 |
|
37 |
def make_leaderboard_md():
|
38 |
leaderboard_md = f"""
|
39 |
-
# 🏆 K-Sort-Arena Leaderboard
|
40 |
"""
|
|
|
41 |
|
|
|
|
|
|
|
|
|
42 |
return leaderboard_md
|
43 |
|
44 |
def model_hyperlink(model_name, link):
|
45 |
return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
|
46 |
|
47 |
|
48 |
-
def make_arena_leaderboard_md(total_models, total_votes):
|
49 |
-
last_updated = datetime.now()
|
50 |
-
last_updated = last_updated.strftime("%Y-%m-%d")
|
51 |
|
52 |
leaderboard_md = f"""
|
53 |
-
Total #models: **{total_models}**(anonymous). Total #votes: **{total_votes}** (Equivalent to **{total_votes*6}** votes for one-on-one games).
|
54 |
\n Last updated: {last_updated}.
|
55 |
"""
|
56 |
|
57 |
return leaderboard_md
|
58 |
|
59 |
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
with open(elo_results_file, "rb") as fin:
|
67 |
-
elo_results = pickle.load(fin)
|
68 |
-
|
69 |
-
anony_elo_results = elo_results["anony"]
|
70 |
-
full_elo_results = elo_results["full"]
|
71 |
-
anony_arena_df = anony_elo_results["leaderboard_table_df"]
|
72 |
-
full_arena_df = full_elo_results["leaderboard_table_df"]
|
73 |
-
p1 = anony_elo_results["win_fraction_heatmap"]
|
74 |
-
p2 = anony_elo_results["battle_count_heatmap"]
|
75 |
-
p3 = anony_elo_results["bootstrap_elo_rating"]
|
76 |
-
p4 = anony_elo_results["average_win_rate_bar"]
|
77 |
-
|
78 |
-
md = make_leaderboard_md(anony_elo_results)
|
79 |
-
|
80 |
-
md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
|
81 |
-
|
82 |
-
if leaderboard_table_file:
|
83 |
-
model_table_df = load_leaderboard_table_csv(leaderboard_table_file)
|
84 |
-
with gr.Tabs() as tabs:
|
85 |
-
# arena table
|
86 |
-
arena_table_vals = get_arena_table(anony_arena_df, model_table_df)
|
87 |
-
with gr.Tab("Arena Score", id=0):
|
88 |
-
md = make_arena_leaderboard_md(anony_elo_results)
|
89 |
-
gr.Markdown(md, elem_id="leaderboard_markdown")
|
90 |
-
gr.Dataframe(
|
91 |
-
headers=[
|
92 |
-
"Rank",
|
93 |
-
"🤖 Model",
|
94 |
-
"⭐ Arena Elo",
|
95 |
-
"📊 95% CI",
|
96 |
-
"🗳️ Votes",
|
97 |
-
"Organization",
|
98 |
-
"License",
|
99 |
-
],
|
100 |
-
datatype=[
|
101 |
-
"str",
|
102 |
-
"markdown",
|
103 |
-
"number",
|
104 |
-
"str",
|
105 |
-
"number",
|
106 |
-
"str",
|
107 |
-
"str",
|
108 |
-
],
|
109 |
-
value=arena_table_vals,
|
110 |
-
elem_id="arena_leaderboard_dataframe",
|
111 |
-
height=700,
|
112 |
-
column_widths=[50, 200, 100, 100, 100, 150, 150],
|
113 |
-
wrap=True,
|
114 |
-
)
|
115 |
-
|
116 |
-
if not show_plot:
|
117 |
-
gr.Markdown(
|
118 |
-
""" ## The leaderboard is updated frequently and continues to incorporate new models.
|
119 |
-
""",
|
120 |
-
elem_id="leaderboard_markdown",
|
121 |
-
)
|
122 |
-
else:
|
123 |
-
pass
|
124 |
-
|
125 |
-
leader_component_values[:] = [md, p1, p2, p3, p4]
|
126 |
|
|
|
|
|
127 |
|
128 |
-
|
|
|
129 |
|
130 |
-
|
|
|
131 |
|
132 |
-
|
133 |
-
|
134 |
-
'''
|
135 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
136 |
|
137 |
|
138 |
def make_arena_leaderboard_data(results):
|
@@ -146,12 +102,42 @@ def build_leaderboard_tab(score_result_file = 'sorted_score_list.json'):
|
|
146 |
score_results = data["sorted_score_list"]
|
147 |
total_models = data["total_models"]
|
148 |
total_votes = data["total_votes"]
|
|
|
149 |
|
150 |
md = make_leaderboard_md()
|
151 |
md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
|
|
152 |
|
153 |
with gr.Tab("Arena Score", id=0):
|
154 |
-
md = make_arena_leaderboard_md(total_models, total_votes)
|
155 |
gr.Markdown(md, elem_id="leaderboard_markdown")
|
156 |
md = make_arena_leaderboard_data(score_results)
|
157 |
gr.Dataframe(md)
|
@@ -161,5 +147,6 @@ def build_leaderboard_tab(score_result_file = 'sorted_score_list.json'):
|
|
161 |
""",
|
162 |
elem_id="leaderboard_markdown",
|
163 |
)
|
164 |
-
from .utils import acknowledgment_md
|
165 |
-
gr.
|
|
|
|
36 |
|
37 |
def make_leaderboard_md():
|
38 |
leaderboard_md = f"""
|
39 |
+
# 🏆 K-Sort-Arena Leaderboard (Text-to-Image)
|
40 |
"""
|
41 |
+
return leaderboard_md
|
42 |
|
43 |
+
def make_leaderboard_video_md():
|
44 |
+
leaderboard_md = f"""
|
45 |
+
# 🏆 K-Sort-Arena Leaderboard (Text-to-Video)
|
46 |
+
"""
|
47 |
return leaderboard_md
|
48 |
|
49 |
def model_hyperlink(model_name, link):
|
50 |
return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
|
51 |
|
52 |
|
53 |
+
def make_arena_leaderboard_md(total_models, total_votes, last_updated):
|
54 |
+
# last_updated = datetime.now()
|
55 |
+
# last_updated = last_updated.strftime("%Y-%m-%d")
|
56 |
|
57 |
leaderboard_md = f"""
|
58 |
+
Total #models: **{total_models}** (anonymous). Total #votes: **{total_votes}** (Equivalent to **{total_votes*6}** votes for one-on-one games).
|
59 |
\n Last updated: {last_updated}.
|
60 |
"""
|
61 |
|
62 |
return leaderboard_md
|
63 |
|
64 |
|
65 |
+
def make_disclaimer_md():
|
66 |
+
disclaimer_md = f'''
|
67 |
+
<div id="modal" style="display:none; position:fixed; top:50%; left:50%; transform:translate(-50%, -50%); padding:20px; background:white; box-shadow:0 0 10px rgba(0,0,0,0.5); z-index:1000;">
|
68 |
+
<p style="font-size:24px;"><strong>Disclaimer</strong></p>
|
69 |
+
<p style="font-size:18px;"><b>Purpose and Scope</b></b></p>
|
70 |
+
<p><b>This platform is designed for academic use, providing a space for evaluating and comparing Visual Generation Models. The information and services provided are intended for research and educational purposes only.</b></p>
|
|
|
71 |
|
72 |
+
<p style="font-size:18px;"><b>Privacy and Data Protection</b></p>
|
73 |
+
<p><b>While users may voluntarily submit their names and institutional affiliations, this information is not required and is collected solely for the purpose of academic recognition. Personal information submitted to this platform will be handled with care and used solely for the intended academic purposes. We are committed to protecting your privacy, and we will not share personal data with third parties without explicit consent.</b></p>
|
74 |
|
75 |
+
<p style="font-size:18px;"><b>Source of Models</b></p>
|
76 |
+
<p><b>All models evaluated and displayed on this platform are obtained from official sources, including but not limited to official repositories and Replicate.</b></p>
|
77 |
|
78 |
+
<p style="font-size:18px;"><b>Limitations of Liability</b></p>
|
79 |
+
<p><b>The platform and its administrators do not assume any legal liability for the use or interpretation of the information provided. The evaluations and comparisons are for academic purposes. Users should verify the information independently and must not use the platform for any illegal, harmful, violent, racist, or sexual purposes.</b></p>
|
80 |
|
81 |
+
<p style="font-size:18px;"><b>Modification of Terms</b></p>
|
82 |
+
<p><b>We reserve the right to modify these terms at any time. Users will be notified of significant changes through updates on the platform.</b></p>
|
|
|
83 |
|
84 |
+
<p style="font-size:18px;"><b>Contact Information</b></p>
|
85 |
+
<p><b>For any questions or to report issues, please contact us at info@ksort.org.</b></p>
|
86 |
+
</div>
|
87 |
+
<div id="overlay" style="display:none; position:fixed; top:0; left:0; width:100%; height:100%; background:rgba(0,0,0,0.5); z-index:999;" onclick="document.getElementById('modal').style.display='none'; document.getElementById('overlay').style.display='none'"></div>
|
88 |
+
<p> ⚠️ This platform is designed for academic usage, for details please refer to <a href="#" id="open_link" onclick="document.getElementById('modal').style.display='block'; document.getElementById('overlay').style.display='block'">disclaimer</a>.</p>
|
89 |
+
'''
|
90 |
+
|
91 |
+
return disclaimer_md
|
92 |
|
93 |
|
94 |
def make_arena_leaderboard_data(results):
|
|
|
102 |
score_results = data["sorted_score_list"]
|
103 |
total_models = data["total_models"]
|
104 |
total_votes = data["total_votes"]
|
105 |
+
last_updated = data["last_updated"]
|
106 |
|
107 |
md = make_leaderboard_md()
|
108 |
md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
|
109 |
+
gr.HTML(make_disclaimer_md)
|
110 |
+
|
111 |
+
with gr.Tab("Arena Score", id=0):
|
112 |
+
md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
|
113 |
+
gr.Markdown(md, elem_id="leaderboard_markdown")
|
114 |
+
md = make_arena_leaderboard_data(score_results)
|
115 |
+
gr.Dataframe(md)
|
116 |
+
|
117 |
+
gr.Markdown(
|
118 |
+
""" ## The leaderboard is updated frequently and continues to incorporate new models.
|
119 |
+
""",
|
120 |
+
elem_id="leaderboard_markdown",
|
121 |
+
)
|
122 |
+
from .utils import acknowledgment_md, html_code
|
123 |
+
with gr.Blocks():
|
124 |
+
gr.Markdown(acknowledgment_md)
|
125 |
+
|
126 |
+
|
127 |
+
def build_leaderboard_video_tab(score_result_file = 'sorted_score_list_video.json'):
|
128 |
+
with open(score_result_file, "r") as json_file:
|
129 |
+
data = json.load(json_file)
|
130 |
+
score_results = data["sorted_score_list"]
|
131 |
+
total_models = data["total_models"]
|
132 |
+
total_votes = data["total_votes"]
|
133 |
+
last_updated = data["last_updated"]
|
134 |
+
|
135 |
+
md = make_leaderboard_video_md()
|
136 |
+
md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
|
137 |
+
gr.HTML(make_disclaimer_md)
|
138 |
|
139 |
with gr.Tab("Arena Score", id=0):
|
140 |
+
md = make_arena_leaderboard_md(total_models, total_votes, last_updated)
|
141 |
gr.Markdown(md, elem_id="leaderboard_markdown")
|
142 |
md = make_arena_leaderboard_data(score_results)
|
143 |
gr.Dataframe(md)
|
|
|
147 |
""",
|
148 |
elem_id="leaderboard_markdown",
|
149 |
)
|
150 |
+
from .utils import acknowledgment_md, html_code
|
151 |
+
with gr.Blocks():
|
152 |
+
gr.Markdown(acknowledgment_md)
|
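The video leaderboard tab reads the same JSON layout as sorted_score_list.json shown later in this commit: top-level total_models, total_votes and last_updated fields plus a sorted_score_list of per-model records. A minimal sketch of consuming that file outside Gradio, assuming make_arena_leaderboard_data does little more than wrap the list in a DataFrame (its body is not part of this diff):

    import json
    import pandas as pd

    def load_video_leaderboard(path="sorted_score_list_video.json"):
        # Load the leaderboard snapshot written by the skill-update scripts.
        with open(path, "r") as f:
            data = json.load(f)
        # Each record already carries Rank / Model / Score / Votes / Organization fields.
        table = pd.DataFrame(data["sorted_score_list"])
        meta = {
            "total_models": data["total_models"],
            "total_votes": data["total_votes"],
            "last_updated": data["last_updated"],
        }
        return table, meta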
serve/update_skill_video.py
ADDED
@@ -0,0 +1,107 @@
|
1 |
+
import numpy as np
|
2 |
+
import json
|
3 |
+
from trueskill import TrueSkill
|
4 |
+
import paramiko
|
5 |
+
import io, os
|
6 |
+
import sys
|
7 |
+
from serve.constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_VIDEO_SKILL
|
8 |
+
trueskill_env = TrueSkill()
|
9 |
+
sys.path.append('../')
|
10 |
+
from model.models import VIDEO_GENERATION_MODELS
|
11 |
+
|
12 |
+
ssh_skill_client = None
|
13 |
+
sftp_skill_client = None
|
14 |
+
|
15 |
+
def create_ssh_skill_client(server, port, user, password):
|
16 |
+
global ssh_skill_client, sftp_skill_client
|
17 |
+
ssh_skill_client = paramiko.SSHClient()
|
18 |
+
ssh_skill_client.load_system_host_keys()
|
19 |
+
ssh_skill_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
20 |
+
ssh_skill_client.connect(server, port, user, password)
|
21 |
+
|
22 |
+
transport = ssh_skill_client.get_transport()
|
23 |
+
transport.set_keepalive(60)
|
24 |
+
|
25 |
+
sftp_skill_client = ssh_skill_client.open_sftp()
|
26 |
+
def is_connected():
|
27 |
+
global ssh_skill_client, sftp_skill_client
|
28 |
+
if ssh_skill_client is None or sftp_skill_client is None:
|
29 |
+
return False
|
30 |
+
# Check that the SSH connection is still active
|
31 |
+
if not ssh_skill_client.get_transport().is_active():
|
32 |
+
return False
|
33 |
+
# Check that the SFTP connection is still active
|
34 |
+
try:
|
35 |
+
sftp_skill_client.listdir('.') # try a directory listing to verify the connection
|
36 |
+
except Exception as e:
|
37 |
+
print(f"Error checking SFTP connection: {e}")
|
38 |
+
return False
|
39 |
+
return True
|
40 |
+
def ucb_score(trueskill_diff, t, n):
|
41 |
+
exploration_term = np.sqrt((2 * np.log(t + 1e-5)) / (n + 1e-5))
|
42 |
+
ucb = -trueskill_diff + 1.0 * exploration_term
|
43 |
+
return ucb
|
44 |
+
|
45 |
+
def update_trueskill(ratings, ranks):
|
46 |
+
new_ratings = trueskill_env.rate(ratings, ranks)
|
47 |
+
return new_ratings
|
48 |
+
|
49 |
+
def serialize_rating(rating):
|
50 |
+
return {'mu': rating.mu, 'sigma': rating.sigma}
|
51 |
+
|
52 |
+
def deserialize_rating(rating_dict):
|
53 |
+
return trueskill_env.Rating(mu=rating_dict['mu'], sigma=rating_dict['sigma'])
|
54 |
+
|
55 |
+
|
56 |
+
def save_json_via_sftp(ratings, comparison_counts, total_comparisons):
|
57 |
+
global sftp_skill_client
|
58 |
+
if not is_connected():
|
59 |
+
create_ssh_skill_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
60 |
+
data = {
|
61 |
+
'ratings': [serialize_rating(r) for r in ratings],
|
62 |
+
'comparison_counts': comparison_counts.tolist(),
|
63 |
+
'total_comparisons': total_comparisons
|
64 |
+
}
|
65 |
+
json_data = json.dumps(data)
|
66 |
+
with sftp_skill_client.open(SSH_VIDEO_SKILL, 'w') as f:
|
67 |
+
f.write(json_data)
|
68 |
+
|
69 |
+
def load_json_via_sftp():
|
70 |
+
global sftp_skill_client
|
71 |
+
if not is_connected():
|
72 |
+
create_ssh_skill_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
73 |
+
with sftp_skill_client.open(SSH_VIDEO_SKILL, 'r') as f:
|
74 |
+
data = json.load(f)
|
75 |
+
ratings = [deserialize_rating(r) for r in data['ratings']]
|
76 |
+
comparison_counts = np.array(data['comparison_counts'])
|
77 |
+
total_comparisons = data['total_comparisons']
|
78 |
+
return ratings, comparison_counts, total_comparisons
|
79 |
+
|
80 |
+
|
81 |
+
def update_skill_video(rank, model_names, k_group=4):
|
82 |
+
|
83 |
+
ratings, comparison_counts, total_comparisons = load_json_via_sftp()
|
84 |
+
|
85 |
+
# group = Model_ID.group
|
86 |
+
group = []
|
87 |
+
for model_name in model_names:
|
88 |
+
group.append(VIDEO_GENERATION_MODELS.index(model_name))
|
89 |
+
print(group)
|
90 |
+
|
91 |
+
pairwise_comparisons = [(i, j) for i in range(len(group)) for j in range(i+1, len(group))]
|
92 |
+
for player1, player2 in pairwise_comparisons:
|
93 |
+
if rank[player1] < rank[player2]:
|
94 |
+
ranks = [0, 1]
|
95 |
+
updated_ratings = update_trueskill([[ratings[group[player1]]], [ratings[group[player2]]]], ranks)
|
96 |
+
ratings[group[player1]], ratings[group[player2]] = updated_ratings[0][0], updated_ratings[1][0]
|
97 |
+
elif rank[player1] > rank[player2]:
|
98 |
+
ranks = [1, 0]
|
99 |
+
updated_ratings = update_trueskill([[ratings[group[player1]]], [ratings[group[player2]]]], ranks)
|
100 |
+
ratings[group[player1]], ratings[group[player2]] = updated_ratings[0][0], updated_ratings[1][0]
|
101 |
+
|
102 |
+
comparison_counts[group[player1], group[player2]] += 1
|
103 |
+
comparison_counts[group[player2], group[player1]] += 1
|
104 |
+
|
105 |
+
total_comparisons += 1
|
106 |
+
|
107 |
+
save_json_via_sftp(ratings, comparison_counts, total_comparisons)
|
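update_skill_video above turns one K-way ranking into all pairwise duels and applies a two-team TrueSkill update per duel, leaving ratings untouched when two models are tied. A standalone sketch of that update rule using the trueskill package directly, with a made-up rank vector:

    from trueskill import TrueSkill

    env = TrueSkill()
    ratings = [env.create_rating() for _ in range(4)]   # one rating per model in the battle
    rank = [2, 0, 3, 1]                                  # hypothetical user ranking (0 = best)

    # Walk over every unordered pair and apply a two-player TrueSkill update.
    for i in range(4):
        for j in range(i + 1, 4):
            if rank[i] == rank[j]:
                continue                                 # tied pairs get no rating update, as above
            ranks = [0, 1] if rank[i] < rank[j] else [1, 0]
            (new_i,), (new_j,) = env.rate([[ratings[i]], [ratings[j]]], ranks)
            ratings[i], ratings[j] = new_i, new_j

    for r in ratings:
        print(round(r.mu, 2), round(r.sigma, 2))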
serve/upload.py
CHANGED
@@ -1,12 +1,13 @@
|
|
1 |
import paramiko
|
2 |
import numpy as np
|
3 |
import io, os
|
|
|
4 |
from PIL import Image
|
5 |
import requests
|
6 |
import json
|
7 |
import random
|
8 |
import concurrent.futures
|
9 |
-
from .constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_LOG, SSH_MSCOCO
|
10 |
|
11 |
ssh_client = None
|
12 |
sftp_client = None
|
@@ -71,26 +72,38 @@ def get_image_from_url(image_url):
|
|
71 |
|
72 |
def get_random_mscoco_prompt():
|
73 |
|
74 |
-
file_path = './
|
75 |
with open(file_path, 'r') as file:
|
76 |
lines = file.readlines()
|
77 |
|
78 |
random_line = random.choice(lines).strip()
|
79 |
return random_line
|
|
|
80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
def create_remote_directory(remote_directory):
|
83 |
global ssh_client
|
84 |
if not is_connected():
|
85 |
create_ssh_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
86 |
-
|
|
|
|
|
|
|
|
|
87 |
error = stderr.read().decode('utf-8')
|
88 |
if error:
|
89 |
print(f"Error: {error}")
|
90 |
else:
|
91 |
print(f"Directory {remote_directory} created successfully.")
|
92 |
-
return
|
93 |
-
|
94 |
|
95 |
def upload_images(i, image_list, output_file_list, sftp_client):
|
96 |
with sftp_client as sftp:
|
@@ -122,19 +135,21 @@ def upload_ssh_all(states, output_dir, data, data_path):
|
|
122 |
with concurrent.futures.ThreadPoolExecutor() as executor:
|
123 |
futures = [executor.submit(upload_images, i, image_list, output_file_list, sftp_client_imgs[i]) for i in range(len(output_file_list))]
|
124 |
|
125 |
-
# for i in range(len(output_file_list)):
|
126 |
-
# if isinstance(image_list[i], str):
|
127 |
-
# print("get url image")
|
128 |
-
# image_list[i] = get_image_from_url(image_list[i])
|
129 |
-
# with io.BytesIO() as image_byte_stream:
|
130 |
-
# image_list[i] = image_list[i].resize((512, 512), Image.ANTIALIAS)
|
131 |
-
# image_list[i].save(image_byte_stream, format='JPEG')
|
132 |
-
# image_byte_stream.seek(0)
|
133 |
-
# sftp.putfo(image_byte_stream, output_file_list[i])
|
134 |
-
# print(f"Successfully uploaded image to {output_file_list[i]}")
|
135 |
with sftp_client as sftp:
|
136 |
json_data = json.dumps(data, indent=4)
|
137 |
with io.BytesIO(json_data.encode('utf-8')) as json_byte_stream:
|
138 |
sftp.putfo(json_byte_stream, data_path)
|
139 |
print(f"Successfully uploaded JSON data to {data_path}")
|
140 |
-
# create_ssh_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
|
|
|
1 |
import paramiko
|
2 |
import numpy as np
|
3 |
import io, os
|
4 |
+
import gradio as gr
|
5 |
from PIL import Image
|
6 |
import requests
|
7 |
import json
|
8 |
import random
|
9 |
import concurrent.futures
|
10 |
+
from .constants import SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD, SSH_LOG, SSH_VIDEO_LOG, SSH_MSCOCO
|
11 |
|
12 |
ssh_client = None
|
13 |
sftp_client = None
|
|
|
72 |
|
73 |
def get_random_mscoco_prompt():
|
74 |
|
75 |
+
file_path = './coco_prompt.txt'
|
76 |
with open(file_path, 'r') as file:
|
77 |
lines = file.readlines()
|
78 |
|
79 |
random_line = random.choice(lines).strip()
|
80 |
return random_line
|
81 |
+
def get_random_webvid_prompt():
|
82 |
|
83 |
+
file_path = './webvid_prompt.txt'
|
84 |
+
with open(file_path, 'r', encoding='utf-8') as file:
|
85 |
+
lines = file.readlines()
|
86 |
+
line_number = random.randint(0, len(lines) - 1)
|
87 |
+
print(line_number + 1)
|
88 |
+
random_line = lines[line_number].strip()
|
89 |
+
print(random_line)
|
90 |
+
return line_number + 1, random_line
|
91 |
|
92 |
+
def create_remote_directory(remote_directory, video=False):
|
93 |
global ssh_client
|
94 |
if not is_connected():
|
95 |
create_ssh_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
96 |
+
if video:
|
97 |
+
log_dir = f'{SSH_VIDEO_LOG}/{remote_directory}'
|
98 |
+
else:
|
99 |
+
log_dir = f'{SSH_LOG}/{remote_directory}'
|
100 |
+
stdin, stdout, stderr = ssh_client.exec_command(f'mkdir -p {log_dir}')
|
101 |
error = stderr.read().decode('utf-8')
|
102 |
if error:
|
103 |
print(f"Error: {error}")
|
104 |
else:
|
105 |
print(f"Directory {remote_directory} created successfully.")
|
106 |
+
return log_dir
|
|
|
107 |
|
108 |
def upload_images(i, image_list, output_file_list, sftp_client):
|
109 |
with sftp_client as sftp:
|
|
|
135 |
with concurrent.futures.ThreadPoolExecutor() as executor:
|
136 |
futures = [executor.submit(upload_images, i, image_list, output_file_list, sftp_client_imgs[i]) for i in range(len(output_file_list))]
|
137 |
|
|
|
|
138 |
with sftp_client as sftp:
|
139 |
json_data = json.dumps(data, indent=4)
|
140 |
with io.BytesIO(json_data.encode('utf-8')) as json_byte_stream:
|
141 |
sftp.putfo(json_byte_stream, data_path)
|
142 |
print(f"Successfully uploaded JSON data to {data_path}")
|
143 |
+
# create_ssh_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
144 |
+
|
145 |
+
def upload_ssh_data(data, data_path):
|
146 |
+
global sftp_client
|
147 |
+
global sftp_client_imgs
|
148 |
+
if not is_connected():
|
149 |
+
create_ssh_client(SSH_SERVER, SSH_PORT, SSH_USER, SSH_PASSWORD)
|
150 |
+
|
151 |
+
with sftp_client as sftp:
|
152 |
+
json_data = json.dumps(data, indent=4)
|
153 |
+
with io.BytesIO(json_data.encode('utf-8')) as json_byte_stream:
|
154 |
+
sftp.putfo(json_byte_stream, data_path)
|
155 |
+
print(f"Successfully uploaded JSON data to {data_path}")
|
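get_random_webvid_prompt returns a 1-based line index into webvid_prompt.txt together with the prompt text, and that index is what prompt_num carries through the UI (the four example prompts are pinned to indices 8, 3, 93 and 84 in serve/utils.py below). A small sketch of the inverse lookup, e.g. for replaying a logged vote; the helper name is illustrative, not part of the codebase:

    def prompt_from_num(prompt_num, path="./webvid_prompt.txt"):
        # prompt_num is 1-based, matching get_random_webvid_prompt's return value.
        with open(path, "r", encoding="utf-8") as f:
            lines = f.readlines()
        if not 1 <= prompt_num <= len(lines):
            raise ValueError(f"prompt_num {prompt_num} outside 1..{len(lines)}")
        return lines[prompt_num - 1].strip()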
serve/utils.py
CHANGED
@@ -31,6 +31,16 @@ acknowledgment_md = """
|
|
31 |
<p> Our codebase is built upon <a href="https://github.com/lm-sys/FastChat" target="_blank">FastChat</a>, <a href="https://github.com/TIGER-AI-Lab/ImagenHub" target="_blank">ImagenHub</a>.</p>
|
32 |
</div>
|
33 |
"""
|
|
|
|
34 |
block_css = """
|
35 |
#notice_markdown {
|
36 |
font-size: 110%
|
@@ -98,6 +108,11 @@ footer {
|
|
98 |
.submit-button {
|
99 |
color: red;
|
100 |
}
|
|
|
|
|
|
|
|
|
|
|
101 |
#btncolor1 {background: rgb(168, 230, 207);}
|
102 |
#btncolor2 {background: rgb(253, 255, 171);}
|
103 |
#btncolor3 {background: rgb(255, 211, 182);}
|
@@ -105,6 +120,8 @@ footer {
|
|
105 |
|
106 |
#btnblue {background: linear-gradient(to bottom right, rgb(222, 235, 247), rgb(189,215,238)); color: rgb(0, 112, 192); border: 1px solid rgb(189,215,238);}
|
107 |
#btnpink {background: rgb(255, 168, 184);}
|
|
|
|
|
108 |
"""
|
109 |
|
110 |
#
|
@@ -159,6 +176,8 @@ footer {
|
|
159 |
# .custom-button:hover {
|
160 |
# background-color: darkgreen; /* background color on hover */
|
161 |
# }
|
|
|
|
|
162 |
def enable_vote_buttons():
|
163 |
return tuple(gr.update(visible=True, interactive=i<=4) for i in range(6))
|
164 |
def disable_vote_buttons():
|
@@ -185,11 +204,25 @@ def disable_vote_mode_buttons():
|
|
185 |
|
186 |
def enable_order_buttons():
|
187 |
return tuple(gr.update(interactive=True) for _ in range(4))
|
188 |
-
def disable_order_buttons(textbox):
|
189 |
if not textbox.strip():
|
190 |
return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True))
|
191 |
else:
|
192 |
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True))
|
|
|
|
193 |
|
194 |
def clear_history():
|
195 |
return None, "", None
|
|
|
31 |
<p> Our codebase is built upon <a href="https://github.com/lm-sys/FastChat" target="_blank">FastChat</a>, <a href="https://github.com/TIGER-AI-Lab/ImagenHub" target="_blank">ImagenHub</a>.</p>
|
32 |
</div>
|
33 |
"""
|
34 |
+
# An HTML snippet that creates the disclaimer link and handles its click event
|
35 |
+
html_code = """
|
36 |
+
<p> This platform is designed for academic usage, for details please refer to <a href="#" id="open_link">disclaimer</a>.</p>
|
37 |
+
<p id="link_text" style="display:none;">这里是弹出的文本链接: <a href="https://example.com" target="_blank">example.com</a></p>
|
38 |
+
<script>
|
39 |
+
document.getElementById('open_link').addEventListener('click', function() {
|
40 |
+
document.getElementById('link_text').style.display = 'block';
|
41 |
+
});
|
42 |
+
</script>
|
43 |
+
"""
|
44 |
block_css = """
|
45 |
#notice_markdown {
|
46 |
font-size: 110%
|
|
|
108 |
.submit-button {
|
109 |
color: red;
|
110 |
}
|
111 |
+
#custom-width {width: 100px !important;}
|
112 |
+
#centered-row {
|
113 |
+
display: flex;
|
114 |
+
justify-content: center;
|
115 |
+
}
|
116 |
#btncolor1 {background: rgb(168, 230, 207);}
|
117 |
#btncolor2 {background: rgb(253, 255, 171);}
|
118 |
#btncolor3 {background: rgb(255, 211, 182);}
|
|
|
120 |
|
121 |
#btnblue {background: linear-gradient(to bottom right, rgb(222, 235, 247), rgb(189,215,238)); color: rgb(0, 112, 192); border: 1px solid rgb(189,215,238);}
|
122 |
#btnpink {background: rgb(255, 168, 184);}
|
123 |
+
#centered-text { display: flex; justify-content: center; align-items: center; height: 100%; width: 100%; font-size: 150%; }
|
124 |
+
|
125 |
"""
|
126 |
|
127 |
#
|
|
|
176 |
# .custom-button:hover {
|
177 |
# background-color: darkgreen; /* background color on hover */
|
178 |
# }
|
179 |
+
def enable_loop_buttons():
|
180 |
+
return tuple(gr.update(loop=True) for i in range(4))
|
181 |
def enable_vote_buttons():
|
182 |
return tuple(gr.update(visible=True, interactive=i<=4) for i in range(6))
|
183 |
def disable_vote_buttons():
|
|
|
204 |
|
205 |
def enable_order_buttons():
|
206 |
return tuple(gr.update(interactive=True) for _ in range(4))
|
207 |
+
def disable_order_buttons(textbox, video=False):
|
208 |
if not textbox.strip():
|
209 |
return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True))
|
210 |
else:
|
211 |
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True))
|
212 |
+
def disable_video_order_buttons(textbox, example1_text, example2_text, example3_text, example4_text):
|
213 |
+
example_list = [example1_text, example2_text, example3_text, example4_text]
|
214 |
+
if not textbox.strip():
|
215 |
+
return (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(visible=False))
|
216 |
+
else:
|
217 |
+
if textbox == example_list[0]:
|
218 |
+
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=8))
|
219 |
+
elif textbox == example_list[1]:
|
220 |
+
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=3))
|
221 |
+
elif textbox == example_list[2]:
|
222 |
+
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=93))
|
223 |
+
elif textbox == example_list[3]:
|
224 |
+
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(value=84))
|
225 |
+
return (gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True), gr.update(visible=False))
|
226 |
|
227 |
def clear_history():
|
228 |
return None, "", None
|
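The helpers in this file follow the standard Gradio contract: a callback returns one gr.update(...) per component listed in outputs, in the same order, which is why disable_video_order_buttons returns five-element tuples for [textbox, send_btn, draw_btn, clear_btn, prompt_num]. A toy sketch of that contract with hypothetical component names:

    import gradio as gr

    def toggle_send(prompt: str):
        # One update per output component, in the order given to outputs=[...].
        locked = bool(prompt.strip())
        return (
            gr.update(interactive=not locked),  # textbox
            gr.update(interactive=not locked),  # send button
            gr.update(interactive=not locked),  # draw button
            gr.update(interactive=True),        # clear button stays usable
        )

    with gr.Blocks() as demo:
        box = gr.Textbox()
        send = gr.Button("Send")
        draw = gr.Button("Random Prompt")
        clear = gr.Button("New Round")
        send.click(toggle_send, inputs=[box], outputs=[box, send, draw, clear])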
serve/vote_utils.py
CHANGED
@@ -806,8 +806,6 @@ def generate_igm_annoy(gen_func, state0, state1, state2, state3, text, model_nam
|
|
806 |
state2.model_name = model_name2
|
807 |
state3.model_name = model_name3
|
808 |
|
809 |
-
|
810 |
-
|
811 |
yield state0, state1, state2, state3, generated_image0, generated_image1, generated_image2, generated_image3, \
|
812 |
generated_image0, generated_image1, generated_image2, generated_image3, \
|
813 |
gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False), \
|
@@ -872,7 +870,51 @@ def generate_igm_annoy(gen_func, state0, state1, state2, state3, text, model_nam
|
|
872 |
# with open(output_file, 'w') as f:
|
873 |
# save_any_image(state.output, f)
|
874 |
# save_image_file_on_log_server(output_file)
|
|
|
|
|
875 |
|
|
|
876 |
def generate_igm_annoy_museum(gen_func, state0, state1, state2, state3, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
|
877 |
if state0 is None:
|
878 |
state0 = ImageStateIG(model_name0)
|
|
|
806 |
state2.model_name = model_name2
|
807 |
state3.model_name = model_name3
|
808 |
|
|
|
|
|
809 |
yield state0, state1, state2, state3, generated_image0, generated_image1, generated_image2, generated_image3, \
|
810 |
generated_image0, generated_image1, generated_image2, generated_image3, \
|
811 |
gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False), \
|
|
|
870 |
# with open(output_file, 'w') as f:
|
871 |
# save_any_image(state.output, f)
|
872 |
# save_image_file_on_log_server(output_file)
|
873 |
+
|
874 |
+
def generate_igv_annoy(gen_func, state0, state1, state2, state3, text, prompt_num, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
|
875 |
+
if not text.strip():
|
876 |
+
return (gr.update(visible=False),) * 16
|
877 |
+
if state0 is None:
|
878 |
+
state0 = ImageStateIG(model_name0)
|
879 |
+
if state1 is None:
|
880 |
+
state1 = ImageStateIG(model_name1)
|
881 |
+
if state2 is None:
|
882 |
+
state2 = ImageStateIG(model_name2)
|
883 |
+
if state3 is None:
|
884 |
+
state3 = ImageStateIG(model_name3)
|
885 |
+
|
886 |
+
|
887 |
+
ip = get_ip(request)
|
888 |
+
igm_logger.info(f"generate. ip: {ip}")
|
889 |
+
start_tstamp = time.time()
|
890 |
+
model_name0 = ""
|
891 |
+
model_name1 = ""
|
892 |
+
model_name2 = ""
|
893 |
+
model_name3 = ""
|
894 |
+
|
895 |
+
generated_video0, generated_video1, generated_video2, generated_video3, model_name0, model_name1, model_name2, model_name3 \
|
896 |
+
= gen_func(prompt_num, model_name0, model_name1, model_name2, model_name3)
|
897 |
+
state0.prompt = text
|
898 |
+
state1.prompt = text
|
899 |
+
state2.prompt = text
|
900 |
+
state3.prompt = text
|
901 |
+
|
902 |
+
state0.output = generated_video0
|
903 |
+
state1.output = generated_video1
|
904 |
+
state2.output = generated_video2
|
905 |
+
state3.output = generated_video3
|
906 |
+
|
907 |
+
state0.model_name = model_name0
|
908 |
+
state1.model_name = model_name1
|
909 |
+
state2.model_name = model_name2
|
910 |
+
state3.model_name = model_name3
|
911 |
+
|
912 |
+
yield state0, state1, state2, state3, generated_video0, generated_video1, generated_video2, generated_video3, \
|
913 |
+
generated_video0, generated_video1, generated_video2, generated_video3, \
|
914 |
+
gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False), \
|
915 |
+
gr.Markdown(f"### Model C: {model_name2}", visible=False), gr.Markdown(f"### Model D: {model_name3}", visible=False)
|
916 |
|
917 |
+
|
918 |
def generate_igm_annoy_museum(gen_func, state0, state1, state2, state3, model_name0, model_name1, model_name2, model_name3, request: gr.Request):
|
919 |
if state0 is None:
|
920 |
state0 = ImageStateIG(model_name0)
|
sorted_score_list.json
CHANGED
@@ -1,82 +1,83 @@
 {
     "total_models": 27,
-    "total_votes":
+    "total_votes": 965,
+    "last_updated": "2024-07-29",
     "sorted_score_list": [
         {
             "Rank": 0,
             "\ud83e\udd16 Model": "Midjourney-v6.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "32.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "32.36 (34.75/0.796)",
+            "\ud83d\uddf3\ufe0f Votes": 425.0,
             "Organization": "Midjourney"
         },
         {
             "Rank": 1,
             "\ud83e\udd16 Model": "Midjourney-v5.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "31.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "31.36 (33.74/0.791)",
+            "\ud83d\uddf3\ufe0f Votes": 438.0,
             "Organization": "Midjourney"
         },
         {
             "Rank": 2,
             "\ud83e\udd16 Model": "SD-v3.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "29.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "29.33 (31.7/0.788)",
+            "\ud83d\uddf3\ufe0f Votes": 426.0,
             "Organization": "Stability AI"
         },
         {
             "Rank": 3,
             "\ud83e\udd16 Model": "Dalle-3",
-            "\u2b50 Score (\u03bc/\u03c3)": "28.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "28.55 (30.94/0.795)",
+            "\ud83d\uddf3\ufe0f Votes": 427.0,
             "Organization": "OpenAI"
         },
         {
             "Rank": 4,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "26.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "Pixart-Sigma",
+            "\u2b50 Score (\u03bc/\u03c3)": "26.33 (28.7/0.79)",
+            "\ud83d\uddf3\ufe0f Votes": 441.0,
+            "Organization": "PixArt-Alpha"
         },
         {
             "Rank": 5,
             "\ud83e\udd16 Model": "Proteus-v0.2",
-            "\u2b50 Score (\u03bc/\u03c3)": "
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "26.15 (28.52/0.79)",
+            "\ud83d\uddf3\ufe0f Votes": 423.0,
             "Organization": "DataAutoGPT3"
         },
         {
             "Rank": 6,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "25.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "Dreamshaper-xl",
+            "\u2b50 Score (\u03bc/\u03c3)": "25.66 (28.02/0.786)",
+            "\ud83d\uddf3\ufe0f Votes": 434.0,
+            "Organization": "Lykon"
         },
         {
             "Rank": 7,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "25.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "Open-Dalle-v1.1",
+            "\u2b50 Score (\u03bc/\u03c3)": "25.51 (27.85/0.782)",
+            "\ud83d\uddf3\ufe0f Votes": 429.0,
+            "Organization": "DataAutoGPT3"
         },
         {
             "Rank": 8,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "25.
+            "\ud83e\udd16 Model": "Deepfloyd-IF",
+            "\u2b50 Score (\u03bc/\u03c3)": "25.22 (27.58/0.789)",
             "\ud83d\uddf3\ufe0f Votes": 426.0,
-            "Organization": "
+            "Organization": "DeepFloyd"
         },
         {
             "Rank": 9,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "Realvisxl-v3.0",
+            "\u2b50 Score (\u03bc/\u03c3)": "24.9 (27.26/0.785)",
+            "\ud83d\uddf3\ufe0f Votes": 424.0,
+            "Organization": "Realistic Vision"
         },
         {
             "Rank": 10,
             "\ud83e\udd16 Model": "Realvisxl-v2.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "24.75 (27.11/0.785)",
+            "\ud83d\uddf3\ufe0f Votes": 436.0,
             "Organization": "Realistic Vision"
         },
         {
@@ -89,106 +90,106 @@
         {
             "Rank": 12,
             "\ud83e\udd16 Model": "Dalle-2",
-            "\u2b50 Score (\u03bc/\u03c3)": "
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "23.04 (25.4/0.787)",
+            "\ud83d\uddf3\ufe0f Votes": 423.0,
             "Organization": "OpenAI"
         },
         {
             "Rank": 13,
             "\ud83e\udd16 Model": "Playground-v2.5",
-            "\u2b50 Score (\u03bc/\u03c3)": "21.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "21.98 (24.36/0.795)",
+            "\ud83d\uddf3\ufe0f Votes": 430.0,
             "Organization": "Playground AI"
         },
         {
             "Rank": 14,
             "\ud83e\udd16 Model": "Kandinsky-v2.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "21.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "21.86 (24.23/0.791)",
+            "\ud83d\uddf3\ufe0f Votes": 427.0,
             "Organization": "AI-Forever"
         },
         {
             "Rank": 15,
-            "\ud83e\udd16 Model": "Playground-v2.0",
-            "\u2b50 Score (\u03bc/\u03c3)": "21.2 (23.59/0.798)",
-            "\ud83d\uddf3\ufe0f Votes": 434.0,
-            "Organization": "Playground AI"
-        },
-        {
-            "Rank": 16,
             "\ud83e\udd16 Model": "SDXL-turbo",
             "\u2b50 Score (\u03bc/\u03c3)": "21.05 (23.42/0.79)",
-            "\ud83d\uddf3\ufe0f Votes":
+            "\ud83d\uddf3\ufe0f Votes": 436.0,
             "Organization": "Stability AI"
         },
+        {
+            "Rank": 16,
+            "\ud83e\udd16 Model": "Playground-v2.0",
+            "\u2b50 Score (\u03bc/\u03c3)": "20.71 (23.1/0.795)",
+            "\ud83d\uddf3\ufe0f Votes": 449.0,
+            "Organization": "Playground AI"
+        },
         {
             "Rank": 17,
             "\ud83e\udd16 Model": "Openjourney-v4",
-            "\u2b50 Score (\u03bc/\u03c3)": "20.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "20.4 (22.76/0.789)",
+            "\ud83d\uddf3\ufe0f Votes": 426.0,
            "Organization": "Prompthero"
         },
         {
             "Rank": 18,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "20.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\ud83e\udd16 Model": "SD-v2.1",
+            "\u2b50 Score (\u03bc/\u03c3)": "20.19 (22.57/0.792)",
+            "\ud83d\uddf3\ufe0f Votes": 424.0,
             "Organization": "Stability AI"
         },
         {
             "Rank": 19,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "19.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "LCM-v1.5",
+            "\u2b50 Score (\u03bc/\u03c3)": "19.79 (22.18/0.797)",
+            "\ud83d\uddf3\ufe0f Votes": 429.0,
+            "Organization": "Tsinghua"
         },
         {
             "Rank": 20,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "19.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "SDXL",
+            "\u2b50 Score (\u03bc/\u03c3)": "19.78 (22.15/0.789)",
+            "\ud83d\uddf3\ufe0f Votes": 441.0,
+            "Organization": "Stability AI"
         },
         {
             "Rank": 21,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "18.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "SSD-1b",
+            "\u2b50 Score (\u03bc/\u03c3)": "18.53 (20.91/0.794)",
+            "\ud83d\uddf3\ufe0f Votes": 427.0,
+            "Organization": "Segmind"
         },
         {
             "Rank": 22,
-            "\ud83e\udd16 Model": "SD-
-            "\u2b50 Score (\u03bc/\u03c3)": "18.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\ud83e\udd16 Model": "SD-v1.5",
+            "\u2b50 Score (\u03bc/\u03c3)": "18.52 (20.92/0.799)",
+            "\ud83d\uddf3\ufe0f Votes": 437.0,
             "Organization": "Stability AI"
         },
         {
             "Rank": 23,
-            "\ud83e\udd16 Model": "
-            "\u2b50 Score (\u03bc/\u03c3)": "18.
-            "\ud83d\uddf3\ufe0f Votes":
-            "Organization": "
+            "\ud83e\udd16 Model": "SD-turbo",
+            "\u2b50 Score (\u03bc/\u03c3)": "18.41 (20.8/0.794)",
+            "\ud83d\uddf3\ufe0f Votes": 423.0,
+            "Organization": "Stability AI"
         },
         {
             "Rank": 24,
             "\ud83e\udd16 Model": "Stable-cascade",
-            "\u2b50 Score (\u03bc/\u03c3)": "15.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "15.27 (17.64/0.789)",
+            "\ud83d\uddf3\ufe0f Votes": 426.0,
             "Organization": "Stability AI"
         },
         {
             "Rank": 25,
             "\ud83e\udd16 Model": "SDXL-Lightning",
-            "\u2b50 Score (\u03bc/\u03c3)": "
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "15.05 (17.43/0.794)",
+            "\ud83d\uddf3\ufe0f Votes": 428.0,
             "Organization": "ByteDance"
         },
         {
             "Rank": 26,
             "\ud83e\udd16 Model": "SDXL-Deepcache",
-            "\u2b50 Score (\u03bc/\u03c3)": "10.
-            "\ud83d\uddf3\ufe0f Votes":
+            "\u2b50 Score (\u03bc/\u03c3)": "10.0 (12.46/0.821)",
+            "\ud83d\uddf3\ufe0f Votes": 423.0,
             "Organization": "NUS"
         }
     ]
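The displayed "Score (μ/σ)" strings are consistent with a conservative skill estimate of μ − 3σ (e.g. 34.75 − 3 × 0.796 ≈ 32.36), the usual TrueSkill-style ranking value. A minimal sketch of recomputing that value from the file above; the key name follows the JSON, while the helper itself is illustrative rather than the repository's actual scoring code.

import json

# Recompute the leaderboard's displayed score from (mu, sigma); the strings in
# sorted_score_list.json match round(mu - 3 * sigma, 2), e.g. 34.75/0.796 -> 32.36.
def conservative_score(mu: float, sigma: float) -> float:
    return round(mu - 3 * sigma, 2)

with open("sorted_score_list.json") as f:
    board = json.load(f)

for entry in board["sorted_score_list"]:
    shown, rest = entry["\u2b50 Score (\u03bc/\u03c3)"].split(" (")
    mu, sigma = map(float, rest.rstrip(")").split("/"))
    # Print displayed score next to the recomputed mu - 3*sigma for comparison.
    print(entry["Rank"], shown, conservative_score(mu, sigma))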
sorted_score_list_video.json
ADDED
@@ -0,0 +1,49 @@
+{
+    "total_models": 6,
+    "total_votes": 52,
+    "last_updated": "2024-07-29",
+    "sorted_score_list": [
+        {
+            "Rank": 0,
+            "\ud83e\udd16 Model": "VideoCrafter2",
+            "\u2b50 Score (\u03bc/\u03c3)": "25.33 (28.23/0.966)",
+            "\ud83d\uddf3\ufe0f Votes": 90.0,
+            "Organization": "Tencent"
+        },
+        {
+            "Rank": 1,
+            "\ud83e\udd16 Model": "LaVie",
+            "\u2b50 Score (\u03bc/\u03c3)": "25.08 (27.83/0.917)",
+            "\ud83d\uddf3\ufe0f Votes": 108.0,
+            "Organization": "Shanghai AI Lab"
+        },
+        {
+            "Rank": 2,
+            "\ud83e\udd16 Model": "OpenSora",
+            "\u2b50 Score (\u03bc/\u03c3)": "24.9 (27.65/0.918)",
+            "\ud83d\uddf3\ufe0f Votes": 111.0,
+            "Organization": "HPC-AI"
+        },
+        {
+            "Rank": 3,
+            "\ud83e\udd16 Model": "AnimateDiff",
+            "\u2b50 Score (\u03bc/\u03c3)": "23.85 (26.56/0.903)",
+            "\ud83d\uddf3\ufe0f Votes": 123.0,
+            "Organization": "CUHK etc."
+        },
+        {
+            "Rank": 4,
+            "\ud83e\udd16 Model": "StableVideoDiffusion",
+            "\u2b50 Score (\u03bc/\u03c3)": "20.96 (24.0/1.015)",
+            "\ud83d\uddf3\ufe0f Votes": 96.0,
+            "Organization": "Stability AI"
+        },
+        {
+            "Rank": 5,
+            "\ud83e\udd16 Model": "Zeroscope-v2-xl",
+            "\u2b50 Score (\u03bc/\u03c3)": "9.39 (13.91/1.508)",
+            "\ud83d\uddf3\ufe0f Votes": 96.0,
+            "Organization": "Cerspense"
+        }
+    ]
+}
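This file is the data source for the new video leaderboard tab. A minimal sketch of rendering it as a table, assuming pandas and gradio are available; the actual build_leaderboard_video_tab in serve/leaderboard.py may be structured differently.

import json

import gradio as gr
import pandas as pd

# Minimal video-leaderboard sketch fed by sorted_score_list_video.json;
# illustrative only, not the repository's exact implementation.
def build_leaderboard_video_tab_sketch():
    with open("sorted_score_list_video.json") as f:
        data = json.load(f)
    df = pd.DataFrame(data["sorted_score_list"])
    gr.Markdown(f"Total votes: {data['total_votes']} (updated {data['last_updated']})")
    gr.Dataframe(value=df, interactive=False)

with gr.Blocks() as demo:
    with gr.Tab("Generation Leaderboard"):
        build_leaderboard_video_tab_sketch()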
webvid_prompt.txt
ADDED
@@ -0,0 +1,100 @@
+Cloudy moscow kremlin time lapse
+Sharp knife to cut delicious smoked fish
+A baker turns freshly baked loaves of sourdough bread
+Shot of beautiful dinnerware and cutlery on a banquet table at restaurant
+Corn husks in field for agriculture farming
+Pear fruits hanging on a branch of a tree rulevogo
+Peaceful girl doing yoga meditate virabhasana sunrise coast slow motion
+Chef with recipe book watching young cook preparing dish in the kitchen
+Man walking in deep snow under the branches
+Fist patting down sand in an orange bucket
+Old decrepit house in the center of the city surrounded by new buildings
+Green grape plants in the middle of nature
+Climbers go up to the mountain pass
+Bodrum holiday resort seashore marina
+A flight over a beautiful stream in the forest
+Attractive young couple at home laughing at funny internet joke online using digital tablet having fun
+A small animation for the design video about redcurrant
+Time lapse of sky clouds storm
+Happy young caucasian baby boy playing toy bucket spade sandy beach with father while mother takes photograph sun lens flare shot on red epic
+A young woman having dental treatment
+Huge waterfall in chilean scenery huilo huilo
+Yellow bird and nest on the branch
+Little girl playing in the garden in summer
+Hands of girl fall out sweets from bowl on white floor
+Woman hairdresser put on curlers to long hair plus size woman
+Pov of two hands and young woman putting the propellers on a drone
+Video of watering a flower pot
+Woman look at the city in hong kong
+Sexy woman posing sitting on chair
+Coconat palm tree on sky tropical view
+Dog sitting tied to a post and waiting for the owner
+Asian female cyclist blogger live stream happily to show her ride on the road
+Pretty student learning in computer class at the university
+Storm clouds and rainbow over bordeaux vineyards
+Aerial view of roof tops of old city
+Mountain area with fog and mist time lapse
+The famous waterfall of niagara falls in canada
+Low section of ice fisherman fishing in tent
+Medical students in training assisting surgeon in hospital operating room
+Passing by a cruise ship in the evening in ha long bay
+London skyline with building site at sunset
+The red deer female close up in the summertime
+Chinese new year flat icon chinese decor lantern
+The waiters in the restaurant put the glasses on buffet table
+Makeup artist dips brush into a makeup kit
+Panning down on team of businesswomen and businessmen in an office working and talking about new projects
+Time lapse in the mountains of brazil tree line
+Sick man in bed measuring temperature feeling fever medium shot
+Santa claus sitting in a chair with a little girl dreaming about her christmas presents
+Abused woman lying in the bathtub screams in despair
+Young mime is juggling in the park
+Countryside timelapse in spain
+Green branches of a coniferous tree with needles fill the entire screen
+Closeup of pouring healthy black chia seeds into a bowl
+South korea high resolution victory concept
+Inspecting south african passport
+Attractive business woman reading documents in busy office female executive working on company project deadline
+Slow dolly in towards beautiful waterfall through bright fall colors
+Digital animation of american flag waving while background shows silhouette of hill with tree during sunset
+Hud multipath target monitoring
+Happy young businessman thinking while using phone
+Thick morning fog in the summer forest
+Group of children play a game of reaction speed in scientific museum
+Abstract chrisanthemum background
+Inside processor of a battery chip
+Big and small frog sitting in the water and swimming away
+Street lamp fallen down on ground with shattered glass from snow storm
+Rustic cabin in early autumn in the great smoky mountains with dark shadows and some autumn color as seen from a window pane from another historical cabin
+Night vision view of timber wolf
+Pseudo galena by pyrrhotite hematitiziran
+Gorgeous couple in love have a romantic walk in magnificent budapest blurred background at night
+Close up thoughtful pretty young woman relaxing at the beach resort
+Happy senior woman working on laptop computer
+Singing nightingale on a tree branch
+Pouring of rice water from jug into glass on table
+Flaming gorge national park in summer in utah with handheld shot of brown cow grazing closeup on grass herd near ranch
+Aerial flyby miami beach condominiums
+Jungle covered mountain on panay island in the philippines
+Close up portrait of two caucasian young girl and boy playing together on digital tablet
+White spools are getting wound by textile equipment
+The wedge newport beach california waves crashing aerial footage
+Peak at kaengkrachan national park thailand
+Christmas bauble dropping and bouncing beside crackers and presents in slow motion
+Happy extended family on the sofa at home
+Closeup shot of young female confectioner decorating delicious handmade cake placed on rotating stand with freshly baked meringues
+Non smoking sign in the airplane cabin
+Swan opening up wings and flapping them to dry off while grooming
+A series of explosions in the winter tundra
+Man actor wearing a tiger clothe preparing his acting with a tambourine
+Abstract shape pattern texture moving background
+Above the thermal station two pipes smoking in the gray sky
+Vacation water villas on tropical island
+Dense fog glows orange and covers hills at dawn
+People walking on the beach in timelapse
+Aerial view of wind turbines energy production in yellow fields
+Preparing and mixing a fresh salad
+A jetsurfer taking various turns while jet surfing on water
+Happy little girl jumping in swimming pool and learning how to swim with help of father
+Student boy wash the school board
+Office workers showing work on laptop
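webvid_prompt.txt holds one filtered WebVid caption per line, and the video arena appears to reference prompts by line index (the prompt_num passed to generate_igv_annoy and the fixed values 3/93/84 in the example handler). A minimal sketch of sampling a prompt together with its index; whether the repository indexes from 0 or 1 is an assumption here.

import random

# Load the filtered WebVid captions, one prompt per line.
with open("webvid_prompt.txt", encoding="utf-8") as f:
    prompts = [line.strip() for line in f if line.strip()]

# Sample a prompt together with the index used to look up pre-generated videos.
# 0-based indexing is assumed; the repo may use a different convention.
prompt_num = random.randrange(len(prompts))
text = prompts[prompt_num]
print(prompt_num, text)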