tianleliphoebe committed
Commit 944dd2b
1 Parent(s): ea4fcea

add video model

.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/GenAI-Arena.iml ADDED
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="GOOGLE" />
+    <option name="myDocStringFormat" value="Google" />
+  </component>
+  <component name="TemplatesService">
+    <option name="TEMPLATE_CONFIGURATION" value="Jinja2" />
+  </component>
+</module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/GenAI-Arena.iml" filepath="$PROJECT_DIR$/.idea/GenAI-Arena.iml" />
+    </modules>
+  </component>
+</project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import os
 from serve.gradio_web import *
 from serve.gradio_web_image_editing import *
+from serve.gradio_web_video_generation import *
 from serve.leaderboard import build_leaderboard_tab
 from model.model_manager import ModelManager
 from pathlib import Path
@@ -47,6 +48,22 @@ def build_combine_demo(models, elo_results_file, leaderboard_table_file):
             with gr.Tab("About Us", id=9):
                 build_about()
 
+        with gr.Tab("Video Generation", id=10):
+            with gr.Tabs() as tabs_vg:
+                with gr.Tab("Video Generation Arena (battle)", id=10):
+                    build_side_by_side_ui_anony_vg(models)
+
+                with gr.Tab("Video Generation Arena (side-by-side)", id=11):
+                    build_side_by_side_ui_named_vg(models)
+
+                with gr.Tab("Video Generation Direct Chat", id=12):
+                    build_single_model_ui_vg(models, add_promotion_links=True)
+                if elo_results_file and 'video_generation' in elo_results_file:
+                    with gr.Tab("Video Generation Leaderboard", id=13):
+                        build_leaderboard_tab(elo_results_file['video_generation'], leaderboard_table_file['video_generation'])
+                with gr.Tab("About Us", id=14):
+                    build_about()
+
     return demo
 
 
@@ -63,6 +80,8 @@ def load_elo_results(elo_results_dir):
             elo_results_file['t2i_generation'] = file
         elif 'image_editing' in file.name:
            elo_results_file['image_editing'] = file
+        elif 'video_generation' in file.name:
+            elo_results_file['video_generation'] = file
         else:
             raise ValueError(f"Unknown file name: {file.name}")
     for file in elo_results_dir.glob('*_leaderboard.csv'):
@@ -70,6 +89,8 @@ def load_elo_results(elo_results_dir):
             leaderboard_table_file['t2i_generation'] = file
         elif 'image_editing' in file.name:
             leaderboard_table_file['image_editing'] = file
+        elif 'video_generation' in file.name:
+            leaderboard_table_file['video_generation'] = file
         else:
             raise ValueError(f"Unknown file name: {file.name}")
 
model/model_manager.py CHANGED
@@ -5,12 +5,13 @@ import requests
 import io, base64, json
 import spaces
 from PIL import Image
-from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, load_pipeline
+from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
 
 class ModelManager:
     def __init__(self):
         self.model_ig_list = IMAGE_GENERATION_MODELS
         self.model_ie_list = IMAGE_EDITION_MODELS
+        self.model_vg_list = VIDEO_GENERATION_MODELS
         self.loaded_models = {}
 
     def load_model_pipe(self, model_name):
@@ -79,4 +80,34 @@ class ModelManager:
         for future in concurrent.futures.as_completed(future_to_result):
             result = future.result()
             results.append(result)
-        return results[0], results[1], model_names[0], model_names[1]
+        return results[0], results[1], model_names[0], model_names[1]
+
+    @spaces.GPU(duration=150)
+    def generate_video_vg(self, prompt, model_name):
+        pipe = self.load_model_pipe(model_name)
+        result = pipe(prompt=prompt)
+        return result
+
+    def generate_video_vg_parallel_anony(self, prompt, model_A, model_B):
+        if model_A == "" and model_B == "":
+            model_names = random.sample([model for model in self.model_vg_list], 2)
+        else:
+            model_names = [model_A, model_B]
+
+        results = []
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future_to_result = {executor.submit(self.generate_video_vg, prompt, model): model for model in model_names}
+            for future in concurrent.futures.as_completed(future_to_result):
+                result = future.result()
+                results.append(result)
+        return results[0], results[1], model_names[0], model_names[1]
+
+    def generate_video_vg_parallel(self, prompt, model_A, model_B):
+        results = []
+        model_names = [model_A, model_B]
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future_to_result = {executor.submit(self.generate_video_vg, prompt, model): model for model in model_names}
+            for future in concurrent.futures.as_completed(future_to_result):
+                result = future.result()
+                results.append(result)
+        return results[0], results[1]
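Note that both parallel helpers collect outputs with `concurrent.futures.as_completed`, which yields futures in completion order, so `results[0]` is the first video to finish, not necessarily the one from `model_names[0]`. A minimal sketch (hypothetical, not part of this commit) of using the otherwise-unused future-to-model mapping to return results in submission order:

```python
import concurrent.futures

def generate_parallel_ordered(gen_fn, prompt, model_names):
    # Pin each future to its model so outputs can be returned in
    # model_names order, regardless of which request finishes first.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_model = {executor.submit(gen_fn, prompt, m): m for m in model_names}
        results = {}
        for future in concurrent.futures.as_completed(future_to_model):
            results[future_to_model[future]] = future.result()
    return [results[m] for m in model_names]
```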
model/model_registry.py CHANGED
@@ -159,15 +159,30 @@ register_model_info(
 )
 
 register_model_info(
-    ["fal_stable_cascade"],
+    ["fal_stable-cascade_text2image"],
     "StableCascade",
     "https://fal.ai/models/stable-cascade/api",
     "StableCascade is a generative model that can generate high-quality images from text prompts.",
 )
+
+register_model_info(
+    ["fal_fast-animatediff/text-to-video_text2video"],
+    "AnimateDiff",
+    "https://fal.ai/models/fast-animatediff-t2v",
+    "AnimateDiff is a text-driven model that produces diverse and personalized animated images.",
+)
+
+register_model_info(
+    ["fal_fast-animatediff/turbo/text-to-video_text2video"],
+    "AnimateDiff Turbo",
+    "https://fal.ai/models/fast-animatediff-t2v-turbo",
+    "AnimateDiff Turbo is a lightning version of AnimateDiff.",
+)
 
 
 models = ['imagenhub_LCM_generation','imagenhub_SDXLTurbo_generation','imagenhub_SDXL_generation',
-          'imagenhub_OpenJourney_generation','imagenhub_PixArtAlpha_generation','imagenhub_SDXLLightning_generation',
-          'imagenhub_StableCascade_generation','imagenhub_PlaygroundV2_generation', 'fal_Playground-v25_generation', 'fal_stable_cascade',
-          'imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
-          'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition', 'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition']
+          'imagenhub_OpenJourney_generation','imagenhub_PixArtAlpha_generation','imagenhub_SDXLLightning_generation',
+          'imagenhub_StableCascade_generation','imagenhub_PlaygroundV2_generation', 'fal_Playground-v25_generation', 'fal_stable-cascade_text2image',
+          'imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
+          'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition', 'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition',
+          "fal_fast-animatediff/turbo/text-to-video_text2video", "fal_fast-animatediff/text-to-video_text2video"]
model/models/__init__.py CHANGED
@@ -1,12 +1,14 @@
 from .imagenhub_models import load_imagenhub_model
 from .playground_api import load_playground_model
-# from .fal_api_models import load_fal_model
+from .fal_api_models import load_fal_model
 
 IMAGE_GENERATION_MODELS = ['imagenhub_LCM_generation','imagenhub_SDXLTurbo_generation','imagenhub_SDXL_generation', 'imagenhub_PixArtAlpha_generation',
                            'imagenhub_OpenJourney_generation','imagenhub_SDXLLightning_generation', 'imagenhub_StableCascade_generation',
                            'playground_PlayGroundV2_generation', 'playground_PlayGroundV2.5_generation']
 IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
                         'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition', 'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition']
+VIDEO_GENERATION_MODELS = ['fal_fast-animatediff/text-to-video_text2video',
+                           'fal_fast-animatediff/turbo/text-to-video_text2video']
 
 
 def load_pipeline(model_name):
@@ -24,7 +26,7 @@ def load_pipeline(model_name):
     elif model_source == "playground":
         pipe = load_playground_model(model_name)
     elif model_source == "fal":
-        raise NotImplementedError("FAL models are not supported yet")
+        pipe = load_fal_model(model_name, model_type)
     else:
         raise ValueError(f"Model source {model_source} not supported")
     return pipe
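The identifiers above follow a `{source}_{name}_{type}` convention. The parsing inside `load_pipeline` is not shown in this hunk, so the split below is an assumption, but a single split on `_` is consistent with the names used here (the fal model names themselves contain no underscores):

```python
# Hypothetical illustration of the model-name convention used by the
# registry lists above.
name = 'fal_fast-animatediff/text-to-video_text2video'
model_source, model_name, model_type = name.split('_')
assert (model_source, model_type) == ('fal', 'text2video')
assert model_name == 'fast-animatediff/text-to-video'
```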
model/models/fal_api_models.py CHANGED
@@ -1,49 +1,73 @@
-import fal
+import fal_client
+from PIL import Image
+import requests
+import io
+import os
+
 
 class FalModel():
     def __init__(self, model_name, model_type):
         self.model_name = model_name
-        self.modle_type = model_type
+        self.model_type = model_type
+        os.environ['FAL_KEY'] = os.environ['FalAPI']
 
     def __call__(self, *args, **kwargs):
-
         if self.model_type == "text2image":
             assert "prompt" in kwargs, "prompt is required for text2image model"
-            handler = fal.apps.submit(
+            handler = fal_client.submit(
                 f"fal-ai/{self.model_name}",
                 arguments={
                     "prompt": kwargs["prompt"]
                 },
             )
-
-            for event in handler.iter_events():
-                if isinstance(event, fal.apps.InProgress):
+            for event in handler.iter_events(with_logs=True):
+                if isinstance(event, fal_client.InProgress):
                     print('Request in progress')
                     print(event.logs)
-
             result = handler.get()
+            result_url = result['images'][0]['url']
+            response = requests.get(result_url)
+            result = Image.open(io.BytesIO(response.content))
             return result
         elif self.model_type == "image2image":
-            assert "image" in kwargs or "image_url" in kwargs, "image or image_url is required for image2image model"
-            if "image" in kwargs:
-                image_url = None
-                pass
-            handler = fal.apps.submit(
-                f"fal-ai/{self.model_name}",
-                arguments={
-                    "image_url": image_url
-                },
-            )
-
-            for event in handler.iter_events():
-                if isinstance(event, fal.apps.InProgress):
-                    print('Request in progress')
-                    print(event.logs)
-
-            result = handler.get()
-            return result
+            raise NotImplementedError("image2image model is not implemented yet")
+            # assert "image" in kwargs or "image_url" in kwargs, "image or image_url is required for image2image model"
+            # if "image" in kwargs:
+            #     image_url = None
+            #     pass
+            # handler = fal_client.submit(
+            #     f"fal-ai/{self.model_name}",
+            #     arguments={
+            #         "image_url": image_url
+            #     },
+            # )
+            #
+            # for event in handler.iter_events():
+            #     if isinstance(event, fal_client.InProgress):
+            #         print('Request in progress')
+            #         print(event.logs)
+            #
+            # result = handler.get()
+            # return result
         elif self.model_type == "text2video":
-            raise NotImplementedError("text2video model is not implemented yet")
+            assert "prompt" in kwargs, "prompt is required for text2video model"
+            handler = fal_client.submit(
+                f"fal-ai/{self.model_name}",
+                arguments={
+                    "prompt": kwargs["prompt"]
+                },
+            )
+
+            for event in handler.iter_events(with_logs=True):
+                if isinstance(event, fal_client.InProgress):
+                    print('Request in progress')
+                    print(event.logs)
+
+            result = handler.get()
+            print("result video: ====")
+            print(result)
+            result_url = result['video']['url']
+            return result_url
         else:
             raise ValueError("model_type must be text2image or image2image")
 
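A minimal usage sketch of the rewritten wrapper, assuming the `FalAPI` environment variable holds a valid fal key (per the constructor above) and that the model strings mirror the fal-ai endpoint names registered in model_registry.py:

```python
from model.models.fal_api_models import FalModel

# text2video: returns the URL of the generated .mp4 hosted by fal
t2v = FalModel("fast-animatediff/text-to-video", "text2video")
video_url = t2v(prompt="a cute dog is playing a ball")

# text2image: downloads the first result and returns it as a PIL.Image
t2i = FalModel("stable-cascade", "text2image")
image = t2i(prompt="a serene underwater coral reef")
```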
 
requirements.txt CHANGED
@@ -48,3 +48,5 @@ krippendorff
 statsmodels
 plotly
 -e git+https://github.com/TIGER-AI-Lab/ImagenHub.git#egg=imagen-hub
+fal_client
+
serve/constants.py CHANGED
@@ -2,6 +2,7 @@ import os
 
 LOGDIR = os.getenv("LOGDIR", "./GenAI-Arena-hf-logs/vote_log")
 IMAGE_DIR = os.getenv("IMAGE_DIR", f"{LOGDIR}/images")
+VIDEO_DIR = os.getenv("VIDEO_DIR", f"{LOGDIR}/videos")
 
 SERVER_PORT = os.getenv("SERVER_PORT", 7860)
 ROOT_PATH = os.getenv("ROOT_PATH", None)
@@ -13,5 +14,7 @@ LOG_SERVER_ADDR = os.getenv("LOG_SERVER_ADDR", f"{LOG_SERVER}{LOG_SERVER_SUBDOMAIN}")
 # LOG SERVER API ENDPOINTS
 APPEND_JSON = "append_json"
 SAVE_IMAGE = "save_image"
+SAVE_VIDEO = "save_video"
 SAVE_LOG = "save_log"
 
+
serve/gradio_web.py CHANGED
@@ -82,7 +82,7 @@ Find out who is the 🥇conditional image generation models! More models are goi
     with gr.Row():
         clear_btn = gr.Button(value="🎲 New Round", interactive=False)
         regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
-        share_btn = gr.Button(value="📷 Share")
+        # share_btn = gr.Button(value="📷 Share")
 
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
 
@@ -162,12 +162,12 @@ Find out who is the 🥇conditional image generation models! More models are goi
         outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
     )
 
-    share_btn.click(
-        share_click,
-        inputs=[state0, state1, model_selector_left, model_selector_right],
-        outputs=[],
-        js=share_js
-    )
+    # share_btn.click(
+    #     share_click,
+    #     inputs=[state0, state1, model_selector_left, model_selector_right],
+    #     outputs=[],
+    #     js=share_js
+    # )
 
 
 def build_side_by_side_ui_named(models):
@@ -241,7 +241,7 @@ def build_side_by_side_ui_named(models):
     with gr.Row():
         clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
         regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
-        share_btn = gr.Button(value="📷 Share")
+        # share_btn = gr.Button(value="📷 Share")
 
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
 
@@ -322,12 +322,12 @@ def build_side_by_side_ui_named(models):
         outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
    )
 
-    share_btn.click(
-        share_click,
-        inputs=[state0, state1, model_selector_left, model_selector_right],
-        outputs=[],
-        js=share_js
-    )
+    # share_btn.click(
+    #     share_click,
+    #     inputs=[state0, state1, model_selector_left, model_selector_right],
+    #     outputs=[],
+    #     js=share_js
+    # )
 
 def build_single_model_ui(models, add_promotion_links=False):
     promotion = (
serve/gradio_web_image_editing.py CHANGED
@@ -97,7 +97,7 @@ Find out who is the 🥇conditional image edition models!
     with gr.Row() as button_row:
         clear_btn = gr.Button(value="🎲 New Round", interactive=False)
         regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
-        share_btn = gr.Button(value="📷 Share")
+        # share_btn = gr.Button(value="📷 Share")
 
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
 
@@ -194,12 +194,7 @@ Find out who is the 🥇conditional image edition models!
         inputs=[state0, state1, dummy_left_model, dummy_right_model],
         outputs=[model_selector_left, model_selector_right] + input_list + vote_btns,
     )
-    share_btn.click(
-        share_click,
-        inputs=[state0, state1, model_selector_left, model_selector_right],
-        outputs=[],
-        js=share_js
-    )
+
 
 
 
@@ -292,7 +287,7 @@ def build_side_by_side_ui_named_ie(models):
     with gr.Row() as button_row:
         clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
         regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
-        share_btn = gr.Button(value="📷 Share")
+        # share_btn = gr.Button(value="📷 Share")
 
     gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
     gr.Examples(
@@ -393,12 +388,6 @@ def build_side_by_side_ui_named_ie(models):
         outputs=[model_selector_left, model_selector_right] + input_list + vote_btns,
     )
 
-    share_btn.click(
-        share_click,
-        inputs=[state0, state1, model_selector_left, model_selector_right],
-        outputs=[],
-        js=share_js
-    )
 
 
 def build_single_model_ui_ie(models, add_promotion_links=False):
serve/gradio_web_video_generation.py ADDED
@@ -0,0 +1,482 @@
+from .utils import *
+from .vote_utils import (
+    upvote_last_response_vg as upvote_last_response,
+    downvote_last_response_vg as downvote_last_response,
+    flag_last_response_vg as flag_last_response,
+    leftvote_last_response_vgm as leftvote_last_response,
+    rightvote_last_response_vgm as rightvote_last_response,
+    tievote_last_response_vgm as tievote_last_response,
+    bothbad_vote_last_response_vgm as bothbad_vote_last_response,
+    # share_click_vgm as share_click,
+    generate_vg,
+    generate_vgm,
+    generate_vgm_annoy,
+    share_js
+)
+from functools import partial
+
+
+def build_side_by_side_ui_anony_vg(models):
+    notice_markdown = """
+# ⚔️ GenAI-Arena ⚔️ : Benchmarking Visual Generative Models in the Wild
+| [GitHub](https://github.com/TIGER-AI-Lab/ImagenHub) | [Paper](https://arxiv.org/abs/2310.01596) | [Dataset](https://huggingface.co/ImagenHub) |
+## 📜 Rules
+- Input a prompt for two anonymous models in the same area (e.g., AnimateDiff, AnimateDiff-Turbo) and vote for the better one!
+- When the results are ready, click the button below to vote.
+- Votes won't be counted if the model identity is revealed during the conversation.
+- Click "New Round" to start a new round.
+
+## 🏆 Arena Elo
+Find out which is the 🥇 conditional video generation model! More models are going to be supported.
+
+## 👇 Generating now!
+
+"""
+
+    model_list = models.model_vg_list
+
+    state0 = gr.State()
+    state1 = gr.State()
+    gen_func = partial(generate_vgm_annoy, models.generate_video_vg_parallel_anony)
+
+    gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+    with gr.Group(elem_id="share-region-anony"):
+        with gr.Accordion("🔍 Expand to see all Arena players", open=False):
+            model_description_md = get_model_description_md(model_list)
+            gr.Markdown(model_description_md, elem_id="model_description_markdown")
+        with gr.Row():
+            with gr.Column():
+                chatbot_left = gr.Video(width=512, label="Model A", autoplay=True)
+            with gr.Column():
+                chatbot_right = gr.Video(width=512, label="Model B", autoplay=True)
+
+        with gr.Row():
+            with gr.Column():
+                model_selector_left = gr.Markdown("", visible=False)
+            with gr.Column():
+                model_selector_right = gr.Markdown("", visible=False)
+        with gr.Row():
+            slow_warning = gr.Markdown("", elem_id="notice_markdown")
+
+    with gr.Row():
+        leftvote_btn = gr.Button(
+            value="👈 A is better", visible=False, interactive=False
+        )
+        rightvote_btn = gr.Button(
+            value="👉 B is better", visible=False, interactive=False
+        )
+        tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False)
+        bothbad_btn = gr.Button(
+            value="👎 Both are bad", visible=False, interactive=False
+        )
+
+    with gr.Row():
+        textbox = gr.Textbox(
+            show_label=False,
+            placeholder="👉 Enter your prompt and press ENTER",
+            container=True,
+            elem_id="input_box",
+        )
+        send_btn = gr.Button(value="Send", variant="primary", scale=0)
+
+    with gr.Row():
+        clear_btn = gr.Button(value="🎲 New Round", interactive=False)
+        regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
+        # share_btn = gr.Button(value="📷 Share")
+
+    gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
+
+    dummy_video_output = gr.Video(width=512, visible=False)
+    gr.Examples(
+        examples=[["a cute dog is playing a ball",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/lion/BFcM30vpeyRHFfdhbfFoY.mp4'],
+                  ["Buildings on fire, old film still",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/zebra/r4NeCgXTkpzIBCF0DDHqJ.mp4'],
+                  ["A serene underwater scene featuring a sea turtle swimming through a coral reef.",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/tiger/cBHesvgUzjonNUEL1FQGw.mp4'],
+                  ["A futuristic hopeful busy city, purple and green color scheme",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/monkey/5-vUtY4_bHAhTtevZx16K.mp4']],
+        inputs=[textbox, dummy_video_output])
+
+    btn_list = [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, regenerate_btn, clear_btn]
+
+    textbox.submit(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right, model_selector_left, model_selector_right],
+        api_name="submit_btn_annony"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+    send_btn.click(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right, model_selector_left, model_selector_right],
+        api_name="send_btn_annony"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    clear_btn.click(
+        clear_history_side_by_side_anony,
+        inputs=None,
+        outputs=[state0, state1, textbox, chatbot_left, chatbot_right, model_selector_left, model_selector_right],
+        api_name="clear_btn_annony"
+    ).then(
+        disable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    regenerate_btn.click(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right, model_selector_left, model_selector_right],
+        api_name="regenerate_btn_annony"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+    dummy_left_model = gr.State("")
+    dummy_right_model = gr.State("")
+    leftvote_btn.click(
+        leftvote_last_response,
+        inputs=[state0, state1, dummy_left_model, dummy_right_model],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    rightvote_btn.click(
+        rightvote_last_response,
+        inputs=[state0, state1, dummy_left_model, dummy_right_model],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    tie_btn.click(
+        tievote_last_response,
+        inputs=[state0, state1, dummy_left_model, dummy_right_model],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    bothbad_btn.click(
+        bothbad_vote_last_response,
+        inputs=[state0, state1, dummy_left_model, dummy_right_model],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+
+    # share_btn.click(
+    #     share_click,
+    #     inputs=[state0, state1, model_selector_left, model_selector_right],
+    #     outputs=[],
+    #     js=share_js
+    # )
+
+
+def build_side_by_side_ui_named_vg(models):
+    notice_markdown = """
+# ⚔️ GenAI-Arena ⚔️ : Benchmarking Visual Generative Models in the Wild
+| [GitHub](https://github.com/TIGER-AI-Lab/ImagenHub) | [Paper](https://arxiv.org/abs/2310.01596) | [Dataset](https://huggingface.co/ImagenHub) |
+
+## 📜 Rules
+- Generate with any two selected models side-by-side and vote!
+- Input the prompt you want to generate.
+- Click "Send" to submit the prompt.
+- Click "Clear history" to start a new round.
+
+## 🤖 Choose two models to compare
+"""
+    model_list = models.model_vg_list
+
+    state0 = gr.State()
+    state1 = gr.State()
+    gen_func = partial(generate_vgm, models.generate_video_vg_parallel)
+    gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+    with gr.Group(elem_id="share-region-named"):
+        with gr.Row():
+            with gr.Column():
+                model_selector_left = gr.Dropdown(
+                    choices=model_list,
+                    value=model_list[0] if len(model_list) > 0 else "",
+                    interactive=True,
+                    show_label=False,
+                    container=False,
+                )
+            with gr.Column():
+                model_selector_right = gr.Dropdown(
+                    choices=model_list,
+                    value=model_list[1] if len(model_list) > 1 else "",
+                    interactive=True,
+                    show_label=False,
+                    container=False,
+                )
+        with gr.Row():
+            with gr.Accordion("🔍 Expand to see all model descriptions", open=False):
+                model_description_md = get_model_description_md(model_list)
+                gr.Markdown(model_description_md, elem_id="model_description_markdown")
+
+        with gr.Row():
+            with gr.Column():
+                chatbot_left = gr.Video(width=512, label="Model A", autoplay=True)
+            with gr.Column():
+                chatbot_right = gr.Video(width=512, label="Model B", autoplay=True)
+    with gr.Row():
+        leftvote_btn = gr.Button(
+            value="👈 A is better", visible=False, interactive=False
+        )
+        rightvote_btn = gr.Button(
+            value="👉 B is better", visible=False, interactive=False
+        )
+        tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False)
+        bothbad_btn = gr.Button(
+            value="👎 Both are bad", visible=False, interactive=False
+        )
+
+    with gr.Row():
+        textbox = gr.Textbox(
+            show_label=False,
+            placeholder="👉 Enter your prompt and press ENTER",
+            elem_id="input_box"
+        )
+        send_btn = gr.Button(value="Send", variant="primary", scale=0)
+
+    with gr.Row():
+        clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
+        regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
+        # share_btn = gr.Button(value="📷 Share")
+
+    gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
+
+    dummy_video_output = gr.Video(width=512, visible=False)
+    gr.Examples(
+        examples=[["a cute dog is playing a ball",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/lion/BFcM30vpeyRHFfdhbfFoY.mp4'],
+                  ["Buildings on fire, old film still",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/zebra/r4NeCgXTkpzIBCF0DDHqJ.mp4'],
+                  ["A serene underwater scene featuring a sea turtle swimming through a coral reef.",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/tiger/cBHesvgUzjonNUEL1FQGw.mp4'],
+                  ["A futuristic hopeful busy city, purple and green color scheme",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/monkey/5-vUtY4_bHAhTtevZx16K.mp4']],
+        inputs=[textbox, dummy_video_output])
+
+    model_selector_left.change(clear_history_side_by_side, inputs=None,
+                               outputs=[state0, state1, textbox, chatbot_left, chatbot_right],
+                               api_name="model_selector_left_side_by_side")
+    model_selector_right.change(clear_history_side_by_side, inputs=None,
+                                outputs=[state0, state1, textbox, chatbot_left, chatbot_right],
+                                api_name="model_selector_right_side_by_side")
+
+    btn_list = [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, regenerate_btn, clear_btn]
+
+    textbox.submit(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right],
+        api_name="textbox_side_by_side"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    send_btn.click(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right],
+        api_name="send_side_by_side"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+    regenerate_btn.click(
+        gen_func,
+        inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right],
+        api_name="regenerate_side_by_side"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    clear_btn.click(
+        clear_history_side_by_side,
+        inputs=None,
+        outputs=[state0, state1, textbox, chatbot_left, chatbot_right],
+        api_name="clear_btn_side_by_side"
+    ).then(
+        disable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    leftvote_btn.click(
+        leftvote_last_response,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    rightvote_btn.click(
+        rightvote_last_response,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    tie_btn.click(
+        tievote_last_response,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+    bothbad_btn.click(
+        bothbad_vote_last_response,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, model_selector_left, model_selector_right]
+    )
+
+    # share_btn.click(
+    #     share_click,
+    #     inputs=[state0, state1, model_selector_left, model_selector_right],
+    #     outputs=[],
+    #     js=share_js
+    # )
+
+
+def build_single_model_ui_vg(models, add_promotion_links=False):
+    promotion = (
+        """
+- | [GitHub](https://github.com/TIGER-AI-Lab/ImagenHub) | [Paper](https://arxiv.org/abs/2310.01596) | [Dataset](https://huggingface.co/ImagenHub) |
+"""
+        if add_promotion_links
+        else ""
+    )
+
+    notice_markdown = f"""
+# 🏔️ Play with Video Generation Models
+{promotion}
+
+## 🤖 Choose any model to generate
+"""
+
+    state = gr.State()
+    gen_func = partial(generate_vg, models.generate_video_vg)
+    gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+    model_list = models.model_vg_list
+
+    with gr.Row(elem_id="model_selector_row"):
+        model_selector = gr.Dropdown(
+            choices=model_list,
+            value=model_list[0] if len(model_list) > 0 else "",
+            interactive=True,
+            show_label=False
+        )
+
+    with gr.Row():
+        with gr.Accordion(
+            "🔍 Expand to see all model descriptions",
+            open=False,
+            elem_id="model_description_accordion",
+        ):
+            model_description_md = get_model_description_md(model_list)
+            gr.Markdown(model_description_md, elem_id="model_description_markdown")
+
+    with gr.Row():
+        textbox = gr.Textbox(
+            show_label=False,
+            placeholder="👉 Enter your prompt and press ENTER",
+            elem_id="input_box"
+        )
+
+        send_btn = gr.Button(value="Send", variant="primary", scale=0)
+
+    with gr.Row():
+        chatbot = gr.Video(width=512, autoplay=True)
+
+    with gr.Row() as button_row:
+        upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
+        downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
+        flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
+        regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
+        clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
+
+    if add_promotion_links:
+        gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
+
+    dummy_video_output = gr.Video(width=512, visible=False)
+    gr.Examples(
+        examples=[["a cute dog is playing a ball",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/lion/BFcM30vpeyRHFfdhbfFoY.mp4'],
+                  ["Buildings on fire, old film still",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/zebra/r4NeCgXTkpzIBCF0DDHqJ.mp4'],
+                  ["A serene underwater scene featuring a sea turtle swimming through a coral reef.",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/tiger/cBHesvgUzjonNUEL1FQGw.mp4'],
+                  ["A futuristic hopeful busy city, purple and green color scheme",
+                   'https://fal-cdn.batuhan-941.workers.dev/files/monkey/5-vUtY4_bHAhTtevZx16K.mp4']],
+        inputs=[textbox, dummy_video_output])
+
+    model_selector.change(clear_history, inputs=None, outputs=[state, textbox, chatbot],
+                          api_name="model_selector_single")
+
+    btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
+
+    textbox.submit(
+        gen_func,
+        inputs=[state, textbox, model_selector],
+        outputs=[state, chatbot],
+        api_name="submit_btn_single",
+        show_progress="full"
+    ).success(
+        enable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
+    send_btn.click(
+        gen_func,
+        inputs=[state, textbox, model_selector],
+        outputs=[state, chatbot],
+        api_name="send_btn_single",
+        show_progress="full"
+    ).success(
+        enable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
+
+    upvote_btn.click(
+        upvote_last_response,
+        inputs=[state, model_selector],
+        outputs=[textbox, upvote_btn, downvote_btn, flag_btn]
+    )
+
+    downvote_btn.click(
+        downvote_last_response,
+        inputs=[state, model_selector],
+        outputs=[textbox, upvote_btn, downvote_btn, flag_btn]
+    )
+    flag_btn.click(
+        flag_last_response,
+        inputs=[state, model_selector],
+        outputs=[textbox, upvote_btn, downvote_btn, flag_btn]
+    )
+    regenerate_btn.click(
+        gen_func,
+        inputs=[state, textbox, model_selector],
+        outputs=[state, chatbot],
+        api_name="regenerate_btn_single",
+        show_progress="full"
+    ).success(
+        enable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
+    clear_btn.click(
+        clear_history,
+        inputs=None,
+        outputs=[state, textbox, chatbot],
+        api_name="clear_history_single",
+        show_progress="full"
+    ).then(
+        disable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
+
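The new UI modules all wire model calls into Gradio event handlers by currying a `ModelManager` method into a shared generator with `functools.partial`, so handlers receive only user-facing inputs. A minimal standalone sketch of that pattern (the `DummyManager` below is hypothetical, for illustration only):

```python
from functools import partial

def generate(gen_fn, prompt, model_name):
    # Shared generator: the UI fixes gen_fn up front; Gradio supplies
    # only the user-facing inputs (prompt and model name).
    return gen_fn(prompt, model_name)

class DummyManager:
    def generate_video_vg(self, prompt, model_name):
        return f"{model_name} -> video for '{prompt}'"

models = DummyManager()
gen_func = partial(generate, models.generate_video_vg)
print(gen_func("a cute dog is playing a ball",
               "fal_fast-animatediff/text-to-video_text2video"))
```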
serve/log_server.py CHANGED
@@ -4,7 +4,7 @@ import json
 import os
 import aiofiles
 from .log_utils import build_logger
-from .constants import LOG_SERVER_SUBDOMAIN, APPEND_JSON, SAVE_IMAGE, SAVE_LOG
+from .constants import LOG_SERVER_SUBDOMAIN, APPEND_JSON, SAVE_IMAGE, SAVE_VIDEO, SAVE_LOG
 
 logger = build_logger("log_server", "log_server.log", add_remote_handler=False)
 
@@ -40,6 +40,20 @@ async def save_image(image: UploadFile = File(...), image_path: str = Form(...))
     logger.info(f"Image saved successfully at {image_path}")
     return {"message": f"Image saved successfully at {image_path}"}
 
+@app.post(f"/{SAVE_VIDEO}")
+async def save_video(video: UploadFile = File(...), video_path: str = Form(...)):
+    """
+    Saves an uploaded video to the specified path.
+    """
+    # Note: 'video_path' should include the file name and extension for the video to be saved.
+    if os.path.dirname(video_path):
+        os.makedirs(os.path.dirname(video_path), exist_ok=True)
+    async with aiofiles.open(video_path, mode='wb') as f:
+        content = await video.read()  # Read the content of the uploaded video
+        await f.write(content)  # Write the video content to a file
+    logger.info(f"Video saved successfully at {video_path}")
+    return {"message": f"Video saved successfully at {video_path}"}
+
 @app.post(f"/{SAVE_LOG}")
 async def save_log(message: str = Form(...), log_path: str = Form(...)):
     """
serve/utils.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
 import gradio as gr
 from pathlib import Path
 from model.model_registry import *
-from .constants import LOGDIR, LOG_SERVER_ADDR, APPEND_JSON, SAVE_IMAGE, SAVE_LOG
+from .constants import LOGDIR, LOG_SERVER_ADDR, APPEND_JSON, SAVE_IMAGE, SAVE_VIDEO, SAVE_LOG
 from typing import Union
 
 
@@ -151,6 +151,17 @@ def save_image_file_on_log_server(image_file:str):
         response = requests.post(url, files={'image': f}, data={'image_path': image_file})
     return response
 
+def save_video_file_on_log_server(video_file: str):
+    video_file = Path(video_file).absolute().relative_to(os.getcwd())
+    video_file = str(video_file)
+    # Open the video file in binary mode
+    url = f"{LOG_SERVER_ADDR}/{SAVE_VIDEO}"
+    with open(video_file, 'rb') as f:
+        # Make the POST request, sending the video file and the video path
+        response = requests.post(url, files={'video': f}, data={'video_path': video_file})
+    return response
+
 def append_json_item_on_log_server(json_item: Union[dict, str], log_file: str):
     if isinstance(json_item, dict):
         json_item = json.dumps(json_item)
serve/vote_utils.py CHANGED
@@ -7,12 +7,14 @@ import regex as re
 from pathlib import Path
 from .utils import *
 from .log_utils import build_logger
-from .constants import IMAGE_DIR
+from .constants import IMAGE_DIR, VIDEO_DIR
 
 ig_logger = build_logger("gradio_web_server_image_generation", "gr_web_image_generation.log") # ig = image generation, loggers for single model direct chat
 igm_logger = build_logger("gradio_web_server_image_generation_multi", "gr_web_image_generation_multi.log") # igm = image generation multi, loggers for side-by-side and battle
 ie_logger = build_logger("gradio_web_server_image_editing", "gr_web_image_editing.log") # ie = image editing, loggers for single model direct chat
 iem_logger = build_logger("gradio_web_server_image_editing_multi", "gr_web_image_editing_multi.log") # iem = image editing multi, loggers for side-by-side and battle
+vg_logger = build_logger("gradio_web_server_video_generation", "gr_web_video_generation.log") # vg = video generation, loggers for single model direct chat
+vgm_logger = build_logger("gradio_web_server_video_generation_multi", "gr_web_video_generation_multi.log") # vgm = video generation multi, loggers for side-by-side and battle
 
 def vote_last_response_ig(state, vote_type, model_selector, request: gr.Request):
     with open(get_conv_log_filename(), "a") as fout:
@@ -25,6 +27,10 @@ def vote_last_response_ig(state, vote_type, model_selector, request: gr.Request)
         }
         fout.write(json.dumps(data) + "\n")
         append_json_item_on_log_server(data, get_conv_log_filename())
+    output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
+    with open(output_file, 'w') as f:
+        state.output.save(f, 'JPEG')
+    save_image_file_on_log_server(output_file)
 
 def vote_last_response_igm(states, vote_type, model_selectors, request: gr.Request):
     with open(get_conv_log_filename(), "a") as fout:
@@ -83,6 +89,47 @@ def vote_last_response_iem(states, vote_type, model_selectors, request: gr.Reque
             state.source_image.save(sf, 'JPEG')
         save_image_file_on_log_server(output_file)
         save_image_file_on_log_server(source_file)
+
+
+def vote_last_response_vg(state, vote_type, model_selector, request: gr.Request):
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(time.time(), 4),
+            "type": vote_type,
+            "model": model_selector,
+            "state": state.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+    os.makedirs(os.path.dirname(output_file), exist_ok=True)
+    r = requests.get(state.output)
+    with open(output_file, 'wb') as outfile:
+        outfile.write(r.content)
+    save_video_file_on_log_server(output_file)
+
+
+
+def vote_last_response_vgm(states, vote_type, model_selectors, request: gr.Request):
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(time.time(), 4),
+            "type": vote_type,
+            "models": [x for x in model_selectors],
+            "states": [x.dict() for x in states],
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+    for state in states:
+        output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+        os.makedirs(os.path.dirname(output_file), exist_ok=True)
+        r = requests.get(state.output)
+        with open(output_file, 'wb') as outfile:
+            outfile.write(r.content)
+        save_video_file_on_log_server(output_file)
 
 
 ## Image Generation (IG) Single Model Direct Chat
@@ -217,6 +264,67 @@ def bothbad_vote_last_response_iem(
     return names + ("", "", gr.Image(height=512, width=512, type="pil"), "") + (disable_btn,) * 4
 
 
+## Video Generation (VG) Single Model Direct Chat
+def upvote_last_response_vg(state, model_selector, request: gr.Request):
+    ip = get_ip(request)
+    vg_logger.info(f"upvote. ip: {ip}")
+    vote_last_response_vg(state, "upvote", model_selector, request)
+    return ("",) + (disable_btn,) * 3
+
+def downvote_last_response_vg(state, model_selector, request: gr.Request):
+    ip = get_ip(request)
+    vg_logger.info(f"downvote. ip: {ip}")
+    vote_last_response_vg(state, "downvote", model_selector, request)
+    return ("",) + (disable_btn,) * 3
+
+
+def flag_last_response_vg(state, model_selector, request: gr.Request):
+    ip = get_ip(request)
+    vg_logger.info(f"flag. ip: {ip}")
+    vote_last_response_vg(state, "flag", model_selector, request)
+    return ("",) + (disable_btn,) * 3
+
+## Video Generation Multi (VGM) Side-by-Side and Battle
+
+def leftvote_last_response_vgm(
+    state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+    vgm_logger.info(f"leftvote (named). ip: {get_ip(request)}")
+    vote_last_response_vgm(
+        [state0, state1], "leftvote", [model_selector0, model_selector1], request
+    )
+    return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name}", visible=True), gr.Markdown(f"### Model B: {state1.model_name}", visible=True))
+
+
+def rightvote_last_response_vgm(
+    state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+    vgm_logger.info(f"rightvote (named). ip: {get_ip(request)}")
+    vote_last_response_vgm(
+        [state0, state1], "rightvote", [model_selector0, model_selector1], request
+    )
+    return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name}", visible=True), gr.Markdown(f"### Model B: {state1.model_name}", visible=True))
+
+
+def tievote_last_response_vgm(
+    state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+    vgm_logger.info(f"tievote (named). ip: {get_ip(request)}")
+    vote_last_response_vgm(
+        [state0, state1], "tievote", [model_selector0, model_selector1], request
+    )
+    return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name}", visible=True), gr.Markdown(f"### Model B: {state1.model_name}", visible=True))
+
+
+def bothbad_vote_last_response_vgm(
+    state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+    vgm_logger.info(f"bothbad_vote (named). ip: {get_ip(request)}")
+    vote_last_response_vgm(
+        [state0, state1], "bothbad_vote", [model_selector0, model_selector1], request
+    )
+    return ("",) + (disable_btn,) * 4 + (gr.Markdown(f"### Model A: {state0.model_name}", visible=True), gr.Markdown(f"### Model B: {state1.model_name}", visible=True))
+
 share_js = """
 function (a, b, c, d) {
     const captureElement = document.querySelector('#share-region-named');
@@ -288,6 +396,21 @@ class ImageStateIE:
         }
         return base
 
+class VideoStateVG:
+    def __init__(self, model_name):
+        self.conv_id = uuid.uuid4().hex
+        self.model_name = model_name
+        self.prompt = None
+        self.output = None
+
+    def dict(self):
+        base = {
+            "conv_id": self.conv_id,
+            "model_name": self.model_name,
+            "prompt": self.prompt
+        }
+        return base
+
 
 
 
@@ -659,4 +782,169 @@ def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instr
     with open(output_file, 'w') as f:
         state.output.save(f, 'JPEG')
     save_image_file_on_log_server(src_img_file)
-    save_image_file_on_log_server(output_file)
+    save_image_file_on_log_server(output_file)
+
+def generate_vg(gen_func, state, text, model_name, request: gr.Request):
+    if not text:
+        raise gr.Warning("Prompt cannot be empty.")
+    if not model_name:
+        raise gr.Warning("Model name cannot be empty.")
+    if state is None:
+        state = VideoStateVG(model_name)
+    ip = get_ip(request)
+    vg_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    generated_video = gen_func(text, model_name)
+    state.prompt = text
+    state.output = generated_video
+    state.model_name = model_name
+
+    yield state, generated_video
+
+    finish_tstamp = time.time()
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+    os.makedirs(os.path.dirname(output_file), exist_ok=True)
+    r = requests.get(state.output)
+    with open(output_file, 'wb') as outfile:
+        outfile.write(r.content)
+    save_video_file_on_log_server(output_file)
+
+def generate_vgm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
+    if not text:
+        raise gr.Warning("Prompt cannot be empty.")
+    if not model_name0:
+        raise gr.Warning("Model name A cannot be empty.")
+    if not model_name1:
+        raise gr.Warning("Model name B cannot be empty.")
+    if state0 is None:
+        state0 = VideoStateVG(model_name0)
+    if state1 is None:
+        state1 = VideoStateVG(model_name1)
+    ip = get_ip(request)
+    vgm_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    # Remove ### Model (A|B): from model name
+    model_name0 = re.sub(r"### Model A: ", "", model_name0)
+    model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    generated_video0, generated_video1 = gen_func(text, model_name0, model_name1)
+    state0.prompt = text
+    state1.prompt = text
+    state0.output = generated_video0
+    state1.output = generated_video1
+    state0.model_name = model_name0
+    state1.model_name = model_name1
+
+    yield state0, state1, generated_video0, generated_video1
+
+    finish_tstamp = time.time()
+    # logger.info(f"===output===: {output}")
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name0,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state0.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name1,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state1.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    for i, state in enumerate([state0, state1]):
+        output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+        os.makedirs(os.path.dirname(output_file), exist_ok=True)
+        r = requests.get(state.output)
+        with open(output_file, 'wb') as outfile:
+            outfile.write(r.content)
+        save_video_file_on_log_server(output_file)
+
+
+def generate_vgm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
+    if not text:
+        raise gr.Warning("Prompt cannot be empty.")
+    if state0 is None:
+        state0 = VideoStateVG(model_name0)
+    if state1 is None:
+        state1 = VideoStateVG(model_name1)
+    ip = get_ip(request)
+    vgm_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    model_name0 = re.sub(r"### Model A: ", "", model_name0)
+    model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    generated_video0, generated_video1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
+    state0.prompt = text
+    state1.prompt = text
+    state0.output = generated_video0
+    state1.output = generated_video1
+    state0.model_name = model_name0
+    state1.model_name = model_name1
+
+    yield state0, state1, generated_video0, generated_video1, \
+        gr.Markdown(f"### Model A: {model_name0}"), gr.Markdown(f"### Model B: {model_name1}")
+
+    finish_tstamp = time.time()
+    # logger.info(f"===output===: {output}")
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name0,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state0.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name1,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state1.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    for i, state in enumerate([state0, state1]):
+        output_file = f'{VIDEO_DIR}/generation/{state.conv_id}.mp4'
+        os.makedirs(os.path.dirname(output_file), exist_ok=True)
+        r = requests.get(state.output)
+        with open(output_file, 'wb') as outfile:
+            outfile.write(r.content)
+        save_video_file_on_log_server(output_file)
serve_local.sh DELETED
@@ -1,26 +0,0 @@
-LOG_SERVER_PORT=9929
-export LOG_SERVER="http://0.0.0.0:${LOG_SERVER_PORT}"
-export LOG_SERVER_SUBDOMAIN=""
-export LOGDIR="tmp_log/"
-export SERVER_PORT=10201 # gradio server port
-
-# start log server
-uvicorn serve.log_server:app --reload --port ${LOG_SERVER_PORT} --host 0.0.0.0 &
-PID1=$!
-# start gradio server
-python app.py &
-PID2=$!
-
-# Function to kill the background jobs if this script is terminated
-cleanup() {
-    echo "Cleaning up..."
-    kill $PID1 $PID2
-    exit 0
-}
-
-# Trap SIGINT (Ctrl+C) and SIGTERM signals and call cleanup function
-trap cleanup SIGINT SIGTERM
-
-# Wait for both background processes to finish
-wait $PID1
-wait $PID2