tianleliphoebe committed
Commit 18d0376 • Parent(s): 513a020
fix rerun

Files changed:
- serve/constants.py +0 -12
- serve/vote_utils.py +7 -22
serve/constants.py
CHANGED
@@ -17,16 +17,4 @@ SAVE_IMAGE = "save_image"
 SAVE_VIDEO = "save_video"
 SAVE_LOG = "save_log"
 
-IMAGE_GENERATION_MODELS = ['fal_LCM(v1.5/XL)_text2image','fal_SDXLTurbo_text2image','fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation', 'fal_PixArtSigma_text2image',
-                           'imagenhub_OpenJourney_generation','fal_SDXLLightning_text2image', 'fal_StableCascade_text2image',
-                           'playground_PlayGroundV2_generation', 'playground_PlayGroundV2.5_generation']
-IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
-                        'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition',
-                        'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition',
-                        'imagenhub_InfEdit_edition', 'imagenhub_CosXLEdit_edition']
-VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
-                           'fal_AnimateDiffTurbo_text2video',
-                           'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
-                           'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']
-
 
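For context: the removed constants grouped the arena's model identifiers by task as "<provider>_<ModelName>_<task>" strings, and the old code in serve/vote_utils.py (removed in the hunks below) keyed a lookup table on the middle segment to resolve a bare model name into its full identifier. The following is a minimal sketch of that pattern, not code from the repository; the identifiers are copied from the removed lines and the list is abbreviated.

# Sketch only: abbreviated list taken from the removed IMAGE_GENERATION_MODELS.
IMAGE_GENERATION_MODELS = ['fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation',
                           'playground_PlayGroundV2.5_generation']

# short name -> full identifier, e.g. 'SDXL' -> 'fal_SDXL_text2image'
model_map = {model_name.split('_')[1]: model_name for model_name in IMAGE_GENERATION_MODELS}

model_name0 = 'SDXL'
if model_name0 in model_map:
    model_name0 = model_map[model_name0]   # resolves to 'fal_SDXL_text2image'
print(model_name0)

With that remapping gone from vote_utils.py, nothing in the diff still reads these lists, which is why the commit can drop them from serve/constants.py entirely.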
serve/vote_utils.py
CHANGED
@@ -7,7 +7,7 @@ import regex as re
 from pathlib import Path
 from .utils import *
 from .log_utils import build_logger
-from .constants import IMAGE_DIR, VIDEO_DIR
+from .constants import IMAGE_DIR, VIDEO_DIR
 import imageio
 
 ig_logger = build_logger("gradio_web_server_image_generation", "gr_web_image_generation.log") # ig = image generation, loggers for single model direct chat
@@ -598,13 +598,8 @@ def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
     ip = get_ip(request)
     igm_logger.info(f"generate. ip: {ip}")
     start_tstamp = time.time()
-    model_name0 =
-    model_name1 =
-    model_map = {model_name.split('_')[1]: model_name for model_name in IMAGE_GENERATION_MODELS}
-    if model_name0 in model_map:
-        model_name0 = model_map[model_name0]
-    if model_name1 in model_map:
-        model_name1 = model_map[model_name1]
+    model_name0 = ""
+    model_name1 = ""
     generated_image0, generated_image1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
     state0.prompt = text
     state1.prompt = text
@@ -801,13 +796,8 @@ def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instr
     ip = get_ip(request)
     igm_logger.info(f"generate. ip: {ip}")
     start_tstamp = time.time()
-    model_name0 =
-    model_name1 =
-    model_map = {model_name.split('_')[1]: model_name for model_name in IMAGE_EDITION_MODELS}
-    if model_name0 in model_map:
-        model_name0 = model_map[model_name0]
-    if model_name1 in model_map:
-        model_name1 = model_map[model_name1]
+    model_name0 = ""
+    model_name1 = ""
     generated_image0, generated_image1, model_name0, model_name1 = gen_func(source_text, target_text, instruct_text, source_image, model_name0, model_name1)
     state0.source_prompt = source_text
     state0.target_prompt = target_text
@@ -1000,13 +990,8 @@ def generate_vgm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
     ip = get_ip(request)
     vgm_logger.info(f"generate. ip: {ip}")
     start_tstamp = time.time()
-    model_name0 =
-    model_name1 =
-    model_map = {model_name.split('_')[1]: model_name for model_name in VIDEO_GENERATION_MODELS}
-    if model_name0 in model_map:
-        model_name0 = model_map[model_name0]
-    if model_name1 in model_map:
-        model_name1 = model_map[model_name1]
+    model_name0 = ""
+    model_name1 = ""
     generated_video0, generated_video1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
     state0.prompt = text
     state1.prompt = text
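For illustration, here is a minimal sketch of the anonymous-battle flow after this commit: the handler clears model_name0 and model_name1 and lets gen_func pick the competing models and report which ones it used. The real gen_func is not part of this diff, so fake_gen_func below is a hypothetical stand-in, and generate_igm_annoy_sketch is a stripped-down version of the handler shown in the hunks above.

import random

def fake_gen_func(text, model_name0, model_name1):
    # Hypothetical stand-in for the anonymous-battle generator: when the names
    # arrive empty, it samples two models itself and returns their identifiers.
    candidates = ['fal_SDXL_text2image', 'imagenhub_PixArtAlpha_generation',
                  'playground_PlayGroundV2.5_generation']
    if not model_name0 or not model_name1:
        model_name0, model_name1 = random.sample(candidates, 2)
    return f"image-from-{model_name0}", f"image-from-{model_name1}", model_name0, model_name1

def generate_igm_annoy_sketch(gen_func, text):
    model_name0 = ""   # cleared, as in the committed change
    model_name1 = ""
    # gen_func returns the actual model names alongside the generated images.
    image0, image1, model_name0, model_name1 = gen_func(text, model_name0, model_name1)
    return image0, image1, model_name0, model_name1

print(generate_igm_annoy_sketch(fake_gen_func, "a cat in a spacesuit"))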