Max Ku committed
Commit • e548ada
1 Parent(s): 06726d0
renaming CogVideoX
model/fetch_museum_results/__init__.py
CHANGED
@@ -3,7 +3,7 @@ from .imagen_museum import fetch_indexes, fetch_indexes_no_csv
 import random
 
 ARENA_TO_IG_MUSEUM = {"LCM(v1.5/XL)":"LCM", "PlayGroundV2.5": "PlayGroundV2_5", "FLUX.1-schnell": "FLUX1schnell", "FLUX.1-dev": "FLUX1dev"}
-ARENA_TO_VG_MUSEUM = {"StableVideoDiffusion": "FastSVD"}
+ARENA_TO_VG_MUSEUM = {"StableVideoDiffusion": "FastSVD", "CogVideoX-2B": "CogVideoX"}
 
 def draw2_from_imagen_museum(task, model_name1, model_name2):
     task_name = TASK_DICT[task]
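The new entry maps the arena display name "CogVideoX-2B" to the folder name the VideoGen Museum uses ("CogVideoX"), mirroring how ARENA_TO_IG_MUSEUM aliases image models. A minimal sketch of how such a lookup is typically applied before fetching indexes; resolve_vg_museum_name is a hypothetical helper for illustration, not a function in this file:

def resolve_vg_museum_name(arena_name: str) -> str:
    # Fall back to the arena name itself when no museum alias is registered.
    return ARENA_TO_VG_MUSEUM.get(arena_name, arena_name)

# resolve_vg_museum_name("CogVideoX-2B")         -> "CogVideoX"
# resolve_vg_museum_name("StableVideoDiffusion") -> "FastSVD"
# resolve_vg_museum_name("OpenSora12")           -> "OpenSora12"  (no alias needed)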
model/model_registry.py
CHANGED
@@ -410,9 +410,19 @@ register_model_info(
     "text2video_generation"
 )
 
+register_model_info(
+    ["videogenhub_CogVideoX_generation"],
+    "CogVideoX-2B",
+    "https://github.com/THUDM/CogVideo",
+    "Text-to-Video Diffusion Models with An Expert Transformer.",
+    "CogVideoX LICENSE",
+    "THUDM",
+    "text2video_generation"
+)
+
 register_model_info(
     ["fal_CogVideoX_text2video"],
-    "CogVideoX",
+    "CogVideoX-5B",
     "https://github.com/THUDM/CogVideo",
     "Text-to-Video Diffusion Models with An Expert Transformer.",
     "CogVideoX LICENSE",
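Judging only from the call sites in this hunk, register_model_info appears to take the registry keys, a display name, a project link, a description, a license, an organization, and a task tag. The sketch below is an assumption about what such a registry could look like; the ModelInfo container and its field names are illustrative, not the actual definitions in model/model_registry.py:

from dataclasses import dataclass

@dataclass
class ModelInfo:
    simple_name: str    # display name shown in the arena, e.g. "CogVideoX-2B"
    link: str
    description: str
    license: str
    organization: str
    task: str           # e.g. "text2video_generation"

model_info = {}

def register_model_info(names, simple_name, link, description, license, organization, task):
    info = ModelInfo(simple_name, link, description, license, organization, task)
    for name in names:
        # The same metadata can back several registry keys (fal, videogenhub, ...).
        model_info[name] = info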
model/models/__init__.py
CHANGED
@@ -21,13 +21,16 @@ VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
                            #'videogenhub_LaVie_generation',
                            'videogenhub_VideoCrafter2_generation',
                            #'videogenhub_ModelScope_generation',
-
+                           'videogenhub_CogVideoX-2B_generation',
                            'videogenhub_OpenSora12_generation',
                            #'videogenhub_OpenSora_generation',
                            #'videogenhub_T2VTurbo_generation',
                            'fal_T2VTurbo_text2video',
                            'fal_StableVideoDiffusion_text2video',
-                           'fal_CogVideoX_text2video']
+                           'fal_CogVideoX-5B_text2video']
+
+MAP_NAMES_VIDEOGENHUB = {"CogVideoX-2B": "CogVideoX"}
+
 MUSEUM_UNSUPPORTED_MODELS = ['videogenhub_OpenSoraPlan_generation']
 DESIRED_APPEAR_MODEL = ['videogenhub_T2VTurbo_generation','fal_StableVideoDiffusion_text2video']
 
@@ -56,6 +59,8 @@ def load_pipeline(model_name):
     elif model_source == "fal":
         pipe = load_fal_model(model_name, model_type)
     elif model_source == "videogenhub":
+        if model_name in MAP_NAMES_VIDEOGENHUB.keys():
+            model_name = MAP_NAMES_VIDEOGENHUB[model_name]
         pipe = load_videogenhub_model(model_name)
     else:
         raise ValueError(f"Model source {model_source} not supported")
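The MAP_NAMES_VIDEOGENHUB dictionary lets the arena-facing key carry the size suffix ("CogVideoX-2B") while VideoGenHub is still asked for its own model name ("CogVideoX"). A small sketch of the intended flow, assuming the registry key is laid out as source_name_type; the parsing shown here is an illustration, the real logic lives earlier in load_pipeline:

MAP_NAMES_VIDEOGENHUB = {"CogVideoX-2B": "CogVideoX"}

def resolve_videogenhub_name(registry_key: str) -> str:
    # Assumed key layout: '<source>_<model name>_<model type>', e.g.
    # 'videogenhub_CogVideoX-2B_generation' -> ('videogenhub', 'CogVideoX-2B', 'generation')
    source, model_name, _model_type = registry_key.split("_", 2)
    if source == "videogenhub":
        # Translate the arena name into the name VideoGenHub expects.
        model_name = MAP_NAMES_VIDEOGENHUB.get(model_name, model_name)
    return model_name

print(resolve_videogenhub_name("videogenhub_CogVideoX-2B_generation"))  # CogVideoX
print(resolve_videogenhub_name("videogenhub_OpenSora12_generation"))    # OpenSora12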
model/models/fal_api_models.py
CHANGED
@@ -62,7 +62,7 @@ class FalModel():
             fal_model_name = 'fast-animatediff/turbo/text-to-video'
         elif self.model_name == 'StableVideoDiffusion':
             fal_model_name = 'fast-svd/text-to-video'
-        elif self.model_name == 'CogVideoX':
+        elif self.model_name == 'CogVideoX-5B':
            fal_model_name = 'cogvideox-5b'
         else:
             raise NotImplementedError(f"text2video model of {self.model_name} in fal is not implemented yet")
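With the registry now showing "CogVideoX-5B", the fal branch has to match on the same display name; the endpoint id 'cogvideox-5b' already pointed at the 5B checkpoint, so only the comparison string changes. Restated below as a lookup table purely for illustration (this is not how fal_api_models.py is written, and only the two mappings visible in this hunk are included):

FAL_TEXT2VIDEO_ENDPOINTS = {
    "StableVideoDiffusion": "fast-svd/text-to-video",
    "CogVideoX-5B": "cogvideox-5b",
}

def to_fal_endpoint(model_name: str) -> str:
    # Map an arena display name to the fal text-to-video endpoint id.
    try:
        return FAL_TEXT2VIDEO_ENDPOINTS[model_name]
    except KeyError:
        raise NotImplementedError(
            f"text2video model of {model_name} in fal is not implemented yet")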