Update app.py
app.py CHANGED
@@ -28,7 +28,7 @@ from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from transformers.image_transforms import convert_to_rgb
 import spaces
 
-@spaces.GPU
+# @spaces.GPU
 def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, cfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -83,7 +83,7 @@ def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_en
     video_clip = vae.decode(video_clip / 0.18215).sample # [16, 3, 256, 256]
     return video_clip
 
-@spaces.GPU
+# @spaces.GPU
 def auto_inpainting_temp_split(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, scfg_scale, tcfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -213,7 +213,7 @@ init_model()
 # ========================================
 # Video Generation
 # ========================================
-@spaces.GPU
+# @spaces.GPU
 def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusion):
     with torch.no_grad():
         print("begin generation", flush=True)
@@ -243,7 +243,7 @@ def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusi
 # ========================================
 # Video Prediction
 # ========================================
-@spaces.GPU
+# @spaces.GPU
 def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, preframe, diffusion):
     with torch.no_grad():
         print("begin generation", flush=True)
@@ -280,7 +280,7 @@ def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, prefram
 # ========================================
 # Judge Generation or Prediction
 # ========================================
-@spaces.GPU
+# @spaces.GPU
 def gen_or_pre(text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step):
     default_step = [25, 40, 50, 100, 125, 200, 250]
     difference = [abs(item - diffusion_step) for item in default_step]
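For reference, a minimal sketch of how the spaces.GPU decorator is normally applied on Hugging Face ZeroGPU Spaces; commenting the decorator out, as in the diff above, means the wrapped function no longer requests a GPU when it is called. The function below is a hypothetical illustration, not code from this Space's app.py.

import spaces   # Hugging Face Spaces SDK; provides the GPU decorator
import torch

@spaces.GPU     # request a GPU for the duration of each call on ZeroGPU hardware
def run_inference(prompt: str) -> torch.Tensor:
    # Hypothetical placeholder for a CUDA-backed model call.
    with torch.no_grad():
        return torch.zeros(1)

Commenting the decorator out rather than deleting it (and keeping "import spaces") leaves the change reversible by uncommenting a single line per function.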