GrayShine committed on
Commit 24a5924
1 Parent(s): 7875496

Update app.py

Files changed (1): app.py (+6 -2)
app.py CHANGED

@@ -27,7 +27,7 @@ from PIL import Image
 from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from transformers.image_transforms import convert_to_rgb
 
-
+@spaces.GPU
 def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, cfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -82,7 +82,7 @@ def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_en
     video_clip = vae.decode(video_clip / 0.18215).sample # [16, 3, 256, 256]
     return video_clip
 
-
+@spaces.GPU
 def auto_inpainting_temp_split(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, scfg_scale, tcfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -153,6 +153,7 @@ vae = None
 text_encoder = None
 image_encoder = None
 clip_image_processor = None
+@spaces.GPU
 def init_model():
     global device
     global output_path
@@ -211,6 +212,7 @@ init_model()
 # ========================================
 # Video Generation
 # ========================================
+@spaces.GPU
 def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusion):
     with torch.no_grad():
         print("begin generation", flush=True)
@@ -272,9 +274,11 @@ def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, prefram
     return video_path
 
 
+
 # ========================================
 # Judge Generation or Prediction
 # ========================================
+@spaces.GPU
 def gen_or_pre(text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step):
     default_step = [25, 40, 50, 100, 125, 200, 250]
     difference = [abs(item - diffusion_step) for item in default_step]
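
For context, `@spaces.GPU` is the ZeroGPU decorator from the `spaces` package used on Hugging Face Spaces: it requests a GPU allocation for the duration of each decorated call. A minimal sketch of the pattern this commit applies is shown below; the `generate` function and its body are illustrative placeholders, not code from app.py.

import spaces   # Hugging Face Spaces helper package that provides spaces.GPU
import torch

@spaces.GPU     # allocate a GPU only while this call runs (ZeroGPU Spaces)
def generate(prompt: str) -> str:
    # Placeholder body: the real GPU work (e.g. a diffusion sampling loop) would go here.
    with torch.no_grad():
        device = "cuda" if torch.cuda.is_available() else "cpu"
        return f"ran on {device}: {prompt}"

On ZeroGPU hardware the decorator is needed for any function that touches CUDA, and it is documented to have no effect on regular GPU Spaces, which is why applying it to each GPU-bound entry point, as this commit does, is a low-risk change.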