GrayShine committed
Commit affaf7b • 1 Parent(s): 24a5924

Update app.py

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -26,8 +26,9 @@ import torchvision
 from PIL import Image
 from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from transformers.image_transforms import convert_to_rgb
+import spaces
 
-+@spaces.GPU
+@spaces.GPU
 def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, cfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -82,7 +83,7 @@ def auto_inpainting(video_input, masked_video, mask, prompt, image, vae, text_en
     video_clip = vae.decode(video_clip / 0.18215).sample # [16, 3, 256, 256]
     return video_clip
 
-+@spaces.GPU
+@spaces.GPU
 def auto_inpainting_temp_split(video_input, masked_video, mask, prompt, image, vae, text_encoder, image_encoder, diffusion, model, device, scfg_scale, tcfg_scale, img_cfg_scale, negative_prompt=""):
     global use_fp16
     image_prompt_embeds = None
@@ -153,7 +154,7 @@ vae = None
 text_encoder = None
 image_encoder = None
 clip_image_processor = None
-+@spaces.GPU
+@spaces.GPU
 def init_model():
     global device
     global output_path
@@ -212,7 +213,7 @@ init_model()
 # ========================================
 # Video Generation
 # ========================================
-+@spaces.GPU
+@spaces.GPU
 def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusion):
     with torch.no_grad():
         print("begin generation", flush=True)
@@ -242,6 +243,7 @@ def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusi
 # ========================================
 # Video Prediction
 # ========================================
+@spaces.GPU
 def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, preframe, diffusion):
     with torch.no_grad():
         print("begin generation", flush=True)
@@ -278,7 +280,7 @@ def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, prefram
 # ========================================
 # Judge Generation or Prediction
 # ========================================
-+@spaces.GPU
+@spaces.GPU
 def gen_or_pre(text_input, image_input, scfg_scale, tcfg_scale, img_cfg_scale, preframe_input, diffusion_step):
     default_step = [25, 40, 50, 100, 125, 200, 250]
     difference = [abs(item - diffusion_step) for item in default_step]
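
For context, the change above follows the usual ZeroGPU pattern: import the spaces package and decorate each CUDA-bound entry point with @spaces.GPU so the Space only holds a GPU while that function runs. The sketch below is a minimal, self-contained illustration of that pattern, not the actual app.py; the placeholder model and Gradio wiring are assumptions for demonstration only.

# Minimal sketch of the @spaces.GPU pattern used in this commit (illustrative only).
import spaces
import torch
import gradio as gr

pipe = torch.nn.Identity()  # placeholder for the real video diffusion model

@spaces.GPU  # attach a GPU only for the duration of this call on a ZeroGPU Space
def generate(prompt: str) -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe.to(device)
    with torch.no_grad():
        # the real app.py runs diffusion sampling here; we just echo the prompt
        return f"ran on {device}: {prompt}"

demo = gr.Interface(fn=generate, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()

Outside a ZeroGPU Space the decorator is effectively a no-op, so the same code still runs locally on CPU or a regular GPU.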