ariG23498 committed
Commit 71a7048
1 Parent(s): 5b94f14

Adding the spaces decorator to make the app compatible with ZeroGPU

Files changed (1):
  1. app.py +5 -4
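Context for the change: on ZeroGPU Spaces, CUDA is only attached while a function decorated with @spaces.GPU is running, so every function that touches the GPU needs the decorator. A minimal sketch of the pattern, assuming the spaces package that Hugging Face provides on Spaces (the describe_device helper is illustrative):

import spaces
import torch

@spaces.GPU  # ZeroGPU attaches a GPU only while this call runs
def describe_device():
    # Inside the decorated function, CUDA is available; outside it is not.
    return torch.cuda.get_device_name(0) if torch.cuda.is_available() else "cpu"

# Longer jobs can request more time, e.g. @spaces.GPU(duration=120).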
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces
 import gradio as gr
 import torch
 from diffusers import AutoPipelineForInpainting
@@ -19,7 +20,7 @@ def delete_model(model):
     del model
     torch.cuda.empty_cache()
 
-
+@spaces.GPU
 def run_language_model(edit_prompt, device):
     language_model_id = "Qwen/Qwen1.5-0.5B-Chat"
     language_model = AutoModelForCausalLM.from_pretrained(
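The decorated run_language_model loads Qwen1.5-0.5B-Chat on demand and asks it to interpret the edit instruction. A sketch of how such a call can look with the transformers chat template; the helper name and prompt handling are illustrative, and the app itself parses the reply into a (to_replace, replace_with) pair:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def run_language_model_sketch(edit_prompt, device):
    model_id = "Qwen/Qwen1.5-0.5B-Chat"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
    # Illustrative instruction; the real prompt asks for the
    # "replace X with Y" pair encoded in edit_prompt.
    messages = [{"role": "user", "content": edit_prompt}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(device)
    output = model.generate(**inputs, max_new_tokens=64)
    # Strip the prompt tokens and return only the generated reply.
    return tokenizer.decode(
        output[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
    )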
@@ -54,7 +55,7 @@ def run_language_model(edit_prompt, device):
     delete_model(language_model)
     return (to_replace, replace_with)
 
-
+@spaces.GPU
 def run_image_captioner(image, device):
     caption_model_id = "Salesforce/blip-image-captioning-base"
     caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_id).to(
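run_image_captioner uses BLIP to describe the input image before editing. A minimal sketch of that call (the helper name is illustrative):

from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

def run_image_captioner_sketch(image: Image.Image, device):
    model_id = "Salesforce/blip-image-captioning-base"
    processor = BlipProcessor.from_pretrained(model_id)
    model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)
    inputs = processor(images=image, return_tensors="pt").to(device)
    output = model.generate(**inputs, max_new_tokens=30)
    return processor.decode(output[0], skip_special_tokens=True)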
@@ -69,7 +70,7 @@ def run_image_captioner(image, device):
     delete_model(caption_model)
     return caption
 
-
+@spaces.GPU
 def run_segmentation(image, object_to_segment, device):
     # OWL-ViT for object detection
     owl_vit_model_id = "google/owlvit-base-patch32"
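The segmentation step starts with OWL-ViT zero-shot detection, and the seg_model later in this hunk turns the detected boxes into masks. A sketch of the detection half; the helper name and the 0.1 score threshold are assumptions:

import torch
from PIL import Image
from transformers import OwlViTProcessor, OwlViTForObjectDetection

def detect_object_sketch(image: Image.Image, object_to_segment, device):
    model_id = "google/owlvit-base-patch32"
    processor = OwlViTProcessor.from_pretrained(model_id)
    model = OwlViTForObjectDetection.from_pretrained(model_id).to(device)
    inputs = processor(
        text=[[object_to_segment]], images=image, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # (height, width) of the original image, needed to rescale the boxes
    target_sizes = torch.tensor([image.size[::-1]]).to(device)
    results = processor.post_process_object_detection(
        outputs, threshold=0.1, target_sizes=target_sizes
    )
    return results[0]["boxes"]  # boxes feed the downstream mask model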
@@ -105,7 +106,7 @@ def run_segmentation(image, object_to_segment, device):
     delete_model(seg_model)
     return masks
 
-
+@spaces.GPU
 def run_inpainting(image, replaced_caption, masks, device):
     pipeline = AutoPipelineForInpainting.from_pretrained(
         "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
 