dhanushreddy29 committed on
Commit c79ebbe
1 Parent(s): afbc44e

Update app.py

Files changed (1):
  1. app.py +9 -6
app.py CHANGED
@@ -7,10 +7,17 @@ import torch.nn.functional as F
from huggingface_hub import hf_hub_download
from torch.autograd import Variable
from PIL import Image
+ from transformers import pipeline

+ captioner = pipeline(
+     "image-to-text",
+     model="Salesforce/blip-image-captioning-base",
+     prompt="The main subject of this picture is a"
+ )

def removeBackground(image):
    image = Image.fromarray(image).convert("RGB")
+     caption = captioner(image, max_new_tokens=20)[0]["generated_text"].lower().replace("The main subject of this picture is a".lower(), "").strip()
    if not os.path.exists("saved_models"):
        os.mkdir("saved_models")
        os.mkdir("git")
@@ -139,14 +146,10 @@ def removeBackground(image):
    cropped = im_rgb.copy()
    cropped.putalpha(mask)

-     return cropped
-
- def remove_background(image):
-     return removeBackground(image)
-
+     return cropped, caption

inputs = gr.inputs.Image()
- outputs = gr.outputs.Image(type="pil")
+ outputs = [gr.outputs.Image(type="pil"), "text"]
interface = gr.Interface(
    fn=remove_background,
    inputs=inputs,
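
For context, the captioning path added in this commit can be exercised on its own roughly as below. This is a minimal sketch, not part of the commit: the model name, prompt, and max_new_tokens value are taken from the diff above, while the input file name example.jpg and the standalone-script framing are hypothetical.

# Minimal sketch of the BLIP captioning step introduced in this commit.
# Model, prompt, and max_new_tokens mirror the diff; "example.jpg" is a
# hypothetical test image.
from PIL import Image
from transformers import pipeline

captioner = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-base",
    prompt="The main subject of this picture is a",
)

image = Image.open("example.jpg").convert("RGB")
result = captioner(image, max_new_tokens=20)

# The generated text echoes the prompt, so strip it to keep only the subject.
caption = (
    result[0]["generated_text"]
    .lower()
    .replace("the main subject of this picture is a", "")
    .strip()
)
print(caption)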