shengqiangShi committed
Commit 1e759df
1 Parent(s): 2d74d2f

Update app.py

Files changed (1):
  app.py +33 -34
app.py CHANGED
@@ -3,46 +3,45 @@ import gradio as gr
 from PIL import Image
 import torch
 import matplotlib.pyplot as plt
-import cv2
+import numpy as np
 
 processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
 model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
 
 def process_image(image, prompt):
-    inputs = processor(text=prompt, images=image, padding="max_length", return_tensors="pt")
-
-    # predict
-    with torch.no_grad():
-        outputs = model(**inputs)
-    preds = outputs.logits
-
-    filename = f"mask.png"
-    plt.imsave(filename, torch.sigmoid(preds))
-
-    # # img2 = cv2.imread(filename)
-    # # gray_image = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
-    # # (thresh, bw_image) = cv2.threshold(gray_image, 100, 255, cv2.THRESH_BINARY)
-
-    # # # fix color format
-    # # cv2.cvtColor(bw_image, cv2.COLOR_BGR2RGB)
-
-    # # return Image.fromarray(bw_image)
-
-    return Image.open("mask.png").convert("RGB")
+    # Prepare inputs with the processor
+    inputs = processor(text=prompt, images=image, return_tensors="pt")
+
+    # Predict
+    with torch.no_grad():
+        outputs = model(**inputs)
+    preds = outputs.logits.squeeze()  # Assuming the output logits are of shape [1, H, W]
+
+    # Apply sigmoid to convert logits to probabilities
+    preds = torch.sigmoid(preds)
+
+    # Convert to numpy array
+    mask = preds.numpy()
+
+    # Save the image, handling dimensions correctly
+    filename = "mask.png"
+    plt.imsave(filename, mask, cmap='gray')  # Use cmap='gray' for grayscale image saving
+
+    # Convert to PIL Image and return
+    return Image.open(filename).convert("RGB")
 
 title = "Interactive demo: zero-shot image segmentation with CLIPSeg"
-description = "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image segmentation. To use it, simply upload an image and add a text to mask (identify in the image), or use one of the examples below and click 'submit'. Results will show up in a few seconds."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation Using Text and Image Prompts</a> | <a href='https://huggingface.co/docs/transformers/main/en/model_doc/clipseg'>HuggingFace docs</a></p>"
+description = "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image segmentation."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation Using Text and Image Prompts</a></p>"
 
-examples = [["example_image.png", "wood"]]
+examples = [["example_image.png", "a description of what to segment"]]
 
 interface = gr.Interface(fn=process_image,
-                         inputs=[gr.Image(type="pil"), gr.Textbox(label="Please describe what you want to identify")],
-                         outputs=gr.Image(type="pil"),
-                         title=title,
-                         description=description,
-                         article=article,
-                         examples=examples)
-
-interface.launch(debug=True)
+                         inputs=[gr.Image(type="pil"), gr.Textbox(label="Please describe what you want to identify")],
+                         outputs=gr.Image(type="pil"),
+                         title=title,
+                         description=description,
+                         article=article,
+                         examples=examples)
+
+interface.launch(debug=True)
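
For reference, the updated inference path can be exercised outside the Gradio UI with a short standalone script. This is a minimal sketch, not part of the commit: the example_image.png filename and the "wood" prompt are illustrative assumptions, and it presumes torch, transformers, and Pillow are installed.

# Standalone sketch of the inference path from the updated app.py
# (illustrative only; the image file and prompt are assumptions).
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("example_image.png").convert("RGB")
inputs = processor(text="wood", images=image, return_tensors="pt")

# Forward pass without gradients, mirroring process_image above
with torch.no_grad():
    logits = model(**inputs).logits.squeeze()

# Sigmoid turns logits into per-pixel probabilities in [0, 1]
mask = torch.sigmoid(logits).numpy()
print(mask.shape, float(mask.min()), float(mask.max()))

Running this avoids importing app.py directly, which would block on interface.launch(debug=True) at module level.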