Update app.py
app.py CHANGED
@@ -2,20 +2,21 @@ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
 import gradio as gr
 from PIL import Image
 import torch
-import matplotlib.pyplot as plt
-import torch
 import numpy as np
+from flask import Flask, request, jsonify, send_file
+from io import BytesIO
+import threading
 
 processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
 model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
 
+app = Flask(__name__)
 
 def process_image(image, prompt):
     inputs = processor(
         text=prompt, images=image, padding="max_length", return_tensors="pt"
     )
 
-    # predict
     with torch.no_grad():
         outputs = model(**inputs)
     preds = outputs.logits
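Note: the unchanged lines between this hunk and the next (elided by the diff) turn preds into the PIL image that process_image later resizes. For reference, a minimal sketch of that conversion, assuming a sigmoid plus 0-255 scaling and a 3-channel stack; the helper name and exact steps are illustrative, not lines from this file:

import torch
import numpy as np
from PIL import Image

def logits_to_pil(preds: torch.Tensor) -> Image.Image:
    # CLIPSeg emits one low-resolution (352x352) logit map per prompt;
    # squeeze away the batch dimension and map logits into [0, 1].
    probs = torch.sigmoid(preds.squeeze())
    # Scale to 0-255 and replicate to 3 channels so the later
    # np.array(mask)[:, :, 0] indexing in process_image still works.
    arr = (probs.numpy() * 255).astype(np.uint8)
    return Image.fromarray(np.stack([arr] * 3, axis=-1), "RGB")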
@@ -27,44 +28,34 @@ def process_image(image, prompt):
     mask = mask.resize(image.size)
     mask = np.array(mask)[:, :, 0]
 
-    # normalize the mask
     mask_min = mask.min()
     mask_max = mask.max()
     mask = (mask - mask_min) / (mask_max - mask_min)
     return mask
 
-
-def get_masks(prompts, img, threhsold):
+def get_masks(prompts, img, threshold):
     prompts = prompts.split(",")
     masks = []
     for prompt in prompts:
         mask = process_image(img, prompt)
-        mask = mask > threhsold
+        mask = mask > threshold
        masks.append(mask)
     return masks
 
-
-def extract_image(pos_prompts, neg_prompts, img, threhsold):
+def extract_image(pos_prompts, neg_prompts, img, threshold):
     positive_masks = get_masks(pos_prompts, img, 0.5)
     negative_masks = get_masks(neg_prompts, img, 0.5)
 
-    # combine masks into one masks, logic OR
     pos_mask = np.any(np.stack(positive_masks), axis=0)
     neg_mask = np.any(np.stack(negative_masks), axis=0)
     final_mask = pos_mask & ~neg_mask
 
-    # extract the final image
     final_mask = Image.fromarray(final_mask.astype(np.uint8) * 255, "L")
     output_image = Image.new("RGBA", img.size, (0, 0, 0, 0))
     output_image.paste(img, mask=final_mask)
     return output_image, final_mask
 
-
-title = "Interactive demo: zero-shot image segmentation with CLIPSeg"
-description = "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image segmentation. To use it, simply upload an image and add a text to mask (identify in the image), or use one of the examples below and click 'submit'. Results will show up in a few seconds."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation Using Text and Image Prompts</a> | <a href='https://huggingface.co/docs/transformers/main/en/model_doc/clipseg'>HuggingFace docs</a></p>"
-
-
+# Gradio UI
 with gr.Blocks() as demo:
     gr.Markdown("# CLIPSeg: Image Segmentation Using Text and Image Prompts")
     gr.Markdown(article)
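Note: this hunk fixes the threhsold typo, but extract_image still passes a hard-coded 0.5 to get_masks rather than its threshold argument, and it deletes the title/description/article definitions while gr.Markdown(article) below still references article, which would raise a NameError at startup unless article is defined elsewhere. The mask combination itself is the union of the positive masks minus the union of the negative ones; a tiny worked example with arbitrary 2x2 masks:

import numpy as np

# Two already-thresholded "positive" masks and one "negative" mask.
positive_masks = [np.array([[True, False], [False, False]]),
                  np.array([[False, True], [False, False]])]
negative_masks = [np.array([[False, True], [False, True]])]

pos_mask = np.any(np.stack(positive_masks), axis=0)  # logical OR of positives
neg_mask = np.any(np.stack(negative_masks), axis=0)  # logical OR of negatives
final_mask = pos_mask & ~neg_mask                    # keep positives, drop negatives

print(final_mask)
# [[ True False]
#  [False False]]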
@@ -99,6 +90,19 @@ with gr.Blocks() as demo:
         ],
         outputs=[output_image, output_mask],
     )
+def run_demo():
+    demo.launch()
+
+def run_flask():
+    app.run(host='127.0.0.1', port=7860)
+
+if __name__ == '__main__':
+    # Run Gradio UI and Flask in separate threads
+    gr_thread = threading.Thread(target=run_demo)
+    flask_thread = threading.Thread(target=run_flask)
 
+    gr_thread.start()
+    flask_thread.start()
 
-
+    gr_thread.join()
+    flask_thread.join()
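Note: the commit imports Flask, request, jsonify, send_file, and BytesIO and starts app.run in a thread, but no route is defined in the hunks shown, so request, jsonify, send_file, and BytesIO are currently unused. A route exposing extract_image over HTTP might look like the sketch below; the /segment endpoint, its form fields, and the port are assumptions, not part of this commit. Note also that demo.launch() tries port 7860 by default, so depending on startup order Flask's app.run on the same port can fail with an address-in-use error; the sketch assumes Flask moves to 5000.

# Hypothetical addition to app.py, not in this commit.
from io import BytesIO
from flask import request, send_file
from PIL import Image

@app.route("/segment", methods=["POST"])
def segment():
    # Expects a multipart form with an image file and two prompt strings.
    img = Image.open(request.files["image"].stream).convert("RGB")
    pos = request.form.get("positive_prompts", "")
    neg = request.form.get("negative_prompts", "")
    output_image, _ = extract_image(pos, neg, img, 0.5)

    # Stream the RGBA cut-out back as a PNG.
    buf = BytesIO()
    output_image.save(buf, format="PNG")
    buf.seek(0)
    return send_file(buf, mimetype="image/png")

# ...with run_flask() changed to: app.run(host='127.0.0.1', port=5000)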