Update app.py
Browse files
app.py
CHANGED
@@ -44,7 +44,7 @@ def get_masks(prompts, img, threhsold):
|
|
44 |
return masks
|
45 |
|
46 |
|
47 |
-
def extract_image(img, pos_prompts, neg_prompts, threshold):
|
48 |
positive_masks = get_masks(pos_prompts, img, threshold)
|
49 |
negative_masks = get_masks(neg_prompts, img, threshold)
|
50 |
|
@@ -53,6 +53,12 @@ def extract_image(img, pos_prompts, neg_prompts, threshold):
|
|
53 |
neg_mask = np.any(np.stack(negative_masks), axis=0)
|
54 |
final_mask = pos_mask & ~neg_mask
|
55 |
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
# extract the final image
|
57 |
final_mask = Image.fromarray(final_mask.astype(np.uint8) * 255, "L")
|
58 |
inverse_mask = np.invert(final_mask)
|
@@ -62,6 +68,7 @@ def extract_image(img, pos_prompts, neg_prompts, threshold):
|
|
62 |
return output_image, final_mask, inverse_mask
|
63 |
|
64 |
|
|
|
65 |
title = "Interactive demo: zero-shot image segmentation with CLIPSeg"
|
66 |
description = "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image segmentation. To use it, simply upload an image and add a text to mask (identify in the image), or use one of the examples below and click 'submit'. Results will show up in a few seconds."
|
67 |
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation Using Text and Image Prompts</a> | <a href='https://huggingface.co/docs/transformers/main/en/model_doc/clipseg'>HuggingFace docs</a></p>"
|
|
|
44 |
return masks
|
45 |
|
46 |
|
47 |
+
def extract_image(img, pos_prompts, neg_prompts, threshold, blur_radius=5):
|
48 |
positive_masks = get_masks(pos_prompts, img, threshold)
|
49 |
negative_masks = get_masks(neg_prompts, img, threshold)
|
50 |
|
|
|
53 |
neg_mask = np.any(np.stack(negative_masks), axis=0)
|
54 |
final_mask = pos_mask & ~neg_mask
|
55 |
|
56 |
+
# apply Gaussian blur for feathering
|
57 |
+
final_mask_img = Image.fromarray((final_mask * 255).astype(np.uint8), "L")
|
58 |
+
final_mask_img = final_mask_img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
|
59 |
+
final_mask = np.array(final_mask_img) / 255
|
60 |
+
final_mask = final_mask > threshold
|
61 |
+
|
62 |
# extract the final image
|
63 |
final_mask = Image.fromarray(final_mask.astype(np.uint8) * 255, "L")
|
64 |
inverse_mask = np.invert(final_mask)
|
|
|
68 |
return output_image, final_mask, inverse_mask
|
69 |
|
70 |
|
71 |
+
|
72 |
# UI copy for the Gradio demo page (title bar, header blurb, footer links).
title = "Interactive demo: zero-shot image segmentation with CLIPSeg"

# Shown above the inputs; explains what the demo does and how to drive it.
description = (
    "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image "
    "segmentation. To use it, simply upload an image and add a text to mask "
    "(identify in the image), or use one of the examples below and click "
    "'submit'. Results will show up in a few seconds."
)

# Footer HTML: links to the CLIPSeg paper and the HuggingFace model docs.
article = (
    "<p style='text-align: center'>"
    "<a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation "
    "Using Text and Image Prompts</a> | "
    "<a href='https://huggingface.co/docs/transformers/main/en/model_doc/clipseg'>HuggingFace docs</a>"
    "</p>"
)
|