JeffLiang committed
Commit 0e0710e
1 Parent(s): 3b51872

update more examples

app.py CHANGED
@@ -53,12 +53,12 @@ def inference(class_names, proposal_gen, granularity, input_img):
     return Image.fromarray(np.uint8(visualized_output.get_image())).convert('RGB')
 
 
-examples = [['Saturn V, toys, desk, sunflowers, white roses, chrysanthemums, carnations, green dianthus', 'Segment_Anything', 0.8, './resources/demo_samples/sample_01.jpeg'],
-            ['red bench, yellow bench, blue bench, brown bench, green bench, blue chair, yellow chair, green chair', 'Segment_Anything', 0.8, './resources/demo_samples/sample_04.png'],
-            # ['Saturn V, toys, blossom', 'MaskFormer', 1.0, './resources/demo_samples/sample_01.jpeg'],
-            # ['Oculus, Ukulele', 'MaskFormer', 1.0, './resources/demo_samples/sample_03.jpeg'],
-            # ['Golden gate, yacht', 'MaskFormer', 1.0, './resources/demo_samples/sample_02.jpeg'],
-            ]
+examples = [['Saturn V, toys, desk, wall, sunflowers, white roses, chrysanthemums, carnations, green dianthus', 'Segment_Anything', 0.8, './resources/demo_samples/sample_01.jpeg'],
+            ['red bench, yellow bench, blue bench, brown bench, green bench, blue chair, yellow chair, green chair, brown chair, yellow square painting, barrel, buddha statue', 'Segment_Anything', 0.8, './resources/demo_samples/sample_04.png'],
+            ['pillow, pipe, sweater, shirt, jeans jacket, shoes, cabinet, handbag, photo frame', 'Segment_Anything', 0.8, './resources/demo_samples/sample_05.png'],
+            ['Saturn V, toys, blossom', 'MaskFormer', 1.0, './resources/demo_samples/sample_01.jpeg'],
+            ['Oculus, Ukulele', 'MaskFormer', 1.0, './resources/demo_samples/sample_03.jpeg'],
+            ['Golden gate, yacht', 'MaskFormer', 1.0, './resources/demo_samples/sample_02.jpeg'],]
 output_labels = ['segmentation map']
 
 title = 'OVSeg (+ Segment_Anything)'
@@ -83,11 +83,11 @@ Open-Vocabulary Semantic Segmentation with Mask-adapted CLIP
 gr.Interface(
     inference,
     inputs=[
-        gr.inputs.Textbox(
+        gr.Textbox(
             lines=1, placeholder=None, default='', label='class names'),
-        gr.inputs.Radio(["Segment_Anything", "MaskFormer"], label="Proposal generator", default="Segment_Anything"),
-        gr.inputs.Slider(0, 1.0, 0.8, label="For Segment_Anything, Granularity of masks from 0 (most coarse) to 1 (most precise)"),
-        gr.inputs.Image(type='filepath'),
+        gr.Radio(["Segment_Anything", "MaskFormer"], label="Proposal generator", default="Segment_Anything"),
+        gr.Slider(0, 1.0, 0.8, label="For Segment_Anything only, granularity of masks from 0 (most coarse) to 1 (most precise)"),
+        gr.Image(type='filepath'),
     ],
     outputs=gr.outputs.Image(label='segmentation map'),
     title=title,
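The second hunk tracks Gradio's 3.x API, which promoted components from the `gr.inputs` / `gr.outputs` namespaces to the top level (note the unchanged `outputs=gr.outputs.Image(...)` context line is still on the old style). Below is a minimal sketch of the fully migrated call, assuming Gradio >= 3.0, where `default=` was also renamed to `value=`; the kwarg spellings here follow that assumption, not this commit. `inference` and `title` are the names defined in app.py above.

```python
import gradio as gr

gr.Interface(
    inference,  # inference(class_names, proposal_gen, granularity, input_img) from app.py
    inputs=[
        gr.Textbox(lines=1, value='', label='class names'),
        gr.Radio(["Segment_Anything", "MaskFormer"],
                 value="Segment_Anything", label="Proposal generator"),
        gr.Slider(0, 1.0, value=0.8,
                  label="For Segment_Anything only, granularity of masks "
                        "from 0 (most coarse) to 1 (most precise)"),
        gr.Image(type='filepath'),
    ],
    outputs=gr.Image(label='segmentation map'),  # top-level counterpart of gr.outputs.Image
    title=title,
).launch()
```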
open_vocab_seg/modeling/clip_adapter/utils.py CHANGED
@@ -62,8 +62,8 @@ def crop_with_mask(
     new_image = torch.cat(
         [image.new_full((1, b - t, r - l), fill_value=val) for val in fill]
     )
-    mask = mask.bool()
-    return image[:, t:b, l:r] * mask[None, t:b, l:r] + (~ mask[None, t:b, l:r]) * new_image, mask[None, t:b, l:r]
+    mask_bool = mask.bool()
+    return image[:, t:b, l:r] * mask[None, t:b, l:r] + (~ mask_bool[None, t:b, l:r]) * new_image, mask[None, t:b, l:r]
 
 
 def build_clip_model(model: str, mask_prompt_depth: int = 0, frozen: bool = True):
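Worth spelling out: the old code rebound `mask` to `mask.bool()` before the return, so both the weighting and the returned crop mask were hard booleans. After the fix, the incoming (possibly soft float) mask does the weighting and is returned unchanged, while a separate `mask_bool` feeds the `~` inversion, which PyTorch only implements for bool/integer tensors. A standalone sketch of that blend, with hypothetical shapes and values rather than the repo's exact code:

```python
import torch

image = torch.rand(3, 4, 4)          # C x H x W crop region
mask = torch.rand(4, 4)              # soft float mask
mask[mask < 0.5] = 0                 # zero out background; foreground values stay soft
fill = torch.zeros(3, 4, 4)          # replacement pixels for the masked-out area

mask_bool = mask.bool()              # bool copy, used only for the inversion
out = image * mask[None] + (~mask_bool)[None] * fill
# `~mask` on the float tensor itself would raise a bitwise_not RuntimeError
```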
open_vocab_seg/utils/predictor.py CHANGED
@@ -173,7 +173,7 @@ class SAMVisualizationDemo(object):
         for bbox, mask in zip(bboxes, pred_masks):
             region, _ = crop_with_mask(
                 image,
-                mask.bool(),
+                mask,
                 bbox,
                 fill=mask_fill,
             )
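Read together with the `crop_with_mask` hunk above: the caller now passes the raw mask from `pred_masks` and lets the helper derive its own boolean copy, so the dtype conversion happens in exactly one place and any soft mask values carry through to the weighted crop.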
resources/demo_samples/sample_05.png ADDED

Git LFS Details

  • SHA256: 1331dfcef69066c225d34c659f756a92ce3dc71965978db67814eda36b1cdc5f
  • Pointer size: 132 Bytes
  • Size of remote file: 2.65 MB