wondervictor committed on
Commit 11600bb · verified · 1 Parent(s): 773be7e

Update mask_adapter/sam_maskadapter.py

Files changed (1): mask_adapter/sam_maskadapter.py (+13 −13)
mask_adapter/sam_maskadapter.py CHANGED

@@ -79,10 +79,10 @@ class SAMVisualizationDemo(object):
             stability_score_thresh=0.7,
             crop_n_layers=0,
             crop_n_points_downscale_factor=2,
-            min_mask_region_area=100)
+            min_mask_region_area=100).to(self.cpu_device)
 
-        self.clip_model = clip_model
-        self.mask_adapter = mask_adapter
+        self.clip_model = clip_model.to(self.cpu_device)
+        self.mask_adapter = mask_adapter.to(self.cpu_device)
 
 
 
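Note that `self.cpu_device` is referenced but never defined in the hunks shown here; presumably it is set earlier in `__init__`. A minimal sketch of the assumed setup (the attribute name comes from the diff, everything else is illustrative):

    import torch

    class SAMVisualizationDemo(object):
        def __init__(self, clip_model, mask_adapter):
            # Assumed: a fixed CPU device onto which the commit migrates all modules.
            self.cpu_device = torch.device("cpu")
            # nn.Module.to() moves parameters and buffers, then returns the module,
            # so reassigning the result (as the diff does) is the idiomatic pattern.
            self.clip_model = clip_model.to(self.cpu_device)
            self.mask_adapter = mask_adapter.to(self.cpu_device)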
 
@@ -147,17 +147,17 @@ class SAMVisualizationDemo(object):
 
 
         with torch.no_grad():
-            self.clip_model.cuda()
-            text_features = self.clip_model.encode_text(text.cuda())
+            self.clip_model.to(self.cpu_device)
+            text_features = self.clip_model.encode_text(text.to(self.cpu_device))
             text_features /= text_features.norm(dim=-1, keepdim=True)
 
-            features = self.extract_features_convnext(image.cuda().float())
+            features = self.extract_features_convnext(image.to(self.cpu_device).float())
 
             clip_feature = features['clip_vis_dense']
 
             clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
+            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().to(self.cpu_device))
 
             maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                              mode='bilinear', align_corners=False)
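The recurring change in this hunk is replacing hard-coded `.cuda()` calls with `.to(self.cpu_device)`: model weights and input tensors must live on the same device, or the forward pass raises a device-mismatch RuntimeError. A device-agnostic sketch of the same text-encoding step, using the open_clip API the surrounding code already relies on (the model name here is an illustrative assumption, not the checkpoint Mask-Adapter ships with):

    import torch
    import open_clip

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model, _, _ = open_clip.create_model_and_transforms("ViT-B-32", pretrained="laion2b_s34b_b79k")
    tokenizer = open_clip.get_tokenizer("ViT-B-32")
    model = model.to(device)

    with torch.no_grad():
        text = tokenizer(["a photo of a cat"]).to(device)  # tokens follow the model's device
        text_features = model.encode_text(text)
        text_features /= text_features.norm(dim=-1, keepdim=True)  # unit-normalize, as above

Written this way, the same code runs on GPU when one is available and falls back to CPU otherwise, instead of hard-wiring either device.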
@@ -220,11 +220,11 @@ class SAMPointVisualizationDemo(object):
 
         self.sam2 = sam2
 
-        self.predictor = SAM2ImagePredictor(sam2)
+        self.predictor = SAM2ImagePredictor(sam2).to(self.cpu_device)
 
-        self.clip_model = clip_model
+        self.clip_model = clip_model.to(self.cpu_device)
 
-        self.mask_adapter = mask_adapter
+        self.mask_adapter = mask_adapter.to(self.cpu_device)
 
 
         from .data.datasets import openseg_classes
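In the upstream sam2 package, `SAM2ImagePredictor` appears to be a thin wrapper around the SAM 2 module rather than an `nn.Module` subclass, so the more common way to pin the predictor's device is to move the wrapped model before constructing it. A sketch of that alternative, assuming `sam2` was already built via `build_sam2`:

    import torch
    from sam2.sam2_image_predictor import SAM2ImagePredictor

    cpu_device = torch.device("cpu")
    sam2 = sam2.to(cpu_device)            # nn.Module.to() is guaranteed to exist here
    predictor = SAM2ImagePredictor(sam2)  # the predictor inherits the model's device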
@@ -308,17 +308,17 @@ class SAMPointVisualizationDemo(object):
         # text = open_clip.tokenize(txts)
 
         with torch.no_grad():
-            self.clip_model.cuda()
+            self.clip_model.to(self.cpu_device)
             # text_features = self.clip_model.encode_text(text.cuda())
             # text_features /= text_features.norm(dim=-1, keepdim=True)
             #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
             text_features = self.text_embedding
-            features = self.extract_features_convnext(image.cuda().float())
+            features = self.extract_features_convnext(image.cpu().float())
             clip_feature = features['clip_vis_dense']
 
             clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
+            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cpu())
             maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False)
 
         B, C = clip_feature.size(0), clip_feature.size(1)
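Two details in this last hunk are worth spelling out. First, `.cpu()` is shorthand for `.to(torch.device("cpu"))`, so the mixed style between the two classes is functionally equivalent. Second, the point-prompt path skips text encoding entirely and reuses a cached embedding (`self.text_embedding`), which must already sit on the same device as the visual features. A sketch of loading such a cache, with a hypothetical relative path standing in for the absolute one in the commented-out np.save above:

    import numpy as np
    import torch

    cpu_device = torch.device("cpu")
    # Hypothetical cache file; the diff only shows a commented-out np.save of it.
    text_embedding = torch.from_numpy(np.load("text_embedding/lvis_coco_text_embedding.npy"))
    text_embedding = text_embedding.to(cpu_device)
    text_embedding /= text_embedding.norm(dim=-1, keepdim=True)  # keep it unit-normalized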
 