wjf5203 committed
Commit 8468984
1 Parent(s): b55dd40

add video func support

Files changed (1):
  GLEE/glee/models/glee_model.py  +119 -3
GLEE/glee/models/glee_model.py CHANGED
@@ -170,8 +170,6 @@ class GLEE_Model(nn.Module):
         features = self.backbone(images.tensor)
 
 
-
-
         if 'spatial' in prompts:
             ## step 1,2,3
             key_images = [ images ] #bz*[1,3,H,W]
@@ -240,12 +238,130 @@ class GLEE_Model(nn.Module):
             extra['visual_prompt_tokens'] = src_spatial_queries #[len,bz,C]
             extra['visual_prompt_nonzero_mask'] = src_spatial_maskings # [bz,len]
 
-
         outputs = self.predictor(multi_scale_features, mask_features, extra=extra, task=task, masks=None, targets=targets)
         return outputs
 
 
 
+
+    def vos_step1(self, previous_image, prompts, task, targets=None, batch_name_list=None, is_train=False):
+        extra = {}
+        if isinstance(previous_image, torch.Tensor):
+            features = self.backbone(previous_image)
+        else:
+            features = self.backbone(previous_image.tensor)
+        # bz = len(images)//2
+
+        ## step 1,2,3
+        key_images = [previous_image]  #bz*[1,3,H,W]
+        key_promptmasks = [m.unsqueeze(0) for m in prompts['spatial']]  #bz*[1,1,H,W]
+        ref_feats, ref_masks = self.get_template(key_images, key_promptmasks)
+
+        early_fusion = {"hidden": ref_feats, "masks": ref_masks}
+
+
+
+        mask_features, _, multi_scale_features, zero_loss = self.pixel_decoder.forward_features(features, masks=None, early_fusion=early_fusion)
+
+
+        prompt_multi_scale_features = multi_scale_features + [mask_features]
+
+        if 'spatial' in prompts:
+            pos_masks = prompts['spatial']
+            # neg_masks = [~p for p in prompts['spatial']]
+            neg_masks = [p & False for p in prompts['spatial']]
+
+            extra.update({'spatial_query_pos_mask': pos_masks, 'spatial_query_neg_mask': neg_masks})
+            # import pdb;pdb.set_trace()
+
+
+            _, h, w = extra['spatial_query_pos_mask'][0].shape
+            divisor = torch.tensor([h, w], device=mask_features.device)[None,]
+            # Get mean pos spatial query
+            non_zero_pos_point = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[-1]).t() for m in extra['spatial_query_pos_mask']]
+            # [:,1:]: the first nonzero() column indexes which instance the point belongs to; each mask is [num_inst,H,W], so nonzero() gives [num_point,3] and [:,1:] keeps only the xy coordinates.
+            # Dropping that column pools the points of all prompted objects in an image together for sampling, with no per-instance distinction, so each image yields one sampled [512,2] set of positive points.
+            non_zero_pos_point = nn.utils.rnn.pad_sequence(non_zero_pos_point, padding_value=-1).permute(1,0,2)  # pad the per-image results and concatenate them into [bz,512,2]
+
+            non_zero_pos_mask = (non_zero_pos_point.sum(dim=-1) < 0)  # flag padded points, whose xy coordinates sum to a negative value
+            spatial_query_pos = point_sample(mask_features, non_zero_pos_point.flip(dims=(2,)).type(mask_features.dtype), align_corners=True)  # (N, C, P)
+            spatial_query_pos = torch.stack([x[m].mean(dim=0, keepdim=True) for x, m in zip(spatial_query_pos.transpose(1,2), ~non_zero_pos_mask)]).transpose(0,1).nan_to_num()  # [1,bz,C]
+            # import pdb;pdb.set_trace()
+            # Get mean neg spatial query
+            non_zero_neg_point = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[-1]).t() for m in extra['spatial_query_neg_mask']]
+            non_zero_neg_point = nn.utils.rnn.pad_sequence(non_zero_neg_point, padding_value=-1).permute(1,0,2)
+            non_zero_neg_mask = (non_zero_neg_point.sum(dim=-1) < 0)
+            spatial_query_neg = point_sample(mask_features, non_zero_neg_point.flip(dims=(2,)).type(mask_features.dtype), align_corners=True)
+            spatial_query_neg = torch.stack([x[m].mean(dim=0, keepdim=True) for x, m in zip(spatial_query_neg.transpose(1,2), ~non_zero_neg_mask)]).transpose(0,1).nan_to_num()
+
+            # Get layerwise spatial query
+            src_spatial_queries = []
+            src_spatial_maskings = []
+            for i in range(len(prompt_multi_scale_features)):
+                bs, dc, h, w = prompt_multi_scale_features[i].shape
+                # src_mask_features = multi_scale_features[i].view(h,w,bs,dc)
+                src_mask_features = prompt_multi_scale_features[i].permute(2,3,0,1)
+                # import pdb;pdb.set_trace()
+                src_mask_features = src_mask_features @ self.mask_sptial_embed[i]
+
+                non_zero_query_point_pos = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[i]).t() for m in extra['spatial_query_pos_mask']]
+                non_zero_query_point_neg = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[i]).t() for m in extra['spatial_query_neg_mask']]
+                non_zero_query_point = [torch.cat([x, y], dim=0) for x, y in zip(non_zero_query_point_pos, non_zero_query_point_neg)]
+                pos_neg_indicator = [torch.cat([torch.ones(x.shape[0], device=x.device), -torch.ones(y.shape[0], device=y.device)]) for x, y in zip(non_zero_query_point_pos, non_zero_query_point_neg)]
+                pos_neg_indicator = nn.utils.rnn.pad_sequence(pos_neg_indicator, padding_value=0)
+                # import pdb;pdb.set_trace()
+                non_zero_query_point = nn.utils.rnn.pad_sequence(non_zero_query_point, padding_value=-1).permute(1,0,2)
+                non_zero_query_mask = (non_zero_query_point.sum(dim=-1) < 0)
+                non_zero_query_point[non_zero_query_mask] = 0
+                # import pdb;pdb.set_trace()
+                spatial_tokens = point_sample(src_mask_features.permute(2,3,0,1), non_zero_query_point.flip(dims=(2,)).type(src_mask_features.dtype), align_corners=True).permute(2,0,1)
+                spatial_tokens[pos_neg_indicator==1] += self.pn_indicator.weight[0:1]
+                spatial_tokens[pos_neg_indicator==-1] += self.pn_indicator.weight[1:2]
+
+                src_spatial_queries += [spatial_tokens]
+                src_spatial_maskings += [non_zero_query_mask]
+
+            extra['visual_prompt_tokens'] = src_spatial_queries  #[len,bz,C]
+            extra['visual_prompt_nonzero_mask'] = src_spatial_maskings  # [bz,len]
+
+        return early_fusion, extra
+
+
+
+    def vos_step2(self, images, task, language_dict_features, last_extra, targets=None, batch_name_list=None, is_train=False):
+        extra = last_extra
+        dist_loss = None
+        if True:
+            if task not in ['grounding', 'rvos']:
+                assert batch_name_list
+                calsses_name_list = batch_name_list
+                tokenized = self.tokenizer.batch_encode_plus(calsses_name_list,
+                    max_length=self.cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN,  # 256
+                    padding='max_length' if self.cfg.MODEL.LANGUAGE_BACKBONE.PAD_MAX else "longest",  # max_length
+                    return_special_tokens_mask=True,
+                    return_tensors='pt',
+                    truncation=True).to("cuda")
+
+                texts = (tokenized['input_ids'], tokenized['attention_mask'])
+                token_x = self.text_encoder(*texts)['last_hidden_state']
+                token_x = token_x @ self.lang_projection
+                lang_feat_pool = agg_lang_feat(token_x, tokenized['attention_mask'], pool_type="average")  # (bs, 768)
+                extra['class_embeddings'] = lang_feat_pool
+
+        if isinstance(images, torch.Tensor):
+            features = self.backbone(images)
+        else:
+            features = self.backbone(images.tensor)
+        # bz = len(images)//2
+        # import pdb;pdb.set_trace()
+        mask_features, _, multi_scale_features, zero_loss = self.pixel_decoder.forward_features(features, masks=None, early_fusion=language_dict_features)
+
+
+        outputs = self.predictor(multi_scale_features, mask_features, extra=extra, task=task, masks=None, targets=targets)
+        return outputs
+
+
+
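For readers of the diff, here is a rough sketch of how the two new methods are meant to be chained for video object segmentation: vos_step1 encodes a reference frame and its prompt mask into early-fusion features plus visual-prompt tokens, and vos_step2 then segments each later frame conditioned on them. The loop below is a hypothetical caller and not part of this commit; the model handle glee, the task name 'vos', and the tensor shapes are assumptions.

# Hypothetical usage of the new methods (illustrative only, not part of the commit).
# Assumes `glee` is a constructed GLEE_Model, `ref_frame` and each entry of `frames`
# are [1,3,H,W] image tensors, and `ref_mask` is a [1,H,W] bool prompt mask.
import torch

@torch.no_grad()
def propagate_video(glee, ref_frame, ref_mask, frames, batch_name_list):
    prompts = {'spatial': [ref_mask]}          # one prompt-mask entry per batch item
    # Step 1: encode the reference frame and its mask once.
    early_fusion, extra = glee.vos_step1(ref_frame, prompts, task='vos')
    outputs = []
    for frame in frames:
        # Step 2: segment every subsequent frame conditioned on the reference features.
        outputs.append(glee.vos_step2(frame, 'vos', early_fusion, extra,
                                      batch_name_list=batch_name_list))
    return outputs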
 
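The translated comments in vos_step1 describe a pattern that recurs at every feature level: variable-length point sets are sampled from each image's prompt mask, padded to a common length with -1, and a validity mask is recovered from that padding value before point_sample reads out features. Below is a minimal, self-contained sketch of just that pad-and-mask step; sample_points stands in for GLEE's rand_sample, and all names and shapes are illustrative.

# Sketch of the pad-and-mask point sampling used for the spatial queries (illustrative).
import torch
import torch.nn as nn

def sample_points(points, max_len):
    # points: [num_point, 2] normalized xy coordinates; keep at most max_len of them
    if points.shape[0] > max_len:
        points = points[torch.randperm(points.shape[0])[:max_len]]
    return points

def pad_point_sets(masks, max_len=512):
    # masks: list (one entry per image) of [num_inst, H, W] bool prompt masks
    h, w = masks[0].shape[-2:]
    divisor = torch.tensor([h, w], dtype=torch.float32)
    # nonzero() gives [num_point, 3]; [:, 1:] keeps xy, normalized by (H, W)
    per_image = [sample_points(m.nonzero()[:, 1:] / divisor, max_len) for m in masks]
    # pad_sequence pads dim 0 with -1 -> [max_p, bz, 2]; permute to [bz, max_p, 2]
    padded = nn.utils.rnn.pad_sequence(per_image, padding_value=-1.0).permute(1, 0, 2)
    # real points have non-negative coordinates, padded rows sum to -2, so this flags padding
    pad_mask = padded.sum(dim=-1) < 0
    return padded, pad_mask

demo_mask = torch.rand(1, 32, 32) < 0.1        # a fake [num_inst, H, W] prompt mask
points, pad_mask = pad_point_sets([demo_mask])
print(points.shape, pad_mask.shape)            # [bz, max_p, 2] and [bz, max_p]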
 
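In vos_step2, category names are tokenized, encoded by the text encoder, projected through self.lang_projection, and pooled by agg_lang_feat(..., pool_type="average") into one embedding per class name, stored as extra['class_embeddings']. The snippet below re-implements that kind of masked average pooling for illustration; it is an assumption about what the "average" pooling does, not GLEE's actual agg_lang_feat.

# Masked average pooling over valid text tokens (illustrative re-implementation).
import torch

def masked_average_pool(token_feats, attention_mask):
    # token_feats: [bs, seq_len, C]; attention_mask: [bs, seq_len], 1 for real tokens
    mask = attention_mask.unsqueeze(-1).type_as(token_feats)   # [bs, seq_len, 1]
    summed = (token_feats * mask).sum(dim=1)                   # [bs, C]
    counts = mask.sum(dim=1).clamp(min=1.0)                    # avoid division by zero
    return summed / counts

feats = torch.randn(2, 6, 768)
attn = torch.tensor([[1, 1, 1, 0, 0, 0],
                     [1, 1, 1, 1, 1, 0]])
pooled = masked_average_pool(feats, attn)
print(pooled.shape)   # torch.Size([2, 768])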