doevent committed on
Commit 5083fe9
1 parent: eb5e503

Upload models/blip_retrieval.py

Files changed (1)
  1. models/blip_retrieval.py +322 -0
models/blip_retrieval.py ADDED
@@ -0,0 +1,322 @@
+ from models.med import BertConfig, BertModel
+ from transformers import BertTokenizer
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+
+ from models.blip import create_vit, init_tokenizer, load_checkpoint
+
+ class BLIP_Retrieval(nn.Module):
+     def __init__(self,
+                  med_config='configs/med_config.json',
+                  image_size=384,
+                  vit='base',
+                  vit_grad_ckpt=False,
+                  vit_ckpt_layer=0,
+                  embed_dim=256,
+                  queue_size=57600,
+                  momentum=0.995,
+                  negative_all_rank=False,
+                  ):
+         """
+         Args:
+             med_config (str): path for the mixture of encoder-decoder model's configuration file
+             image_size (int): input image size
+             vit (str): model size of vision transformer
+         """
+         super().__init__()
+
+         self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
+         self.tokenizer = init_tokenizer()
+         med_config = BertConfig.from_json_file(med_config)
+         med_config.encoder_width = vision_width
+         self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)
+
+         text_width = self.text_encoder.config.hidden_size
+
+         self.vision_proj = nn.Linear(vision_width, embed_dim)
+         self.text_proj = nn.Linear(text_width, embed_dim)
+
+         self.itm_head = nn.Linear(text_width, 2)
+
+         # create momentum encoders
+         self.visual_encoder_m, vision_width = create_vit(vit, image_size)
+         self.vision_proj_m = nn.Linear(vision_width, embed_dim)
+         self.text_encoder_m = BertModel(config=med_config, add_pooling_layer=False)
+         self.text_proj_m = nn.Linear(text_width, embed_dim)
+
+         self.model_pairs = [[self.visual_encoder, self.visual_encoder_m],
+                             [self.vision_proj, self.vision_proj_m],
+                             [self.text_encoder, self.text_encoder_m],
+                             [self.text_proj, self.text_proj_m],
+                             ]
+         self.copy_params()
+
+         # create the queue
+         self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
+         self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
+         self.register_buffer("idx_queue", torch.full((1, queue_size), -100))
+         self.register_buffer("ptr_queue", torch.zeros(1, dtype=torch.long))
+
+         self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
+         self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
+
+         self.queue_size = queue_size
+         self.momentum = momentum
+         self.temp = nn.Parameter(0.07 * torch.ones([]))
+
+         self.negative_all_rank = negative_all_rank
+
+
+     def forward(self, image, caption, alpha, idx):
+         with torch.no_grad():
+             self.temp.clamp_(0.001, 0.5)
+
+         image_embeds = self.visual_encoder(image)
+         image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
+         image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
+
+         text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=35,
+                               return_tensors="pt").to(image.device)
+
+         text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
+                                         return_dict=True, mode='text')
+         text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)
+
+         ###============== Image-text Contrastive Learning ===================###
+         idx = idx.view(-1, 1)
+         idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1)
+         pos_idx = torch.eq(idx, idx_all).float()
+         sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)
+
+         # get momentum features
+         with torch.no_grad():
+             self._momentum_update()
+             image_embeds_m = self.visual_encoder_m(image)
+             image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1)
+             image_feat_m_all = torch.cat([image_feat_m.t(), self.image_queue.clone().detach()], dim=1)
+
+             text_output_m = self.text_encoder_m(text.input_ids, attention_mask=text.attention_mask,
+                                                 return_dict=True, mode='text')
+             text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:, 0, :]), dim=-1)
+             text_feat_m_all = torch.cat([text_feat_m.t(), self.text_queue.clone().detach()], dim=1)
+
+             sim_i2t_m = image_feat_m @ text_feat_m_all / self.temp
+             sim_t2i_m = text_feat_m @ image_feat_m_all / self.temp
+
+             sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
+             sim_targets.fill_diagonal_(1)
+
+             sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
+             sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
+
+         sim_i2t = image_feat @ text_feat_m_all / self.temp
+         sim_t2i = text_feat @ image_feat_m_all / self.temp
+
+         loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1).mean()
+         loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1).mean()
+
+         loss_ita = (loss_i2t + loss_t2i) / 2
+
+         idxs = concat_all_gather(idx)
+         self._dequeue_and_enqueue(image_feat_m, text_feat_m, idxs)
+
+         ###============== Image-text Matching ===================###
+         encoder_input_ids = text.input_ids.clone()
+         encoder_input_ids[:, 0] = self.tokenizer.enc_token_id
+
+         # forward the positive image-text pair
+         bs = image.size(0)
+         output_pos = self.text_encoder(encoder_input_ids,
+                                        attention_mask=text.attention_mask,
+                                        encoder_hidden_states=image_embeds,
+                                        encoder_attention_mask=image_atts,
+                                        return_dict=True,
+                                        )
+
+
+         if self.negative_all_rank:
+             # compute sample similarity
+             with torch.no_grad():
+                 mask = torch.eq(idx, idxs.t())
+
+                 image_feat_world = concat_all_gather(image_feat)
+                 text_feat_world = concat_all_gather(text_feat)
+
+                 sim_i2t = image_feat @ text_feat_world.t() / self.temp
+                 sim_t2i = text_feat @ image_feat_world.t() / self.temp
+
+                 weights_i2t = F.softmax(sim_i2t, dim=1)
+                 weights_i2t.masked_fill_(mask, 0)
+
+                 weights_t2i = F.softmax(sim_t2i, dim=1)
+                 weights_t2i.masked_fill_(mask, 0)
+
+             image_embeds_world = all_gather_with_grad(image_embeds)
+
+             # select a negative image (from all ranks) for each text
+             image_embeds_neg = []
+             for b in range(bs):
+                 neg_idx = torch.multinomial(weights_t2i[b], 1).item()
+                 image_embeds_neg.append(image_embeds_world[neg_idx])
+             image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
+
+             # select a negative text (from all ranks) for each image
+             input_ids_world = concat_all_gather(encoder_input_ids)
+             att_mask_world = concat_all_gather(text.attention_mask)
+
+             text_ids_neg = []
+             text_atts_neg = []
+             for b in range(bs):
+                 neg_idx = torch.multinomial(weights_i2t[b], 1).item()
+                 text_ids_neg.append(input_ids_world[neg_idx])
+                 text_atts_neg.append(att_mask_world[neg_idx])
+
+         else:
+             with torch.no_grad():
+                 mask = torch.eq(idx, idx.t())
+
+                 sim_i2t = image_feat @ text_feat.t() / self.temp
+                 sim_t2i = text_feat @ image_feat.t() / self.temp
+
+                 weights_i2t = F.softmax(sim_i2t, dim=1)
+                 weights_i2t.masked_fill_(mask, 0)
+
+                 weights_t2i = F.softmax(sim_t2i, dim=1)
+                 weights_t2i.masked_fill_(mask, 0)
+
+             # select a negative image (from same rank) for each text
+             image_embeds_neg = []
+             for b in range(bs):
+                 neg_idx = torch.multinomial(weights_t2i[b], 1).item()
+                 image_embeds_neg.append(image_embeds[neg_idx])
+             image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
+
+             # select a negative text (from same rank) for each image
+             text_ids_neg = []
+             text_atts_neg = []
+             for b in range(bs):
+                 neg_idx = torch.multinomial(weights_i2t[b], 1).item()
+                 text_ids_neg.append(encoder_input_ids[neg_idx])
+                 text_atts_neg.append(text.attention_mask[neg_idx])
+
+         text_ids_neg = torch.stack(text_ids_neg, dim=0)
+         text_atts_neg = torch.stack(text_atts_neg, dim=0)
+
+         text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
+         text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)
+
+         image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
+         image_atts_all = torch.cat([image_atts, image_atts], dim=0)
+
+         output_neg = self.text_encoder(text_ids_all,
+                                        attention_mask=text_atts_all,
+                                        encoder_hidden_states=image_embeds_all,
+                                        encoder_attention_mask=image_atts_all,
+                                        return_dict=True,
+                                        )
+
+
+         vl_embeddings = torch.cat([output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :]], dim=0)
+         vl_output = self.itm_head(vl_embeddings)
+
+         itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
+                                dim=0).to(image.device)
+         loss_itm = F.cross_entropy(vl_output, itm_labels)
+
+         return loss_ita, loss_itm
+
+
+     @torch.no_grad()
+     def copy_params(self):
+         for model_pair in self.model_pairs:
+             for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
+                 param_m.data.copy_(param.data)  # initialize
+                 param_m.requires_grad = False   # not updated by gradient
+
+
+     @torch.no_grad()
+     def _momentum_update(self):
+         for model_pair in self.model_pairs:
+             for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
+                 param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)
+
+
+     @torch.no_grad()
+     def _dequeue_and_enqueue(self, image_feat, text_feat, idxs):
+         # gather keys before updating queue
+         image_feats = concat_all_gather(image_feat)
+         text_feats = concat_all_gather(text_feat)
+
+
+         batch_size = image_feats.shape[0]
+
+         ptr = int(self.ptr_queue)
+         assert self.queue_size % batch_size == 0  # for simplicity
+
+         # replace the keys at ptr (dequeue and enqueue)
+         self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
+         self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
+         self.idx_queue[:, ptr:ptr + batch_size] = idxs.T
+         ptr = (ptr + batch_size) % self.queue_size  # move pointer
+
+         self.ptr_queue[0] = ptr
+
+
+ def blip_retrieval(pretrained='', **kwargs):
+     model = BLIP_Retrieval(**kwargs)
+     if pretrained:
+         model, msg = load_checkpoint(model, pretrained)
+         print("missing keys:")
+         print(msg.missing_keys)
+     return model
+
+
+ @torch.no_grad()
+ def concat_all_gather(tensor):
+     """
+     Performs all_gather operation on the provided tensors.
+     *** Warning ***: torch.distributed.all_gather has no gradient.
+     """
+     tensors_gather = [torch.ones_like(tensor)
+                       for _ in range(torch.distributed.get_world_size())]
+     torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
+
+     output = torch.cat(tensors_gather, dim=0)
+     return output
+
+
+ class GatherLayer(torch.autograd.Function):
+     """
+     Gather tensors from all workers with support for backward propagation:
+     This implementation does not cut the gradients as torch.distributed.all_gather does.
+     """
+
+     @staticmethod
+     def forward(ctx, x):
+         output = [torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())]
+         torch.distributed.all_gather(output, x)
+         return tuple(output)
+
+     @staticmethod
+     def backward(ctx, *grads):
+         all_gradients = torch.stack(grads)
+         torch.distributed.all_reduce(all_gradients)
+         return all_gradients[torch.distributed.get_rank()]
+
+
+ def all_gather_with_grad(tensors):
+     """
+     Performs all_gather operation on the provided tensors.
+     Graph remains connected for backward grad computation.
+     """
+     # Queue the gathered tensors
+     world_size = torch.distributed.get_world_size()
+     # There is no need for reduction in the single-proc case
+     if world_size == 1:
+         return tensors
+
+     tensor_all = GatherLayer.apply(tensors)
+
+     return torch.cat(tensor_all, dim=0)
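
For context, here is a minimal, hypothetical smoke test of the uploaded module; it is not part of this commit. It assumes the code is run from the BLIP repository root (so `configs/med_config.json` and the `models` package resolve) and that the `bert-base-uncased` tokenizer can be downloaded. Because `forward()` and `_dequeue_and_enqueue()` call `concat_all_gather()`, `torch.distributed` must be initialized; the sketch starts a single-process gloo group for that. The batch contents, port, and `alpha` value are illustrative assumptions only.

```python
import os
import torch
import torch.distributed as dist

from models.blip_retrieval import blip_retrieval

# concat_all_gather() needs an initialized process group, even on one process;
# a single-process gloo group is enough for a CPU smoke test.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
if not dist.is_initialized():
    dist.init_process_group(backend="gloo", rank=0, world_size=1)

# defaults mirror the class definition above
model = blip_retrieval(pretrained='', vit='base', image_size=384,
                       queue_size=57600, negative_all_rank=False)
model.train()

# dummy batch: 2 images, 2 captions, and a per-sample image index
image = torch.randn(2, 3, 384, 384)
caption = ["a dog running on the beach", "a plate of food on a table"]
idx = torch.tensor([0, 1])
alpha = 0.4  # mixing weight for the soft contrastive targets

loss_ita, loss_itm = model(image, caption, alpha, idx)
print(loss_ita.item(), loss_itm.item())
```

In actual training the model is launched across multiple distributed ranks, and `idx` carries a global image index that is stored in `idx_queue` and used to mask out positives (same image) when sampling hard negatives for the image-text matching loss.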