from torch import nn 
import torch 
from text_encoder import Transformer_Encoder
import torchvision.models
import torch.nn.functional as F


class BLIP(nn.Module):
    """BLIP-style vision-language model adapted for CIFAR-10.

    Pairs an image encoder (ResNet-18 modified for small inputs) with a text
    encoder (Transformer_Encoder) and computes two training losses:

    * ITC (image-text contrastive) loss with momentum distillation, using
      EMA ("momentum") copies of both encoders as soft-label teachers.
    * ITM (image-text matching) loss over positive pairs plus hard negative
      pairs sampled from the contrastive similarity distribution.
    """

    def __init__(self, config):
        super().__init__()
        # Image encoder: ResNet-18 adjusted for CIFAR-10 — the stride-1
        # "maxpool" preserves spatial resolution of the 32x32 inputs, and the
        # replacement fc maps the 512-d feature into the shared hidden size.
        self.img_enc = torchvision.models.resnet18(pretrained=True)
        self.img_enc.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
        self.img_enc.fc = nn.Linear(in_features=512, out_features=config.hidden_size)
        # Momentum (EMA) copy of the image encoder; never updated by gradients.
        self.img_enc_momentum = torchvision.models.resnet18(pretrained=True)
        self.img_enc_momentum.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
        self.img_enc_momentum.fc = nn.Linear(in_features=512, out_features=config.hidden_size)
        # Projection heads applied to the encoder features before the
        # contrastive similarities are computed.
        self.img_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.img_proj_momentum = nn.Linear(config.hidden_size, config.hidden_size)

        self.text_enc = Transformer_Encoder(config)
        self.text_enc_momentum = Transformer_Encoder(config)  # momentum (EMA) copy
        self.text_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.text_proj_momentum = nn.Linear(config.hidden_size, config.hidden_size)

        # Binary classification head for image-text matching (match / no match).
        self.itm_head = nn.Linear(config.hidden_size, 2)

        # (online model, momentum model) pairs kept in sync by copy_params()
        # at init and _momentum_update() during training.
        self.model_pairs = [[self.img_enc, self.img_enc_momentum],
                            [self.img_proj, self.img_proj_momentum],
                            [self.text_enc, self.text_enc_momentum],
                            [self.text_proj, self.text_proj_momentum],
                           ]
        self.copy_params()
        self.momentum = config.momentum
        # Learnable temperature for the contrastive similarities.
        self.temp = nn.Parameter(0.07 * torch.ones([]))
        self.vocab_d = config.vocab_d
        self.alpha = config.alpha
        self.device = config.device

    def forward(self, img_x, text_tokenized, labels, self_attn_mask):
        """Compute the ITC and ITM losses for one batch.

        Args:
            img_x: raw image batch fed to the ResNet encoder.
            text_tokenized: tokenized (not yet embedded) sentences,
                shape (bs, N) with N = tokenize_max_len.
            labels: class labels of the samples, shape (bs,).
            self_attn_mask: self-attention mask forwarded to the text encoder.

        Returns:
            Tuple ``(loss_itc, loss_itm)`` of scalar loss tensors.
        """
        img_emb = self.img_enc(img_x).unsqueeze(1)  # (bs, N, hidden_size), N=1
        # img_emb[:, 0, :] plays the role of the CLS feature (as it would with a ViT).
        image_feat = F.normalize(self.img_proj(img_emb[:, 0, :]), dim=-1)  # (bs, hidden_size)

        labels = labels.view(-1, 1)  # (bs, 1)
        # (bs, bs): entry (i, j) is True when samples i and j share a label.
        labels_matrix = torch.eq(labels.t(), labels)
        # Row-normalize into a target distribution (a batch may contain
        # several samples of the same class).
        labels_matrix = labels_matrix / labels_matrix.sum(1, keepdim=True)

        ###============== ITC loss with momentum distillation ===================###
        text_emb_itc = self.text_enc(text_tokenized, self_attn_mask=self_attn_mask)  # (bs, N, hidden_size)
        # text_emb_itc[:, 0, :] is the CLS feature vector.
        text_feat = F.normalize(self.text_proj(text_emb_itc[:, 0, :]), dim=-1)  # (bs, hidden_size)

        with torch.no_grad():
            self._momentum_update()
            img_emb_momentum = self.img_enc_momentum(img_x).unsqueeze(1)  # (bs, N, hidden_size), N=1
            image_feat_momentum = F.normalize(self.img_proj_momentum(img_emb_momentum[:, 0, :]), dim=-1)  # (bs, hidden_size)

            text_emb_itc_momentum = self.text_enc_momentum(text_tokenized, self_attn_mask=self_attn_mask)  # (bs, N, hidden_size)
            text_feat_momentum = F.normalize(self.text_proj_momentum(text_emb_itc_momentum[:, 0, :]), dim=-1)  # (bs, hidden_size)

            # (bs, bs): each row is one image's (text's) similarity to every
            # text (image), as judged by the momentum teachers.
            sim_i2t_momentum = image_feat_momentum @ text_feat_momentum.t() / self.temp
            sim_t2i_momentum = text_feat_momentum @ image_feat_momentum.t() / self.temp

        sim_i2t = image_feat @ text_feat_momentum.t() / self.temp  # (bs, bs)
        sim_t2i = text_feat @ image_feat_momentum.t() / self.temp  # (bs, bs)

        # Cross-entropy against the hard label targets plus cross-entropy
        # against the soft momentum targets; the alpha-weighted sum equals
        # BLIP's blended target (alpha*soft + (1-alpha)*hard).
        loss_i2t = -(1 - self.alpha) * torch.sum(F.log_softmax(sim_i2t, dim=1) * labels_matrix, dim=1).mean()
        loss_i2t_momentum = -self.alpha * torch.sum(F.log_softmax(sim_i2t, dim=1) * F.softmax(sim_i2t_momentum, dim=1), dim=1).mean()
        loss_t2i = -(1 - self.alpha) * torch.sum(F.log_softmax(sim_t2i, dim=1) * labels_matrix, dim=1).mean()
        loss_t2i_momentum = -self.alpha * torch.sum(F.log_softmax(sim_t2i, dim=1) * F.softmax(sim_t2i_momentum, dim=1), dim=1).mean()

        loss_itc = (loss_i2t + loss_i2t_momentum + loss_t2i + loss_t2i_momentum) / 2

        ###============== ITM loss ===================###
        bs = img_emb.shape[0]
        N_img = img_emb.shape[1]
        # Cross-attention mask over the (single) image token; same intent as
        # the BLIP source, different encoding.
        cross_attn_mask = torch.full([bs, 1], N_img).to(device=self.device)
        text_tokenized_itm = text_tokenized.clone()
        # BUGFIX: replace the FIRST TOKEN of EVERY sample with the [ENC]
        # marker (matching the BLIP reference, encoder_input_ids[:, 0]).
        # The original `text_tokenized_itm[0] = ...` instead overwrote ALL
        # tokens of the first sample in the batch.
        text_tokenized_itm[:, 0] = self.vocab_d["[ENC]"]
        # mode="itm" makes the text encoder cross-attend to the image tokens.
        output_pos = self.text_enc(text_tokenized_itm, img_emb, mode="itm",
                                     self_attn_mask=self_attn_mask, cross_attn_mask=cross_attn_mask)  # (bs, N, hidden_size)

        with torch.no_grad():
            mask = labels_matrix.to(torch.bool)  # boolean mask of positive pairs
            sim_i2t = image_feat @ text_feat.t() / self.temp
            sim_t2i = text_feat @ image_feat.t() / self.temp
            # Zero the positives so only negatives remain for sampling below.
            weights_i2t = F.softmax(sim_i2t, dim=1)
            weights_i2t.masked_fill_(mask, 0)

            weights_t2i = F.softmax(sim_t2i, dim=1)
            weights_t2i.masked_fill_(mask, 0)

        # For each text, sample one hard negative image with probability
        # proportional to its (masked) similarity weight.
        image_embeds_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_t2i[b], 1).item()
            image_embeds_neg.append(img_emb[neg_idx])  # img_emb is (bs, N, hidden_size), N=1
        image_embeds_neg = torch.stack(image_embeds_neg, dim=0)  # (bs, 1, embed_dim)

        # Likewise, for each image sample one hard negative text.
        text_tokenized_neg = []
        self_attn_mask_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_i2t[b], 1).item()
            text_tokenized_neg.append(text_tokenized_itm[neg_idx])
            self_attn_mask_neg.append(self_attn_mask[neg_idx])
        text_tokenized_neg = torch.stack(text_tokenized_neg, dim=0)  # (bs, max_length)
        self_attn_mask_neg = torch.stack(self_attn_mask_neg, dim=0)  # (bs, 1)

        # Build the mismatched (negative) pairs: (positive text, negative
        # image) in the first half and (negative text, positive image) in
        # the second — note the neg halves sit at opposite ends.
        text_tokenized_all = torch.cat([text_tokenized_itm, text_tokenized_neg], dim=0)  # (2*bs, max_length)
        self_attn_mask_all = torch.cat([self_attn_mask, self_attn_mask_neg], dim=0)  # (2*bs, 1)
        image_embeds_all = torch.cat([image_embeds_neg, img_emb], dim=0)  # (2*bs, N, embed_dim), N=1
        cross_attn_mask_all = torch.cat([cross_attn_mask, cross_attn_mask], dim=0)  # (2*bs, 1)

        output_neg = self.text_enc(text_tokenized_all, image_embeds_all, mode="itm",
                                    self_attn_mask=self_attn_mask_all, cross_attn_mask=cross_attn_mask_all)  # (2*bs, N, hidden_size)
        output_pos_and_neg = torch.cat([output_pos[:, 0, :], output_neg[:, 0, :]], dim=0)  # (3*bs, hidden_size)
        vl_output = self.itm_head(output_pos_and_neg)
        # bs positive labels followed by 2*bs negative labels: every row of
        # output_neg pairs a mismatched image and text (see the cat order of
        # image_embeds_all vs. text_tokenized_all above).
        itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)], dim=0).to(device=self.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        return loss_itc, loss_itm

    @torch.no_grad()
    def _momentum_update(self):
        """EMA-update every momentum model toward its online counterpart."""
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)

    @torch.no_grad()
    def copy_params(self):
        """Initialize each momentum model as an exact, gradient-free copy."""
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False  # not updated by gradient


# if __name__=='__main__':