'''
BLIP capability demo
1. Image classification (zero-shot, via text prompts)
2. Text-to-image retrieval
3. Image-to-image similarity
'''

from dataset import get_loader
import matplotlib.pyplot as plt 
import torch 
from blip import BLIP
import torch.nn.functional as F
from train import get_cifar_config, get_text_config, tokenize, vocabulary

# MODE selects which demo below runs:
# 1. Image classification
# 2. Text-to-image retrieval
# 3. Image-to-image similarity
MODE = 3

if __name__ == "__main__":
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'  # compute device
    cifar_config = get_cifar_config()
    text_config = get_text_config()
    tokenize_max_len = 10
    train_loader, test_loader = get_loader(cifar_config)

    model = BLIP(text_config).to(DEVICE)  # model
    # map_location keeps the load working on CPU-only machines even when the
    # checkpoint was saved from a CUDA run (a bare torch.load would raise).
    model.load_state_dict(torch.load('model.pth', map_location=DEVICE))

    model.eval()  # inference mode: disables dropout, freezes BN statistics

    # Pure inference below — skip autograd bookkeeping entirely.
    with torch.no_grad():
        if MODE == 1:
            # --- 1. Image classification (zero-shot, CLIP-style) ---
            # Build one prompt sentence per CIFAR class.
            all_classes_sentence = [
                "[CLS] this is a picture of {} [END]".format(vocabulary[i])
                for i in range(cifar_config.class_num)
            ]
            # NOTE(review): tokenize() output is fed to the model as-is;
            # confirm it already lives on DEVICE when CUDA is used.
            tokenized_all_class, all_class_self_attn_mask = tokenize(
                all_classes_sentence, max_len=tokenize_max_len)
            text_emb = model.text_enc(
                tokenized_all_class,
                self_attn_mask=all_class_self_attn_mask)  # (bs=10, N, hidden_size)
            # CLS-position feature, L2-normalized: one text feature per class.
            text_feat = F.normalize(model.text_proj(text_emb[:, 0, :]), dim=-1)  # (10, hidden_size)

            image, label = test_loader.dataset[100]  # one test sample
            image, label = image.unsqueeze(0).to(DEVICE), torch.tensor(label).to(DEVICE)
            image_emb = model.img_enc(image).unsqueeze(1)  # (bs, N=1, hidden_size)
            # With a ViT encoder, position 0 is the CLS feature vector.
            image_feat = F.normalize(model.img_proj(image_emb[:, 0, :]), dim=-1)  # (bs, hidden_size)

            sim_i2t = (image_feat @ text_feat.t()).softmax(dim=1)  # (bs_img, bs_text=10)
            predict_y = sim_i2t.max(dim=1).indices  # predicted class index per image
            print('分类:', label)
            # permute(0,2,3,1) moves channels last for matplotlib.
            plt.imshow(image.permute(0, 2, 3, 1).cpu().squeeze(0))
            plt.title('predict: {}'.format(vocabulary[predict_y.item()]))
            plt.axis('off')
            plt.show()

        elif MODE == 2:
            # --- 2. Text-to-image retrieval ---
            label_str = vocabulary[0]  # "airplane"
            sentence = "[CLS] this is a picture of {} [END]".format(label_str)
            tokenized_all_class, all_class_self_attn_mask = tokenize(
                [sentence], max_len=tokenize_max_len)
            text_emb = model.text_enc(
                tokenized_all_class,
                self_attn_mask=all_class_self_attn_mask)  # (bs=1, N, hidden_size)
            text_feat = F.normalize(model.text_proj(text_emb[:, 0, :]), dim=-1)  # (1, hidden_size)

            # Candidate pool: the first 30 test images, stacked into one batch.
            image_list = [test_loader.dataset[i][0] for i in range(30)]
            images = torch.stack(image_list, dim=0).to(DEVICE)
            print(images.shape)
            img_emb = model.img_enc(images).unsqueeze(1)
            image_feat = F.normalize(model.img_proj(img_emb[:, 0, :]), dim=-1)  # (bs, hidden_size)

            sim_t2i = (text_feat @ image_feat.t()).softmax(dim=1)  # (bs_text=1, bs_img)
            values, indexs = sim_t2i[0].topk(3)  # 3 best matches
            fig, axs = plt.subplots(1, 3, figsize=(6, 6))
            for ax, idx in zip(axs, indexs):  # one panel per retrieved image
                ax.imshow(images[idx].permute(1, 2, 0).cpu())
                ax.set_title('predict: {}'.format(label_str))
                ax.axis('off')
            plt.show()

        elif MODE == 3:
            # --- 3. Image-to-image similarity ---
            # Candidate pool: first 50 test images. (A random sample would be
            # more representative; sequential is fine for a demo.)
            other_images = []
            other_labels = []
            for i in range(50):
                other_image, other_label = test_loader.dataset[i]
                other_images.append(other_image)
                other_labels.append(other_label)

            # Features of the 50 pool images.
            other_img_embs = model.img_enc(
                torch.stack(other_images, dim=0).to(DEVICE)).unsqueeze(1)
            other_img_feat = F.normalize(model.img_proj(other_img_embs[:, 0, :]), dim=-1)

            # Query image feature.
            cur_image, cur_label = test_loader.dataset[100]
            cur_image = cur_image.unsqueeze(0).to(DEVICE)
            cur_img_emb = model.img_enc(cur_image).unsqueeze(1)
            cur_img_feat = F.normalize(model.img_proj(cur_img_emb[:, 0, :]), dim=-1)  # (bs, hidden_size)

            # Similarity of the query against the 50-image pool.
            sim_i2i = (cur_img_feat @ other_img_feat.t()).softmax(dim=1)
            values, indexs = sim_i2i[0].topk(3)  # 3 most similar
            plt.figure(figsize=(10, 10))
            for i, img_idx in enumerate(indexs):
                plt.subplot(1, 3, i + 1)
                plt.imshow(other_images[img_idx].permute(1, 2, 0).cpu())
                plt.title(other_labels[img_idx])
                plt.axis('off')
            plt.show()