# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou (xueyan@cs.wisc.edu)
# --------------------------------------------------------

import glob
import os
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
from detectron2.data import MetadataCatalog
from utils.visualizer import Visualizer
from xdecoder.language.loss import vl_similarity
from detectron2.utils.colormap import random_color


# Shared pre-processing transforms, dataset metadata, and the cached
# retrieval gallery used by region_retrieval below.

# Retrieval branch input: fixed 224x224 resize.
transform_ret = transforms.Compose([
    transforms.Resize((224, 224), interpolation=Image.BICUBIC),
])
# Grounding branch input: shorter side resized to 512.
transform_grd = transforms.Compose([
    transforms.Resize(512, interpolation=Image.BICUBIC),
])
metadata = MetadataCatalog.get('coco_2017_train_panoptic')

# Candidate gallery: every .jpg under images/coco (sorted for a stable order)
# plus their precomputed, normalized caption embeddings saved in "v_emb.da".
imgs_root = 'images/coco'
img_pths = sorted(glob.glob(os.path.join(imgs_root, '*.jpg')))
imgs = [Image.open(pth).convert('RGB') for pth in img_pths]
v_emb = torch.load("v_emb.da")

def region_retrieval(model, image, texts, inpainting_text, *args, **kwargs):
    """Retrieve the gallery image best matching `texts`, then ground each phrase.

    Picks, from the precomputed gallery (module-level `imgs` / `v_emb`), the
    image whose caption embedding is most similar to the query, then runs
    grounding segmentation on it and draws one colored mask per phrase.

    Args:
        model: pair ``(model_novg, model_seg)`` — the first embeds the query
            text for retrieval, the second performs grounding segmentation.
        image: unused here; kept for signature compatibility with sibling demos.
        texts: comma-separated query phrases, e.g. ``"a dog, a red ball"``.
        inpainting_text: unused here; kept for signature compatibility.

    Returns:
        tuple: (PIL.Image with masks drawn,
                "Selected Image Probability: X.XX" string,
                None).
    """
    model_novg, model_seg = model
    with torch.no_grad():
        # NOTE: `v_emb` (normalized caption embeddings of `imgs`) was
        # precomputed offline with model_novg and is loaded from "v_emb.da"
        # at module import time.

        # Each phrase becomes a single-item list; a trailing period is added
        # so it matches the caption format the language encoder expects.
        texts_ = [[x.strip() if x.strip().endswith('.') else (x.strip() + '.')]
                  for x in texts.split(',')]
        model_novg.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(
            texts_, is_eval=False, name='caption', prompt=False)
        t_emb = getattr(model_novg.model.sem_seg_head.predictor.lang_encoder,
                        '{}_text_embeddings'.format('caption'))
        temperature = model_novg.model.sem_seg_head.predictor.lang_encoder.logit_scale

        # Rank the gallery against the first phrase; `prob` is the softmax
        # probability of the best image, `idx` its gallery index.
        logits = vl_similarity(v_emb, t_emb, temperature)
        prob, idx = logits[:, 0].softmax(-1).max(0)
        image_ori = imgs[idx]

        # Resize for grounding. Both arrays below come from the RESIZED image
        # on purpose: the visualizer canvas must match the size of the masks
        # predicted at (height, width).
        image = transform_grd(image_ori)
        width, height = image.size
        image = np.asarray(image)
        image_ori = np.asarray(image)
        images = torch.from_numpy(image.copy()).permute(2, 0, 1).cuda()
        batch_inputs = [{'image': images, 'height': height, 'width': width,
                         'groundings': {'texts': texts_}}]
        model_seg.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(
            texts_, is_eval=False, name='caption', prompt=False)
        outputs = model_seg.model.evaluate_grounding(batch_inputs, None)

        visual = Visualizer(image_ori, metadata=metadata)
        grd_masks = (outputs[0]['grounding_mask'] > 0).float().cpu().numpy()

        # Draw each phrase's mask with its OWN label (bug fix: previously the
        # full comma-joined `texts` string was written on every mask).
        res = image_ori  # fallback so `res` is defined even with zero masks
        for text, mask in zip([x[0] for x in texts_], grd_masks):
            color = random_color(rgb=True, maximum=1).astype(np.int32).tolist()
            demo = visual.draw_binary_mask(mask, color=color, text=text, alpha=0.5)
            res = demo.get_image()

    torch.cuda.empty_cache()
    return (Image.fromarray(res),
            "Selected Image Probability: {:.2f}".format(prob.item()),
            None)