"""
Download the weights into ./checkpoints beforehand for fast inference:
wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth
wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth
wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth
"""

from pathlib import Path

from PIL import Image
import torch
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import cog

from models.blip import blip_decoder
from models.blip_vqa import blip_vqa
from models.blip_itm import blip_itm


class Predictor(cog.Predictor):
    def setup(self):
        self.device = "cuda:0"

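        # Build all three BLIP heads up front; each model is moved onto the
        # GPU lazily in predict() for the task that was requested.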
        self.models = {
            'image_captioning': blip_decoder(pretrained='checkpoints/model*_base_caption.pth',
                                             image_size=384, vit='base'),
            'visual_question_answering': blip_vqa(pretrained='checkpoints/model*_vqa.pth',
                                                  image_size=480, vit='base'),
            'image_text_matching': blip_itm(pretrained='checkpoints/model_base_retrieval_coco.pth',
                                            image_size=384, vit='base')
        }

    @cog.input(
        "image",
        type=Path,
        help="input image",
    )
    @cog.input(
        "task",
        type=str,
        default='image_captioning',
        options=['image_captioning', 'visual_question_answering', 'image_text_matching'],
        help="Choose a task.",
    )
    @cog.input(
        "question",
        type=str,
        default=None,
        help="Type question for the input image for visual question answering task.",
    )
    @cog.input(
        "caption",
        type=str,
        default=None,
        help="Type caption for the input image for image text matching task.",
    )
    def predict(self, image, task, question, caption):
        if task == 'visual_question_answering':
            assert question is not None, 'Please type a question for the visual question answering task.'
        if task == 'image_text_matching':
            assert caption is not None, 'Please type a caption for the image text matching task.'

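        # The VQA checkpoint expects 480x480 inputs; captioning and image-text
        # matching use 384x384, matching the image_size values set in setup().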
        im = load_image(image, image_size=480 if task == 'visual_question_answering' else 384, device=self.device)
        model = self.models[task]
        model.eval()
        model = model.to(self.device)

        if task == 'image_captioning':
            with torch.no_grad():
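                # Beam search with 3 beams, decoding captions of 5-20 tokens.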
                caption = model.generate(im, sample=False, num_beams=3, max_length=20, min_length=5)
                return 'Caption: ' + caption[0]

        if task == 'visual_question_answering':
            with torch.no_grad():
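                # inference='generate' decodes a free-form answer rather than
                # ranking a fixed list of candidate answers.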
                answer = model(im, question, train=False, inference='generate')
                return 'Answer: ' + answer[0]

        # image_text_matching
        with torch.no_grad():
            # The 'itm' head classifies the pair as matched/unmatched; take the
            # softmax probability of the "matched" class.
            itm_output = model(im, caption, match_head='itm')
            itm_score = torch.nn.functional.softmax(itm_output, dim=1)[:, 1]
            # The 'itc' head scores the image and text features by cosine similarity.
            itc_score = model(im, caption, match_head='itc')
        return f'The image and text are matched with a probability of {itm_score.item():.4f}.\n' \
               f'The image and text features have a cosine similarity of {itc_score.item():.4f}.'


def load_image(image, image_size, device):
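    """Open an image file, resize it to image_size x image_size, and return a
    normalized 1 x 3 x H x W float tensor on the given device."""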
    raw_image = Image.open(str(image)).convert('RGB')

    transform = transforms.Compose([
        transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
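        # Normalization statistics used in BLIP's pretraining (the CLIP image mean/std).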
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image