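# Gradio demo for eP-ALM (Efficient Perceptual Augmentation of Language Models,
# https://github.com/mshukor/eP-ALM). Loads an image-captioning checkpoint and
# serves it through a simple web UI.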
import os

# Install the bundled TimeSformer package in editable mode so that
# `import timesformer` below resolves.
os.system('cd TimeSformer; pip install -e .')

# Debug output: show the working directory and its contents.
os.system('ls -l')
os.system('pwd')

import timesformer


import torch
import gradio as gr

from PIL import Image
from torchvision import transforms
from transformers import AutoTokenizer
from ruamel.yaml import YAML

from models.epalm import ePALM

yaml = YAML(typ='safe')



use_cuda = torch.cuda.is_available()
device = torch.device('cuda') if use_cuda else torch.device('cpu')

## Load model

config = 'configs/image/ePALM_caption.yaml'
with open(config, 'r') as f:
    config = yaml.load(f)


text_model = 'facebook/opt-2.7b' 
vision_model_name = 'vit_base_patch16_224'

# text_model = 'facebook/opt-6.7b' 
# vision_model_name = 'vit_large_patch16_224'


start_layer_idx = 19
end_layer_idx = 31
low_cpu = True 
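# Build eP-ALM: the OPT language model receives visual prefix tokens derived
# from the ViT encoder's hidden states, injected between decoder layers 19 and 31.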
model = ePALM(opt_model_name=text_model, 
               vision_model_name=vision_model_name, 
               use_vis_prefix=True, 
               start_layer_idx=start_layer_idx, 
               end_layer_idx=end_layer_idx, 
               return_hidden_state_vision=True, 
               config=config,
               low_cpu=low_cpu
)
print("Model Built")
model.to(device)


checkpoint_path = 'checkpoints/float32/ePALM_caption/checkpoint_best.pth'
# checkpoint_path = '/data/mshukor/logs/eplam/models/accelerate/ePALM_pt_L_acc_caption/checkpoint_best.pth'
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict = checkpoint['model']
msg = model.load_state_dict(state_dict, strict=False)
print(msg)  # report any missing/unexpected keys


## Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(text_model, use_fast=False)
eos_token = tokenizer.eos_token
pad_token = tokenizer.pad_token
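# For OPT tokenizers, eos_token is '</s>' and pad_token is '<pad>'; both are
# stripped from the generated text in inference() below.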




image_size = 224
# CLIP-style normalization statistics, matching the vision encoder's pretraining.
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                                 (0.26862954, 0.26130258, 0.27577711))

transform = transforms.Compose([
    transforms.Resize((image_size, image_size), interpolation=Image.BICUBIC),
    transforms.ToTensor(),
    normalize,
])








# Generation settings: beam search with 3 beams, outputs capped at 30 tokens.
do_sample = False
num_beams = 3
max_length = 30





def inference(image, audio, video, task_type, instruction):

    if task_type == 'Image Captioning':
        # Captioning is prompted with an empty string.
        text = ['']
        text_input = tokenizer(text, padding='longest', return_tensors="pt").to(device)
    else:
        raise NotImplementedError(f"Task '{task_type}' is not supported in this demo.")

    if "Video" in task_type:
        pass
    elif "Audio" in task_type:
        pass
    else:
        image = transform(image)
        image = image.to(device,non_blocking=True).unsqueeze(0)




    # Run generation under fp16 autocast when a GPU is available.
    with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=use_cuda):
        out = model(image=image, text=text_input, mode='generate', return_dict=True,
                    max_length=max_length, do_sample=do_sample, num_beams=num_beams)

    out_decode = []
    for o in out:
        res = tokenizer.decode(o)
        # Drop everything up to the leading '</s>' (BOS), then strip pad/eos tokens.
        response = res.split('</s>')[1].replace(pad_token, '').replace(eos_token, '')
        out_decode.append(response)

    return out_decode[0]


inputs = [
    gr.inputs.Image(type='pil'),
    gr.Audio(source="upload", type="filepath"),
    gr.Video(source="upload", type="filepath"),
    gr.inputs.Radio(
        choices=['Image Captioning', 'Video Captioning', 'Audio Captioning',
                 "Visual Question Answering", "Visual Grounding", "General", "General Video"],
        type="value", default="Image Captioning", label="Task"),
    gr.inputs.Textbox(lines=1, label="Instruction"),
]
outputs = ['text']
examples = [
    ['examples/images/soccer.jpg', None, None, 'Image Captioning', None],
    ['examples/images/ski.jpg', None, None, 'Visual Question Answering', 'what does the woman wearing black do?'],
    ['examples/images/banana.jpg', None, None, 'Visual Grounding', 'the detached banana'],
    ['examples/images/skateboard.jpg', None, None, 'General', 'which region does the text " a yellow bird " describe?'],
    ['examples/images/baseball.jpg', None, None, 'General', 'what color is the left car?'],
    [None, None, 'examples/videos/video7014.mp4', 'Video Captioning', None], 
    [None, None, 'examples/videos/video7017.mp4', 'Video Captioning', None], 
    [None, None, 'examples/videos/video7019.mp4', 'Video Captioning', None], 
    [None, None, 'examples/videos/video7021.mp4', 'Video Captioning', None], 
    [None, None, 'examples/videos/video7021.mp4', 'General Video', "What is this sport?"], 
    [None, 'examples/audios/6cS0FsUM-cQ.wav', None, 'Audio Captioning', None],
    [None, 'examples/audios/AJtNitYMa1I.wav', None, 'Audio Captioning', None],
]

title = "eP-ALM"
description = "Gradio demo for eP-ALM: Efficient Perceptual Augmentation of Language Models"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2303.11403' target='_blank'>Paper</a> | <a href='https://github.com/mshukor/eP-ALM' target='_blank'>Github Repo</a></p>"

io = gr.Interface(fn=inference, inputs=inputs, outputs=outputs,
                  title=title, description=description, article=article, examples=examples, cache_examples=False)
io.launch()