import os

# Install the bundled TimeSformer package in editable mode (used for the video backbone).
os.system('cd TimeSformer; pip install -e .; cd ..')

# Quick sanity checks on the runtime environment.
os.system('ls -l')
os.system('pwd')

import json

import torch
import gradio as gr
from PIL import Image
from ruamel.yaml import YAML
from torchvision import transforms
from transformers import AutoTokenizer

from models.epalm import ePALM

# Safe YAML loader for the model config.
yaml = YAML(typ='safe')

# Run on GPU when available.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda') if use_cuda else torch.device('cpu')

# Load the image-captioning configuration.
config = 'configs/image/ePALM_caption.yaml'
config = yaml.load(open(config, 'r'))

# Language and vision backbones: OPT-2.7B and ViT-B/16.
text_model = 'facebook/opt-2.7b'
vision_model_name = 'vit_base_patch16_224'

# Range of OPT layers that receive the visual prefix.
start_layer_idx = 19
end_layer_idx = 31
low_cpu = True

model = ePALM(opt_model_name=text_model,
              vision_model_name=vision_model_name,
              use_vis_prefix=True,
              start_layer_idx=start_layer_idx,
              end_layer_idx=end_layer_idx,
              return_hidden_state_vision=True,
              config=config,
              low_cpu=low_cpu)
print("Model Built")
model.to(device)

checkpoint_path = 'checkpoints/float32/ePALM_caption/checkpoint_best.pth'

checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict = checkpoint['model']
msg = model.load_state_dict(state_dict, strict=False)
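# With strict=False, load_state_dict returns the missing/unexpected keys instead of raising;
# uncomment the next line to inspect them when debugging a checkpoint mismatch.
# print(msg)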

# Tokenizer for the OPT language model (slow tokenizer).
tokenizer = AutoTokenizer.from_pretrained(text_model, use_fast=False)
eos_token = tokenizer.eos_token
pad_token = tokenizer.pad_token

# Image preprocessing: resize to 224x224 and normalize with CLIP statistics.
image_size = 224
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))

transform = transforms.Compose([
    transforms.Resize((image_size, image_size), interpolation=Image.BICUBIC),
    transforms.ToTensor(),
    normalize,
])
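# Minimal preprocessing sketch (assumes examples/images/soccer.jpg from the examples below exists):
# img = Image.open('examples/images/soccer.jpg').convert('RGB')
# batch = transform(img).unsqueeze(0).to(device)  # tensor of shape [1, 3, 224, 224]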

# Generation settings: beam search with 3 beams (no sampling), at most 30 tokens.
do_sample = False
num_beams = 3
max_length = 30

def inference(image, audio, video, task_type, instruction):
    # Only image captioning is wired up in this demo.
    if task_type == 'Image Captioning':
        text = ['']
        text_input = tokenizer(text, padding='longest', return_tensors="pt").to(device)
    else:
        raise NotImplementedError

    if "Video" in task_type:
        pass
    elif "Audio" in task_type:
        pass
    else:
        image = transform(image)
        image = image.to(device, non_blocking=True).unsqueeze(0)

    # Generate the caption in mixed precision.
    with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=True):
        out = model(image=image, text=text_input, mode='generate', return_dict=True, max_length=max_length,
                    do_sample=do_sample, num_beams=num_beams)

    # Decode and strip special tokens; keep the text after the leading '</s>' BOS token.
    out_decode = []
    for i, o in enumerate(out):
        res = tokenizer.decode(o)
        response = res.split('</s>')[1].replace(pad_token, '').replace('</s>', '').replace(eos_token, '')

    return response
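# Quick local check without the Gradio UI (assumes the example image below is present):
# print(inference(Image.open('examples/images/soccer.jpg'), None, None, 'Image Captioning', None))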

# Gradio inputs: image, audio, video, task selector, and a free-form instruction.
inputs = [
    gr.inputs.Image(type='pil'),
    gr.Audio(source="upload", type="filepath"),
    gr.Video(source="upload", type="filepath"),
    gr.inputs.Radio(choices=['Image Captioning', 'Video Captioning', 'Audio Captioning',
                             "Visual Question Answering", "Visual Grounding", "General", "General Video"],
                    type="value", default="Image Captioning", label="Task"),
    gr.inputs.Textbox(lines=1, label="Instruction"),
]

outputs = ['text']

examples = [
    ['examples/images/soccer.jpg', None, None, 'Image Captioning', None],
    ['examples/images/ski.jpg', None, None, 'Visual Question Answering', 'what does the woman wearing black do?'],
    ['examples/images/banana.jpg', None, None, 'Visual Grounding', 'the detached banana'],
    ['examples/images/skateboard.jpg', None, None, 'General', 'which region does the text " a yellow bird " describe?'],
    ['examples/images/baseball.jpg', None, None, 'General', 'what color is the left car?'],
    [None, None, 'examples/videos/video7014.mp4', 'Video Captioning', None],
    [None, None, 'examples/videos/video7017.mp4', 'Video Captioning', None],
    [None, None, 'examples/videos/video7019.mp4', 'Video Captioning', None],
    [None, None, 'examples/videos/video7021.mp4', 'Video Captioning', None],
    [None, None, 'examples/videos/video7021.mp4', 'General Video', "What is this sport?"],
    [None, 'examples/audios/6cS0FsUM-cQ.wav', None, 'Audio Captioning', None],
    [None, 'examples/audios/AJtNitYMa1I.wav', None, 'Audio Captioning', None],
]

title = "eP-ALM"
description = "Gradio Demo for eP-ALM: "
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2303.11403' target='_blank'>Paper</a> | <a href='https://github.com/mshukor/eP-ALM' target='_blank'>Github Repo</a></p>"

io = gr.Interface(fn=inference, inputs=inputs, outputs=outputs,
                  title=title, description=description, article=article, examples=examples, cache_examples=False)
io.launch()
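# Pass share=True to io.launch() to expose a temporary public URL, if needed.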