# Gradio demo: visual question answering with the MAPL model (mair-lab/mapl).
import torch
from PIL import Image
import gradio as gr


# Prefer the GPU when CUDA is available; otherwise run on the CPU.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# Fetch the pretrained MAPL model from torch.hub, put it in inference mode,
# and move its weights to the chosen device. nn.Module.eval() and .to()
# both return the module itself, so the calls chain.
model = torch.hub.load('mair-lab/mapl', 'mapl').eval().to(device)


def predict(image: Image.Image, question: str) -> str:
    """Generate an answer (or caption) for *image* with the MAPL model.

    Args:
        image: Input PIL image; preprocessed by the model's own transform.
        question: Optional prompt text. If it contains a '?', it is wrapped
            in the model's QA prompt template; any other non-empty text is
            passed through verbatim; an empty string means unconditional
            generation (no text prompt).

    Returns:
        The generated text for the single input, stripped of surrounding
        whitespace.
    """
    # Preprocess the image and add a batch dimension: (1, C, H, W) on device.
    pixel_values = model.image_transform(image).unsqueeze(0).to(device)

    input_ids = None
    if question:
        # Questions get the QA prompt template; other text is used as-is.
        text = f"Please answer the question. Question: {question} Answer:" if '?' in question else question
        input_ids = model.text_transform(text).input_ids.to(device)

    # no_grad: inference only — avoid tracking autograd state through beam
    # search. autocast runs the forward pass in bfloat16 for speed/memory.
    with torch.no_grad(), torch.autocast(device_type=device, dtype=torch.bfloat16):
        generated_ids = model.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            max_new_tokens=50,
            num_beams=5
        )

    # Decode the first (and only) sequence in the batch.
    answer = model.text_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    return answer


# Wire up the web UI: an image plus a question go in, a text answer comes out.
image = gr.components.Image(type='pil')
question = gr.components.Textbox(value="What is this?", label="Question")
answer = gr.components.Textbox(label="Answer")

# Build the Gradio interface around predict() and start the local server.
interface = gr.Interface(
    fn=predict,
    inputs=[image, question],
    outputs=answer,
    allow_flagging='never',
)
interface.launch()