# app.py: BLIP-2 image captioning and VQA demo (Gradio)
import gradio as gr
from transformers import AutoProcessor, Blip2ForConditionalGeneration
import torch
from PIL import Image
# Load the BLIP-2 model and processor
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
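# The processor bundles the image processor and the tokenizer, so it can
# encode an image alone or an image+text pair in a single call.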
# Load model in int8 using bitsandbytes, and pass device_map='auto'
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map="auto"
)
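# NOTE: load_in_8bit=True requires the bitsandbytes and accelerate packages
# (pip install bitsandbytes accelerate), and int8 inference assumes a CUDA GPU.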
# Determine the device for the input tensors. With device_map="auto" the
# 8-bit model is already dispatched by accelerate, so it must not be moved
# with model.to(device).
device = "cuda" if torch.cuda.is_available() else "cpu"
def blip2_interface(image, prompted_caption_text, vqa_question, chat_context):
    # Prepare image input
    image_input = Image.fromarray(image).convert("RGB")

    # Image Captioning (no text prompt)
    inputs = processor(image_input, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs, max_new_tokens=20)
    image_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    # Prompted Image Captioning
    inputs = processor(image_input, text=prompted_caption_text, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs, max_new_tokens=20)
    prompted_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    # Visual Question Answering (VQA)
    prompt = f"Question: {vqa_question} Answer:"
    inputs = processor(image_input, text=prompt, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs, max_new_tokens=10)
    vqa_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    # Chat-based Prompting
    prompt = chat_context + " Answer:"
    inputs = processor(image_input, text=prompt, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs, max_new_tokens=10)
    chat_response = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    return image_caption, prompted_caption, vqa_answer, chat_response
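# Quick local smoke test (illustrative only; "test.jpg" is a placeholder path):
#   import numpy as np
#   img = np.array(Image.open("test.jpg").convert("RGB"))
#   print(blip2_interface(img, "a photo of", "What is in the image?", "Question: what is this?"))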
# Define Gradio input components. The gr.inputs/gr.outputs namespaces were
# removed in Gradio 4.x, and each input/output slot needs its own component
# instance rather than a shared one.
image_input = gr.Image(type="numpy", label="Image")
prompt_input = gr.Textbox(label="Prompted caption text")
question_input = gr.Textbox(label="VQA question")
context_input = gr.Textbox(label="Chat context")

# Create Gradio interface
iface = gr.Interface(
    fn=blip2_interface,
    inputs=[image_input, prompt_input, question_input, context_input],
    outputs=[
        gr.Textbox(label="Image caption"),
        gr.Textbox(label="Prompted caption"),
        gr.Textbox(label="VQA answer"),
        gr.Textbox(label="Chat response"),
    ],
    title="BLIP-2 Image Captioning and VQA",
    description="Interact with the BLIP-2 model for image captioning, prompted image captioning, visual question answering, and chat-based prompting.",
)
if __name__ == "__main__":
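    # Pass share=True to iface.launch() for a temporary public URL.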
iface.launch()