# Visual_Q_and_A / app.py
# Hugging Face Space by captain-awesome — commit 6f85db8 (verified), "Update app.py"
# import gradio as gr
# from transformers.utils import logging
# logging.set_verbosity_error()
# import warnings
# warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")
# from transformers import BlipForQuestionAnswering
# from transformers import AutoProcessor
# def qa(image, question):
# model = BlipForQuestionAnswering.from_pretrained(
# "./models/Salesforce/blip-vqa-base")
# processor = AutoProcessor.from_pretrained(
# "./models/Salesforce/blip-vqa-base")
# inputs = processor(image, question, return_tensors="pt")
# out = model.generate(image, question)
# result = processor.decode(out[0], skip_special_tokens=True)
# return result
# # def greet(name):
# # return "Hello " + name + "!!"
# iface = gr.Interface(fn=qa, inputs=["image","text"], outputs="textbox")
# iface.launch()
import gradio as gr
from transformers.utils import logging  # transformers' own logging helper, not stdlib `logging`
from transformers import BlipForQuestionAnswering, AutoProcessor
# Suppress transformers' informational/warning log lines for a cleaner Space log.
logging.set_verbosity_error()
import warnings
# generate() without an explicit max_length emits this warning on every request; hide it.
warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")
def qa(image, question):
    """Answer a natural-language question about an image with BLIP VQA.

    Args:
        image: Input image (whatever the Gradio "image" component yields;
            the BLIP processor accepts PIL images / numpy arrays).
        question: The question to ask about the image, as a string.

    Returns:
        The model's decoded answer as a string.
    """
    # Load model + processor once and cache them on the function object:
    # re-downloading/re-instantiating ~400 MB of weights per request was
    # the dominant cost of the previous implementation.
    if not hasattr(qa, "_model"):
        qa._model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        qa._processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
    # BlipProcessor.__call__ takes `images=` and `text=`; the previous
    # `image=`/`question=` keywords are not valid and fail at call time.
    inputs = qa._processor(images=image, text=question, return_tensors="pt")
    out = qa._model.generate(**inputs)
    return qa._processor.decode(out[0], skip_special_tokens=True)
# Wire the VQA function into a simple Gradio UI: an image upload plus a
# free-text question in, the model's answer out, then serve it.
demo = gr.Interface(fn=qa, inputs=["image", "text"], outputs="textbox")
demo.launch()