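# Visual question answering with BLIP (Salesforce/blip-vqa-base): upload an
# image, ask a question in natural language, and get a short answer back
# through a Gradio interface.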
import warnings

import gradio as gr
from transformers import AutoProcessor, BlipForQuestionAnswering
from transformers.utils import logging

logging.set_verbosity_error()
warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")

# Load the model and processor once at startup instead of on every request;
# a local copy (e.g. "./models/Salesforce/blip-vqa-base") also works here.
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")


def qa(image, question):
    # The processor takes the image first and the question text second.
    inputs = processor(image, question, return_tensors="pt")
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)


iface = gr.Interface(fn=qa, inputs=["image", "text"], outputs="textbox")
iface.launch()