import gradio as gr
# from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

# Load the Pix2Struct checkpoint fine-tuned on DocVQA and its processor.
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-large")
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-large")


def process_document(image, question):
    # image = Image.open(image)
    # Encode the document image together with the question, generate an answer,
    # and decode it back to plain text.
    inputs = processor(images=image, text=question, return_tensors="pt")
    predictions = model.generate(**inputs)
    return processor.decode(predictions[0], skip_special_tokens=True)


description = "Demo for pix2struct fine-tuned on DocVQA (document visual question answering). To use it, upload your image, type a question, and click 'Submit', or click one of the examples to load them. Read more at the links below."
article = "Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding"

demo = gr.Interface(
    fn=process_document,
    inputs=["image", "text"],
    outputs="text",
    title="Demo: pix2struct for DocVQA",
    description=description,
    article=article,
    enable_queue=True,
    examples=[
        ["example_1.png", "When is the coffee break?"],
        ["example_2.jpeg", "What's the population of Stoddard?"],
    ],
    cache_examples=False,
)

demo.launch()