import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, LayoutLMv3ImageProcessor
import torch

# LiLT checkpoint fine-tuned for extractive document QA.
model_name = "TusharGoel/LiLT-Document-QA"
tokenizer = AutoTokenizer.from_pretrained(model_name, apply_ocr=True)
image_processor = LayoutLMv3ImageProcessor()
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model.eval()  # inference-only: disable dropout etc.


def qna(image, question):
    """Answer *question* from the document *image* via extractive QA.

    The image is OCR'd by ``LayoutLMv3ImageProcessor`` into words plus
    normalized bounding boxes; the LiLT model then predicts start/end
    token positions, which are mapped back to OCR words.

    Args:
        image: Document image as provided by the Gradio ``Image`` input.
        question: Natural-language question about the document.

    Returns:
        The predicted answer span as a string, or ``""`` when the model
        points at special tokens or produces an inverted span.
    """
    # OCR step — results carry a leading batch dimension of size 1.
    features = image_processor(image, apply_ocr=True)
    words = features["words"][0]
    boxes = features["boxes"][0]

    encoding = tokenizer(
        question,
        words,
        boxes=boxes,
        return_token_type_ids=True,
        return_tensors="pt",
        truncation=True,
        padding="max_length",
    )
    # Maps each token position to its source word index
    # (None for special tokens such as CLS/SEP/padding).
    word_ids = encoding.word_ids(0)

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**encoding)

    start_token = outputs.start_logits.argmax(-1).item()
    end_token = outputs.end_logits.argmax(-1).item()
    start, end = word_ids[start_token], word_ids[end_token]

    # Guard: an argmax landing on a special token yields word_id None
    # (previously crashed with TypeError on `end + 1`), and an inverted
    # span has no meaningful answer.
    if start is None or end is None or start > end:
        return ""
    return " ".join(words[start : end + 1])


img = gr.Image(source="upload", label="Image")
question = gr.Text(label="Question")
iface = gr.Interface(fn=qna, inputs=[img, question], outputs="text", title="LiLT - Document Question Answering")
iface.launch()