# app.py
import gradio as gr
import torch
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

# Load Qwen-VL model and processor (trust custom code)
model_id = "Qwen/Qwen-VL-Chat"
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForVision2Seq.from_pretrained(model_id, trust_remote_code=True)
model = model.to("cpu")
model.eval()


# Inference function
def ocr_with_qwen(image):
    # Fall back to test.png if no image was uploaded
    if image is None:
        image = Image.open("test.png")

    # ChatML-style prompt: closed system and user turns, then an open assistant turn
    prompt = (
        "<|im_start|>system\n"
        "You are a helpful assistant.<|im_end|>\n"
        "<|im_start|>user\n"
        "Extract all text from the image and output only the text.<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

    inputs = processor(images=image, text=prompt, return_tensors="pt").to("cpu")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=512)

    # Decode only the newly generated tokens, not the echoed prompt
    generated_ids = outputs[:, inputs["input_ids"].shape[1]:]
    result = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return result.strip()


# Gradio UI (a None image is allowed; the function falls back to test.png)
gr.Interface(
    fn=ocr_with_qwen,
    inputs=gr.Image(type="pil", label="Upload Image (defaults to test.png if none uploaded)"),
    outputs=gr.Textbox(label="Extracted Text"),
    title="OCR with Qwen-VL",
    description="Upload an image to extract text using the Qwen-VL model. If no image is uploaded, test.png is used.",
    examples=[["test.png"]],
).launch()
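
# Usage note (a sketch, not part of the original script): the dependency list below is an
# assumption about this app's environment, not a pinned requirements file from the repository.
# Typical dependencies: gradio, transformers, torch, pillow.
# Run locally with `python app.py`; Gradio serves the UI on http://127.0.0.1:7860 by default.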