from transformers import VisionEncoderDecoderModel, AutoTokenizer, AutoFeatureExtractor
import gradio as gr

title = "Multilingual OCR (currently recognises: English and Chinese)"
description = "mOCR (multilingual OCR) is a Vision-Encoder-Decoder model that uses Facebook's pre-trained vit-mae-large as the encoder and xlm-roberta-base as the decoder. It was trained on the IAM, SROIE 2019 and TRDG (synthetic) datasets (approximately 1.4 million samples) for English and Chinese text recognition."
examples = [["demo_image/img1.png"], ["demo_image/img2.jpeg"], ["demo_image/img3.jpeg"],
            ["demo_image/img4.png"], ["demo_image/img5.jpeg"], ["demo_image/img6.jpeg"]]

# Load the fine-tuned OCR checkpoint, plus the decoder's tokenizer and the
# encoder's feature extractor (matching the checkpoints named in the description).
model = VisionEncoderDecoderModel.from_pretrained("priyank-m/mOCR")
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-large")


def run_ocr(image):
    # Preprocess the input image into the pixel-value tensor the ViT encoder expects.
    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values
    # Autoregressively generate the recognised text (greedy decoding by default).
    generated_ids = model.generate(pixel_values, max_new_tokens=50)
    # Decode token ids back to a string, dropping special tokens such as <s> and </s>.
    generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text


demo = gr.Interface(fn=run_ocr, inputs="image", outputs="text",
                    title=title, description=description, examples=examples)
demo.launch()
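
# Optional local smoke test (a sketch, not part of the original app): call
# run_ocr directly on one of the bundled demo images before launching the UI.
# Assumes Pillow is installed; transformers' feature extractors accept PIL images.
# from PIL import Image
# print(run_ocr(Image.open("demo_image/img1.png")))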