File size: 1,498 Bytes
295c649
241cd82
 
43ea1cc
a089e2a
b07a164
43ea1cc
 
 
6607a8c
295c649
 
241cd82
 
 
 
 
 
 
e860bc7
ee67de1
53d258c
a99fa6c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
from transformers import ViTFeatureExtractor, BertTokenizer, VisionEncoderDecoderModel, AutoTokenizer, AutoFeatureExtractor
import gradio as gr

# UI text shown on the Gradio page: demo title and a short model/training summary.
title="Multilingual OCR (currently recognises: English and Chinese)"
description="m_OCR(multilingual OCR) is a Vision-Encoder-Decoder model (based on the concept of TrOCR) which uses pre-trained facebook's vit-mae-large as the encoder and xlm-roberta-base as the decoder. \nIt has been trained on IAM, SROIE 2019, text_renderer Chinese (synthetic) and TRDG (synthetic) datasets (amounting to approx 1.4 Million samples) for English and Chinese document text-recognition."
# Clickable sample images for the demo; paths are relative to the app's working directory.
examples =[["demo_image/img1.png"], ["demo_image/img2.jpeg"], ["demo_image/img3.jpeg"], ["demo_image/img4.jpeg"], ["demo_image/img5.jpeg"], ["demo_image/img6.jpeg"]]



# Load the fine-tuned encoder-decoder OCR model from the Hugging Face Hub.
model=VisionEncoderDecoderModel.from_pretrained("priyank-m/m_OCR")
# Tokenizer matches the decoder (xlm-roberta-base); feature extractor matches
# the encoder (facebook/vit-mae-large) — both must pair with the checkpoint above.
tokenizer =  AutoTokenizer.from_pretrained("xlm-roberta-base")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-large")

def run_ocr(image):
  """Recognise the text in *image* and return it as a single string.

  Uses the module-level feature_extractor / model / tokenizer: the image is
  converted to pixel tensors, text token ids are generated autoregressively
  (greedy decoding by default, capped at 50 new tokens), and the first
  decoded sequence is returned with special tokens stripped.
  """
  inputs = feature_extractor(image, return_tensors="pt")
  token_ids = model.generate(inputs.pixel_values, max_new_tokens=50)
  decoded = tokenizer.batch_decode(token_ids, skip_special_tokens=True)
  return decoded[0]
        
         
# Wire the OCR function into a Gradio UI (image in, text out) and start the app.
demo = gr.Interface(fn=run_ocr, inputs="image", outputs="text", title=title, description=description, examples=examples)
demo.launch()