priyank-m committed on
Commit
4ff38a1
1 Parent(s): 2756472

added example images

Browse files

Added example images, a title, and a description.

Files changed (1) hide show
  1. app.py +7 -1
app.py CHANGED
@@ -1,6 +1,12 @@
1
  from transformers import ViTFeatureExtractor, BertTokenizer, VisionEncoderDecoderModel, AutoTokenizer, AutoFeatureExtractor
2
  import gradio as gr
3
 
 
 
 
 
 
 
4
  model=VisionEncoderDecoderModel.from_pretrained("priyank-m/mOCR")
5
  tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
6
  feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-large")
@@ -13,5 +19,5 @@ def run_ocr(image):
13
  return generated_text
14
 
15
 
16
- demo = gr.Interface(fn=run_ocr, inputs="image", outputs="text")
17
  demo.launch()
 
1
  from transformers import ViTFeatureExtractor, BertTokenizer, VisionEncoderDecoderModel, AutoTokenizer, AutoFeatureExtractor
2
  import gradio as gr
3
 
4
+ title="Multilingual OCR (currently recognises: English and Chinese)"
5
+ description="mOCR(multilingual-OCR) is a Vision-Encoder-Decoder model which uses pre-trained facebook's vit-mae-large as the encoder and xlm-roberta-base as the decoder. It has been trained on IAM, SROIE 2019 and TRDG(synthetic) datasets (amounting to approx 1.4 Million samples) for English and Chinese text-recognition."
6
+ examples =[["demo_image/img1.png"], ["demo_image/img2.jpeg"], ["demo_image/img3.jpeg"], ["demo_image/img4.png"], ["demo_image/img5.jpeg"], ["demo_image/img6.jpeg"]]
7
+
8
+
9
+
10
  model=VisionEncoderDecoderModel.from_pretrained("priyank-m/mOCR")
11
  tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
12
  feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-large")
 
19
  return generated_text
20
 
21
 
22
+ demo = gr.Interface(fn=run_ocr, inputs="image", outputs="text", title=title, description=description, article=article, examples=examples)
23
  demo.launch()