|
# Imports for the MMOCR Gradio demo.
# NOTE: the original file imported `os` twice (top and bottom of this
# block); the duplicate has been removed. Relative import order is kept
# so the torch-version print still runs before gradio/mmocr load.
import os

import torch

# Log the installed torch version at startup for debugging.
print(torch.__version__)

import datetime

import gradio as gr
import pandas as pd

from mmocr.utils.ocr import MMOCR
|
|
|
def inference(img, det, recog):
    """Run MMOCR detection/recognition on an uploaded image.

    Args:
        img: Gradio file input; ``img.name`` is the path of the upload
            on disk.
        det: Detector model name, or the literal string 'None' to
            disable detection.
        recog: Recognizer model name, or the literal string 'None' to
            disable recognition.

    Returns:
        Tuple of (path of the rendered output PNG under /tmp, raw OCR
        results as returned by ``MMOCR.readtext``).
    """
    print(datetime.datetime.now(), 'start')

    # The dropdowns use the string 'None' as their "disabled" choice;
    # MMOCR itself expects a real None.
    det = None if det == 'None' else det
    recog = None if recog == 'None' else recog

    ocr = MMOCR(det=det, recog=recog)
    print(datetime.datetime.now(), 'start read:', img.name)

    # readtext writes its visualisation into /tmp as out_<stem>.png.
    results = ocr.readtext(img.name, details=True, output='/tmp')
    stem = os.path.splitext(os.path.basename(img.name))[0]
    result_file = '/tmp/out_{}.png'.format(stem)

    print(datetime.datetime.now(), results)
    return result_file, results
|
|
|
# UI copy shown above/below the demo widget.
description = 'Gradio demo for MMOCR. MMOCR is an open-source toolbox based on PyTorch and mmdetection for text detection, text recognition, and the corresponding downstream tasks including key information extraction. To use it, simply upload your image or click one of the examples to load them. Read more at the links below.'

article = "<p style='text-align: center'><a href='https://mmocr.readthedocs.io/en/latest/'>MMOCR is an open-source toolbox based on PyTorch and mmdetection for text detection, text recognition, and the corresponding downstream tasks including key information extraction.</a> | <a href='https://github.com/open-mmlab/mmocr'>Github Repo</a></p>"

# Build the example gallery: every regular file under ./images, in
# sorted order, paired with the default detector/recognizer choices.
path = './images'
examples = [
    [os.path.join(path, entry), 'PS_CTW', 'SAR']
    for entry in sorted(os.listdir(path))
    if os.path.isfile(os.path.join(path, entry))
]
|
|
|
# Detector choices; 'None' disables detection inside inference().
_det_choices = [
    'DB_r18',
    'DB_r50',
    'DBPP_r50',
    'DRRG',
    'FCE_IC15',
    'FCE_CTW_DCNv2',
    'MaskRCNN_CTW',
    'MaskRCNN_IC15',
    'MaskRCNN_IC17',
    'PANet_CTW',
    'PANet_IC15',
    'PS_CTW',
    'PS_IC15',
    'TextSnake',
    'None',
]
det = gr.inputs.Dropdown(
    choices=_det_choices, type="value", default='PS_CTW', label='det')
|
|
|
# Recognizer choices; 'None' disables recognition inside inference().
_recog_choices = [
    'CRNN',
    'SAR',
    'SAR_CN',
    'NRTR_1/16-1/8',
    'NRTR_1/8-1/4',
    'RobustScanner',
    'SATRN',
    'SATRN_sm',
    'ABINet',
    'ABINet_Vision',
    'SEG',
    'CRNN_TPS',
    'MASTER',
    'None',
]
recog = gr.inputs.Dropdown(
    choices=_recog_choices, type="value", default='SAR', label='recog')
|
|
|
# Assemble the demo UI and start serving it (blocking call).
_inputs = [gr.inputs.Image(type='file', label='Input'), det, recog]
_outputs = [
    gr.outputs.Image(type='pil', label='Output'),
    gr.outputs.Textbox(type='str', label='Prediction'),
]
_iface = gr.Interface(
    inference,
    _inputs,
    _outputs,
    title='MMOCR',
    description=description,
    article=article,
    examples=examples,
    css=".output_image, .input_image {height: 40rem !important; width: 100% !important;}",
    enable_queue=True,
)
_iface.launch(debug=True, server_name='0.0.0.0', server_port=7860)
|
|