import copy
import datetime
import io
import os
import pickle
import time
import urllib.request

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image, ImageFilter
from mmocr.apis import MMOCRInferencer

# OCR pipeline: TextSnake for text detection, ABINet_Vision for text recognition.
ocr = MMOCRInferencer(det="TextSnake", rec="ABINet_Vision")

# Download two example images of draught marks from Wikimedia Commons.
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg/1600px-Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg"
path_input = "./example1.jpg"
urllib.request.urlretrieve(url, filename=path_input)

url = "https://upload.wikimedia.org/wikipedia/commons/3/3e/733_how-deep.jpg"
path_input = "./example2.jpg"
urllib.request.urlretrieve(url, filename=path_input)

path_img_output_folder = "./demo-out"
path_img_input_folder = "./demo-input"

# Make sure the working folders exist before the first upload is processed.
os.makedirs(path_img_input_folder, exist_ok=True)
os.makedirs(path_img_output_folder, exist_ok=True)


def do_process(img):
    """Run OCR on the uploaded image; return the visualisation and the recognised texts."""
    img_name = "tmp.jpg"
    path_input = os.path.join(path_img_input_folder, img_name)
    path_output = os.path.join(path_img_output_folder, "vis", img_name)
    img.save(path_input)
    # img.save(path_output)
    result = ocr(path_input, out_dir=path_img_output_folder, save_vis=True)
    img_res = Image.open(path_output)
    return img_res, result["predictions"][0]["rec_texts"]


# Gradio 3.x-style input/output components.
input_im = gr.inputs.Image(
    shape=None, image_mode="RGB", invert_colors=False, source="upload", type="pil"
)
output_img = gr.outputs.Image(label="Output of OCR", type="pil")
output_txt = gr.outputs.Textbox(type="text", label="predictions")

title = "Reading draught marks"
description = (
    "Playground: Reading draught marks using pre-trained models. "
    "Tools: MMOCR, Gradio. Source of images: Wikipedia."
)
examples = [["./example1.jpg"], ["./example2.jpg"]]
article = "By Dr. Mohamed Elawady"

iface = gr.Interface(
    fn=do_process,
    inputs=[input_im],
    outputs=[output_img, output_txt],
    live=False,
    interpretation=None,
    title=title,
    description=description,
    article=article,
    examples=examples,
)
iface.launch(debug=True)