import pandas as pd
import PIL
from PIL import Image, ImageDraw
import gradio as gr
import torch
import easyocr

# Download a sample image used as the example in the demo.
torch.hub.download_url_to_file('https://i.pinimg.com/originals/45/d0/30/45d03054e15f4be731781eecba7458a4.jpg', 'korean.png')


def draw_boxes(image, bounds, color='yellow', width=2):
    # Outline each detected text region as a closed polygon.
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image


def inference(img, lang):
    # Map the dropdown label to the EasyOCR language code.
    if lang == "english":
        lang = ['en']
    elif lang == "chinese":
        lang = ['ch_sim']
    elif lang == "korean":
        lang = ['ko']
    else:
        lang = ['ja']
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img.name)
    im = PIL.Image.open(img.name)
    draw_boxes(im, bounds)
    im.save('result.jpg')
    # Drop the bounding-box column; return only text and confidence for the table.
    return ['result.jpg', pd.DataFrame(bounds).iloc[:, 1:]]


title = 'EasyOCR'
description = 'Gradio demo for EasyOCR. EasyOCR supports 80+ languages. To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load it. Read more at the links below.'
article = "Ready-to-use OCR with 80+ supported languages and all popular writing scripts including Latin, Chinese, Arabic, Devanagari and Cyrillic. | [Github Repo](https://github.com/JaidedAI/EasyOCR)"
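# Illustrative note (an assumption, not from the original demo): each entry that
# easyocr's reader.readtext() returns is a (bounding_box, text, confidence) tuple,
# where bounding_box is four [x, y] corner points, e.g.
#   ([[86, 80], [134, 80], [134, 128], [86, 128]], '안녕', 0.99)
# This is why inference() slices pd.DataFrame(bounds).iloc[:, 1:], keeping only
# the text and confidence columns for the Dataframe output.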
" css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}" examples = ['korean.png',"korean"] choices = [ "chinese", "english", "japanese", "korean" ] gr.Interface( inference, [gr.inputs.Image(type='file', label='Input'),gr.inputs.Dropdown(choices, type="value", default="korean", label='language')], [gr.outputs.Image(type='file', label='Output'), gr.outputs.Dataframe(headers=['text', 'confidence'])], title=title, description=description, article=article, examples=examples, css=css, enable_queue=True ).launch(debug=True)