import pandas as pd
import PIL
from PIL import Image
from PIL import ImageDraw
import gradio as gr
import torch
import easyocr
# Download a sample Korean image used by the example below.
torch.hub.download_url_to_file('https://i.pinimg.com/originals/45/d0/30/45d03054e15f4be731781eecba7458a4.jpg', 'korean.png')
def draw_boxes(image, bounds, color='yellow', width=2):
    # Outline each detected text region with its quadrilateral bounding box.
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image

def inference(img, lang):
    # Map the dropdown label to the EasyOCR language code.
    if lang == "english":
        lang = ['en']
    elif lang == "chinese":
        lang = ['ch_sim']
    elif lang == "korean":
        lang = ['ko']
    else:
        lang = ['ja']
    reader = easyocr.Reader(lang)
    # readtext returns a list of (bounding box, text, confidence) tuples.
    bounds = reader.readtext(img.name)
    im = PIL.Image.open(img.name)
    draw_boxes(im, bounds)
    im.save('result.jpg')
    # Keep only the text and confidence columns for the dataframe output.
    return ['result.jpg', pd.DataFrame(bounds).iloc[:, 1:]]
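# A minimal sketch of running the OCR step directly, outside the Gradio app,
# assuming the 'korean.png' sample downloaded above is present in the working
# directory:
#
#   reader = easyocr.Reader(['ko'])
#   for bbox, text, confidence in reader.readtext('korean.png'):
#       print(text, confidence)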
title = 'EasyOCR'
description = 'Gradio demo for EasyOCR. EasyOCR supports 80+ languages. To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load them. Read more at the links below.'
article = "<p style='text-align: center'><a href='https://www.jaided.ai/easyocr/'>Ready-to-use OCR with 80+ supported languages and all popular writing scripts including Latin, Chinese, Arabic, Devanagari, Cyrillic, etc.</a> | <a href='https://github.com/JaidedAI/EasyOCR'>Github Repo</a></p>"
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
examples = [['korean.png',"korean"]]
choices = [
    "chinese",
    "english",
    "japanese",
    "korean"
]
gr.Interface(
    inference,
    [gr.inputs.Image(type='file', label='Input'),
     gr.inputs.Dropdown(choices, type="value", default="korean", label='language')],
    [gr.outputs.Image(type='file', label='Output'),
     gr.outputs.Dataframe(headers=['text', 'confidence'])],
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
    enable_queue=True
).launch(debug=True)