# MangaCleaner / app.py
import io
import math

import cv2 as cv
import easyocr
import gradio as gr
import numpy as np
import pandas as pd
import PIL
from PIL import Image
import torch

# Fetch a sample image used by the Gradio examples below.
torch.hub.download_url_to_file('https://i.pinimg.com/originals/45/d0/30/45d03054e15f4be731781eecba7458a4.jpg', 'korean.jpg')

def convert(img):
    """Encode a numpy image as JPEG bytes (currently unused helper)."""
    pil_im = Image.fromarray(img)
    b = io.BytesIO()
    pil_im.save(b, 'jpeg')
    im_bytes = b.getvalue()
    return im_bytes

def midpoint(x1, y1, x2, y2):
    x_mid = int((x1 + x2) / 2)
    y_mid = int((y1 + y2) / 2)
    return (x_mid, y_mid)

def draw_mask(img, bounds):
    """Cover each detected text box with a thick line on a mask, then inpaint it away."""
    mask = np.zeros(img.shape[:2], dtype="uint8")
    for bound in bounds:
        box0, box1, box2, box3 = bound[0]
        x0, y0 = box0
        x1, y1 = box1
        x2, y2 = box2
        x3, y3 = box3
        # Connect the midpoint of the right edge to the midpoint of the left edge,
        # drawn as thick as the box is tall, so the line blankets the whole text region.
        x_mid0, y_mid0 = midpoint(x1, y1, x2, y2)
        x_mid1, y_mid1 = midpoint(x0, y0, x3, y3)
        thickness = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
        cv.line(mask, (x_mid0, y_mid0), (x_mid1, y_mid1), 255, thickness)
        img = cv.inpaint(img, mask, 7, cv.INPAINT_NS)
    return img

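# A minimal, hedged usage sketch of draw_mask (not part of the app): the bound below
# is a hypothetical entry in EasyOCR's readtext() output format, (corner_points,
# text, confidence), with corners listed clockwise from the top-left.
# demo_img = np.full((120, 320, 3), 255, dtype="uint8")
# demo_bounds = [([(10, 20), (300, 20), (300, 90), (10, 90)], "sample", 0.99)]
# demo_cleaned = draw_mask(demo_img, demo_bounds)  # returns an inpainted copy
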
def inference(img, lang):
    # Map the dropdown value to EasyOCR's language code.
    if lang == "english":
        lang = ['en']
    elif lang == "chinese":
        lang = ['ch_sim']
    elif lang == "korean":
        lang = ['ko']
    else:
        lang = ['ja']
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img.name)
    im = PIL.Image.open(img.name).convert('RGB')
    img_array = np.array(im)
    # Erase the detected text by masking and inpainting each box, then save the result.
    cleaned = draw_mask(img_array, bounds)
    Image.fromarray(cleaned).save('result.jpg')
    # Second output: the recognized-text column from EasyOCR's (box, text, confidence) tuples.
    return ['result.jpg', pd.DataFrame(bounds).iloc[:, 1:2]]

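# Hedged sketch of calling inference() outside Gradio: with type='file', Gradio hands
# the function a tempfile-like object, so a tiny stand-in with a .name attribute
# (hypothetical LocalFile helper, not part of this app) would do:
# class LocalFile:
#     def __init__(self, name):
#         self.name = name
# cleaned_path, text_df = inference(LocalFile('korean.jpg'), 'korean')
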
title = 'EasyOCR'
description = 'Gradio demo for EasyOCR, which supports 80+ languages. To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load them. Read more at the links below.'
article = "<p style='text-align: center'><a href='https://www.jaided.ai/easyocr/'>Ready-to-use OCR with 80+ supported languages and all popular writing scripts including Latin, Chinese, Arabic, Devanagari, Cyrillic, etc.</a> | <a href='https://github.com/JaidedAI/EasyOCR'>Github Repo</a></p>"
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
examples = [['korean.jpg',"korean"]]
choices = [
    "chinese",
    "english",
    "japanese",
    "korean"
]
gr.Interface(
    inference,
    [gr.inputs.Image(type='file', label='Input'),
     gr.inputs.Dropdown(choices, type="value", default="korean", label='language')],
    [gr.outputs.Image(type='file', label='Output'),
     gr.outputs.Dataframe()],
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
    enable_queue=True
).launch(debug=True)