# NDLOCR / app.py
# Author: tomofi
# Commit: Update app.py (bf5361b)
import os
import torch

# One-time environment bootstrap for the HF Space: install mmcv matched to the
# preinstalled torch/CUDA build, build the bundled mmdetection wheel, and fetch
# the pretrained NDLOCR model weights.
print(torch.__version__)
# torch.__version__ on Spaces looks like "1.10.0+cu113" -> split into build pieces.
torch_ver, cuda_ver = torch.__version__.split('+')
# mmcv-full must match both the torch and CUDA versions exactly.
os.system(f'pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/{cuda_ver}/torch{torch_ver}/index.html --no-cache-dir')
os.system('cd src/ndl_layout/mmdetection && python setup.py bdist_wheel && pip install dist/*.whl')
# Pretrained weights / configs published by NDL Lab.
os.system('wget https://lab.ndl.go.jp/dataset/ndlocr/text_recognition/mojilist_NDL.txt -P ./src/text_recognition/models')
os.system('wget https://lab.ndl.go.jp/dataset/ndlocr/text_recognition/ndlenfixed64-mj0-synth1.pth -P ./src/text_recognition/models')
os.system('wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/ndl_layout_config.py -P ./src/ndl_layout/models')
os.system('wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/epoch_140_all_eql_bt.pth -P ./src/ndl_layout/models')
os.system('wget https://lab.ndl.go.jp/dataset/ndlocr/separate_pages_ssd/weights.hdf5 -P ./src/separate_pages_ssd/ssd_tools')
# Sample image used in the demo's examples list.
os.system("wget https://i.imgur.com/fSL1CGG.jpg")
# Make the vendored deep-text-recognition-benchmark importable by main.py.
# Fix: os.environ["PYTHONPATH"] raises KeyError when the variable is unset
# (the default in a fresh container) -- use .get with an empty fallback.
os.environ["PYTHONPATH"] = os.environ.get("PYTHONPATH", "") + ":" + f"{os.getcwd()}/src/text_recognition/deep-text-recognition-benchmark"
import gradio as gr
from PIL import Image
from uuid import uuid4
from pathlib import Path
def inference(im):
    """Run the NDLOCR pipeline on a PIL image.

    Saves the image into a fresh working directory, shells out to
    ``main.py infer`` (page separation + layout + recognition), stitches the
    left/right page predictions side by side when both exist, and returns
    ``(path_to_annotated_image, recognized_text)``.
    """
    job_id = uuid4()  # unique per request so concurrent calls don't collide
    Path(f'{job_id}/img').mkdir(parents=True)
    im.save(f'{job_id}/img/image.jpg')
    # -s f: force page separation, -i: save prediction images
    os.system(f'python main.py infer {job_id}/img/image.jpg {job_id}_output -s f -i')
    pred_dir = f'{job_id}_output/image/pred_img'
    image_path = f'{pred_dir}/image_L.jpg'
    if Path(f'{pred_dir}/image_R.jpg').exists():
        # Two pages were detected: paste left and right side by side.
        left = Image.open(f'{pred_dir}/image_L.jpg')
        right = Image.open(f'{pred_dir}/image_R.jpg')
        combined = Image.new('RGB', (left.width + right.width, left.height))
        combined.paste(left, (0, 0))
        combined.paste(right, (left.width, 0))
        combined.save(f'{pred_dir}/image_LR.jpg')
        image_path = f'{pred_dir}/image_LR.jpg'
    with open(f'{job_id}_output/image/txt/image_main.txt') as f:
        return image_path, f.read()
# Text shown on the demo page.
title = "NDLOCR"
description = "Gradio demo for NDLOCR. NDLOCR is a text recognition (OCR) Program."
article = "<p style='text-align: center'><a href='https://github.com/ndl-lab' target='_blank'>NDL Lab</a> | <a href='https://github.com/ndl-lab/ndlocr_cli' target='_blank'>NDLOCR Repo</a></p>"

# Wire the inference function into a Gradio interface: one PIL image in,
# an annotated image plus the recognized text out.
demo = gr.Interface(
    fn=inference,
    inputs=gr.inputs.Image(label='image', type='pil'),
    outputs=['image', 'text'],
    title=title,
    description=description,
    article=article,
    examples=['fSL1CGG.jpg', 'b3b3963f-d577-4a30-acb9-0b395d4d87f7.jpeg', '8016ed5e-c0c7-4979-b6c6-d6b6c3945d7f.jpeg'],
)
demo.launch(enable_queue=True, cache_examples=True)