import os

import gradio as gr
import numpy as np
import layoutparser as lp
from PIL import Image

# Detectron2 is not pulled in automatically; install it at runtime so the
# Detectron2-based layout model below can be instantiated.
os.system('pip install "git+https://github.com/facebookresearch/detectron2.git@v0.4#egg=detectron2"')

# Pre-trained Faster R-CNN (R50-FPN) layout model from the PubLayNet model zoo;
# keep only detections with a confidence score of at least 0.8.
model = lp.Detectron2LayoutModel('lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config',
                                 extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
                                 label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"})

article="References<br>[1] Z. Shen, R. Zhang, M. Dell, B. C. G. Lee, J. Carlson, and W. Li, “LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis,” arXiv Prepr. arXiv2103.15348, 2021." |
|
description = "Layout Detection/Parsing is one of the important tasks of converting unstructured data into structured data. This task helps to automate, digitize and organize the data in a usable format. In this project, we utilize LayoutParser library (https://github.com/Layout-Parser/layout-parser) to perform Layout Detection using pre-trained Faster_rcnn_R_50_FPN model that can classify the layout based on Text, Title, List, Table and Figure. Upload an image of a document or click an example image to check this out." |
|
|
|
def show_preds(input_image):
    # Gradio passes the uploaded image as a numpy array; convert it to PIL.
    img = Image.fromarray(input_image, 'RGB')

    # Resize to a fixed width of 900 px while preserving the aspect ratio.
    basewidth = 900
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((basewidth, hsize), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10

    # Run layout detection and draw the predicted boxes with their labels.
    image_array = np.array(img)
    layout = model.detect(image_array)
    return lp.draw_box(image_array, layout, show_element_type=True)

# lp.draw_box returns a PIL image, so declare a PIL output component.
outputs = gr.outputs.Image(type="pil")

examples = [['example1.png'], ['example2.png']]

gr_interface = gr.Interface(fn=show_preds,
                            inputs=["image"],
                            outputs=outputs,
                            title='Document Layout Detector/Parser',
                            article=article,
                            description=description,
                            examples=examples,
                            analytics_enabled=True,
                            enable_queue=True)
gr_interface.launch(inline=False, share=True, debug=True)