Theivaprakasham committed
Commit a114b97
1 Parent(s): 72113ad

Add layout detection app.py

Files changed (5)
  1. README.md +2 -2
  2. app.py +32 -0
  3. example1.png +0 -0
  4. example2.png +0 -0
  5. requirements.txt +3 -0
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: Layoutdetection
+ title: Document Layout Detection
  emoji: 📈
  colorFrom: purple
  colorTo: red
@@ -23,7 +23,7 @@ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
  `colorTo`: _string_
  Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

- `sdk`: _string_
+ `sdk`: `gradio`
  Can be either `gradio`, `streamlit`, or `static`

  `sdk_version` : _string_
app.py ADDED
@@ -0,0 +1,32 @@
+ import os
+ import gradio as gr
+ import numpy as np
+ import layoutparser as lp
+ from PIL import Image
+ import PIL
+
+ model = lp.Detectron2LayoutModel('lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config',
+                                  extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
+                                  label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"})
+
+ article = "References<br>[1] Z. Shen, R. Zhang, M. Dell, B. C. G. Lee, J. Carlson, and W. Li, “LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis,” arXiv preprint arXiv:2103.15348, 2021."
+ description = "Layout Detection/Parsing is one of the important tasks in converting unstructured data into structured data. It helps to automate, digitize and organize data in a usable format. In this project, we use the LayoutParser library (https://github.com/Layout-Parser/layout-parser) to perform layout detection with a pre-trained Faster_rcnn_R_50_FPN model that classifies layout regions as Text, Title, List, Table and Figure. Upload an image of a document or click an example image to check it out."
+
+ def show_preds(input_image):
+
+     img = PIL.Image.fromarray(input_image, 'RGB')
+     basewidth = 900
+     wpercent = (basewidth / float(img.size[0]))
+
+     hsize = int((float(img.size[1]) * float(wpercent)))
+     img = img.resize((basewidth, hsize), Image.ANTIALIAS)
+     image_array = np.array(img)
+     layout = model.detect(image_array)
+     return lp.draw_box(image_array, layout, show_element_type=True)
+
+ outputs = gr.outputs.Image(type="pil")
+
+ examples = [['example1.png'], ['example2.png']]
+
+ gr_interface = gr.Interface(fn=show_preds, inputs=["image"], outputs=outputs, title='Document Layout Detector/Parser', article=article, description=description, examples=examples, analytics_enabled=True, enable_queue=True)
+ gr_interface.launch(inline=False, share=True, debug=True)
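
For readers who want to exercise the detection step without the Gradio wrapper, here is a minimal sketch of the same pipeline. It assumes layoutparser and the Detectron2 backend from requirements.txt are installed and that example1.png (one of the bundled example images) is in the working directory; the model configuration and label_map mirror app.py above.

import numpy as np
import layoutparser as lp
from PIL import Image

# Same PubLayNet-trained Faster R-CNN configuration as in app.py.
model = lp.Detectron2LayoutModel(
    'lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config',
    extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
    label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},
)

image = np.array(Image.open("example1.png").convert("RGB"))
layout = model.detect(image)

# Each detected block exposes its predicted type, confidence score and pixel coordinates.
for block in layout:
    print(block.type, round(block.score, 3), block.coordinates)

# Filter to a single region type, e.g. tables, using the label_map names above.
tables = lp.Layout([b for b in layout if b.type == "Table"])

# Same visualization call the app returns to Gradio: a PIL image with labeled boxes.
annotated = lp.draw_box(image, layout, show_element_type=True)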
example1.png ADDED
example2.png ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ layoutparser
+ git+https://github.com/facebookresearch/detectron2.git@v0.4#egg=detectron2
+ layoutparser[ocr]
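
As a quick sanity check after installing these requirements, the short sketch below only assumes that the pinned detectron2 v0.4 tag and layoutparser import cleanly:

import detectron2
import layoutparser as lp

# The git requirement pins the v0.4 tag, so the reported version should start with "0.4".
print(detectron2.__version__)
print(lp.__version__)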