pierreguillou committed
Commit 36d6867
1 Parent(s): e8063c8

first version

Files changed (6)
  1. README.md +30 -5
  2. app.py +25 -0
  3. example-table.jpeg +0 -0
  4. packages.txt +1 -0
  5. paper-image.jpeg +0 -0
  6. requirements.txt +4 -0
README.md CHANGED
@@ -1,12 +1,37 @@
 ---
 title: Layout Parser
-emoji: 💩
-colorFrom: red
-colorTo: gray
+emoji: 🐨
+colorFrom: indigo
+colorTo: purple
 sdk: gradio
-sdk_version: 3.0.3
 app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+# Configuration
+
+`title`: _string_
+Display title for the Space
+
+`emoji`: _string_
+Space emoji (emoji-only character allowed)
+
+`colorFrom`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`colorTo`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`sdk`: _string_
+Can be either `gradio` or `streamlit`
+
+`sdk_version`: _string_
+Only applicable for `streamlit` SDK.
+See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+Path is relative to the root of the repository.
+
+`pinned`: _boolean_
+Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,25 @@
+import os
+os.system('pip install "git+https://github.com/facebookresearch/detectron2.git@v0.5#egg=detectron2"')
+import layoutparser as lp
+import gradio as gr
+
+model = lp.Detectron2LayoutModel('lp://PrimaLayout/mask_rcnn_R_50_FPN_3x/config')
+
+def lpi(img):
+    layout = model.detect(img)  # detect layout regions in the PIL image passed in by Gradio
+    return lp.draw_box(img, layout)  # draw the detected layout boxes on the image
+
+inputs = gr.inputs.Image(type='pil', label="Original Image")
+outputs = gr.outputs.Image(type="pil", label="Output Image")
+
+title = "Layout Parser"
+description = "Demo for Layout Parser. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2103.15348'>LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis</a> | <a href='https://github.com/Layout-Parser/layout-parser'>Github Repo</a></p>"
+
+examples = [
+    ['example-table.jpeg'],
+    ['paper-image.jpeg']
+
+]
+
+gr.Interface(lpi, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
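For reference, here is a minimal standalone sketch (not part of this commit) of the same layoutparser flow that `lpi` wraps, assuming detectron2 and the packages from requirements.txt are installed; the input file is one of the example images added below, and the output filename is an arbitrary illustrative choice.

```python
# Hypothetical standalone run of the detection flow used in app.py.
import layoutparser as lp
from PIL import Image

# Same pre-trained PrimaLayout model that app.py loads.
model = lp.Detectron2LayoutModel('lp://PrimaLayout/mask_rcnn_R_50_FPN_3x/config')

image = Image.open('example-table.jpeg')   # example image shipped with this Space
layout = model.detect(image)               # detected layout blocks (coordinates and region types)
lp.draw_box(image, layout).save('layout-boxes.jpeg')  # draw_box returns a new PIL image; filename is illustrative
```

Note that app.py installs detectron2 at runtime with os.system rather than listing it in requirements.txt, presumably because its pip build needs torch to be importable before detectron2 itself can be installed.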
example-table.jpeg ADDED
packages.txt ADDED
@@ -0,0 +1 @@
+libgl1
paper-image.jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
+torch
+torchvision
+layoutparser
+opencv-python-headless