awacke1 committed
Commit 1079052 • 1 Parent(s): 64142b4

Update app.py

Files changed (1):
  app.py (+9 -7)
app.py CHANGED
@@ -24,6 +24,7 @@ dataset = load_dataset("nielsr/funsd", split="test")
 image = Image.open(dataset[0]["image_path"]).convert("RGB")
 image = Image.open("./invoice.png")
 image.save("document.png")
+
 # define id2label, label2color
 labels = dataset.features['ner_tags'].feature.names
 id2label = {v: k for v, k in enumerate(labels)}
@@ -73,15 +74,15 @@ def process_image(image):
     return image
 
 
-title = "Interactive demo: LayoutLMv2"
-description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740' target='_blank'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
-examples =[['document.png']]
+title = "🧠📑 AI Document Understanding OCR Using LayoutLMv2, FUNSD, and UNILM 📑🧠"
+
+description = "LayoutLMv2 is a transformer for document image understanding tasks: https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER."
 
-css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
-#css = "@media screen and (max-width: 600px) { .output_image, .input_image {height:20rem !important; width: 100% !important;} }"
-# css = ".output_image, .input_image {height: 600px !important}"
+article = "<p style='text-align: center'><a href='https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for' target='_blank'>LayoutLMv2 on PapersWithCode</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>UNILM Git</a></p>"
+
+examples = [['document.png']]
 
+#css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
 css = ".image-preview {height: auto !important;}"
 
 iface = gr.Interface(fn=process_image,
@@ -93,4 +94,5 @@ iface = gr.Interface(fn=process_image,
                      examples=examples,
                      css=css,
                      enable_queue=True)
+
 iface.launch(debug=True)
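
For readers skimming the first hunk, the id2label comprehension is worth unpacking: enumerate() yields (index, label) pairs, so despite the v/k naming it builds an integer-to-name dictionary. A minimal sketch of what it produces, assuming the standard BIO-style FUNSD tag set shipped by nielsr/funsd; the label2color dict is hypothetical, since this commit does not show the app's actual colors:

    from datasets import load_dataset

    # Same dataset call as in app.py.
    dataset = load_dataset("nielsr/funsd", split="test")
    labels = dataset.features['ner_tags'].feature.names
    # Typically: ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION',
    #             'B-ANSWER', 'I-ANSWER']

    # Same comprehension as app.py: enumerate yields (index, name),
    # so v is the integer id and k is the label string.
    id2label = {v: k for v, k in enumerate(labels)}
    # {0: 'O', 1: 'B-HEADER', 2: 'I-HEADER', 3: 'B-QUESTION', ...}

    # Hypothetical color map for drawing the QUESTION/ANSWER/HEADER/OTHER
    # boxes; the real app's colors are defined outside this diff.
    label2color = {'question': 'blue', 'answer': 'green',
                   'header': 'orange', 'other': 'violet'}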
 
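The hunk headers show only fragments of the Gradio wiring; in particular, the inputs/outputs arguments of gr.Interface fall outside the diff context. A minimal sketch of how the visible pieces fit together, assuming the Gradio 2.x-era API this app appears to target (enable_queue and css as Interface arguments, as the diff confirms; the gr.inputs/gr.outputs image components are assumptions). process_image here is a placeholder for the LayoutLMv2 inference the real app runs:

    import gradio as gr

    def process_image(image):
        # Placeholder: the real app runs LayoutLMv2 token classification,
        # draws QUESTION/ANSWER/HEADER/OTHER boxes, and returns the image.
        return image

    # Full title/description/article strings appear in the diff above;
    # shortened here to keep the sketch compact.
    title = "AI Document Understanding OCR Using LayoutLMv2, FUNSD, and UNILM"
    description = "LayoutLMv2 fine-tuned on FUNSD; tags words as QUESTION/ANSWER/HEADER/OTHER."
    article = "<p style='text-align: center'>LayoutLMv2 | UNILM</p>"
    examples = [['document.png']]
    css = ".image-preview {height: auto !important;}"

    iface = gr.Interface(fn=process_image,
                         inputs=gr.inputs.Image(type="pil"),    # assumption: not shown in the diff
                         outputs=gr.outputs.Image(type="pil"),  # assumption: not shown in the diff
                         title=title,
                         description=description,
                         article=article,
                         examples=examples,
                         css=css,
                         enable_queue=True)

    iface.launch(debug=True)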