awacke1 commited on
Commit
3990583
β€’
1 Parent(s): 6c819d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -76,11 +76,12 @@ def process_image(image):
76
 
77
  title = "πŸ§ πŸ“‘ AI Document Understanding OCR Using LayoutLMv2, FUNSD, and UNILM πŸ“‘πŸ§ "
78
 
79
- description = "LayoutLMv2 is a transformer for document image understanding tasks: https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER."
80
 
81
  article = "<p style='text-align: center'><a href='https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for' target='_blank'>LayoutLMv2 on PapersWithCode</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>UNILM Git</a></p>"
82
 
83
- examples =[['document.png']]
 
84
 
85
  #css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
86
  css = ".image-preview {height: auto !important;}"
 
76
 
77
  title = "πŸ§ πŸ“‘ AI Document Understanding OCR Using LayoutLMv2, FUNSD, and UNILM πŸ“‘πŸ§ "
78
 
79
+ description = "LayoutLMv2: https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for fine-tuned on FUNSD, a dataset of manually annotated forms; it annotates words appearing in the image as QUESTION/ANSWER/HEADER/OTHER."
80
 
81
  article = "<p style='text-align: center'><a href='https://paperswithcode.com/paper/layoutlmv2-multi-modal-pre-training-for' target='_blank'>LayoutLMv2 on PapersWithCode</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>UNILM Git</a></p>"
82
 
83
+ #examples =[['document.png']]
84
+ examples = [f"{i}.jpg" for i in range(1,5)]
85
 
86
  #css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
87
  css = ".image-preview {height: auto !important;}"