kaydee committed on
Commit
95f34c2
1 Parent(s): a1c84dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -107,10 +107,8 @@ def process_image(image):
107
  return image
108
 
109
 
110
- title = "Restaurant/ Hotel Bill information extraction using LayoutLMv3 model"
111
- description = "Restaurant/ Hotel Bill information extraction - We use Microsoft's LayoutLMv3 trained on WildReceipt Dataset to predict the Store_name_value, Store_name_key, Store_addr_value, Store_addr_key, Tel_value, Tel_key, Date_value, Date_key, Time_value, Time_key, Prod_item_value, Prod_item_key, Prod_quantity_value, Prod_quantity_key, Prod_price_value, Prod_price_key, Subtotal_value, Subtotal_key, Tax_value, Tax_key, Tips_value, Tips_key, Total_value, Total_key. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
112
-
113
- article="<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3'>LayoutLMv3 training and inference</a><br>[3] Hongbin Sun, Zhanghui Kuang, Xiaoyu Yue, Chenhao Lin, and Wayne Zhang. 2021. Spatial Dual-Modality Graph Reasoning for Key Information Extraction. arXiv. DOI:https://doi.org/10.48550/ARXIV.2103.14470 <a href='https://doi.org/10.48550/ARXIV.2103.14470'>Paper Link</a>"
114
 
115
  examples =[['example1.png'],['example2.png'],['example3.png']]
116
 
@@ -121,9 +119,8 @@ iface = gr.Interface(fn=process_image,
121
  outputs=gr.Image(type="pil", label="annotated image"),
122
  title=title,
123
  description=description,
124
- article=article,
125
  examples=examples,
126
  css=css,
127
- analytics_enabled = True, enable_queue=True)
128
 
129
  iface.launch(inline=False, share=False, debug=False)
 
107
  return image
108
 
109
 
110
+ title = "Receipt information extraction using LayoutLMv3 model"
111
+ description = "Receipt information extraction - Here I use Microsoft's LayoutLMv3 trained on WildReceipt Dataset to predict the keys and values. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
 
 
112
 
113
  examples =[['example1.png'],['example2.png'],['example3.png']]
114
 
 
119
  outputs=gr.Image(type="pil", label="annotated image"),
120
  title=title,
121
  description=description,
 
122
  examples=examples,
123
  css=css,
124
+ analytics_enabled = True)
125
 
126
  iface.launch(inline=False, share=False, debug=False)