rajistics committed
Commit 33e48a5 • 1 Parent(s): 539c608

Almost done

Files changed (1):
  1. app.py +8 -11
app.py CHANGED

@@ -27,6 +27,7 @@ image = Image.open("./test0.jpeg")
 labels = dataset.features['ner_tags'].feature.names
 id2label = {v: k for v, k in enumerate(labels)}
 
+# Need to get discrete colors for each label
 label_ints = np.random.randint(0, len(ImageColor.colormap.items()), 61)
 label_color_pil = [k for k,_ in ImageColor.colormap.items()]
 label_color = [label_color_pil[i] for i in label_ints]
@@ -78,24 +79,20 @@ def process_image(image):
     return image
 
 
-title = "Interactive demo: LayoutLMv3"
-description = "Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on CORD, a dataset of receipts. It annotates the words appearing in the image as ***. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.08387' target='_blank'>LayoutLMv3: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
-examples = [['test0.jpeg'],['./test1.jpeg'],['test2.jpeg']]
+title = "Extracting Receipts: LayoutLMv3"
+description = "Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. \
+This particular model is fine-tuned from [LayoutLMv3](https://huggingface.co/microsoft/layoutlmv3-base) on the Consolidated Receipt Dataset ([CORD](https://github.com/clovaai/cord)), a dataset of receipts. If you search the 🤗 Hugging Face Hub you will see other related models fine-tuned for other document types. This model is fine-tuned to look for entities around menu items, subtotals, and total prices. To perform your own fine-tuning, take a look at the [notebook by Niels](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3). \
+To try it out, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. To see the output bigger, right-click on it, select 'Open image in new tab', and use your browser's zoom feature."
 
-#css = ".output-image, .input-image"
-#css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
-#css = "@media screen and (max-width: 600px) { .output_image, .input_image {height:20rem !important; width: 100% !important;} }"
-#css = ".output_image {height: 600px !important}, .input_image {height: 300px !important}"
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.08387' target='_blank'>LayoutLMv3: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
+examples = [['test0.jpeg'],['test1.jpeg'],['test2.jpeg']]
 
-css = ".image-preview {height: auto !important;}"
 
 iface = gr.Interface(fn=process_image,
-                     inputs=gr.inputs.Image(type="pil", tool="editor"),
+                     inputs=gr.inputs.Image(type="pil"),
                      outputs=gr.outputs.Image(type="pil", label="annotated image"),
                      title=title,
                      description=description,
                      article=article,
                      examples=examples)
-                     #css=css)
 iface.launch(debug=True)
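
For readers skimming the first hunk: it assigns each NER tag a random named color from PIL so annotations can be drawn in visually distinct colors. A minimal standalone sketch of the same idea, assuming only NumPy and PIL (the label list is a stand-in for the dataset's ner_tags; the hardcoded 61 in the commit presumably matches its length):

```python
# Minimal sketch of the discrete-color setup from the first hunk.
# The labels below are stand-ins for dataset.features['ner_tags'].feature.names.
import numpy as np
from PIL import ImageColor

labels = [f"LABEL_{i}" for i in range(61)]                       # stand-in tag names
color_names = [name for name, _ in ImageColor.colormap.items()]  # ~140 named colors
label_ints = np.random.randint(0, len(color_names), len(labels))
label_color = [color_names[i] for i in label_ints]               # one color name per tag
```

Because the draw is random with replacement, two tags can end up sharing a color; sampling without replacement (e.g. `np.random.choice(len(color_names), len(labels), replace=False)`) would guarantee distinct colors.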
 
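The diff never shows process_image itself. As orientation for what such a LayoutLMv3 annotator typically looks like, here is a hedged sketch against the transformers API; the base checkpoint name is a stand-in (the Space presumably loads CORD-finetuned weights whose config carries the real label names), and the drawing logic is simplified:

```python
# Hedged sketch of a LayoutLMv3 annotation function; not the Space's actual code.
# Assumes pytesseract is installed, since apply_ocr=True runs OCR in the processor.
import torch
from PIL import ImageDraw
from transformers import AutoProcessor, AutoModelForTokenClassification

checkpoint = "microsoft/layoutlmv3-base"   # stand-in; a CORD-finetuned model fits here
processor = AutoProcessor.from_pretrained(checkpoint, apply_ocr=True)
model = AutoModelForTokenClassification.from_pretrained(checkpoint)

def unnormalize_box(box, width, height):
    # LayoutLMv3 bounding boxes live on a 0-1000 grid; scale back to pixels.
    return [width * box[0] / 1000, height * box[1] / 1000,
            width * box[2] / 1000, height * box[3] / 1000]

def process_image(image):
    width, height = image.size
    encoding = processor(image, return_tensors="pt")   # OCR, tokenize, image features
    with torch.no_grad():
        logits = model(**encoding).logits
    predictions = logits.argmax(-1).squeeze().tolist()
    boxes = [unnormalize_box(b, width, height)
             for b in encoding["bbox"].squeeze().tolist()]
    draw = ImageDraw.Draw(image)
    for pred, box in zip(predictions, boxes):          # a real app would skip special tokens
        label = model.config.id2label[pred]
        draw.rectangle(box, outline="red")             # or label_color[pred] from above
        draw.text((box[0] + 10, box[1] - 10), text=label, fill="red")
    return image
```

A CORD-finetuned checkpoint would ship the receipt tag names in model.config.id2label, which is also why the commit builds its own id2label mapping from the dataset features.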