import os
os.system('pip install pyyaml==5.1')
# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')

# install detectron2 that matches pytorch 1.8
# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
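# note: the detectron2 install follows the LayoutLMv2-era setup; LayoutLMv3 in
# recent transformers releases is pure PyTorch, so it may not be strictly required here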

# install pytesseract
os.system('pip install -q pytesseract')
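# pytesseract is only a wrapper: the tesseract-ocr binary must be available on the
# host (on Hugging Face Spaces it is typically listed in packages.txt)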

import gradio as gr
import numpy as np
import torch
from transformers import LayoutLMv3Processor, LayoutLMv3ForTokenClassification
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont, ImageColor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
model = LayoutLMv3ForTokenClassification.from_pretrained("nielsr/layoutlmv3-finetuned-cord")
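# with its default apply_ocr=True, the processor runs Tesseract (via pytesseract) to
# extract words and normalized boxes from the image, then tokenizes them for the model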

# load the CORD test split (used below for the label names) and an example image;
# the Gradio interface supplies the actual input image at runtime
dataset = load_dataset("nielsr/cord-layoutlmv3", split="test")
#image = Image.open(dataset[0]["image_path"]).convert("RGB")
image = Image.open("./test0.jpeg")
# define id2label, label2color
labels = dataset.features['ner_tags'].feature.names
id2label = {idx: label for idx, label in enumerate(labels)}

# assign a (random, possibly repeating) PIL color name to each label
label_color_pil = list(ImageColor.colormap.keys())
label_ints = np.random.randint(0, len(label_color_pil), len(labels))
label_color = [label_color_pil[i] for i in label_ints]
label2color = {}
for idx, label in id2label.items():
    # key by the stripped tag so lookups match iob_to_label(); a bare 'O' maps to 'other'
    label2color[label[2:] or 'other'] = label_color[idx]

def unnormalize_box(bbox, width, height):
    # the processor returns boxes normalized to a 0-1000 scale; map back to pixels
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
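# e.g. unnormalize_box([100, 200, 300, 400], width=640, height=480)
#      -> [64.0, 96.0, 192.0, 192.0]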

def iob_to_label(label):
    # strip the IOB prefix ('B-'/'I-'); the bare 'O' tag becomes 'other'
    label = label[2:]
    if not label:
        return 'other'
    return label
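# e.g. iob_to_label('I-MENU.NM') -> 'MENU.NM' and iob_to_label('O') -> 'other'
# (tag names here are illustrative of the CORD label set)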

def process_image(image):
    width, height = image.size

    # encode
    encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')
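    # encoding now holds input_ids, attention_mask, bbox and pixel_values; offset_mapping
    # gives each token's (start, end) span within its source word, used below to drop
    # subword continuations (tokens whose span does not start at 0)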

    # forward pass (no gradients needed at inference time)
    with torch.no_grad():
        outputs = model(**encoding)

    # get predictions
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding.bbox.squeeze().tolist()

    # only keep non-subword predictions
    is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0
    true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
    true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]

    # draw predictions over the image
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        predicted_label = iob_to_label(prediction)
        draw.rectangle(box, outline=label2color[predicted_label])
        draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font)
    
    return image
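
# optional local smoke test: set SMOKE_TEST=1 to annotate the bundled example image
# (hypothetical convenience flag, not part of the original app)
if os.environ.get("SMOKE_TEST"):
    process_image(Image.open("./test0.jpeg").convert("RGB")).save("annotated_test0.png")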


title = "Extracting Receipts: LayoutLMv3"
description = """<p> Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. </p>  <p> This particular model is fine-tuned on 
<a href="https://github.com/clovaai/cord">CORD</a>, the Consolidated Receipt Dataset. If you search the 🤗 Hugging Face hub you will find other related models fine-tuned for other document types. This model is fine-tuned to recognize entities such as menu items, subtotals, and total prices. To perform your own fine-tuning, take a look at the 
<a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3">notebook by Niels</a>. </p>  <p> To try it out, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. To see the output at a larger size, right-click on it, select 'Open image in new tab', and use your browser's zoom feature. </p>"""

article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.08387' target='_blank'>LayoutLMv3: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
examples =[['test0.jpeg'],['test1.jpeg'],['test2.jpeg']]

css = "a { text-decoration: none; border-bottom: 1px solid red; }"  # style links with a red underline
iface = gr.Interface(fn=process_image,
                     inputs=gr.inputs.Image(type="pil"),
                     outputs=gr.outputs.Image(type="pil", label="annotated image"),
                     title=title,
                     description=description,
                     article=article,
                     css=css,
                     examples=examples)
iface.launch(debug=True)