Theivaprakasham committed on
Commit
0ba962c
•
1 Parent(s): db47725

App added

Files changed (4)
  1. README.md +1 -1
  2. app.py +96 -0
  3. packages.txt +6 -0
  4. requirements.txt +4 -0
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: Layoutlmv2_sroie
+ title: Bill Information Extraction
  emoji: 🔥
  colorFrom: indigo
  colorTo: blue
app.py ADDED
@@ -0,0 +1,96 @@
+ import os
+
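+ # torch/torchvision CPU wheels and a matching detectron2 build (LayoutLMv2's visual backbone requires detectron2) are installed at startup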
+ os.system('pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html')
+ os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html')
+
+
+ import gradio as gr
+ import numpy as np
+ from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification
+ from datasets import load_dataset
+ from PIL import Image, ImageDraw, ImageFont
+
+ processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
+ model = LayoutLMv2ForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv2-finetuned-sroie")
+
+ # load image example
+ dataset = load_dataset("darentang/sroie", split="test")
+ Image.open(dataset[50]["image_path"]).convert("RGB").save("example1.png")
+ Image.open(dataset[14]["image_path"]).convert("RGB").save("example2.png")
+ Image.open(dataset[20]["image_path"]).convert("RGB").save("example3.png")
+ # define id2label, label2color
+ labels = dataset.features['ner_tags'].feature.names
+ id2label = {v: k for v, k in enumerate(labels)}
+ label2color = {'B-ADDRESS': 'blue',
+                'B-COMPANY': 'green',
+                'B-DATE': 'red',
+                'B-TOTAL': 'red',
+                'I-ADDRESS': 'blue',
+                'I-COMPANY': 'green',
+                'I-DATE': 'red',
+                'I-TOTAL': 'red',
+                'O': 'green'}
+
+ label2color = dict((k.lower(), v.lower()) for k, v in label2color.items())
+
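+ # LayoutLMv2 bounding boxes are normalized to a 0-1000 grid; scale them back to pixel coordinates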
+ def unnormalize_box(bbox, width, height):
+     return [
+         width * (bbox[0] / 1000),
+         height * (bbox[1] / 1000),
+         width * (bbox[2] / 1000),
+         height * (bbox[3] / 1000),
+     ]
+
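+ # identity mapping: the full IOB tag (B-/I- prefix) is kept as the label drawn on the image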
+ def iob_to_label(label):
+     return label
+
+ def process_image(image):
+     width, height = image.size
+
+     # encode
+     encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
+     offset_mapping = encoding.pop('offset_mapping')
+
+     # forward pass
+     outputs = model(**encoding)
+
+     # get predictions
+     predictions = outputs.logits.argmax(-1).squeeze().tolist()
+     token_boxes = encoding.bbox.squeeze().tolist()
+
+     # only keep non-subword predictions
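+     # offset_mapping[:, 0] != 0 marks continuation word-pieces, so each word keeps only its first token's prediction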
+     is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
+     true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
+     true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]
+
+     # draw predictions over the image
+     draw = ImageDraw.Draw(image)
+     font = ImageFont.load_default()
+     for prediction, box in zip(true_predictions, true_boxes):
+         predicted_label = iob_to_label(prediction).lower()
+         draw.rectangle(box, outline=label2color[predicted_label])
+         draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font)
+
+     return image
+
+
+ title = "Bill Information Extraction using the LayoutLMv2 model"
+ description = "Bill Information Extraction - We use Microsoft's LayoutLMv2 model fine-tuned on the SROIE dataset to predict the Company Name, Address, Date, and Total Amount from bills. To use it, simply upload an image or pick one of the example images below. Results will show up in a few seconds."
+
+ article = "<b>References</b><br>[1] Y. Xu et al., “LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding.” 2022. <a href='https://arxiv.org/abs/2012.14740'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv2/FUNSD'>LayoutLMv2 training and inference</a>"
+
+ examples = [['example1.png'], ['example2.png'], ['example3.png']]
+
+
+ css = """.output_image, .input_image {height: 600px !important}"""
+
+ iface = gr.Interface(fn=process_image,
+                      inputs=gr.inputs.Image(type="pil"),
+                      outputs=gr.outputs.Image(type="pil", label="annotated image"),
+                      title=title,
+                      description=description,
+                      article=article,
+                      examples=examples,
+                      css=css,
+                      analytics_enabled=True, enable_queue=True)
+ iface.launch(inline=False, share=True, debug=False)
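
For a quick check outside the Gradio UI, the minimal sketch below (not part of this commit) runs the same checkpoint on one of the saved example images and prints the raw token labels. It assumes the dependencies above are installed and that the fine-tuned checkpoint ships an id2label mapping in its config; otherwise, reuse the id2label built in app.py.

# hypothetical smoke test mirroring the inference path in process_image
from PIL import Image
from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
model = LayoutLMv2ForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv2-finetuned-sroie")

image = Image.open("example1.png").convert("RGB")  # written by the dataset step in app.py
encoding = processor(image, truncation=True, return_tensors="pt")
predictions = model(**encoding).logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[p] for p in predictions])  # assumed to hold the SROIE IOB tags
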
packages.txt ADDED
@@ -0,0 +1,6 @@
+ ffmpeg
+ libsm6
+ libxext6
+ libgl1
+ libgl1-mesa-glx
+ tesseract-ocr
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ git+https://github.com/huggingface/transformers.git
+ pyyaml==5.1
+ pytesseract==0.3.9
+ git+https://github.com/huggingface/datasets#egg=datasets