acverma committed on
Commit
7a13891
1 Parent(s): 643c376
Files changed (1)
  1. Key_Information_Extraction +198 -0
Key_Information_Extraction ADDED
@@ -0,0 +1,198 @@
+ import os
+ os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
+
+ import gradio as gr
+
+ import numpy as np
+ import torch
+
+ from transformers import AutoProcessor
+ from transformers import LayoutLMv3ForTokenClassification
+ from transformers.data.data_collator import default_data_collator
+
+ from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D
+ from datasets import load_dataset  # this dataset uses the new Image feature :)
+
+ from PIL import Image, ImageDraw, ImageFont
+
+ # Setting up the Hugging Face environment:
+ # pip install -q git+https://github.com/huggingface/transformers.git
+ # pip install h5py
+ # seqeval is useful for evaluation metrics such as F1 on sequence-labeling tasks
+ # pip install -q datasets seqeval
+
+ # this dataset uses the new Image feature :)
+ dataset = load_dataset("nielsr/funsd-layoutlmv3")
+ # dataset = load_dataset("G:\\BITS - MTECH\\Sem -4\\Final Report\\code\\dataset")
+
+ # save three test pages as demo examples; the "image" column already holds
+ # PIL images, so no path-based loading is needed
+ dataset["test"][2]["image"].convert("RGB").save("example1.png")
+ dataset["test"][1]["image"].convert("RGB").save("example2.png")
+ dataset["test"][0]["image"].convert("RGB").save("example3.png")
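+
+ # each record carries the words ("tokens"), their bounding boxes ("bboxes",
+ # normalized to a 0-1000 grid), the BIO entity tags ("ner_tags") and the
+ # scanned page itself ("image")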
+
+ example = dataset["test"][0]
+ words, boxes, ner_tags = example["tokens"], example["bboxes"], example["ner_tags"]
+
+ features = dataset["test"].features
+ column_names = dataset["test"].column_names
+ image_column_name = "image"
+ text_column_name = "tokens"
+ boxes_column_name = "bboxes"
+ label_column_name = "ner_tags"
+
+ def get_label_list(labels):
+     # collect the unique tags across all documents, sorted for stable ids
+     unique_labels = set()
+     for label in labels:
+         unique_labels = unique_labels | set(label)
+     label_list = list(unique_labels)
+     label_list.sort()
+     return label_list
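+
+ # e.g. get_label_list([[0, 1], [1, 2]]) returns [0, 1, 2]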
+
+ if isinstance(features[label_column_name].feature, ClassLabel):
+     label_list = features[label_column_name].feature.names
+     # no need to convert the labels, since they are already ints
+     id2label = {k: v for k, v in enumerate(label_list)}
+     label2id = {v: k for k, v in enumerate(label_list)}
+ else:
+     label_list = get_label_list(dataset["train"][label_column_name])
+     id2label = {k: v for k, v in enumerate(label_list)}
+     label2id = {v: k for k, v in enumerate(label_list)}
+ num_labels = len(label_list)
+
+ label2color = {'question': 'blue', 'answer': 'green', 'header': 'orange', 'other': 'violet'}
+
+ def iob_to_label(label):
+     # map a BIO tag such as "B-QUESTION" to the bare lowercase name used as a
+     # key in label2color; the "O" tag maps to "other"
+     label = label[2:]
+     if not label:
+         return 'other'
+     return label.lower()
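+
+ # for reference, the label set exposed by this FUNSD dataset is expected to be
+ # ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER']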
+
+ def prepare_examples(examples):
+     images = examples[image_column_name]
+     words = examples[text_column_name]
+     boxes = examples[boxes_column_name]
+     word_labels = examples[label_column_name]
+
+     # the processor tokenizes the words, preprocesses the page image, and
+     # aligns the word-level labels with the resulting subword tokens
+     encoding = processor(images, words, boxes=boxes, word_labels=word_labels,
+                          truncation=True, padding="max_length")
+
+     return encoding
+
+ # apply_ocr=False because the dataset already provides the words and boxes
+ processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
+
+ model = LayoutLMv3ForTokenClassification.from_pretrained("microsoft/layoutlmv3-base",
+                                                          id2label=id2label,
+                                                          label2id=label2id)
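+
+ # NOTE: "microsoft/layoutlmv3-base" ships without a trained token-classification
+ # head, so the head above is randomly initialized and predictions will not be
+ # meaningful until the model is fine-tuned; a FUNSD-fine-tuned checkpoint could
+ # be swapped in instead, e.g. (assumption, not part of the original script):
+ # model = LayoutLMv3ForTokenClassification.from_pretrained("nielsr/layoutlmv3-finetuned-funsd")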
+
+ # we need to define custom features for `set_format` (used later on) to work properly
+ features = Features({
+     'pixel_values': Array3D(dtype="float32", shape=(3, 224, 224)),
+     'input_ids': Sequence(feature=Value(dtype='int64')),
+     'attention_mask': Sequence(Value(dtype='int64')),
+     'bbox': Array2D(dtype="int64", shape=(512, 4)),
+     'labels': Sequence(feature=Value(dtype='int64')),
+ })
+
+ # train_dataset = dataset["train"].map(
+ #     prepare_examples,
+ #     batched=True,
+ #     remove_columns=column_names,
+ #     features=features,
+ # )
+ eval_dataset = dataset["test"].map(
+     prepare_examples,
+     batched=True,
+     remove_columns=column_names,
+     features=features,
+ )
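+
+ # with explicit features the mapped dataset can feed a PyTorch DataLoader
+ # directly; a minimal sketch (not part of the original script):
+ # eval_dataset.set_format("torch")
+ # dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=2,
+ #                                          collate_fn=default_data_collator)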
+
+ def unnormalize_box(bbox, width, height):
+     # map a box from the model's 0-1000 coordinate grid back to pixel space
+     return [
+         width * (bbox[0] / 1000),
+         height * (bbox[1] / 1000),
+         width * (bbox[2] / 1000),
+         height * (bbox[3] / 1000),
+     ]
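+
+ # e.g. on a 762x1000 page, unnormalize_box([100, 50, 200, 80], 762, 1000)
+ # returns [76.2, 50.0, 152.4, 80.0]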
+
+ def process_image(image):
+     # NOTE: the processor was built with apply_ocr=False, so this demo ignores
+     # the uploaded pixels and annotates a FUNSD test example with its
+     # ground-truth words and boxes instead of running OCR on the input
+     image = example["image"].convert("RGB")
+     words = example["tokens"]
+     boxes = example["bboxes"]
+     word_labels = example["ner_tags"]
+     width, height = image.size
+
+     # encode
+     encoding = processor(image, words, boxes=boxes, word_labels=word_labels,
+                          truncation=True, return_offsets_mapping=True,
+                          return_tensors="pt")
+     offset_mapping = encoding.pop('offset_mapping')
+
+     # forward pass
+     with torch.no_grad():
+         outputs = model(**encoding)
+
+     # get predictions: the highest-scoring class per token, via argmax
+     predictions = outputs.logits.argmax(-1).squeeze().tolist()
+     token_boxes = encoding.bbox.squeeze().tolist()
+
+     # only keep predictions for the first subword of each word
+     is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
+     true_predictions = [id2label[pred] for idx, pred in enumerate(predictions)
+                         if not is_subword[idx]]
+     true_boxes = [unnormalize_box(box, width, height)
+                   for idx, box in enumerate(token_boxes) if not is_subword[idx]]
+
+     # draw the predictions over the image
+     draw = ImageDraw.Draw(image)
+     font = ImageFont.load_default()
+     for prediction, box in zip(true_predictions, true_boxes):
+         predicted_label = iob_to_label(prediction)
+         draw.rectangle(box, outline=label2color[predicted_label])
+         draw.text((box[0] + 10, box[1] - 10), text=predicted_label,
+                   fill=label2color[predicted_label], font=font)
+
+     return image
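+
+ # quick local smoke test (sketch, not part of the original script):
+ # annotated = process_image(Image.open("example1.png"))
+ # annotated.save("example1_annotated.png")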
+
+ title = "DocumentAI - Extraction using the LayoutLMv3 model"
+ description = "Form and invoice extraction: we use Microsoft's LayoutLMv3, trained on an invoice dataset, to predict the Biller Name, Biller Address, Biller Post Code, Due Date, GST, Invoice Date, Invoice Number, Subtotal and Total. To use it, simply upload an image or pick one of the example images below. Results will show up in a few seconds."
+
+ article = "<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3'>LayoutLMv3 training and inference</a>"
+
+ examples = [['example1.png'], ['example2.png'], ['example3.png']]
+
+ css = """.output_image, .input_image {height: 600px !important}"""
+
+ # gr.Image replaces the deprecated gr.inputs / gr.outputs API, and queueing is
+ # enabled on the interface itself instead of via the old enable_queue argument
+ iface = gr.Interface(fn=process_image,
+                      inputs=gr.Image(type="pil"),
+                      outputs=gr.Image(type="pil", label="annotated image"),
+                      title=title,
+                      description=description,
+                      article=article,
+                      examples=examples,
+                      css=css,
+                      analytics_enabled=True)
+ iface.queue()
+
+ iface.launch(inline=False, share=False, debug=False)