# Install CPU-only PyTorch at startup (a common workaround on hosted demos;
# pinning these in requirements.txt is preferable where possible)
import os
os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
import gradio as gr
import torch
from transformers import AutoProcessor, AutoModelForTokenClassification
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont
processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
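# apply_ocr=True means the processor runs Tesseract on the input image, so
# pytesseract (and a system Tesseract install) must be available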
model = AutoModelForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv3-finetuned-invoice")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
# Export a few test-split images to disk for use as Gradio examples
dataset = load_dataset("darentang/generated", split="test")
Image.open(dataset[2]["image_path"]).convert("RGB").save("example1.png")
Image.open(dataset[1]["image_path"]).convert("RGB").save("example2.png")
Image.open(dataset[0]["image_path"]).convert("RGB").save("example3.png")
# Map label ids to names and assign a display color per tag
labels = dataset.features['ner_tags'].feature.names
id2label = {idx: label for idx, label in enumerate(labels)}
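# e.g. id2label might look like {0: 'O', 1: 'B-ABN', ...}; the exact ids
# follow the order of the dataset's ner_tags feature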
label2color = {
"B-ABN": 'blue',
"B-BILLER": 'blue',
"B-BILLER_ADDRESS": 'green',
"B-BILLER_POST_CODE": 'orange',
"B-DUE_DATE": "blue",
"B-GST": 'green',
"B-INVOICE_DATE": 'violet',
"B-INVOICE_NUMBER": 'orange',
"B-SUBTOTAL": 'green',
"B-TOTAL": 'blue',
"I-BILLER_ADDRESS": 'blue',
"O": 'orange'
}
def unnormalize_box(bbox, width, height):
    # LayoutLM boxes are normalized to a 0-1000 grid; scale the x coordinates
    # by the image width and the y coordinates by the image height
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
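# Worked example: a normalized box [250, 100, 500, 200] on an 800x600 px
# image maps to pixel coordinates [200.0, 60.0, 400.0, 120.0]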
def iob_to_label(label):
    # label2color is keyed on the full IOB tag (e.g. "B-TOTAL"), so the tag
    # is returned unchanged rather than stripping the B-/I- prefix
    return label
def process_image(image):
width, height = image.size
    # Encode the image; with apply_ocr=True the processor runs OCR itself and
    # returns token boxes normalized to the 0-1000 LayoutLM grid
    encoding = processor(image, truncation=True, padding="max_length", max_length=512, return_tensors="pt")
    input_ids = encoding.input_ids.to(device)
    attention_mask = encoding.attention_mask.to(device)
    bbox = encoding.bbox.to(device)
# Inference
with torch.no_grad():
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask)
predicted_labels = outputs.logits.argmax(dim=2).squeeze().tolist()
    # Collect the decoded OCR text for each predicted field as a
    # JSON-friendly summary; padding and special tokens are skipped
    # (their boxes are [0, 0, 0, 0])
    token_boxes = encoding.bbox[0].tolist()
    extracted_content = {}
    for token_id, prediction, box in zip(encoding.input_ids[0], predicted_labels, token_boxes):
        if box == [0, 0, 0, 0]:
            continue
        predicted_label = iob_to_label(id2label[prediction])
        word = processor.tokenizer.decode([int(token_id)]).strip()
        extracted_content[predicted_label] = (extracted_content.get(predicted_label, "") + " " + word).strip()
    # Draw the predictions over the image, again skipping padding and
    # special tokens
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(predicted_labels, token_boxes):
        if box == [0, 0, 0, 0]:
            continue
        predicted_label = iob_to_label(id2label[prediction])
        pixel_box = [int(coord) for coord in unnormalize_box(box, width, height)]
        draw.rectangle(pixel_box, outline=label2color[predicted_label])
        draw.text((pixel_box[0] + 10, pixel_box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)
return image, extracted_content
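# Minimal sketch of calling the pipeline directly (assumes the example images
# exported above are on disk):
#   annotated, fields = process_image(Image.open("example1.png").convert("RGB"))
#   annotated.save("annotated_example1.png")
#   print(fields)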
title = "Invoice Information Extraction using LayoutLMv3 Model"
description = "This demo uses Microsoft's LayoutLMv3, fine-tuned on an invoice dataset, to extract fields such as Biller Name, Biller Address, Biller Post Code, Due Date, GST, Invoice Date, Invoice Number, Subtotal, and Total. Upload an image or pick one of the examples below; the output is the annotated image together with the extracted field text."
article = """References

[1] Y. Xu et al., 'LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.' 2022. [Paper Link](https://arxiv.org/abs/2204.08387)

[2] [LayoutLMv3 training and inference](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3)"""
examples = [['example1.png'], ['example2.png'], ['example3.png']]
css = """.output_image, .input_image {height: 600px !important}"""
iface = gr.Interface(fn=process_image,
                     inputs=gr.Image(type="pil"),
                     outputs=[gr.Image(type="pil", label="Annotated Image"),
                              gr.JSON(label="Extracted Content")],
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     css=css,
                     analytics_enabled=True)
iface.queue()
iface.launch(inline=False, share=False, debug=False)