ajimeno committed
Commit c12418b
1 Parent(s): ab57ba8

First commit

README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: Unstructured Invoices
- emoji: 🐢
- colorFrom: pink
- colorTo: green
+ title: Invoices Parser
+ emoji:
+ colorFrom: purple
+ colorTo: red
  sdk: streamlit
  sdk_version: 1.17.0
  app_file: app.py
app.py ADDED
@@ -0,0 +1,83 @@
+ import torch
+ import streamlit as st
+ import os
+
+ from PIL import Image
+ from io import BytesIO
+ from transformers import VisionEncoderDecoderModel, VisionEncoderDecoderConfig, DonutProcessor
+
+ task_prompt = "<s_unstructured-invoices>"
+
+ def run_prediction(sample):
+     global pretrained_model, processor, task_prompt
+     if isinstance(sample, dict):
+         # prepare inputs from a preprocessed dataset sample
+         pixel_values = torch.tensor(sample["pixel_values"]).unsqueeze(0)
+     else:  # sample is a PIL image
+         # prepare encoder inputs
+         pixel_values = processor(sample, return_tensors="pt").pixel_values
+
+     decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
+
+     outputs = pretrained_model.generate(
+         pixel_values.to(device),
+         decoder_input_ids=decoder_input_ids.to(device)
+     )
+
+     # convert the generated token sequence into structured JSON
+     prediction = processor.token2json(processor.batch_decode(outputs)[0])
+
+     # load the reference target when one is available
+     if isinstance(sample, dict):
+         target = processor.token2json(sample["target_sequence"])
+     else:
+         target = "<not_provided>"
+
+     return prediction, target
+
+
+ logo = Image.open("./img/rsz_unstructured_logo.png")
+ st.image(logo)
+
+ st.markdown('''
+ ### Invoice Parser
+ This is an OCR-free Document Understanding Transformer (Donut), fine-tuned on 1,000 invoice images from the RVL-CDIP dataset.
+ The original implementation can be found [here](https://github.com/clovaai/donut).
+
+ At [Unstructured.io](https://github.com/Unstructured-IO/unstructured) we are on a mission to build custom preprocessing pipelines for labeling, training, and production machine-learning workflows.
+ Come join us in our public repos and contribute! Every contribution and piece of feedback is valuable to the community.
+ ''')
+
+ image_upload = None
+ with st.sidebar:
+     # file upload
+     uploaded_file = st.file_uploader("Upload an invoice")
+     if uploaded_file is not None:
+         # read the uploaded file as bytes and decode it into a PIL image
+         image_bytes_data = uploaded_file.getvalue()
+         image_upload = Image.open(BytesIO(image_bytes_data))
+
+ col1, col2 = st.columns(2)
+
+ if image_upload:
+     image = image_upload
+ else:
+     # fall back to the bundled sample invoice
+     image = Image.open("./img/4fabfaab-1299.png")
+
+ with col1:
+     st.image(image, caption='Your target invoice')
+
+ with st.spinner('Baking the invoice ...'):
+     processor = DonutProcessor.from_pretrained("unstructuredio/donut-invoices", max_length=1200, use_auth_token=os.environ['TOKEN'])
+     pretrained_model = VisionEncoderDecoderModel.from_pretrained("unstructuredio/donut-invoices", max_length=1200, use_auth_token=os.environ['TOKEN'])
+
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     pretrained_model.to(device)
+
+ with col2:
+     st.info('Parsing invoice')
+     parsed_info, _ = run_prediction(image.convert("RGB"))
+     st.text('\nInvoice Summary:')
+     st.json(parsed_info)
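
For reference, the same inference flow can be run outside Streamlit. The following is a minimal sketch, assuming access to the gated unstructuredio/donut-invoices checkpoint through the same TOKEN environment variable used above; the local path invoice.png is a placeholder for illustration, not a file in this repo.

import os
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Assumes the same gated checkpoint and TOKEN env var as app.py; invoice.png is a placeholder path.
processor = DonutProcessor.from_pretrained("unstructuredio/donut-invoices", use_auth_token=os.environ["TOKEN"])
model = VisionEncoderDecoderModel.from_pretrained("unstructuredio/donut-invoices", use_auth_token=os.environ["TOKEN"])
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# encode the invoice image and the task prompt used by the app
image = Image.open("invoice.png").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
decoder_input_ids = processor.tokenizer(
    "<s_unstructured-invoices>", add_special_tokens=False, return_tensors="pt"
).input_ids

# generate and decode the structured output
outputs = model.generate(
    pixel_values.to(device),
    decoder_input_ids=decoder_input_ids.to(device),
    max_length=1200,
)
print(processor.token2json(processor.batch_decode(outputs)[0]))
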
img/4fabfaab-1299.png ADDED
img/rsz_unstructured_logo.png ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ opencv-python==4.7.0.68
+ streamlit
+ torch==1.13.1
+ transformers==4.26.0