Joyantac33 committed
Commit bcbb9bc
1 Parent(s): 4e3eecd

Upload 8 files

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+sample_image_cord_test_receipt_00004.png filter=lfs diff=lfs merge=lfs -text
000.jpg ADDED
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Donut Base Finetuned Docvqa
-emoji: 🐠
-colorFrom: pink
-colorTo: purple
+title: Donut Base Finetuned Cord V2
+emoji: 🍩
+colorFrom: blue
+colorTo: gray
 sdk: gradio
-sdk_version: 3.34.0
+sdk_version: 3.0.26
 app_file: app.py
 pinned: false
 ---
app.py ADDED
@@ -0,0 +1,52 @@
+"""
+Donut
+Copyright (c) 2022-present NAVER Corp.
+MIT License
+
+https://github.com/clovaai/donut
+"""
+import gradio as gr
+import torch
+from PIL import Image
+
+from donut import DonutModel
+
+
+def _init_weights(DonutModel, module):
+    pass
+
+def demo_process(input_img):
+    global pretrained_model, task_prompt, task_name
+    # input_img = Image.fromarray(input_img)
+    output = pretrained_model.inference(image=input_img, prompt=task_prompt)["predictions"][0]
+    return output
+
+task_prompt = f"<s_cord-v2>"
+
+image = Image.open("./sample_image_cord_test_receipt_00004.png")
+image.save("cord_sample_receipt1.png")
+image = Image.open("./sample_image_cord_test_receipt_00012.png")
+image.save("cord_sample_receipt2.png")
+
+DonutModel._init_weights = _init_weights
+
+pretrained_model = DonutModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa", ignore_mismatched_sizes=True)
+pretrained_model.eval()
+
+demo = gr.Interface(
+    fn=demo_process,
+    inputs=gr.inputs.Image(type="pil"),
+    outputs="json",
+    title=f"Donut 🍩 demonstration for `cord-v2` task",
+    description="""This model is trained with 800 Indonesian receipt images of CORD dataset. <br>
+Demonstrations for other types of documents/tasks are available at https://github.com/clovaai/donut <br>
+More CORD receipt images are available at https://huggingface.co/datasets/naver-clova-ix/cord-v2
+
+More details are available at:
+- Paper: https://arxiv.org/abs/2111.15664
+- GitHub: https://github.com/clovaai/donut""",
+    examples=[["cord_sample_receipt1.png"], ["cord_sample_receipt2.png"]],
+    cache_examples=False,
+)
+
+demo.launch()
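
The app above only wraps a single call to DonutModel.inference with a task prompt inside a Gradio UI. For reference, here is a minimal sketch of the same inference without Gradio, reusing the checkpoint name, task prompt, and sample image from the committed app.py; it assumes the packages listed in requirements.txt are installed.

from PIL import Image
from donut import DonutModel

# Checkpoint name copied from the committed app.py above.
model = DonutModel.from_pretrained(
    "naver-clova-ix/donut-base-finetuned-docvqa",
    ignore_mismatched_sizes=True,
)
model.eval()

# Sample receipt image added in this commit.
image = Image.open("./sample_image_cord_test_receipt_00004.png")

# The "<s_cord-v2>" prompt selects the CORD-v2 receipt-parsing task,
# exactly as demo_process() does in app.py.
result = model.inference(image=image, prompt="<s_cord-v2>")["predictions"][0]
print(result)  # nested dict of parsed receipt fields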
data_sample.jpg ADDED
gitattributes.txt ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+sample_image_cord_test_receipt_00004.png filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,5 @@
+torch
+donut-python
+gradio
+transformers==4.24.0
+timm==0.6.13
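
Because transformers and timm are pinned above, a small sanity check (not part of the committed files, just a hedged sketch) can confirm that the running environment actually matches those pins before launching the Space.

from importlib.metadata import version  # Python 3.8+ standard library

# Pinned versions copied from requirements.txt above.
for pkg, pinned in [("transformers", "4.24.0"), ("timm", "0.6.13")]:
    installed = version(pkg)
    status = "OK" if installed == pinned else "MISMATCH"
    print(f"{pkg}: pinned {pinned}, installed {installed} -> {status}")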
sample_image_cord_test_receipt_00004.png ADDED

Git LFS Details

  • SHA256: 8f3eee7068c96e86cdb2e4b5c53085cb5e1439462edd55c373548cb1962801ad
  • Pointer size: 132 Bytes
  • Size of remote file: 1.64 MB
sample_image_cord_test_receipt_00012.png ADDED