Update README.md
README.md
---
language: en
license: mit
---

# Kosmos-2.5

[Microsoft Document AI](https://www.microsoft.com/en-us/research/project/document-ai/) | [GitHub](https://github.com/microsoft/unilm/tree/master/kosmos-2.5)

## Model description

Kosmos-2.5 is a multimodal literate model for machine reading of text-intensive images. Pre-trained on large-scale text-intensive images, Kosmos-2.5 excels in two distinct yet cooperative transcription tasks: (1) generating spatially-aware text blocks, where each block of text is assigned its spatial coordinates within the image, and (2) producing structured text output that captures styles and structures into the markdown format. This unified multimodal literate capability is achieved through a shared decoder-only auto-regressive Transformer architecture, task-specific prompts, and flexible text representations. We evaluate Kosmos-2.5 on end-to-end document-level text recognition and image-to-markdown text generation. Furthermore, the model can be readily adapted for any text-intensive image understanding task with different prompts through supervised fine-tuning, making it a general-purpose tool for real-world applications involving text-rich images. This work also paves the way for the future scaling of multimodal large language models.

[Kosmos-2.5: A Multimodal Literate Model](https://arxiv.org/abs/2309.11419)
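
The task is selected purely by the prompt string passed to the processor. A minimal illustration (the two prompt strings come from the usage example below; the comments summarize the two output formats described in this card):

```python
# Kosmos-2.5 selects the task through the prompt it is conditioned on:
prompt = "<ocr>"  # spatially-aware text blocks: each text line is preceded by
                  # <bbox><x_..><y_..><x_..><y_..></bbox> location tokens
# prompt = "<md>"  # structured transcription of the page directly as markdown
```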

## NOTE:
Because this is a generative model, there is a risk of **hallucination** during generation, and it **cannot** guarantee the accuracy of all OCR/Markdown results for the input images.

## Use with transformers:
```bash
pip install git+https://github.com/tic-top/transformers.git
```
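
A quick way to confirm that the fork installed above is the `transformers` build actually on your path (a generic sanity check, not part of the original instructions):

```python
import transformers

# Should point at the freshly installed fork, not a pre-existing site-packages copy.
print(transformers.__version__, transformers.__file__)
```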

```python
from PIL import Image
import requests
import torch
from transformers import AutoProcessor, AutoModelForVision2Seq
import re

repo = "kirp/kosmos2_5"
device = "cuda:0"
dtype = torch.bfloat16
model = AutoModelForVision2Seq.from_pretrained(repo, device_map=device, torch_dtype=dtype)
processor = AutoProcessor.from_pretrained(repo)

url = "https://huggingface.co/kirp/kosmos2_5/resolve/main/receipt_00008.png"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "<ocr>"  # use "<md>" for markdown output instead

inputs = processor(text=prompt, images=image, return_tensors="pt")

# The processor works on a resized image; keep the ratio between the original
# and processed sizes so the predicted boxes can be mapped back later.
height, width = inputs.pop("height"), inputs.pop("width")
raw_width, raw_height = image.size
scale_height = raw_height / height
scale_width = raw_width / width

inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()}
inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype)

generated_ids = model.generate(
    **inputs,
    max_new_tokens=1024,
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)

def postprocess(y, scale_height, scale_width):
    y = y.replace(prompt, "")
    if "<md>" in prompt:
        # The markdown task needs no box parsing.
        return y
    pattern = r"<bbox><x_\d+><y_\d+><x_\d+><y_\d+></bbox>"
    bboxs_raw = re.findall(pattern, y)
    lines = re.split(pattern, y)[1:]
    # Pull the integer coordinates out of each bbox tag.
    bboxs = [re.findall(r"\d+", i) for i in bboxs_raw]
    bboxs = [[int(j) for j in i] for i in bboxs]
    info = ""
    for i in range(len(lines)):
        box = bboxs[i]
        x0, y0, x1, y1 = box
        if not (x0 >= x1 or y0 >= y1):
            # Rescale from the processed resolution back to the original image.
            x0 = int(x0 * scale_width)
            y0 = int(y0 * scale_height)
            x1 = int(x1 * scale_width)
            y1 = int(y1 * scale_height)
            info += f"{x0},{y0},{x1},{y0},{x1},{y1},{x0},{y1},{lines[i]}"
    return info

output_text = postprocess(generated_text[0], scale_height, scale_width)
print(output_text)
```

Expected `<ocr>` output for the sample receipt (the eight numbers in each record are the four box corners, rescaled to the original image, followed by the recognized text):

```text
55,595,71,595,71,629,55,629,1
82,595,481,595,481,635,82,635,[REG] BLACK SAKURA
716,590,841,590,841,629,716,629,45,455
55,637,71,637,71,672,55,672,1
82,637,486,637,486,675,82,675,COOKIE DOH SAUCES
818,632,843,632,843,668,818,668,0
51,683,71,683,71,719,51,719,1
82,683,371,683,371,719,82,719,NATA DE COCO
820,677,845,677,845,713,820,713,0
32,770,851,770,851,811,32,811,Sub Total 45,455
28,811,853,811,853,858,28,858,PB1 (10%) 4,545
28,857,855,857,855,905,28,905,Rounding 0
24,905,858,905,858,956,24,956,Total 50,000
17,1096,868,1096,868,1150,17,1150,Card Payment 50,000
```
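
Each `<ocr>` record above is `x0,y0,x1,y0,x1,y1,x0,y1,text`, i.e. the four corners of an axis-aligned text box in original-image coordinates, followed by the recognized text. As a rough sketch (not part of the original card, and assuming the comma-separated format produced by `postprocess` above), the boxes can be drawn back onto the image with PIL for visual inspection:

```python
from PIL import ImageDraw

def draw_ocr_boxes(image, ocr_text):
    """Draw each predicted text box from postprocess() output onto a copy of the image."""
    canvas = image.convert("RGB")
    draw = ImageDraw.Draw(canvas)
    for record in ocr_text.splitlines():
        parts = record.split(",", 8)  # the recognized text itself may contain commas
        if len(parts) < 9:
            continue
        coords = list(map(int, parts[:8]))
        corners = list(zip(coords[0::2], coords[1::2]))  # [(x0,y0), (x1,y0), (x1,y1), (x0,y1)]
        draw.polygon(corners, outline="red")
    return canvas

# draw_ocr_boxes(image, output_text).save("receipt_with_boxes.png")
```

Switching the prompt to `"<md>"` skips the box parsing entirely; as the early return in `postprocess` shows, the decoded text is already the markdown transcription.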

## Citation

If you find Kosmos-2.5 useful in your research, please cite the following paper:

```bibtex
@article{lv2023kosmos,
  title={Kosmos-2.5: A multimodal literate model},
  author={Lv, Tengchao and Huang, Yupan and Chen, Jingye and Cui, Lei and Ma, Shuming and Chang, Yaoyao and Huang, Shaohan and Wang, Wenhui and Dong, Li and Luo, Weiyao and others},
  journal={arXiv preprint arXiv:2309.11419},
  year={2023}
}
```

## License
The content of this project itself is licensed under the [MIT license](https://github.com/microsoft/unilm/blob/master/kosmos-2.5/LICENSE).

[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct)