anaghasavit committed on
Commit 72791ee
1 Parent(s): 57f758a

Upload 9 files

README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ tags:
+ - trocr
+ - image-to-text
+ widget:
+ - src: https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg
+   example_title: Note 1
+ - src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSoolxi9yWGAT5SLZShv8vVd0bz47UWRzQC19fDTeE8GmGv_Rn-PCF1pP1rrUx8kOjA4gg&usqp=CAU
+   example_title: Note 2
+ - src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRNYtTuSBpZPV_nkBYPMFwVVD9asZOPgHww4epu9EqWgDmXW--sE2o8og40ZfDGo87j5w&usqp=CAU
+   example_title: Note 3
+ ---
+
+ # TrOCR (base-sized model, fine-tuned on IAM)
+
+ TrOCR model fine-tuned on the [IAM dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database). It was introduced in the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Li et al. and first released in [this repository](https://github.com/microsoft/unilm/tree/master/trocr).
+
+ Disclaimer: The team releasing TrOCR did not write a model card for this model, so this model card has been written by the Hugging Face team.
+
+ ## Model description
+
+ The TrOCR model is an encoder-decoder model, consisting of an image Transformer as encoder and a text Transformer as decoder. The image encoder was initialized from the weights of BEiT, while the text decoder was initialized from the weights of RoBERTa.
+
+ Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. Absolute position embeddings are added before the sequence is fed to the layers of the Transformer encoder. The Transformer text decoder then autoregressively generates tokens.
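+
+ As a rough illustration (this sketch is not part of the original model card), the sequence length the encoder sees follows directly from the 384x384 input resolution and the 16x16 patch size:
+
+ ```python
+ from transformers import TrOCRProcessor
+ from PIL import Image
+ import requests
+
+ url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+ processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values
+ print(pixel_values.shape)  # torch.Size([1, 3, 384, 384])
+
+ # (384 // 16) ** 2 = 576 patches, each linearly embedded by the encoder
+ num_patches = (pixel_values.shape[-2] // 16) * (pixel_values.shape[-1] // 16)
+ print(num_patches)  # 576
+ ```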
+
+ ## Intended uses & limitations
+
+ You can use the raw model for optical character recognition (OCR) on single text-line images. See the [model hub](https://huggingface.co/models?search=microsoft/trocr) to look for fine-tuned versions on a task that interests you.
+
+ ### How to use
+
+ Here is how to use this model in PyTorch:
+
+ ```python
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+ from PIL import Image
+ import requests
+
+ # load image from the IAM database
+ url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+ processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+ model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+ generated_ids = model.generate(pixel_values)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ ```
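+
+ Greedy decoding is used by default. As an optional variation (not from the original card), the standard `generate()` arguments apply, e.g. beam search:
+
+ ```python
+ # Beam-search decoding; continues from the variables defined above.
+ generated_ids = model.generate(
+     pixel_values,
+     num_beams=4,          # keep 4 hypotheses in parallel
+     max_length=64,        # generous cap for a single text line
+     early_stopping=True,  # stop once all beams are finished
+ )
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ print(generated_text)
+ ```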
+
+ ### BibTeX entry and citation info
+
+ ```bibtex
+ @misc{li2021trocr,
+     title={TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models},
+     author={Minghao Li and Tengchao Lv and Lei Cui and Yijuan Lu and Dinei Florencio and Cha Zhang and Zhoujun Li and Furu Wei},
+     year={2021},
+     eprint={2109.10282},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,157 @@
+ {
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "",
+     "activation_dropout": 0.0,
+     "activation_function": "gelu",
+     "add_cross_attention": true,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": 0.0,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": false,
+     "cross_attention_hidden_size": 768,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "init_std": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "trocr",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_embedding": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.12.0.dev0",
+     "use_bfloat16": false,
+     "use_cache": false,
+     "vocab_size": 50265
+   },
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "cross_attention_hidden_size": null,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "vit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qkv_bias": false,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.12.0.dev0",
+     "use_bfloat16": false
+   },
+   "is_encoder_decoder": true,
+   "model_type": "vision-encoder-decoder",
+   "processor_class": "TrOCRProcessor",
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": null
+ }
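
In short, this config pairs a ViT-style image encoder (hidden size 768, 12 layers, 16x16 patches on 384x384 inputs) with a TrOCR text decoder (d_model 1024, 12 layers, RoBERTa vocabulary of 50265 tokens), bridged by cross-attention over the 768-dim encoder states. A minimal sketch (not part of this commit) of inspecting it without downloading the weights:

```python
from transformers import VisionEncoderDecoderConfig

# Loads only config.json, not the model weights.
config = VisionEncoderDecoderConfig.from_pretrained('microsoft/trocr-base-handwritten')
print(config.encoder.model_type, config.encoder.hidden_size)  # vit 768
print(config.decoder.model_type, config.decoder.d_model)      # trocr 1024
print(config.decoder.vocab_size)                              # 50265
```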
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.27.0.dev0",
+   "use_cache": false
+ }
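
These defaults tell `generate()` to start decoding from token id 2 (which doubles as decoder start and end-of-sequence token) and to pad with id 1. A sketch (not part of this commit) of reading them back, assuming the repo exposes a generation_config.json as this commit does:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained('microsoft/trocr-base-handwritten')
print(gen_config.decoder_start_token_id)  # 2 -- decoding starts from </s>
print(gen_config.eos_token_id)            # 2 -- and stops at the same id
print(gen_config.pad_token_id)            # 1 -- <pad>
```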
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "size": 384
+ }
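
With `image_mean` and `image_std` both 0.5 per channel, normalization maps pixels from [0, 1] to [-1, 1]; `resample: 2` is PIL's bilinear filter. A hand-rolled equivalent, for illustration only (the processor does all of this internally):

```python
import numpy as np
import torch
from PIL import Image

def preprocess(image: Image.Image) -> torch.Tensor:
    # do_resize with size=384, resample=2 (bilinear)
    image = image.convert("RGB").resize((384, 384), Image.BILINEAR)
    x = torch.from_numpy(np.array(image, dtype=np.float32)) / 255.0  # HWC in [0, 1]
    x = x.permute(2, 0, 1)        # HWC -> CHW
    x = (x - 0.5) / 0.5           # do_normalize with mean=std=0.5 -> [-1, 1]
    return x.unsqueeze(0)         # add batch dimension
```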
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcc6345dc20003662aaa1fe2df35a20a8c36760651ec123863aa8a43f44e10cc
+ size 135
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "roberta-large", "tokenizer_class": "RobertaTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff