goatrider committed on
Commit
8da71b3
1 Parent(s): 356c949

Upload 13 files

Browse files
config.json ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "microsoft/layoutlmv3-base",
3
+ "architectures": [
4
+ "LayoutLMv3ForTokenClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "coordinate_size": 128,
10
+ "eos_token_id": 2,
11
+ "has_relative_attention_bias": true,
12
+ "has_spatial_attention_bias": true,
13
+ "hidden_act": "gelu",
14
+ "hidden_dropout_prob": 0.1,
15
+ "hidden_size": 768,
16
+ "id2label": {
17
+ "0": "num_facture",
18
+ "1": "date_facture",
19
+ "2": "fournisseur",
20
+ "3": "client",
21
+ "4": "mat_client",
22
+ "5": "mat_fournisseur",
23
+ "6": "tva",
24
+ "7": "pourcentage_tva",
25
+ "8": "remise",
26
+ "9": "pourcentage_remise",
27
+ "10": "timbre",
28
+ "11": "fodec",
29
+ "12": "ttc",
30
+ "13": "devise",
31
+ "14": "net_ht"
32
+ },
33
+ "initializer_range": 0.02,
34
+ "input_size": 224,
35
+ "intermediate_size": 3072,
36
+ "label2id": {
37
+ "client": 3,
38
+ "date_facture": 1,
39
+ "devise": 13,
40
+ "fodec": 11,
41
+ "fournisseur": 2,
42
+ "mat_client": 4,
43
+ "mat_fournisseur": 5,
44
+ "net_ht": 14,
45
+ "num_facture": 0,
46
+ "pourcentage_remise": 9,
47
+ "pourcentage_tva": 7,
48
+ "remise": 8,
49
+ "timbre": 10,
50
+ "ttc": 12,
51
+ "tva": 6
52
+ },
53
+ "layer_norm_eps": 1e-05,
54
+ "max_2d_position_embeddings": 1024,
55
+ "max_position_embeddings": 514,
56
+ "max_rel_2d_pos": 256,
57
+ "max_rel_pos": 128,
58
+ "model_type": "layoutlmv3",
59
+ "num_attention_heads": 12,
60
+ "num_channels": 3,
61
+ "num_hidden_layers": 12,
62
+ "pad_token_id": 1,
63
+ "patch_size": 16,
64
+ "rel_2d_pos_bins": 64,
65
+ "rel_pos_bins": 32,
66
+ "second_input_size": 112,
67
+ "shape_size": 128,
68
+ "text_embed": true,
69
+ "torch_dtype": "float32",
70
+ "transformers_version": "4.40.0.dev0",
71
+ "type_vocab_size": 1,
72
+ "visual_embed": true,
73
+ "vocab_size": 50265
74
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc0f097dfdb6c3b7d3e5b073492fb0f7992ee39d4f82208b28900bf1dbfea399
3
+ size 503742740
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0522ea7521893d69bc92a44eb4d020b3a2e5d21a6da10e149cbf00ff6373c7a4
3
+ size 1007607354
preprocessor_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_valid_processor_keys": [
3
+ "images",
4
+ "do_resize",
5
+ "size",
6
+ "resample",
7
+ "do_rescale",
8
+ "rescale_factor",
9
+ "do_normalize",
10
+ "image_mean",
11
+ "image_std",
12
+ "apply_ocr",
13
+ "ocr_lang",
14
+ "tesseract_config",
15
+ "return_tensors",
16
+ "data_format",
17
+ "input_data_format"
18
+ ],
19
+ "apply_ocr": false,
20
+ "do_normalize": true,
21
+ "do_rescale": true,
22
+ "do_resize": true,
23
+ "image_mean": [
24
+ 0.5,
25
+ 0.5,
26
+ 0.5
27
+ ],
28
+ "image_processor_type": "LayoutLMv3ImageProcessor",
29
+ "image_std": [
30
+ 0.5,
31
+ 0.5,
32
+ 0.5
33
+ ],
34
+ "ocr_lang": null,
35
+ "processor_class": "LayoutLMv3Processor",
36
+ "resample": 2,
37
+ "rescale_factor": 0.00392156862745098,
38
+ "size": {
39
+ "height": 224,
40
+ "width": 224
41
+ },
42
+ "tesseract_config": ""
43
+ }
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6ba0d3d706565c4cf8db886c80c7d1693a6ca4a2b79290ff9b2611394b4eb66
3
+ size 13990
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4a87e865d1e043b421a01e87164320b66af16153ba1c81841657c59d6f8e4c7
3
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": true,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<pad>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "50264": {
37
+ "content": "<mask>",
38
+ "lstrip": true,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "apply_ocr": false,
46
+ "bos_token": "<s>",
47
+ "clean_up_tokenization_spaces": true,
48
+ "cls_token": "<s>",
49
+ "cls_token_box": [
50
+ 0,
51
+ 0,
52
+ 0,
53
+ 0
54
+ ],
55
+ "eos_token": "</s>",
56
+ "errors": "replace",
57
+ "mask_token": "<mask>",
58
+ "model_max_length": 512,
59
+ "only_label_first_subword": true,
60
+ "pad_token": "<pad>",
61
+ "pad_token_box": [
62
+ 0,
63
+ 0,
64
+ 0,
65
+ 0
66
+ ],
67
+ "pad_token_label": -100,
68
+ "processor_class": "LayoutLMv3Processor",
69
+ "sep_token": "</s>",
70
+ "sep_token_box": [
71
+ 0,
72
+ 0,
73
+ 0,
74
+ 0
75
+ ],
76
+ "tokenizer_class": "LayoutLMv3Tokenizer",
77
+ "trim_offsets": true,
78
+ "unk_token": "<unk>"
79
+ }
trainer_state.json ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.55,
3
+ "best_model_checkpoint": "test\\checkpoint-1000",
4
+ "epoch": 500.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 50.0,
13
+ "eval_accuracy": 0.6,
14
+ "eval_f1": 0.55,
15
+ "eval_loss": 1.2207863330841064,
16
+ "eval_precision": 0.55,
17
+ "eval_recall": 0.55,
18
+ "eval_runtime": 4.8902,
19
+ "eval_samples_per_second": 0.409,
20
+ "eval_steps_per_second": 0.204,
21
+ "step": 100
22
+ },
23
+ {
24
+ "epoch": 100.0,
25
+ "eval_accuracy": 0.6333333333333333,
26
+ "eval_f1": 0.5365853658536585,
27
+ "eval_loss": 0.9717425107955933,
28
+ "eval_precision": 0.5238095238095238,
29
+ "eval_recall": 0.55,
30
+ "eval_runtime": 4.525,
31
+ "eval_samples_per_second": 0.442,
32
+ "eval_steps_per_second": 0.221,
33
+ "step": 200
34
+ },
35
+ {
36
+ "epoch": 150.0,
37
+ "eval_accuracy": 0.6333333333333333,
38
+ "eval_f1": 0.5365853658536585,
39
+ "eval_loss": 1.0064021348953247,
40
+ "eval_precision": 0.5238095238095238,
41
+ "eval_recall": 0.55,
42
+ "eval_runtime": 4.277,
43
+ "eval_samples_per_second": 0.468,
44
+ "eval_steps_per_second": 0.234,
45
+ "step": 300
46
+ },
47
+ {
48
+ "epoch": 200.0,
49
+ "eval_accuracy": 0.6333333333333333,
50
+ "eval_f1": 0.55,
51
+ "eval_loss": 1.0413910150527954,
52
+ "eval_precision": 0.55,
53
+ "eval_recall": 0.55,
54
+ "eval_runtime": 4.4579,
55
+ "eval_samples_per_second": 0.449,
56
+ "eval_steps_per_second": 0.224,
57
+ "step": 400
58
+ },
59
+ {
60
+ "epoch": 250.0,
61
+ "grad_norm": 0.17069780826568604,
62
+ "learning_rate": 5e-06,
63
+ "loss": 0.326,
64
+ "step": 500
65
+ },
66
+ {
67
+ "epoch": 250.0,
68
+ "eval_accuracy": 0.6,
69
+ "eval_f1": 0.4878048780487805,
70
+ "eval_loss": 1.0729484558105469,
71
+ "eval_precision": 0.47619047619047616,
72
+ "eval_recall": 0.5,
73
+ "eval_runtime": 4.4009,
74
+ "eval_samples_per_second": 0.454,
75
+ "eval_steps_per_second": 0.227,
76
+ "step": 500
77
+ },
78
+ {
79
+ "epoch": 300.0,
80
+ "eval_accuracy": 0.6333333333333333,
81
+ "eval_f1": 0.55,
82
+ "eval_loss": 1.066214919090271,
83
+ "eval_precision": 0.55,
84
+ "eval_recall": 0.55,
85
+ "eval_runtime": 4.6788,
86
+ "eval_samples_per_second": 0.427,
87
+ "eval_steps_per_second": 0.214,
88
+ "step": 600
89
+ },
90
+ {
91
+ "epoch": 350.0,
92
+ "eval_accuracy": 0.6333333333333333,
93
+ "eval_f1": 0.55,
94
+ "eval_loss": 1.075073480606079,
95
+ "eval_precision": 0.55,
96
+ "eval_recall": 0.55,
97
+ "eval_runtime": 4.6425,
98
+ "eval_samples_per_second": 0.431,
99
+ "eval_steps_per_second": 0.215,
100
+ "step": 700
101
+ },
102
+ {
103
+ "epoch": 400.0,
104
+ "eval_accuracy": 0.6333333333333333,
105
+ "eval_f1": 0.55,
106
+ "eval_loss": 1.0892070531845093,
107
+ "eval_precision": 0.55,
108
+ "eval_recall": 0.55,
109
+ "eval_runtime": 4.3673,
110
+ "eval_samples_per_second": 0.458,
111
+ "eval_steps_per_second": 0.229,
112
+ "step": 800
113
+ },
114
+ {
115
+ "epoch": 450.0,
116
+ "eval_accuracy": 0.6333333333333333,
117
+ "eval_f1": 0.55,
118
+ "eval_loss": 1.0904639959335327,
119
+ "eval_precision": 0.55,
120
+ "eval_recall": 0.55,
121
+ "eval_runtime": 4.3499,
122
+ "eval_samples_per_second": 0.46,
123
+ "eval_steps_per_second": 0.23,
124
+ "step": 900
125
+ },
126
+ {
127
+ "epoch": 500.0,
128
+ "grad_norm": 0.07940377295017242,
129
+ "learning_rate": 0.0,
130
+ "loss": 0.0113,
131
+ "step": 1000
132
+ },
133
+ {
134
+ "epoch": 500.0,
135
+ "eval_accuracy": 0.6333333333333333,
136
+ "eval_f1": 0.55,
137
+ "eval_loss": 1.093883991241455,
138
+ "eval_precision": 0.55,
139
+ "eval_recall": 0.55,
140
+ "eval_runtime": 4.4538,
141
+ "eval_samples_per_second": 0.449,
142
+ "eval_steps_per_second": 0.225,
143
+ "step": 1000
144
+ }
145
+ ],
146
+ "logging_steps": 500,
147
+ "max_steps": 1000,
148
+ "num_input_tokens_seen": 0,
149
+ "num_train_epochs": 500,
150
+ "save_steps": 500,
151
+ "total_flos": 398157811200000.0,
152
+ "train_batch_size": 2,
153
+ "trial_name": null,
154
+ "trial_params": null
155
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df720c25e7939a8756124d1b77605d929a3df1c0cb9af1020e510d6e2419e51d
3
+ size 4856
vocab.json ADDED
The diff for this file is too large to render. See raw diff