dima806 committed on
Commit 2870cde
1 Parent(s): 838b36a

Upload folder using huggingface_hub

checkpoint-3680/config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "rose",
+    "1": "astilbe",
+    "2": "carnation",
+    "3": "tulip",
+    "4": "water_lily",
+    "5": "bellflower",
+    "6": "coreopsis",
+    "7": "common_daisy",
+    "8": "iris",
+    "9": "dandelion",
+    "10": "sunflower",
+    "11": "california_poppy",
+    "12": "black_eyed_susan",
+    "13": "calendula"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "astilbe": 1,
+    "bellflower": 5,
+    "black_eyed_susan": 12,
+    "calendula": 13,
+    "california_poppy": 11,
+    "carnation": 2,
+    "common_daisy": 7,
+    "coreopsis": 6,
+    "dandelion": 9,
+    "iris": 8,
+    "rose": 0,
+    "sunflower": 10,
+    "tulip": 3,
+    "water_lily": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.1"
+}
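
This config is what transformers reads when the checkpoint is loaded. A minimal usage sketch, assuming the hypothetical Hub repo id dima806/14_flower_types_image_detection (inferred from the trainer state's best_model_checkpoint name; a local checkpoint directory works too):

from transformers import AutoModelForImageClassification

# Sketch: load the fine-tuned ViT classifier described by this config.
# The repo id is an assumption; substitute e.g. "./checkpoint-3680" if wrong.
model = AutoModelForImageClassification.from_pretrained(
    "dima806/14_flower_types_image_detection"  # hypothetical repo id
)
# transformers converts the id2label keys above to ints on load
print(model.config.id2label[0])  # -> "rose"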
checkpoint-3680/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:775a946904e66c3fd2f3e3ecbf7b200e579885d76f4fc793407215303ec17b72
+size 686642181
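
Note that optimizer.pt, like the other binary files in this commit, is stored as a Git LFS pointer: the three lines above record only the spec version, the sha256 object id, and the byte size (~687 MB of optimizer state), while the payload itself lives in LFS storage. A small illustrative parser, not part of any library:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; collect them into a dict.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:775a946904e66c3fd2f3e3ecbf7b200e579885d76f4fc793407215303ec17b72\n"
    "size 686642181\n"
)
print(parse_lfs_pointer(pointer)["size"])  # 686642181 bytes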
checkpoint-3680/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
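
A minimal preprocessing sketch matching this config (resize to 224x224, rescale by 1/255, normalize each channel to mean/std 0.5); the repo id and image path are placeholders:

from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained(
    "dima806/14_flower_types_image_detection"  # hypothetical repo id
)
inputs = processor(images=Image.open("flower.jpg"), return_tensors="pt")  # placeholder path
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])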
checkpoint-3680/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6325f60a1534037facc439d0823ae5d529fb9a95f2d69da3725d5836cd23dc05
+size 343305581
checkpoint-3680/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da3783e1ea8da69848a0daf119206b8ea39b2772aff95800e1318b7cab1a3ed3
+size 14575
checkpoint-3680/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfb0336ecd3d4e1e8534f442d3ff3aae1cd8752a4d9b55925b786c4c19f7006f
+size 627
checkpoint-3680/trainer_state.json ADDED
@@ -0,0 +1,241 @@
+{
+  "best_metric": 0.373588889837265,
+  "best_model_checkpoint": "14_flower_types_image_detection/checkpoint-3680",
+  "epoch": 20.0,
+  "eval_steps": 500,
+  "global_step": 3680,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.8660988074957411,
+      "eval_loss": 2.1635284423828125,
+      "eval_runtime": 27.4019,
+      "eval_samples_per_second": 107.109,
+      "eval_steps_per_second": 3.357,
+      "step": 184
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9533219761499149,
+      "eval_loss": 1.622511386871338,
+      "eval_runtime": 27.6072,
+      "eval_samples_per_second": 106.313,
+      "eval_steps_per_second": 3.332,
+      "step": 368
+    },
+    {
+      "epoch": 2.72,
+      "learning_rate": 4.3801652892561984e-06,
+      "loss": 1.8666,
+      "step": 500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9601362862010221,
+      "eval_loss": 1.2527281045913696,
+      "eval_runtime": 27.3116,
+      "eval_samples_per_second": 107.464,
+      "eval_steps_per_second": 3.369,
+      "step": 552
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9597955706984668,
+      "eval_loss": 1.0165150165557861,
+      "eval_runtime": 27.5451,
+      "eval_samples_per_second": 106.552,
+      "eval_steps_per_second": 3.34,
+      "step": 736
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9625212947189097,
+      "eval_loss": 0.8504080772399902,
+      "eval_runtime": 27.4761,
+      "eval_samples_per_second": 106.82,
+      "eval_steps_per_second": 3.348,
+      "step": 920
+    },
+    {
+      "epoch": 5.43,
+      "learning_rate": 3.6914600550964192e-06,
+      "loss": 0.827,
+      "step": 1000
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.9638841567291312,
+      "eval_loss": 0.7503889799118042,
+      "eval_runtime": 27.4439,
+      "eval_samples_per_second": 106.945,
+      "eval_steps_per_second": 3.352,
+      "step": 1104
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.9669505962521294,
+      "eval_loss": 0.6706383228302002,
+      "eval_runtime": 27.0271,
+      "eval_samples_per_second": 108.595,
+      "eval_steps_per_second": 3.404,
+      "step": 1288
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.9659284497444633,
+      "eval_loss": 0.6013721823692322,
+      "eval_runtime": 27.3139,
+      "eval_samples_per_second": 107.454,
+      "eval_steps_per_second": 3.368,
+      "step": 1472
+    },
+    {
+      "epoch": 8.15,
+      "learning_rate": 3.002754820936639e-06,
+      "loss": 0.5071,
+      "step": 1500
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.964565587734242,
+      "eval_loss": 0.5562598705291748,
+      "eval_runtime": 27.3473,
+      "eval_samples_per_second": 107.323,
+      "eval_steps_per_second": 3.364,
+      "step": 1656
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.9672913117546849,
+      "eval_loss": 0.5129490494728088,
+      "eval_runtime": 27.1102,
+      "eval_samples_per_second": 108.262,
+      "eval_steps_per_second": 3.394,
+      "step": 1840
+    },
+    {
+      "epoch": 10.87,
+      "learning_rate": 2.31404958677686e-06,
+      "loss": 0.3754,
+      "step": 2000
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 0.9696763202725724,
+      "eval_loss": 0.4799513518810272,
+      "eval_runtime": 27.2893,
+      "eval_samples_per_second": 107.551,
+      "eval_steps_per_second": 3.371,
+      "step": 2024
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 0.9689948892674617,
+      "eval_loss": 0.4582151174545288,
+      "eval_runtime": 27.3147,
+      "eval_samples_per_second": 107.451,
+      "eval_steps_per_second": 3.368,
+      "step": 2208
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 0.969335604770017,
+      "eval_loss": 0.434172660112381,
+      "eval_runtime": 27.0642,
+      "eval_samples_per_second": 108.446,
+      "eval_steps_per_second": 3.399,
+      "step": 2392
+    },
+    {
+      "epoch": 13.59,
+      "learning_rate": 1.62534435261708e-06,
+      "loss": 0.3068,
+      "step": 2500
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 0.9683134582623509,
+      "eval_loss": 0.41843244433403015,
+      "eval_runtime": 27.2594,
+      "eval_samples_per_second": 107.669,
+      "eval_steps_per_second": 3.375,
+      "step": 2576
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 0.969335604770017,
+      "eval_loss": 0.4030907154083252,
+      "eval_runtime": 27.2473,
+      "eval_samples_per_second": 107.717,
+      "eval_steps_per_second": 3.376,
+      "step": 2760
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 0.9683134582623509,
+      "eval_loss": 0.39235585927963257,
+      "eval_runtime": 27.1296,
+      "eval_samples_per_second": 108.184,
+      "eval_steps_per_second": 3.391,
+      "step": 2944
+    },
+    {
+      "epoch": 16.3,
+      "learning_rate": 9.366391184573004e-07,
+      "loss": 0.2686,
+      "step": 3000
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 0.9686541737649063,
+      "eval_loss": 0.3831399083137512,
+      "eval_runtime": 27.3271,
+      "eval_samples_per_second": 107.402,
+      "eval_steps_per_second": 3.367,
+      "step": 3128
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 0.9686541737649063,
+      "eval_loss": 0.3779311776161194,
+      "eval_runtime": 27.0401,
+      "eval_samples_per_second": 108.542,
+      "eval_steps_per_second": 3.402,
+      "step": 3312
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 0.9686541737649063,
+      "eval_loss": 0.3742893636226654,
+      "eval_runtime": 27.4112,
+      "eval_samples_per_second": 107.073,
+      "eval_steps_per_second": 3.356,
+      "step": 3496
+    },
+    {
+      "epoch": 19.02,
+      "learning_rate": 2.4793388429752067e-07,
+      "loss": 0.2483,
+      "step": 3500
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 0.9683134582623509,
+      "eval_loss": 0.373588889837265,
+      "eval_runtime": 27.6437,
+      "eval_samples_per_second": 106.173,
+      "eval_steps_per_second": 3.328,
+      "step": 3680
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 3680,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 1.8192426067237847e+19,
+  "trial_name": null,
+  "trial_params": null
+}
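
The log_history above is plain JSON, so the training curve can be recovered programmatically; a short sketch reading this checkpoint's state file:

import json

# Sketch: extract per-epoch eval accuracy from trainer_state.json.
with open("checkpoint-3680/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:  # skip the learning-rate/loss log entries
        print(f"epoch {entry['epoch']:>5}: accuracy {entry['eval_accuracy']:.4f}")
print("best eval_loss:", state["best_metric"])  # 0.3736 at step 3680 (epoch 20)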
checkpoint-3680/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b78fa9474feeabadc327847aadfdd1bc753b802fd7d04195e9103836e706a471
+size 4027
config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "rose",
+    "1": "astilbe",
+    "2": "carnation",
+    "3": "tulip",
+    "4": "water_lily",
+    "5": "bellflower",
+    "6": "coreopsis",
+    "7": "common_daisy",
+    "8": "iris",
+    "9": "dandelion",
+    "10": "sunflower",
+    "11": "california_poppy",
+    "12": "black_eyed_susan",
+    "13": "calendula"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "astilbe": 1,
+    "bellflower": 5,
+    "black_eyed_susan": 12,
+    "calendula": 13,
+    "california_poppy": 11,
+    "carnation": 2,
+    "common_daisy": 7,
+    "coreopsis": 6,
+    "dandelion": 9,
+    "iris": 8,
+    "rose": 0,
+    "sunflower": 10,
+    "tulip": 3,
+    "water_lily": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.1"
+}
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6325f60a1534037facc439d0823ae5d529fb9a95f2d69da3725d5836cd23dc05
+size 343305581
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b78fa9474feeabadc327847aadfdd1bc753b802fd7d04195e9103836e706a471
+size 4027
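
Taken together, the root-level config.json, preprocessor_config.json, and pytorch_model.bin form a complete image-classification model; a one-line inference sketch via the transformers pipeline (repo id assumed as above, image path a placeholder):

from transformers import pipeline

# Sketch: end-to-end inference; the repo id is an assumption, not confirmed by this commit.
classifier = pipeline(
    "image-classification",
    model="dima806/14_flower_types_image_detection",  # hypothetical repo id
)
print(classifier("flower.jpg")[:3])  # placeholder path; top-3 of the 14 flower labels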