dima806 committed on
Commit f9bfd20
Parent: c370cc0

Upload folder using huggingface_hub

checkpoint-1860/config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LIONS",
+     "1": "CARACAL",
+     "2": "AFRICAN LEOPARD",
+     "3": "CHEETAH",
+     "4": "SNOW LEOPARD",
+     "5": "TIGER",
+     "6": "OCELOT",
+     "7": "JAGUAR",
+     "8": "PUMA",
+     "9": "CLOUDED LEOPARD"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "AFRICAN LEOPARD": 2,
+     "CARACAL": 1,
+     "CHEETAH": 3,
+     "CLOUDED LEOPARD": 9,
+     "JAGUAR": 7,
+     "LIONS": 0,
+     "OCELOT": 6,
+     "PUMA": 8,
+     "SNOW LEOPARD": 4,
+     "TIGER": 5
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
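
This config pins the google/vit-base-patch16-224-in21k backbone and a ten-class wild-cat head. A minimal sketch of inspecting it with transformers, assuming checkpoint-1860 is the local directory this folder was downloaded to:

from transformers import ViTConfig

# Path is an assumption: point it at the directory holding this config.json.
config = ViTConfig.from_pretrained("checkpoint-1860")
print(config.model_type)   # "vit"
print(config.num_labels)   # 10, derived from id2label
print(config.id2label[4])  # "SNOW LEOPARD" (keys become ints on load)
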
checkpoint-1860/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a6f6a386a7c8b5aa7d89e470558ff85e6416231fe12a9457259576c10ed1791
+ size 686617605
checkpoint-1860/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
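
With do_resize, do_rescale, and do_normalize all enabled, each image is resized to 224×224, scaled by 1/255 (0.00392…), then normalized with mean and std 0.5 per channel, which maps pixel values into [-1, 1]. A minimal usage sketch; the local path and image file are assumptions:

from PIL import Image
from transformers import ViTImageProcessor

# Path is an assumption: the directory holding this preprocessor_config.json.
processor = ViTImageProcessor.from_pretrained("checkpoint-1860")
image = Image.open("cat.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
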
checkpoint-1860/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0660b0253ed2ca7f82a6f7dab750f60f9008e9dbf8ce619c270602d2b68480cb
+ size 343293293
checkpoint-1860/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00699cef5792700f0384d1f51062411f51cb6eb95a28fb3951d1681a1eff6543
+ size 14575
checkpoint-1860/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8825793177e7bc465b89838f0b1c0a781618ecfe343a33dc185241f5a3f66075
+ size 627
checkpoint-1860/trainer_state.json ADDED
@@ -0,0 +1,307 @@
+ {
+   "best_metric": 0.2785223722457886,
+   "best_model_checkpoint": "wild_cats_image_detection/checkpoint-1860",
+   "epoch": 30.0,
+   "eval_steps": 500,
+   "global_step": 1860,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7076612903225806,
+       "eval_loss": 2.0900425910949707,
+       "eval_runtime": 4.3905,
+       "eval_samples_per_second": 112.972,
+       "eval_steps_per_second": 14.122,
+       "step": 62
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9354838709677419,
+       "eval_loss": 1.6859031915664673,
+       "eval_runtime": 4.4848,
+       "eval_samples_per_second": 110.597,
+       "eval_steps_per_second": 13.825,
+       "step": 124
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 1.3213326930999756,
+       "eval_runtime": 4.3759,
+       "eval_samples_per_second": 113.349,
+       "eval_steps_per_second": 14.169,
+       "step": 186
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9637096774193549,
+       "eval_loss": 1.0679508447647095,
+       "eval_runtime": 4.3639,
+       "eval_samples_per_second": 113.659,
+       "eval_steps_per_second": 14.207,
+       "step": 248
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9737903225806451,
+       "eval_loss": 0.8888944983482361,
+       "eval_runtime": 4.3709,
+       "eval_samples_per_second": 113.478,
+       "eval_steps_per_second": 14.185,
+       "step": 310
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.9758064516129032,
+       "eval_loss": 0.753976047039032,
+       "eval_runtime": 4.3816,
+       "eval_samples_per_second": 113.202,
+       "eval_steps_per_second": 14.15,
+       "step": 372
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.9758064516129032,
+       "eval_loss": 0.6596555113792419,
+       "eval_runtime": 4.4519,
+       "eval_samples_per_second": 111.413,
+       "eval_steps_per_second": 13.927,
+       "step": 434
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.5890974998474121,
+       "eval_runtime": 4.3896,
+       "eval_samples_per_second": 112.993,
+       "eval_steps_per_second": 14.124,
+       "step": 496
+     },
+     {
+       "epoch": 8.06,
+       "learning_rate": 7.513812154696133e-06,
+       "loss": 1.1437,
+       "step": 500
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.9657258064516129,
+       "eval_loss": 0.5398248434066772,
+       "eval_runtime": 4.4625,
+       "eval_samples_per_second": 111.147,
+       "eval_steps_per_second": 13.893,
+       "step": 558
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.969758064516129,
+       "eval_loss": 0.48776522278785706,
+       "eval_runtime": 4.3566,
+       "eval_samples_per_second": 113.851,
+       "eval_steps_per_second": 14.231,
+       "step": 620
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.9778225806451613,
+       "eval_loss": 0.4477526545524597,
+       "eval_runtime": 4.3971,
+       "eval_samples_per_second": 112.802,
+       "eval_steps_per_second": 14.1,
+       "step": 682
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.9717741935483871,
+       "eval_loss": 0.421452134847641,
+       "eval_runtime": 4.3354,
+       "eval_samples_per_second": 114.406,
+       "eval_steps_per_second": 14.301,
+       "step": 744
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.9717741935483871,
+       "eval_loss": 0.3989526033401489,
+       "eval_runtime": 4.4002,
+       "eval_samples_per_second": 112.723,
+       "eval_steps_per_second": 14.09,
+       "step": 806
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.3813742995262146,
+       "eval_runtime": 4.3475,
+       "eval_samples_per_second": 114.088,
+       "eval_steps_per_second": 14.261,
+       "step": 868
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.36974093317985535,
+       "eval_runtime": 4.4027,
+       "eval_samples_per_second": 112.659,
+       "eval_steps_per_second": 14.082,
+       "step": 930
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.969758064516129,
+       "eval_loss": 0.3524600565433502,
+       "eval_runtime": 4.462,
+       "eval_samples_per_second": 111.16,
+       "eval_steps_per_second": 13.895,
+       "step": 992
+     },
+     {
+       "epoch": 16.13,
+       "learning_rate": 4.751381215469614e-06,
+       "loss": 0.3243,
+       "step": 1000
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 0.969758064516129,
+       "eval_loss": 0.3423265218734741,
+       "eval_runtime": 4.424,
+       "eval_samples_per_second": 112.117,
+       "eval_steps_per_second": 14.015,
+       "step": 1054
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.969758064516129,
+       "eval_loss": 0.3300015926361084,
+       "eval_runtime": 4.4178,
+       "eval_samples_per_second": 112.272,
+       "eval_steps_per_second": 14.034,
+       "step": 1116
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.3230130076408386,
+       "eval_runtime": 4.3371,
+       "eval_samples_per_second": 114.361,
+       "eval_steps_per_second": 14.295,
+       "step": 1178
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 0.9737903225806451,
+       "eval_loss": 0.309731662273407,
+       "eval_runtime": 4.3678,
+       "eval_samples_per_second": 113.558,
+       "eval_steps_per_second": 14.195,
+       "step": 1240
+     },
+     {
+       "epoch": 21.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.3085411489009857,
+       "eval_runtime": 4.3535,
+       "eval_samples_per_second": 113.932,
+       "eval_steps_per_second": 14.242,
+       "step": 1302
+     },
+     {
+       "epoch": 22.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.3113148510456085,
+       "eval_runtime": 4.343,
+       "eval_samples_per_second": 114.208,
+       "eval_steps_per_second": 14.276,
+       "step": 1364
+     },
+     {
+       "epoch": 23.0,
+       "eval_accuracy": 0.969758064516129,
+       "eval_loss": 0.2968937158584595,
+       "eval_runtime": 4.3579,
+       "eval_samples_per_second": 113.815,
+       "eval_steps_per_second": 14.227,
+       "step": 1426
+     },
+     {
+       "epoch": 24.0,
+       "eval_accuracy": 0.9717741935483871,
+       "eval_loss": 0.29355570673942566,
+       "eval_runtime": 4.4766,
+       "eval_samples_per_second": 110.799,
+       "eval_steps_per_second": 13.85,
+       "step": 1488
+     },
+     {
+       "epoch": 24.19,
+       "learning_rate": 1.988950276243094e-06,
+       "loss": 0.2043,
+       "step": 1500
+     },
+     {
+       "epoch": 25.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.285148561000824,
+       "eval_runtime": 4.5096,
+       "eval_samples_per_second": 109.987,
+       "eval_steps_per_second": 13.748,
+       "step": 1550
+     },
+     {
+       "epoch": 26.0,
+       "eval_accuracy": 0.967741935483871,
+       "eval_loss": 0.2845807373523712,
+       "eval_runtime": 4.3572,
+       "eval_samples_per_second": 113.836,
+       "eval_steps_per_second": 14.229,
+       "step": 1612
+     },
+     {
+       "epoch": 27.0,
+       "eval_accuracy": 0.9717741935483871,
+       "eval_loss": 0.2869494557380676,
+       "eval_runtime": 4.3648,
+       "eval_samples_per_second": 113.637,
+       "eval_steps_per_second": 14.205,
+       "step": 1674
+     },
+     {
+       "epoch": 28.0,
+       "eval_accuracy": 0.9657258064516129,
+       "eval_loss": 0.2816571891307831,
+       "eval_runtime": 4.3854,
+       "eval_samples_per_second": 113.101,
+       "eval_steps_per_second": 14.138,
+       "step": 1736
+     },
+     {
+       "epoch": 29.0,
+       "eval_accuracy": 0.9657258064516129,
+       "eval_loss": 0.2789705991744995,
+       "eval_runtime": 4.4354,
+       "eval_samples_per_second": 111.827,
+       "eval_steps_per_second": 13.978,
+       "step": 1798
+     },
+     {
+       "epoch": 30.0,
+       "eval_accuracy": 0.9657258064516129,
+       "eval_loss": 0.2785223722457886,
+       "eval_runtime": 4.4242,
+       "eval_samples_per_second": 112.112,
+       "eval_steps_per_second": 14.014,
+       "step": 1860
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1860,
+   "num_train_epochs": 30,
+   "save_steps": 500,
+   "total_flos": 4.612653932213699e+18,
+   "trial_name": null,
+   "trial_params": null
+ }
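
The schedule works out to 62 optimizer steps per epoch (62 × 30 = 1860 = global_step). Eval accuracy peaks near 0.978 at epoch 11, while eval loss, the quantity tracked by best_metric, keeps improving to 0.2785 at the final step, which is why checkpoint-1860 is marked best. A minimal sketch of reading these numbers back out of the file; the local path is an assumption:

import json

with open("checkpoint-1860/trainer_state.json") as f:  # path is an assumption
    state = json.load(f)

print(state["best_model_checkpoint"], state["best_metric"])
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
best_acc = max(evals, key=lambda e: e["eval_accuracy"])
print(best_acc["epoch"], best_acc["eval_accuracy"])  # 11.0 0.9778...
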
checkpoint-1860/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:379350d6cc5241e75a1873b11ab4ea5ccf97133b46ded85692b65ad97c537da6
+ size 4027
config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LIONS",
+     "1": "CARACAL",
+     "2": "AFRICAN LEOPARD",
+     "3": "CHEETAH",
+     "4": "SNOW LEOPARD",
+     "5": "TIGER",
+     "6": "OCELOT",
+     "7": "JAGUAR",
+     "8": "PUMA",
+     "9": "CLOUDED LEOPARD"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "AFRICAN LEOPARD": 2,
+     "CARACAL": 1,
+     "CHEETAH": 3,
+     "CLOUDED LEOPARD": 9,
+     "JAGUAR": 7,
+     "LIONS": 0,
+     "OCELOT": 6,
+     "PUMA": 8,
+     "SNOW LEOPARD": 4,
+     "TIGER": 5
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
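
This root-level config.json (identical to the checkpoint copy) together with pytorch_model.bin is what from_pretrained consumes. A minimal loading sketch; the repo id dima806/wild_cats_image_detection is inferred from the committer's namespace and the checkpoint path above, so treat it as an assumption:

from transformers import ViTForImageClassification

# Repo id is an assumption inferred from best_model_checkpoint.
model = ViTForImageClassification.from_pretrained("dima806/wild_cats_image_detection")
model.eval()
print(model.config.id2label)  # {0: "LIONS", 1: "CARACAL", ...}
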
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
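
Combining the root-level processor and model gives end-to-end inference. A minimal sketch, with the repo id and image path as assumptions:

import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

repo = "dima806/wild_cats_image_detection"  # assumed repo id
processor = ViTImageProcessor.from_pretrained(repo)
model = ViTForImageClassification.from_pretrained(repo)
model.eval()

image = Image.open("cat.jpg").convert("RGB")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(-1).item()
print(model.config.id2label[pred])
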
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0660b0253ed2ca7f82a6f7dab750f60f9008e9dbf8ce619c270602d2b68480cb
+ size 343293293
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:379350d6cc5241e75a1873b11ab4ea5ccf97133b46ded85692b65ad97c537da6
+ size 4027