rdmpage committed
Commit c9aad37
1 Parent(s): cac77a9

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,42 @@
+
+---
+tags:
+- autotrain
+- image-classification
+widget:
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+  example_title: Tiger
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+  example_title: Teapot
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+  example_title: Palace
+datasets:
+- autotrain-f8u3m-1w0uc/autotrain-data
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Image Classification
+
+## Validation Metrics
+loss: 0.588494598865509
+
+f1_macro: 0.726419878296146
+
+f1_micro: 0.7674418604651162
+
+f1_weighted: 0.7444926647483373
+
+precision_macro: 0.8743946731234866
+
+precision_micro: 0.7674418604651163
+
+precision_weighted: 0.8185567881074384
+
+recall_macro: 0.6858527131782945
+
+recall_micro: 0.7674418604651163
+
+recall_weighted: 0.7674418604651163
+
+accuracy: 0.7674418604651163
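
For reference, a minimal sketch of querying the uploaded model with the transformers `pipeline` API; the repo id `rdmpage/autotrain-f8u3m-1w0uc` is taken from `training_params.json` below, and `page.jpg` is a placeholder input path.

```python
from transformers import pipeline

# Image-classification pipeline over the uploaded checkpoint; the repo id
# comes from training_params.json in this same commit.
classifier = pipeline("image-classification", model="rdmpage/autotrain-f8u3m-1w0uc")

# "page.jpg" is a placeholder; any local image path or PIL image works.
for pred in classifier("page.jpg"):
    print(pred["label"], round(pred["score"], 3))  # labels: blank, content, end, start
```
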
checkpoint-129/config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224",
+  "_num_labels": 4,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "blank",
+    "1": "content",
+    "2": "end",
+    "3": "start"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "blank": 0,
+    "content": 1,
+    "end": 2,
+    "start": 3
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.0"
+}
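
The `id2label` / `label2id` maps above define the four page classes (blank, content, end, start). A hedged sketch of how they turn raw logits into a class name; the model classes are standard transformers APIs and `page.jpg` is a placeholder:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo = "rdmpage/autotrain-f8u3m-1w0uc"
processor = AutoImageProcessor.from_pretrained(repo)
model = AutoModelForImageClassification.from_pretrained(repo)

inputs = processor(images=Image.open("page.jpg"), return_tensors="pt")  # placeholder image
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 4): one score per label

# id2label from config.json resolves the argmax index to a class name
print(model.config.id2label[logits.argmax(-1).item()])
```
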
checkpoint-129/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6cb39e4419595735109964dd5d06982ea0141bd516e365f0c77433ea018e7bd
+size 343230128
checkpoint-129/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2704327f7419c9e23b68834ac0b1aa96c87863523a5e51b42692e0d558b8b98
+size 686581178
checkpoint-129/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f950a96f725d7ae0e18d6a68104201ecfbd07a8263205d8efe7dfa7efc33c8d6
+size 14244
checkpoint-129/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:050bd0dc3838d2e39d3b795937ac070c6d7390f04abec8a76bda56c415eb3a30
+size 1064
checkpoint-129/trainer_state.json ADDED
@@ -0,0 +1,523 @@
+{
+  "best_metric": 0.588494598865509,
+  "best_model_checkpoint": "autotrain-f8u3m-1w0uc/checkpoint-129",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 129,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.05,
+      "grad_norm": 17.909360885620117,
+      "learning_rate": 3.846153846153847e-06,
+      "loss": 1.7009,
+      "step": 2
+    },
+    {
+      "epoch": 0.09,
+      "grad_norm": 13.120195388793945,
+      "learning_rate": 7.692307692307694e-06,
+      "loss": 1.4648,
+      "step": 4
+    },
+    {
+      "epoch": 0.14,
+      "grad_norm": 14.559846878051758,
+      "learning_rate": 1.5384615384615387e-05,
+      "loss": 1.4937,
+      "step": 6
+    },
+    {
+      "epoch": 0.19,
+      "grad_norm": 8.80319881439209,
+      "learning_rate": 2.307692307692308e-05,
+      "loss": 1.1978,
+      "step": 8
+    },
+    {
+      "epoch": 0.23,
+      "grad_norm": 10.07514476776123,
+      "learning_rate": 3.0769230769230774e-05,
+      "loss": 1.2436,
+      "step": 10
+    },
+    {
+      "epoch": 0.28,
+      "grad_norm": 5.79322624206543,
+      "learning_rate": 3.846153846153846e-05,
+      "loss": 0.8964,
+      "step": 12
+    },
+    {
+      "epoch": 0.33,
+      "grad_norm": 15.918188095092773,
+      "learning_rate": 4.615384615384616e-05,
+      "loss": 1.5669,
+      "step": 14
+    },
+    {
+      "epoch": 0.37,
+      "grad_norm": 8.671540260314941,
+      "learning_rate": 4.9568965517241384e-05,
+      "loss": 0.9671,
+      "step": 16
+    },
+    {
+      "epoch": 0.42,
+      "grad_norm": 6.892113208770752,
+      "learning_rate": 4.870689655172414e-05,
+      "loss": 0.9687,
+      "step": 18
+    },
+    {
+      "epoch": 0.47,
+      "grad_norm": 6.245675563812256,
+      "learning_rate": 4.78448275862069e-05,
+      "loss": 0.5675,
+      "step": 20
+    },
+    {
+      "epoch": 0.51,
+      "grad_norm": 11.2545804977417,
+      "learning_rate": 4.698275862068966e-05,
+      "loss": 1.2117,
+      "step": 22
+    },
+    {
+      "epoch": 0.56,
+      "grad_norm": 5.111425399780273,
+      "learning_rate": 4.612068965517242e-05,
+      "loss": 0.9048,
+      "step": 24
+    },
+    {
+      "epoch": 0.6,
+      "grad_norm": 5.0768232345581055,
+      "learning_rate": 4.5258620689655176e-05,
+      "loss": 0.7902,
+      "step": 26
+    },
+    {
+      "epoch": 0.65,
+      "grad_norm": 8.407731056213379,
+      "learning_rate": 4.4396551724137933e-05,
+      "loss": 0.941,
+      "step": 28
+    },
+    {
+      "epoch": 0.7,
+      "grad_norm": 12.011809349060059,
+      "learning_rate": 4.353448275862069e-05,
+      "loss": 1.1812,
+      "step": 30
+    },
+    {
+      "epoch": 0.74,
+      "grad_norm": 6.191880702972412,
+      "learning_rate": 4.267241379310345e-05,
+      "loss": 0.7905,
+      "step": 32
+    },
+    {
+      "epoch": 0.79,
+      "grad_norm": 5.270814418792725,
+      "learning_rate": 4.1810344827586205e-05,
+      "loss": 0.7904,
+      "step": 34
+    },
+    {
+      "epoch": 0.84,
+      "grad_norm": 5.346973896026611,
+      "learning_rate": 4.094827586206897e-05,
+      "loss": 0.6126,
+      "step": 36
+    },
+    {
+      "epoch": 0.88,
+      "grad_norm": 6.431284427642822,
+      "learning_rate": 4.0086206896551726e-05,
+      "loss": 0.9431,
+      "step": 38
+    },
+    {
+      "epoch": 0.93,
+      "grad_norm": 5.295109748840332,
+      "learning_rate": 3.922413793103448e-05,
+      "loss": 0.8013,
+      "step": 40
+    },
+    {
+      "epoch": 0.98,
+      "grad_norm": 7.394813060760498,
+      "learning_rate": 3.8362068965517246e-05,
+      "loss": 0.5561,
+      "step": 42
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6511627906976745,
+      "eval_f1_macro": 0.5219298245614035,
+      "eval_f1_micro": 0.6511627906976745,
+      "eval_f1_weighted": 0.5661464708282333,
+      "eval_loss": 0.8391300439834595,
+      "eval_precision_macro": 0.5958528951486698,
+      "eval_precision_micro": 0.6511627906976745,
+      "eval_precision_weighted": 0.5547548859045747,
+      "eval_recall_macro": 0.5166666666666667,
+      "eval_recall_micro": 0.6511627906976745,
+      "eval_recall_weighted": 0.6511627906976745,
+      "eval_runtime": 0.5055,
+      "eval_samples_per_second": 170.128,
+      "eval_steps_per_second": 11.869,
+      "step": 43
+    },
+    {
+      "epoch": 1.02,
+      "grad_norm": 12.433929443359375,
+      "learning_rate": 3.7500000000000003e-05,
+      "loss": 0.8422,
+      "step": 44
+    },
+    {
+      "epoch": 1.07,
+      "grad_norm": 9.662508010864258,
+      "learning_rate": 3.663793103448276e-05,
+      "loss": 0.8562,
+      "step": 46
+    },
+    {
+      "epoch": 1.12,
+      "grad_norm": 6.504687309265137,
+      "learning_rate": 3.5775862068965524e-05,
+      "loss": 0.6381,
+      "step": 48
+    },
+    {
+      "epoch": 1.16,
+      "grad_norm": 12.690322875976562,
+      "learning_rate": 3.4913793103448275e-05,
+      "loss": 0.5752,
+      "step": 50
+    },
+    {
+      "epoch": 1.21,
+      "grad_norm": 5.9632792472839355,
+      "learning_rate": 3.405172413793103e-05,
+      "loss": 0.6849,
+      "step": 52
+    },
+    {
+      "epoch": 1.26,
+      "grad_norm": 8.237900733947754,
+      "learning_rate": 3.3189655172413796e-05,
+      "loss": 0.6422,
+      "step": 54
+    },
+    {
+      "epoch": 1.3,
+      "grad_norm": 5.977579593658447,
+      "learning_rate": 3.232758620689655e-05,
+      "loss": 0.6344,
+      "step": 56
+    },
+    {
+      "epoch": 1.35,
+      "grad_norm": 6.388967037200928,
+      "learning_rate": 3.146551724137931e-05,
+      "loss": 0.6051,
+      "step": 58
+    },
+    {
+      "epoch": 1.4,
+      "grad_norm": 6.785774230957031,
+      "learning_rate": 3.060344827586207e-05,
+      "loss": 0.5379,
+      "step": 60
+    },
+    {
+      "epoch": 1.44,
+      "grad_norm": 6.693228244781494,
+      "learning_rate": 2.974137931034483e-05,
+      "loss": 0.4483,
+      "step": 62
+    },
+    {
+      "epoch": 1.49,
+      "grad_norm": 4.33690071105957,
+      "learning_rate": 2.8879310344827588e-05,
+      "loss": 0.5161,
+      "step": 64
+    },
+    {
+      "epoch": 1.53,
+      "grad_norm": 7.381749629974365,
+      "learning_rate": 2.8017241379310345e-05,
+      "loss": 0.7829,
+      "step": 66
+    },
+    {
+      "epoch": 1.58,
+      "grad_norm": 11.129793167114258,
+      "learning_rate": 2.7155172413793105e-05,
+      "loss": 0.7715,
+      "step": 68
+    },
+    {
+      "epoch": 1.63,
+      "grad_norm": 11.77314281463623,
+      "learning_rate": 2.6293103448275862e-05,
+      "loss": 1.114,
+      "step": 70
+    },
+    {
+      "epoch": 1.67,
+      "grad_norm": 7.2926130294799805,
+      "learning_rate": 2.543103448275862e-05,
+      "loss": 0.938,
+      "step": 72
+    },
+    {
+      "epoch": 1.72,
+      "grad_norm": 5.774272441864014,
+      "learning_rate": 2.456896551724138e-05,
+      "loss": 0.7925,
+      "step": 74
+    },
+    {
+      "epoch": 1.77,
+      "grad_norm": 9.42758560180664,
+      "learning_rate": 2.370689655172414e-05,
+      "loss": 0.6834,
+      "step": 76
+    },
+    {
+      "epoch": 1.81,
+      "grad_norm": 7.223062038421631,
+      "learning_rate": 2.2844827586206897e-05,
+      "loss": 0.8257,
+      "step": 78
+    },
+    {
+      "epoch": 1.86,
+      "grad_norm": 9.974703788757324,
+      "learning_rate": 2.1982758620689654e-05,
+      "loss": 0.5291,
+      "step": 80
+    },
+    {
+      "epoch": 1.91,
+      "grad_norm": 11.898812294006348,
+      "learning_rate": 2.1120689655172415e-05,
+      "loss": 0.8295,
+      "step": 82
+    },
+    {
+      "epoch": 1.95,
+      "grad_norm": 5.932551383972168,
+      "learning_rate": 2.0258620689655172e-05,
+      "loss": 0.5594,
+      "step": 84
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 8.01093864440918,
+      "learning_rate": 1.9396551724137932e-05,
+      "loss": 0.7166,
+      "step": 86
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7790697674418605,
+      "eval_f1_macro": 0.7326664355062412,
+      "eval_f1_micro": 0.7790697674418605,
+      "eval_f1_weighted": 0.7589375221752733,
+      "eval_loss": 0.6218534708023071,
+      "eval_precision_macro": 0.8907051282051283,
+      "eval_precision_micro": 0.7790697674418605,
+      "eval_precision_weighted": 0.831499701848539,
+      "eval_recall_macro": 0.6805555555555556,
+      "eval_recall_micro": 0.7790697674418605,
+      "eval_recall_weighted": 0.7790697674418605,
+      "eval_runtime": 0.4535,
+      "eval_samples_per_second": 189.622,
+      "eval_steps_per_second": 13.229,
+      "step": 86
+    },
+    {
+      "epoch": 2.05,
+      "grad_norm": 4.2975664138793945,
+      "learning_rate": 1.8534482758620693e-05,
+      "loss": 0.5073,
+      "step": 88
+    },
+    {
+      "epoch": 2.09,
+      "grad_norm": 8.290116310119629,
+      "learning_rate": 1.767241379310345e-05,
+      "loss": 0.606,
+      "step": 90
+    },
+    {
+      "epoch": 2.14,
+      "grad_norm": 5.366226673126221,
+      "learning_rate": 1.6810344827586207e-05,
+      "loss": 0.4448,
+      "step": 92
+    },
+    {
+      "epoch": 2.19,
+      "grad_norm": 6.743834495544434,
+      "learning_rate": 1.5948275862068967e-05,
+      "loss": 0.7529,
+      "step": 94
+    },
+    {
+      "epoch": 2.23,
+      "grad_norm": 6.312230587005615,
+      "learning_rate": 1.5086206896551724e-05,
+      "loss": 0.662,
+      "step": 96
+    },
+    {
+      "epoch": 2.28,
+      "grad_norm": 9.200089454650879,
+      "learning_rate": 1.4224137931034485e-05,
+      "loss": 0.9112,
+      "step": 98
+    },
+    {
+      "epoch": 2.33,
+      "grad_norm": 3.5302746295928955,
+      "learning_rate": 1.336206896551724e-05,
+      "loss": 0.2405,
+      "step": 100
+    },
+    {
+      "epoch": 2.37,
+      "grad_norm": 13.742111206054688,
+      "learning_rate": 1.25e-05,
+      "loss": 0.5303,
+      "step": 102
+    },
+    {
+      "epoch": 2.42,
+      "grad_norm": 8.292695045471191,
+      "learning_rate": 1.163793103448276e-05,
+      "loss": 0.5544,
+      "step": 104
+    },
+    {
+      "epoch": 2.47,
+      "grad_norm": 9.335094451904297,
+      "learning_rate": 1.0775862068965516e-05,
+      "loss": 0.7992,
+      "step": 106
+    },
+    {
+      "epoch": 2.51,
+      "grad_norm": 4.947837829589844,
+      "learning_rate": 9.913793103448277e-06,
+      "loss": 0.4896,
+      "step": 108
+    },
+    {
+      "epoch": 2.56,
+      "grad_norm": 11.157235145568848,
+      "learning_rate": 9.051724137931036e-06,
+      "loss": 0.6746,
+      "step": 110
+    },
+    {
+      "epoch": 2.6,
+      "grad_norm": 10.61878776550293,
+      "learning_rate": 8.189655172413793e-06,
+      "loss": 0.6722,
+      "step": 112
+    },
+    {
+      "epoch": 2.65,
+      "grad_norm": 4.7017621994018555,
+      "learning_rate": 7.3275862068965514e-06,
+      "loss": 0.5214,
+      "step": 114
+    },
+    {
+      "epoch": 2.7,
+      "grad_norm": 3.746359348297119,
+      "learning_rate": 6.465517241379311e-06,
+      "loss": 0.592,
+      "step": 116
+    },
+    {
+      "epoch": 2.74,
+      "grad_norm": 5.942258358001709,
+      "learning_rate": 5.603448275862069e-06,
+      "loss": 0.5589,
+      "step": 118
+    },
+    {
+      "epoch": 2.79,
+      "grad_norm": 13.716927528381348,
+      "learning_rate": 4.741379310344828e-06,
+      "loss": 1.0046,
+      "step": 120
+    },
+    {
+      "epoch": 2.84,
+      "grad_norm": 3.577460765838623,
+      "learning_rate": 3.8793103448275865e-06,
+      "loss": 0.4807,
+      "step": 122
+    },
+    {
+      "epoch": 2.88,
+      "grad_norm": 6.835740566253662,
+      "learning_rate": 3.017241379310345e-06,
+      "loss": 0.5735,
+      "step": 124
+    },
+    {
+      "epoch": 2.93,
+      "grad_norm": 7.47273588180542,
+      "learning_rate": 2.1551724137931035e-06,
+      "loss": 0.569,
+      "step": 126
+    },
+    {
+      "epoch": 2.98,
+      "grad_norm": 6.011229991912842,
+      "learning_rate": 1.293103448275862e-06,
+      "loss": 0.3485,
+      "step": 128
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7674418604651163,
+      "eval_f1_macro": 0.726419878296146,
+      "eval_f1_micro": 0.7674418604651162,
+      "eval_f1_weighted": 0.7444926647483373,
+      "eval_loss": 0.588494598865509,
+      "eval_precision_macro": 0.8743946731234866,
+      "eval_precision_micro": 0.7674418604651163,
+      "eval_precision_weighted": 0.8185567881074384,
+      "eval_recall_macro": 0.6858527131782945,
+      "eval_recall_micro": 0.7674418604651163,
+      "eval_recall_weighted": 0.7674418604651163,
+      "eval_runtime": 0.4559,
+      "eval_samples_per_second": 188.618,
+      "eval_steps_per_second": 13.159,
+      "step": 129
+    }
+  ],
+  "logging_steps": 2,
+  "max_steps": 129,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 7.97406866688983e+16,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
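
`trainer_state.json` mixes two kinds of `log_history` records: training steps (loss, learning_rate, grad_norm) and per-epoch evaluations (the `eval_*` keys). A small sketch of pulling out the eval curve, assuming the file has been downloaded locally:

```python
import json

with open("checkpoint-129/trainer_state.json") as f:
    state = json.load(f)

# Eval records are the ones carrying eval_* keys; plain training-step
# records only log loss, learning_rate and grad_norm.
for rec in state["log_history"]:
    if "eval_accuracy" in rec:
        print(f"epoch {rec['epoch']}: "
              f"acc={rec['eval_accuracy']:.3f} loss={rec['eval_loss']:.3f}")
# Prints acc 0.651, 0.779, 0.767 for epochs 1-3. Note best_model_checkpoint
# is the epoch-3 checkpoint because best_metric tracks eval_loss (0.588 is
# the minimum), even though epoch 2 had the higher accuracy.
```
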
checkpoint-129/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:373699646c5eae73c7a22630d3eb2d7d5b4ced18a12160daa1913d5e140bdfec
+size 4920
config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224",
+  "_num_labels": 4,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "blank",
+    "1": "content",
+    "2": "end",
+    "3": "start"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "blank": 0,
+    "content": 1,
+    "end": 2,
+    "start": 3
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.0"
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6cb39e4419595735109964dd5d06982ea0141bd516e365f0c77433ea018e7bd
+size 343230128
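
The three lines above are a Git LFS pointer, not the weights themselves; the ~343 MB safetensors payload lives in LFS storage. A sketch of fetching the real file with `huggingface_hub`:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual ~343 MB weights file.
local_path = hf_hub_download(
    repo_id="rdmpage/autotrain-f8u3m-1w0uc",
    filename="model.safetensors",
)
print(local_path)
```
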
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+{
+  "_valid_processor_keys": [
+    "images",
+    "do_resize",
+    "size",
+    "resample",
+    "do_rescale",
+    "rescale_factor",
+    "do_normalize",
+    "image_mean",
+    "image_std",
+    "return_tensors",
+    "data_format",
+    "input_data_format"
+  ],
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
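
The preprocessor config above boils down to: resize to 224×224 with bilinear resampling (`resample: 2`), rescale pixel values by 1/255, then normalize each channel with mean 0.5 and std 0.5. A hedged sketch of the equivalent manual transform, which ViTImageProcessor performs internally:

```python
import numpy as np
from PIL import Image

def preprocess(path: str) -> np.ndarray:
    """Mirror preprocessor_config.json by hand: resize, rescale, normalize."""
    img = Image.open(path).convert("RGB")
    img = img.resize((224, 224), Image.BILINEAR)  # "resample": 2 is PIL bilinear
    x = np.asarray(img, dtype=np.float32)
    x = x * 0.00392156862745098                   # "rescale_factor": 1/255
    x = (x - 0.5) / 0.5                           # "image_mean" / "image_std": 0.5
    return x.transpose(2, 0, 1)[np.newaxis]       # HWC -> NCHW batch of one
```
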
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:373699646c5eae73c7a22630d3eb2d7d5b4ced18a12160daa1913d5e140bdfec
+size 4920
training_params.json ADDED
@@ -0,0 +1,29 @@
+{
+  "data_path": "autotrain-f8u3m-1w0uc/autotrain-data",
+  "model": "google/vit-base-patch16-224",
+  "username": "rdmpage",
+  "lr": 5e-05,
+  "epochs": 3,
+  "batch_size": 8,
+  "warmup_ratio": 0.1,
+  "gradient_accumulation": 1,
+  "optimizer": "adamw_torch",
+  "scheduler": "linear",
+  "weight_decay": 0.0,
+  "max_grad_norm": 1.0,
+  "seed": 42,
+  "train_split": "train",
+  "valid_split": "validation",
+  "logging_steps": -1,
+  "project_name": "autotrain-f8u3m-1w0uc",
+  "auto_find_batch_size": false,
+  "mixed_precision": "fp16",
+  "save_total_limit": 1,
+  "save_strategy": "epoch",
+  "push_to_hub": true,
+  "repo_id": "rdmpage/autotrain-f8u3m-1w0uc",
+  "evaluation_strategy": "epoch",
+  "image_column": "autotrain_image",
+  "target_column": "autotrain_label",
+  "log": "none"
+}
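
These parameters match the schedule visible in `trainer_state.json` above: with `warmup_ratio` 0.1 over 129 total steps, the linear scheduler warms the learning rate up to 5e-05 and then decays it to zero. A sketch of the arithmetic, assuming the usual HF Trainer formula of `ceil(warmup_ratio * max_steps)`:

```python
import json
import math

params = json.load(open("training_params.json"))

max_steps = 129  # global_step / max_steps from trainer_state.json above
warmup_steps = math.ceil(params["warmup_ratio"] * max_steps)

print(warmup_steps, params["lr"])  # 13 5e-05
# Consistent with the log: learning_rate climbs through step 14 and is
# already decaying from its 5e-05 peak by step 16.
```
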