yuanhuaisen committed
Commit fe20aab
1 Parent(s): bb30058

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,42 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+ example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+ example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+ example_title: Palace
+ datasets:
+ - yuanhuaisen/autotrain-data-autotrain-9oj9k-0pndc
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.5109696984291077
+
+ f1_macro: 0.7355182828867041
+
+ f1_micro: 0.7840909090909092
+
+ f1_weighted: 0.7828294512505038
+
+ precision_macro: 0.7308866944925176
+
+ precision_micro: 0.7840909090909091
+
+ precision_weighted: 0.782664525741997
+
+ recall_macro: 0.7416666666666667
+
+ recall_micro: 0.7840909090909091
+
+ recall_weighted: 0.7840909090909091
+
+ accuracy: 0.7840909090909091
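
The model card above reports validation metrics but no usage snippet. A minimal inference sketch, assuming the published repo id `yuanhuaisen/autotrain-9oj9k-0pndc` (taken from `training_params.json` below) and a placeholder local image path:

```python
# Minimal sketch: classify one image with the uploaded model.
# Repo id comes from training_params.json; "bed.jpg" is a placeholder path.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="yuanhuaisen/autotrain-9oj9k-0pndc",
)

# Returns the three labels from config.json, each with a confidence score.
print(classifier("bed.jpg"))
```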
checkpoint-132/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "_name_or_path": "google/vit-large-patch16-224",
+ "_num_labels": 3,
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "11covered_with_a_quilt_and_only_the_head_exposed",
+ "1": "12covered_with_a_quilt_and_exposed_other_parts_of_the_body",
+ "2": "13has_nothing_to_do_with_11_and_12_above"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "11covered_with_a_quilt_and_only_the_head_exposed": 0,
+ "12covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1,
+ "13has_nothing_to_do_with_11_and_12_above": 2
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.36.1"
+ }
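
The checkpoint config records the three-class label mapping and the ViT-Large geometry the classifier head was trained with. A small sketch for inspecting it, assuming the `checkpoint-132/` directory from this commit is available locally:

```python
# Sketch: read back the label mapping and model geometry from the checkpoint config.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("checkpoint-132")
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # vit 1024 24
for idx, label in sorted(config.id2label.items()):
    print(idx, label)
```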
checkpoint-132/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7be0f498bb157517206317b4007334e962ca308a5a95fa5bbe9c262fb392769c
+ size 1213265372
checkpoint-132/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa083d213e22891385f9a2cce1faa92de1166a81e355b852efed81c3f111aa0f
+ size 2426765738
checkpoint-132/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8994f3d3e249c8292e91c23bd8d9c2312387b23ed25a651fd66caed4077b0196
+ size 14244
checkpoint-132/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e1fed2c440c84245eb85b6342c3d4bddd2342803cc2bb3a5c1416ed23dcf458
+ size 1064
checkpoint-132/trainer_state.json ADDED
@@ -0,0 +1,471 @@
+ {
+ "best_metric": 0.5109696984291077,
+ "best_model_checkpoint": "/tmp/model/checkpoint-132",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 132,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.05,
+ "learning_rate": 7.142857142857143e-06,
+ "loss": 1.0986,
+ "step": 2
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.0714285714285714e-05,
+ "loss": 1.0986,
+ "step": 4
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.785714285714286e-05,
+ "loss": 1.0981,
+ "step": 6
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 2.5e-05,
+ "loss": 1.0948,
+ "step": 8
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 3.2142857142857144e-05,
+ "loss": 1.0962,
+ "step": 10
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 3.928571428571429e-05,
+ "loss": 1.0926,
+ "step": 12
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.642857142857143e-05,
+ "loss": 1.0908,
+ "step": 14
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.957627118644068e-05,
+ "loss": 1.086,
+ "step": 16
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.8728813559322034e-05,
+ "loss": 1.0706,
+ "step": 18
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.788135593220339e-05,
+ "loss": 1.0397,
+ "step": 20
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.703389830508475e-05,
+ "loss": 1.0251,
+ "step": 22
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.6186440677966104e-05,
+ "loss": 1.032,
+ "step": 24
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.533898305084746e-05,
+ "loss": 0.9616,
+ "step": 26
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 4.4491525423728816e-05,
+ "loss": 0.8914,
+ "step": 28
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 4.3644067796610175e-05,
+ "loss": 0.9912,
+ "step": 30
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.279661016949153e-05,
+ "loss": 0.8628,
+ "step": 32
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.1949152542372886e-05,
+ "loss": 0.9583,
+ "step": 34
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.110169491525424e-05,
+ "loss": 0.9837,
+ "step": 36
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.025423728813559e-05,
+ "loss": 0.9641,
+ "step": 38
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.940677966101695e-05,
+ "loss": 0.8973,
+ "step": 40
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.855932203389831e-05,
+ "loss": 0.8762,
+ "step": 42
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.771186440677966e-05,
+ "loss": 0.8489,
+ "step": 44
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.7727272727272727,
+ "eval_f1_macro": 0.7295238095238096,
+ "eval_f1_micro": 0.7727272727272727,
+ "eval_f1_weighted": 0.7701298701298701,
+ "eval_loss": 0.7866544127464294,
+ "eval_precision_macro": 0.7356589147286821,
+ "eval_precision_micro": 0.7727272727272727,
+ "eval_precision_weighted": 0.7901691331923889,
+ "eval_recall_macro": 0.7541666666666668,
+ "eval_recall_micro": 0.7727272727272727,
+ "eval_recall_weighted": 0.7727272727272727,
+ "eval_runtime": 3.048,
+ "eval_samples_per_second": 28.871,
+ "eval_steps_per_second": 1.968,
+ "step": 44
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.686440677966102e-05,
+ "loss": 0.7592,
+ "step": 46
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.601694915254237e-05,
+ "loss": 0.7081,
+ "step": 48
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.516949152542373e-05,
+ "loss": 0.6742,
+ "step": 50
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.432203389830508e-05,
+ "loss": 0.8392,
+ "step": 52
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 3.347457627118644e-05,
+ "loss": 0.549,
+ "step": 54
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.26271186440678e-05,
+ "loss": 0.7988,
+ "step": 56
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 3.177966101694915e-05,
+ "loss": 0.6954,
+ "step": 58
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 3.093220338983051e-05,
+ "loss": 0.703,
+ "step": 60
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 3.0084745762711864e-05,
+ "loss": 0.538,
+ "step": 62
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.9237288135593223e-05,
+ "loss": 0.6245,
+ "step": 64
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.838983050847458e-05,
+ "loss": 0.6417,
+ "step": 66
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.754237288135593e-05,
+ "loss": 0.5867,
+ "step": 68
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.669491525423729e-05,
+ "loss": 0.7802,
+ "step": 70
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.5847457627118642e-05,
+ "loss": 0.5801,
+ "step": 72
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.5e-05,
+ "loss": 0.6174,
+ "step": 74
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 2.4152542372881357e-05,
+ "loss": 0.6729,
+ "step": 76
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 2.3305084745762712e-05,
+ "loss": 0.6024,
+ "step": 78
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 2.245762711864407e-05,
+ "loss": 0.6754,
+ "step": 80
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 2.1610169491525427e-05,
+ "loss": 0.5676,
+ "step": 82
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 2.076271186440678e-05,
+ "loss": 0.7306,
+ "step": 84
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.9915254237288135e-05,
+ "loss": 0.8223,
+ "step": 86
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.906779661016949e-05,
+ "loss": 0.564,
+ "step": 88
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.7727272727272727,
+ "eval_f1_macro": 0.7458886780518658,
+ "eval_f1_micro": 0.7727272727272727,
+ "eval_f1_weighted": 0.7822652521419123,
+ "eval_loss": 0.5731395483016968,
+ "eval_precision_macro": 0.8016726403823178,
+ "eval_precision_micro": 0.7727272727272727,
+ "eval_precision_weighted": 0.8294232649071358,
+ "eval_recall_macro": 0.7444444444444445,
+ "eval_recall_micro": 0.7727272727272727,
+ "eval_recall_weighted": 0.7727272727272727,
+ "eval_runtime": 3.0409,
+ "eval_samples_per_second": 28.939,
+ "eval_steps_per_second": 1.973,
+ "step": 88
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.8220338983050846e-05,
+ "loss": 0.671,
+ "step": 90
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.7372881355932205e-05,
+ "loss": 0.6541,
+ "step": 92
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 1.652542372881356e-05,
+ "loss": 0.5452,
+ "step": 94
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 1.5677966101694916e-05,
+ "loss": 0.5264,
+ "step": 96
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 1.4830508474576272e-05,
+ "loss": 0.5391,
+ "step": 98
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 1.3983050847457627e-05,
+ "loss": 0.4773,
+ "step": 100
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 1.3135593220338985e-05,
+ "loss": 0.529,
+ "step": 102
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 1.228813559322034e-05,
+ "loss": 0.4235,
+ "step": 104
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 1.1440677966101696e-05,
+ "loss": 0.5401,
+ "step": 106
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 1.0593220338983052e-05,
+ "loss": 0.4456,
+ "step": 108
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 9.745762711864407e-06,
+ "loss": 0.3653,
+ "step": 110
+ },
+ {
+ "epoch": 2.55,
+ "learning_rate": 8.898305084745763e-06,
+ "loss": 0.4332,
+ "step": 112
+ },
+ {
+ "epoch": 2.59,
+ "learning_rate": 8.050847457627118e-06,
+ "loss": 0.3547,
+ "step": 114
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 7.203389830508475e-06,
+ "loss": 0.534,
+ "step": 116
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 6.3559322033898304e-06,
+ "loss": 0.3844,
+ "step": 118
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 5.508474576271187e-06,
+ "loss": 0.4134,
+ "step": 120
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 4.6610169491525425e-06,
+ "loss": 0.5368,
+ "step": 122
+ },
+ {
+ "epoch": 2.82,
+ "learning_rate": 3.813559322033899e-06,
+ "loss": 0.3829,
+ "step": 124
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 2.9661016949152545e-06,
+ "loss": 0.5328,
+ "step": 126
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 2.11864406779661e-06,
+ "loss": 0.2779,
+ "step": 128
+ },
+ {
+ "epoch": 2.95,
+ "learning_rate": 1.2711864406779662e-06,
+ "loss": 0.3616,
+ "step": 130
+ },
+ {
+ "epoch": 3.0,
+ "learning_rate": 4.2372881355932204e-07,
+ "loss": 0.4816,
+ "step": 132
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.7840909090909091,
+ "eval_f1_macro": 0.7355182828867041,
+ "eval_f1_micro": 0.7840909090909092,
+ "eval_f1_weighted": 0.7828294512505038,
+ "eval_loss": 0.5109696984291077,
+ "eval_precision_macro": 0.7308866944925176,
+ "eval_precision_micro": 0.7840909090909091,
+ "eval_precision_weighted": 0.782664525741997,
+ "eval_recall_macro": 0.7416666666666667,
+ "eval_recall_micro": 0.7840909090909091,
+ "eval_recall_weighted": 0.7840909090909091,
+ "eval_runtime": 3.1235,
+ "eval_samples_per_second": 28.173,
+ "eval_steps_per_second": 1.921,
+ "step": 132
+ }
+ ],
+ "logging_steps": 2,
+ "max_steps": 132,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "total_flos": 2.8927547152603546e+17,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
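
The `learning_rate` entries above trace a linear schedule with warmup: roughly 10% of the 132 optimizer steps ramp up to the 5e-05 peak, then the rate decays linearly to near zero by step 132. A sketch of that shape using the standard `transformers` scheduler (the exact scheduler class AutoTrain instantiates is inferred from `training_params.json`, and the logged values can differ from this sketch by one scheduler step depending on when the Trainer records them):

```python
# Sketch: linear warmup/decay schedule matching lr=5e-05, warmup_ratio=0.1,
# and 132 total optimizer steps, as recorded in training_params.json.
import math
import torch
from transformers import get_linear_schedule_with_warmup

total_steps = 132
warmup_steps = math.ceil(0.1 * total_steps)  # 14

# Dummy parameter and optimizer, used only to drive the scheduler.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=5e-05)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, total_steps)

for step in range(1, total_steps + 1):
    optimizer.step()
    scheduler.step()
    if step % 2 == 0:  # trainer_state.json logs every 2 steps
        print(step, scheduler.get_last_lr()[0])
```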
checkpoint-132/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5688e460044b608d6b5b7ffd3b5af0b0c6eb7a5a53a7689f5b2a366cd47a75a7
+ size 4728
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "_name_or_path": "google/vit-large-patch16-224",
+ "_num_labels": 3,
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "11covered_with_a_quilt_and_only_the_head_exposed",
+ "1": "12covered_with_a_quilt_and_exposed_other_parts_of_the_body",
+ "2": "13has_nothing_to_do_with_11_and_12_above"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "11covered_with_a_quilt_and_only_the_head_exposed": 0,
+ "12covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1,
+ "13has_nothing_to_do_with_11_and_12_above": 2
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.36.1"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7be0f498bb157517206317b4007334e962ca308a5a95fa5bbe9c262fb392769c
+ size 1213265372
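
The three-line entries for `model.safetensors`, `optimizer.pt`, and the other binaries are git-lfs pointer stubs (spec version, sha256 oid, byte size); the ~1.2 GB weight file itself lives in LFS storage. A sketch of fetching the real file, assuming the repo id recorded in `training_params.json`:

```python
# Sketch: resolve the LFS pointer by downloading the actual weight file.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="yuanhuaisen/autotrain-9oj9k-0pndc",
    filename="model.safetensors",
)
print(local_path)  # local cache path of the ~1.2 GB safetensors file
```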
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
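
In this preprocessor config, `rescale_factor` 0.00392156862745098 is 1/255, and with per-channel mean and std of 0.5 the pixel values land in [-1, 1] after resizing to 224x224. A usage sketch, assuming the published repo id and a placeholder image path:

```python
# Sketch: apply the preprocessing defined above (resize 224x224, rescale 1/255,
# normalize with mean=std=0.5) to one image.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("yuanhuaisen/autotrain-9oj9k-0pndc")
image = Image.open("bed.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]), values in [-1, 1]
```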
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5688e460044b608d6b5b7ffd3b5af0b0c6eb7a5a53a7689f5b2a366cd47a75a7
+ size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "data_path": "yuanhuaisen/autotrain-data-autotrain-9oj9k-0pndc",
+ "model": "google/vit-large-patch16-224",
+ "username": "yuanhuaisen",
+ "lr": 5e-05,
+ "epochs": 3,
+ "batch_size": 8,
+ "warmup_ratio": 0.1,
+ "gradient_accumulation": 1,
+ "optimizer": "adamw_torch",
+ "scheduler": "linear",
+ "weight_decay": 0.0,
+ "max_grad_norm": 1.0,
+ "seed": 42,
+ "train_split": "train",
+ "valid_split": "validation",
+ "logging_steps": -1,
+ "project_name": "/tmp/model",
+ "auto_find_batch_size": false,
+ "mixed_precision": "fp16",
+ "save_total_limit": 1,
+ "save_strategy": "epoch",
+ "push_to_hub": true,
+ "repo_id": "yuanhuaisen/autotrain-9oj9k-0pndc",
+ "evaluation_strategy": "epoch",
+ "image_column": "autotrain_image",
+ "target_column": "autotrain_label",
+ "log": "none"
+ }
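
These AutoTrain parameters map directly onto the standard `transformers` Trainer knobs. A rough equivalent expressed as `TrainingArguments`, shown only to make that mapping explicit (AutoTrain builds its own arguments internally; this is a sketch, not its actual code):

```python
# Sketch: training_params.json values restated as transformers TrainingArguments.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="/tmp/model",               # project_name
    learning_rate=5e-05,                   # lr
    num_train_epochs=3,                    # epochs
    per_device_train_batch_size=8,         # batch_size
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,         # gradient_accumulation
    optim="adamw_torch",                   # optimizer
    lr_scheduler_type="linear",            # scheduler
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                             # mixed_precision: "fp16"
    save_total_limit=1,
    save_strategy="epoch",
    evaluation_strategy="epoch",
    logging_steps=2,                       # logging_steps=-1 resolved to 2 in trainer_state.json
    push_to_hub=True,
    hub_model_id="yuanhuaisen/autotrain-9oj9k-0pndc",  # repo_id
)
```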