{
"best_metric": 0.9855,
"best_model_checkpoint": "vit-base-patch16-224-in21k-finetuned-cifar10/checkpoint-1170",
"epoch": 2.998080614203455,
"global_step": 1170,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.273504273504274e-06,
"loss": 2.3074,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 8.547008547008548e-06,
"loss": 2.2947,
"step": 20
},
{
"epoch": 0.08,
"learning_rate": 1.282051282051282e-05,
"loss": 2.2588,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 1.7094017094017095e-05,
"loss": 2.2041,
"step": 40
},
{
"epoch": 0.13,
"learning_rate": 2.1367521367521368e-05,
"loss": 2.1221,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 2.564102564102564e-05,
"loss": 2.0014,
"step": 60
},
{
"epoch": 0.18,
"learning_rate": 2.9914529914529915e-05,
"loss": 1.8365,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 3.418803418803419e-05,
"loss": 1.6374,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 3.846153846153846e-05,
"loss": 1.438,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 4.2735042735042735e-05,
"loss": 1.2628,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 4.700854700854701e-05,
"loss": 1.0934,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 4.985754985754986e-05,
"loss": 0.9678,
"step": 120
},
{
"epoch": 0.33,
"learning_rate": 4.938271604938271e-05,
"loss": 0.8619,
"step": 130
},
{
"epoch": 0.36,
"learning_rate": 4.890788224121557e-05,
"loss": 0.837,
"step": 140
},
{
"epoch": 0.38,
"learning_rate": 4.8433048433048433e-05,
"loss": 0.7366,
"step": 150
},
{
"epoch": 0.41,
"learning_rate": 4.7958214624881294e-05,
"loss": 0.6799,
"step": 160
},
{
"epoch": 0.44,
"learning_rate": 4.7483380816714154e-05,
"loss": 0.6741,
"step": 170
},
{
"epoch": 0.46,
"learning_rate": 4.700854700854701e-05,
"loss": 0.6262,
"step": 180
},
{
"epoch": 0.49,
"learning_rate": 4.653371320037987e-05,
"loss": 0.6355,
"step": 190
},
{
"epoch": 0.51,
"learning_rate": 4.605887939221273e-05,
"loss": 0.5781,
"step": 200
},
{
"epoch": 0.54,
"learning_rate": 4.558404558404559e-05,
"loss": 0.5671,
"step": 210
},
{
"epoch": 0.56,
"learning_rate": 4.510921177587845e-05,
"loss": 0.5192,
"step": 220
},
{
"epoch": 0.59,
"learning_rate": 4.463437796771131e-05,
"loss": 0.5498,
"step": 230
},
{
"epoch": 0.61,
"learning_rate": 4.415954415954416e-05,
"loss": 0.507,
"step": 240
},
{
"epoch": 0.64,
"learning_rate": 4.368471035137702e-05,
"loss": 0.5239,
"step": 250
},
{
"epoch": 0.67,
"learning_rate": 4.3209876543209875e-05,
"loss": 0.4586,
"step": 260
},
{
"epoch": 0.69,
"learning_rate": 4.2735042735042735e-05,
"loss": 0.4815,
"step": 270
},
{
"epoch": 0.72,
"learning_rate": 4.2260208926875595e-05,
"loss": 0.4656,
"step": 280
},
{
"epoch": 0.74,
"learning_rate": 4.1785375118708455e-05,
"loss": 0.4937,
"step": 290
},
{
"epoch": 0.77,
"learning_rate": 4.131054131054131e-05,
"loss": 0.4649,
"step": 300
},
{
"epoch": 0.79,
"learning_rate": 4.083570750237417e-05,
"loss": 0.4279,
"step": 310
},
{
"epoch": 0.82,
"learning_rate": 4.036087369420703e-05,
"loss": 0.4736,
"step": 320
},
{
"epoch": 0.84,
"learning_rate": 3.988603988603989e-05,
"loss": 0.4434,
"step": 330
},
{
"epoch": 0.87,
"learning_rate": 3.941120607787275e-05,
"loss": 0.4459,
"step": 340
},
{
"epoch": 0.9,
"learning_rate": 3.893637226970561e-05,
"loss": 0.4307,
"step": 350
},
{
"epoch": 0.92,
"learning_rate": 3.846153846153846e-05,
"loss": 0.4029,
"step": 360
},
{
"epoch": 0.95,
"learning_rate": 3.798670465337132e-05,
"loss": 0.4007,
"step": 370
},
{
"epoch": 0.97,
"learning_rate": 3.7511870845204176e-05,
"loss": 0.4279,
"step": 380
},
{
"epoch": 1.0,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.3831,
"step": 390
},
{
"epoch": 1.0,
"eval_accuracy": 0.978,
"eval_loss": 0.20566777884960175,
"eval_runtime": 152.329,
"eval_samples_per_second": 65.647,
"eval_steps_per_second": 2.055,
"step": 390
},
{
"epoch": 1.03,
"learning_rate": 3.65622032288699e-05,
"loss": 0.4258,
"step": 400
},
{
"epoch": 1.05,
"learning_rate": 3.608736942070276e-05,
"loss": 0.3416,
"step": 410
},
{
"epoch": 1.08,
"learning_rate": 3.561253561253561e-05,
"loss": 0.3881,
"step": 420
},
{
"epoch": 1.1,
"learning_rate": 3.513770180436847e-05,
"loss": 0.3453,
"step": 430
},
{
"epoch": 1.13,
"learning_rate": 3.466286799620133e-05,
"loss": 0.3806,
"step": 440
},
{
"epoch": 1.15,
"learning_rate": 3.418803418803419e-05,
"loss": 0.361,
"step": 450
},
{
"epoch": 1.18,
"learning_rate": 3.371320037986705e-05,
"loss": 0.3834,
"step": 460
},
{
"epoch": 1.2,
"learning_rate": 3.323836657169991e-05,
"loss": 0.3696,
"step": 470
},
{
"epoch": 1.23,
"learning_rate": 3.2763532763532764e-05,
"loss": 0.3389,
"step": 480
},
{
"epoch": 1.26,
"learning_rate": 3.2288698955365625e-05,
"loss": 0.3671,
"step": 490
},
{
"epoch": 1.28,
"learning_rate": 3.181386514719848e-05,
"loss": 0.3685,
"step": 500
},
{
"epoch": 1.31,
"learning_rate": 3.133903133903134e-05,
"loss": 0.3533,
"step": 510
},
{
"epoch": 1.33,
"learning_rate": 3.08641975308642e-05,
"loss": 0.3539,
"step": 520
},
{
"epoch": 1.36,
"learning_rate": 3.0389363722697055e-05,
"loss": 0.3499,
"step": 530
},
{
"epoch": 1.38,
"learning_rate": 2.9914529914529915e-05,
"loss": 0.3246,
"step": 540
},
{
"epoch": 1.41,
"learning_rate": 2.9439696106362775e-05,
"loss": 0.3134,
"step": 550
},
{
"epoch": 1.44,
"learning_rate": 2.8964862298195632e-05,
"loss": 0.3425,
"step": 560
},
{
"epoch": 1.46,
"learning_rate": 2.8490028490028492e-05,
"loss": 0.3185,
"step": 570
},
{
"epoch": 1.49,
"learning_rate": 2.8015194681861352e-05,
"loss": 0.3215,
"step": 580
},
{
"epoch": 1.51,
"learning_rate": 2.754036087369421e-05,
"loss": 0.3268,
"step": 590
},
{
"epoch": 1.54,
"learning_rate": 2.706552706552707e-05,
"loss": 0.3439,
"step": 600
},
{
"epoch": 1.56,
"learning_rate": 2.6590693257359926e-05,
"loss": 0.3049,
"step": 610
},
{
"epoch": 1.59,
"learning_rate": 2.611585944919278e-05,
"loss": 0.3306,
"step": 620
},
{
"epoch": 1.61,
"learning_rate": 2.564102564102564e-05,
"loss": 0.3158,
"step": 630
},
{
"epoch": 1.64,
"learning_rate": 2.51661918328585e-05,
"loss": 0.3118,
"step": 640
},
{
"epoch": 1.67,
"learning_rate": 2.4691358024691357e-05,
"loss": 0.3312,
"step": 650
},
{
"epoch": 1.69,
"learning_rate": 2.4216524216524217e-05,
"loss": 0.3094,
"step": 660
},
{
"epoch": 1.72,
"learning_rate": 2.3741690408357077e-05,
"loss": 0.3064,
"step": 670
},
{
"epoch": 1.74,
"learning_rate": 2.3266856600189934e-05,
"loss": 0.3144,
"step": 680
},
{
"epoch": 1.77,
"learning_rate": 2.2792022792022794e-05,
"loss": 0.2858,
"step": 690
},
{
"epoch": 1.79,
"learning_rate": 2.2317188983855654e-05,
"loss": 0.2634,
"step": 700
},
{
"epoch": 1.82,
"learning_rate": 2.184235517568851e-05,
"loss": 0.31,
"step": 710
},
{
"epoch": 1.84,
"learning_rate": 2.1367521367521368e-05,
"loss": 0.3035,
"step": 720
},
{
"epoch": 1.87,
"learning_rate": 2.0892687559354228e-05,
"loss": 0.2942,
"step": 730
},
{
"epoch": 1.9,
"learning_rate": 2.0417853751187084e-05,
"loss": 0.2951,
"step": 740
},
{
"epoch": 1.92,
"learning_rate": 1.9943019943019945e-05,
"loss": 0.26,
"step": 750
},
{
"epoch": 1.95,
"learning_rate": 1.9468186134852805e-05,
"loss": 0.2861,
"step": 760
},
{
"epoch": 1.97,
"learning_rate": 1.899335232668566e-05,
"loss": 0.2839,
"step": 770
},
{
"epoch": 2.0,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.3007,
"step": 780
},
{
"epoch": 2.0,
"eval_accuracy": 0.9845,
"eval_loss": 0.11994641274213791,
"eval_runtime": 153.4534,
"eval_samples_per_second": 65.166,
"eval_steps_per_second": 2.04,
"step": 780
},
{
"epoch": 2.03,
"learning_rate": 1.804368471035138e-05,
"loss": 0.3043,
"step": 790
},
{
"epoch": 2.05,
"learning_rate": 1.7568850902184235e-05,
"loss": 0.2769,
"step": 800
},
{
"epoch": 2.08,
"learning_rate": 1.7094017094017095e-05,
"loss": 0.2983,
"step": 810
},
{
"epoch": 2.1,
"learning_rate": 1.6619183285849956e-05,
"loss": 0.3186,
"step": 820
},
{
"epoch": 2.13,
"learning_rate": 1.6144349477682812e-05,
"loss": 0.2619,
"step": 830
},
{
"epoch": 2.15,
"learning_rate": 1.566951566951567e-05,
"loss": 0.283,
"step": 840
},
{
"epoch": 2.18,
"learning_rate": 1.5194681861348528e-05,
"loss": 0.287,
"step": 850
},
{
"epoch": 2.2,
"learning_rate": 1.4719848053181388e-05,
"loss": 0.2631,
"step": 860
},
{
"epoch": 2.23,
"learning_rate": 1.4245014245014246e-05,
"loss": 0.263,
"step": 870
},
{
"epoch": 2.26,
"learning_rate": 1.3770180436847105e-05,
"loss": 0.2554,
"step": 880
},
{
"epoch": 2.28,
"learning_rate": 1.3295346628679963e-05,
"loss": 0.2702,
"step": 890
},
{
"epoch": 2.31,
"learning_rate": 1.282051282051282e-05,
"loss": 0.3224,
"step": 900
},
{
"epoch": 2.33,
"learning_rate": 1.2345679012345678e-05,
"loss": 0.2801,
"step": 910
},
{
"epoch": 2.36,
"learning_rate": 1.1870845204178538e-05,
"loss": 0.2521,
"step": 920
},
{
"epoch": 2.38,
"learning_rate": 1.1396011396011397e-05,
"loss": 0.2613,
"step": 930
},
{
"epoch": 2.41,
"learning_rate": 1.0921177587844255e-05,
"loss": 0.2485,
"step": 940
},
{
"epoch": 2.44,
"learning_rate": 1.0446343779677114e-05,
"loss": 0.2672,
"step": 950
},
{
"epoch": 2.46,
"learning_rate": 9.971509971509972e-06,
"loss": 0.2695,
"step": 960
},
{
"epoch": 2.49,
"learning_rate": 9.49667616334283e-06,
"loss": 0.2593,
"step": 970
},
{
"epoch": 2.51,
"learning_rate": 9.02184235517569e-06,
"loss": 0.2402,
"step": 980
},
{
"epoch": 2.54,
"learning_rate": 8.547008547008548e-06,
"loss": 0.2759,
"step": 990
},
{
"epoch": 2.56,
"learning_rate": 8.072174738841406e-06,
"loss": 0.2654,
"step": 1000
},
{
"epoch": 2.59,
"learning_rate": 7.597340930674264e-06,
"loss": 0.2363,
"step": 1010
},
{
"epoch": 2.61,
"learning_rate": 7.122507122507123e-06,
"loss": 0.2553,
"step": 1020
},
{
"epoch": 2.64,
"learning_rate": 6.6476733143399815e-06,
"loss": 0.2384,
"step": 1030
},
{
"epoch": 2.67,
"learning_rate": 6.172839506172839e-06,
"loss": 0.2173,
"step": 1040
},
{
"epoch": 2.69,
"learning_rate": 5.6980056980056985e-06,
"loss": 0.2794,
"step": 1050
},
{
"epoch": 2.72,
"learning_rate": 5.223171889838557e-06,
"loss": 0.2293,
"step": 1060
},
{
"epoch": 2.74,
"learning_rate": 4.748338081671415e-06,
"loss": 0.2107,
"step": 1070
},
{
"epoch": 2.77,
"learning_rate": 4.273504273504274e-06,
"loss": 0.2387,
"step": 1080
},
{
"epoch": 2.79,
"learning_rate": 3.798670465337132e-06,
"loss": 0.2534,
"step": 1090
},
{
"epoch": 2.82,
"learning_rate": 3.3238366571699908e-06,
"loss": 0.2449,
"step": 1100
},
{
"epoch": 2.84,
"learning_rate": 2.8490028490028492e-06,
"loss": 0.2188,
"step": 1110
},
{
"epoch": 2.87,
"learning_rate": 2.3741690408357077e-06,
"loss": 0.2444,
"step": 1120
},
{
"epoch": 2.9,
"learning_rate": 1.899335232668566e-06,
"loss": 0.2611,
"step": 1130
},
{
"epoch": 2.92,
"learning_rate": 1.4245014245014246e-06,
"loss": 0.2607,
"step": 1140
},
{
"epoch": 2.95,
"learning_rate": 9.49667616334283e-07,
"loss": 0.2601,
"step": 1150
},
{
"epoch": 2.97,
"learning_rate": 4.748338081671415e-07,
"loss": 0.2441,
"step": 1160
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 0.2442,
"step": 1170
},
{
"epoch": 3.0,
"eval_accuracy": 0.9855,
"eval_loss": 0.10110656172037125,
"eval_runtime": 153.3402,
"eval_samples_per_second": 65.214,
"eval_steps_per_second": 2.041,
"step": 1170
},
{
"epoch": 3.0,
"step": 1170,
"total_flos": 1.161843208194687e+19,
"train_loss": 0.5034837816515539,
"train_runtime": 6176.4988,
"train_samples_per_second": 24.286,
"train_steps_per_second": 0.189
}
],
"max_steps": 1170,
"num_train_epochs": 3,
"total_flos": 1.161843208194687e+19,
"trial_name": null,
"trial_params": null
}