{
"best_metric": 0.35135135135135137,
"best_model_checkpoint": "vivit-b-16x2-kinetics400-finetuned-ucf101-subset-without-pretrained/checkpoint-1200",
"epoch": 3.25,
"eval_steps": 500,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.166666666666667e-06,
"loss": 6.0507,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 8.333333333333334e-06,
"loss": 5.1227,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 1.25e-05,
"loss": 5.3799,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 1.6666666666666667e-05,
"loss": 4.4165,
"step": 40
},
{
"epoch": 0.04,
"learning_rate": 2.0833333333333336e-05,
"loss": 4.0944,
"step": 50
},
{
"epoch": 0.05,
"learning_rate": 2.5e-05,
"loss": 3.935,
"step": 60
},
{
"epoch": 0.06,
"learning_rate": 2.916666666666667e-05,
"loss": 3.1094,
"step": 70
},
{
"epoch": 0.07,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.8124,
"step": 80
},
{
"epoch": 0.07,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.8153,
"step": 90
},
{
"epoch": 0.08,
"learning_rate": 4.166666666666667e-05,
"loss": 3.5982,
"step": 100
},
{
"epoch": 0.09,
"learning_rate": 4.5833333333333334e-05,
"loss": 3.0482,
"step": 110
},
{
"epoch": 0.1,
"learning_rate": 5e-05,
"loss": 3.1105,
"step": 120
},
{
"epoch": 0.11,
"learning_rate": 4.9537037037037035e-05,
"loss": 2.6555,
"step": 130
},
{
"epoch": 0.12,
"learning_rate": 4.9074074074074075e-05,
"loss": 2.4942,
"step": 140
},
{
"epoch": 0.12,
"learning_rate": 4.8611111111111115e-05,
"loss": 2.2091,
"step": 150
},
{
"epoch": 0.13,
"learning_rate": 4.814814814814815e-05,
"loss": 2.3341,
"step": 160
},
{
"epoch": 0.14,
"learning_rate": 4.768518518518519e-05,
"loss": 3.136,
"step": 170
},
{
"epoch": 0.15,
"learning_rate": 4.722222222222222e-05,
"loss": 3.1267,
"step": 180
},
{
"epoch": 0.16,
"learning_rate": 4.675925925925926e-05,
"loss": 2.1749,
"step": 190
},
{
"epoch": 0.17,
"learning_rate": 4.62962962962963e-05,
"loss": 2.7431,
"step": 200
},
{
"epoch": 0.17,
"learning_rate": 4.5833333333333334e-05,
"loss": 4.1859,
"step": 210
},
{
"epoch": 0.18,
"learning_rate": 4.5370370370370374e-05,
"loss": 2.4884,
"step": 220
},
{
"epoch": 0.19,
"learning_rate": 4.490740740740741e-05,
"loss": 2.6813,
"step": 230
},
{
"epoch": 0.2,
"learning_rate": 4.4444444444444447e-05,
"loss": 2.7271,
"step": 240
},
{
"epoch": 0.21,
"learning_rate": 4.3981481481481486e-05,
"loss": 2.6526,
"step": 250
},
{
"epoch": 0.22,
"learning_rate": 4.351851851851852e-05,
"loss": 1.684,
"step": 260
},
{
"epoch": 0.23,
"learning_rate": 4.305555555555556e-05,
"loss": 2.5616,
"step": 270
},
{
"epoch": 0.23,
"learning_rate": 4.259259259259259e-05,
"loss": 2.5468,
"step": 280
},
{
"epoch": 0.24,
"learning_rate": 4.212962962962963e-05,
"loss": 2.2534,
"step": 290
},
{
"epoch": 0.25,
"learning_rate": 4.166666666666667e-05,
"loss": 2.8108,
"step": 300
},
{
"epoch": 0.25,
"eval_accuracy": 0.16216216216216217,
"eval_loss": 2.4264731407165527,
"eval_runtime": 9.8082,
"eval_samples_per_second": 3.772,
"eval_steps_per_second": 3.772,
"step": 300
},
{
"epoch": 1.01,
"learning_rate": 4.1203703703703705e-05,
"loss": 1.8148,
"step": 310
},
{
"epoch": 1.02,
"learning_rate": 4.074074074074074e-05,
"loss": 2.7365,
"step": 320
},
{
"epoch": 1.02,
"learning_rate": 4.027777777777778e-05,
"loss": 2.9229,
"step": 330
},
{
"epoch": 1.03,
"learning_rate": 3.981481481481482e-05,
"loss": 2.8199,
"step": 340
},
{
"epoch": 1.04,
"learning_rate": 3.935185185185186e-05,
"loss": 1.9582,
"step": 350
},
{
"epoch": 1.05,
"learning_rate": 3.888888888888889e-05,
"loss": 2.1509,
"step": 360
},
{
"epoch": 1.06,
"learning_rate": 3.8425925925925924e-05,
"loss": 1.9543,
"step": 370
},
{
"epoch": 1.07,
"learning_rate": 3.7962962962962964e-05,
"loss": 2.6913,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.9787,
"step": 390
},
{
"epoch": 1.08,
"learning_rate": 3.7037037037037037e-05,
"loss": 2.8233,
"step": 400
},
{
"epoch": 1.09,
"learning_rate": 3.6574074074074076e-05,
"loss": 2.1148,
"step": 410
},
{
"epoch": 1.1,
"learning_rate": 3.611111111111111e-05,
"loss": 2.2275,
"step": 420
},
{
"epoch": 1.11,
"learning_rate": 3.564814814814815e-05,
"loss": 1.7524,
"step": 430
},
{
"epoch": 1.12,
"learning_rate": 3.518518518518519e-05,
"loss": 2.2017,
"step": 440
},
{
"epoch": 1.12,
"learning_rate": 3.472222222222222e-05,
"loss": 2.202,
"step": 450
},
{
"epoch": 1.13,
"learning_rate": 3.425925925925926e-05,
"loss": 2.2568,
"step": 460
},
{
"epoch": 1.14,
"learning_rate": 3.3796296296296295e-05,
"loss": 2.4412,
"step": 470
},
{
"epoch": 1.15,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.9082,
"step": 480
},
{
"epoch": 1.16,
"learning_rate": 3.2870370370370375e-05,
"loss": 2.1291,
"step": 490
},
{
"epoch": 1.17,
"learning_rate": 3.240740740740741e-05,
"loss": 2.3288,
"step": 500
},
{
"epoch": 1.18,
"learning_rate": 3.194444444444444e-05,
"loss": 2.5053,
"step": 510
},
{
"epoch": 1.18,
"learning_rate": 3.148148148148148e-05,
"loss": 1.9949,
"step": 520
},
{
"epoch": 1.19,
"learning_rate": 3.101851851851852e-05,
"loss": 2.0243,
"step": 530
},
{
"epoch": 1.2,
"learning_rate": 3.055555555555556e-05,
"loss": 1.9068,
"step": 540
},
{
"epoch": 1.21,
"learning_rate": 3.0092592592592593e-05,
"loss": 2.1791,
"step": 550
},
{
"epoch": 1.22,
"learning_rate": 2.962962962962963e-05,
"loss": 1.8557,
"step": 560
},
{
"epoch": 1.23,
"learning_rate": 2.916666666666667e-05,
"loss": 2.6166,
"step": 570
},
{
"epoch": 1.23,
"learning_rate": 2.8703703703703706e-05,
"loss": 1.981,
"step": 580
},
{
"epoch": 1.24,
"learning_rate": 2.824074074074074e-05,
"loss": 1.6169,
"step": 590
},
{
"epoch": 1.25,
"learning_rate": 2.777777777777778e-05,
"loss": 2.5955,
"step": 600
},
{
"epoch": 1.25,
"eval_accuracy": 0.13513513513513514,
"eval_loss": 2.2180917263031006,
"eval_runtime": 9.7602,
"eval_samples_per_second": 3.791,
"eval_steps_per_second": 3.791,
"step": 600
},
{
"epoch": 2.01,
"learning_rate": 2.7314814814814816e-05,
"loss": 2.4131,
"step": 610
},
{
"epoch": 2.02,
"learning_rate": 2.6851851851851855e-05,
"loss": 1.8115,
"step": 620
},
{
"epoch": 2.02,
"learning_rate": 2.6388888888888892e-05,
"loss": 2.5239,
"step": 630
},
{
"epoch": 2.03,
"learning_rate": 2.5925925925925925e-05,
"loss": 2.0357,
"step": 640
},
{
"epoch": 2.04,
"learning_rate": 2.5462962962962965e-05,
"loss": 2.0055,
"step": 650
},
{
"epoch": 2.05,
"learning_rate": 2.5e-05,
"loss": 1.7003,
"step": 660
},
{
"epoch": 2.06,
"learning_rate": 2.4537037037037038e-05,
"loss": 2.1512,
"step": 670
},
{
"epoch": 2.07,
"learning_rate": 2.4074074074074074e-05,
"loss": 2.3628,
"step": 680
},
{
"epoch": 2.08,
"learning_rate": 2.361111111111111e-05,
"loss": 1.5812,
"step": 690
},
{
"epoch": 2.08,
"learning_rate": 2.314814814814815e-05,
"loss": 1.7642,
"step": 700
},
{
"epoch": 2.09,
"learning_rate": 2.2685185185185187e-05,
"loss": 2.1108,
"step": 710
},
{
"epoch": 2.1,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.9476,
"step": 720
},
{
"epoch": 2.11,
"learning_rate": 2.175925925925926e-05,
"loss": 2.2292,
"step": 730
},
{
"epoch": 2.12,
"learning_rate": 2.1296296296296296e-05,
"loss": 1.8725,
"step": 740
},
{
"epoch": 2.12,
"learning_rate": 2.0833333333333336e-05,
"loss": 1.6058,
"step": 750
},
{
"epoch": 2.13,
"learning_rate": 2.037037037037037e-05,
"loss": 2.1164,
"step": 760
},
{
"epoch": 2.14,
"learning_rate": 1.990740740740741e-05,
"loss": 1.8633,
"step": 770
},
{
"epoch": 2.15,
"learning_rate": 1.9444444444444445e-05,
"loss": 1.5445,
"step": 780
},
{
"epoch": 2.16,
"learning_rate": 1.8981481481481482e-05,
"loss": 1.8847,
"step": 790
},
{
"epoch": 2.17,
"learning_rate": 1.8518518518518518e-05,
"loss": 1.4885,
"step": 800
},
{
"epoch": 2.17,
"learning_rate": 1.8055555555555555e-05,
"loss": 1.8776,
"step": 810
},
{
"epoch": 2.18,
"learning_rate": 1.7592592592592595e-05,
"loss": 1.8485,
"step": 820
},
{
"epoch": 2.19,
"learning_rate": 1.712962962962963e-05,
"loss": 2.1993,
"step": 830
},
{
"epoch": 2.2,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.0281,
"step": 840
},
{
"epoch": 2.21,
"learning_rate": 1.6203703703703704e-05,
"loss": 1.7665,
"step": 850
},
{
"epoch": 2.22,
"learning_rate": 1.574074074074074e-05,
"loss": 1.8035,
"step": 860
},
{
"epoch": 2.23,
"learning_rate": 1.527777777777778e-05,
"loss": 2.0851,
"step": 870
},
{
"epoch": 2.23,
"learning_rate": 1.4814814814814815e-05,
"loss": 2.0301,
"step": 880
},
{
"epoch": 2.24,
"learning_rate": 1.4351851851851853e-05,
"loss": 2.286,
"step": 890
},
{
"epoch": 2.25,
"learning_rate": 1.388888888888889e-05,
"loss": 1.5744,
"step": 900
},
{
"epoch": 2.25,
"eval_accuracy": 0.2972972972972973,
"eval_loss": 2.32218074798584,
"eval_runtime": 9.9364,
"eval_samples_per_second": 3.724,
"eval_steps_per_second": 3.724,
"step": 900
},
{
"epoch": 3.01,
"learning_rate": 1.3425925925925928e-05,
"loss": 1.8515,
"step": 910
},
{
"epoch": 3.02,
"learning_rate": 1.2962962962962962e-05,
"loss": 1.5024,
"step": 920
},
{
"epoch": 3.02,
"learning_rate": 1.25e-05,
"loss": 1.6205,
"step": 930
},
{
"epoch": 3.03,
"learning_rate": 1.2037037037037037e-05,
"loss": 1.756,
"step": 940
},
{
"epoch": 3.04,
"learning_rate": 1.1574074074074075e-05,
"loss": 1.6808,
"step": 950
},
{
"epoch": 3.05,
"learning_rate": 1.1111111111111112e-05,
"loss": 1.7203,
"step": 960
},
{
"epoch": 3.06,
"learning_rate": 1.0648148148148148e-05,
"loss": 1.7485,
"step": 970
},
{
"epoch": 3.07,
"learning_rate": 1.0185185185185185e-05,
"loss": 1.2808,
"step": 980
},
{
"epoch": 3.08,
"learning_rate": 9.722222222222223e-06,
"loss": 1.8201,
"step": 990
},
{
"epoch": 3.08,
"learning_rate": 9.259259259259259e-06,
"loss": 1.7245,
"step": 1000
},
{
"epoch": 3.09,
"learning_rate": 8.796296296296297e-06,
"loss": 1.5939,
"step": 1010
},
{
"epoch": 3.1,
"learning_rate": 8.333333333333334e-06,
"loss": 2.4791,
"step": 1020
},
{
"epoch": 3.11,
"learning_rate": 7.87037037037037e-06,
"loss": 1.7659,
"step": 1030
},
{
"epoch": 3.12,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.8667,
"step": 1040
},
{
"epoch": 3.12,
"learning_rate": 6.944444444444445e-06,
"loss": 1.7532,
"step": 1050
},
{
"epoch": 3.13,
"learning_rate": 6.481481481481481e-06,
"loss": 1.955,
"step": 1060
},
{
"epoch": 3.14,
"learning_rate": 6.0185185185185185e-06,
"loss": 1.3113,
"step": 1070
},
{
"epoch": 3.15,
"learning_rate": 5.555555555555556e-06,
"loss": 1.6229,
"step": 1080
},
{
"epoch": 3.16,
"learning_rate": 5.092592592592592e-06,
"loss": 1.528,
"step": 1090
},
{
"epoch": 3.17,
"learning_rate": 4.6296296296296296e-06,
"loss": 1.3243,
"step": 1100
},
{
"epoch": 3.17,
"learning_rate": 4.166666666666667e-06,
"loss": 1.6464,
"step": 1110
},
{
"epoch": 3.18,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.8068,
"step": 1120
},
{
"epoch": 3.19,
"learning_rate": 3.2407407407407406e-06,
"loss": 1.4424,
"step": 1130
},
{
"epoch": 3.2,
"learning_rate": 2.777777777777778e-06,
"loss": 2.2751,
"step": 1140
},
{
"epoch": 3.21,
"learning_rate": 2.3148148148148148e-06,
"loss": 1.9845,
"step": 1150
},
{
"epoch": 3.22,
"learning_rate": 1.8518518518518519e-06,
"loss": 2.0046,
"step": 1160
},
{
"epoch": 3.23,
"learning_rate": 1.388888888888889e-06,
"loss": 1.9218,
"step": 1170
},
{
"epoch": 3.23,
"learning_rate": 9.259259259259259e-07,
"loss": 1.8508,
"step": 1180
},
{
"epoch": 3.24,
"learning_rate": 4.6296296296296297e-07,
"loss": 1.2199,
"step": 1190
},
{
"epoch": 3.25,
"learning_rate": 0.0,
"loss": 1.9863,
"step": 1200
},
{
"epoch": 3.25,
"eval_accuracy": 0.35135135135135137,
"eval_loss": 2.1377880573272705,
"eval_runtime": 9.8941,
"eval_samples_per_second": 3.74,
"eval_steps_per_second": 3.74,
"step": 1200
},
{
"epoch": 3.25,
"step": 1200,
"total_flos": 3.0850716008448e+18,
"train_loss": 2.285027635097504,
"train_runtime": 686.1005,
"train_samples_per_second": 1.749,
"train_steps_per_second": 1.749
},
{
"epoch": 3.25,
"eval_accuracy": 0.27586206896551724,
"eval_loss": 2.1027634143829346,
"eval_runtime": 22.8546,
"eval_samples_per_second": 3.807,
"eval_steps_per_second": 3.807,
"step": 1200
},
{
"epoch": 3.25,
"eval_accuracy": 0.27586206896551724,
"eval_loss": 2.1027631759643555,
"eval_runtime": 22.8402,
"eval_samples_per_second": 3.809,
"eval_steps_per_second": 3.809,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 1200,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 3.0850716008448e+18,
"trial_name": null,
"trial_params": null
}