{
"best_metric": 0.9142857142857143,
"best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-300",
"epoch": 3.25,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.3692,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.2953,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 5e-05,
"loss": 2.1919,
"step": 30
},
{
"epoch": 0.13,
"learning_rate": 4.814814814814815e-05,
"loss": 2.0557,
"step": 40
},
{
"epoch": 0.17,
"learning_rate": 4.62962962962963e-05,
"loss": 2.0249,
"step": 50
},
{
"epoch": 0.2,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.8514,
"step": 60
},
{
"epoch": 0.23,
"learning_rate": 4.259259259259259e-05,
"loss": 1.897,
"step": 70
},
{
"epoch": 0.25,
"eval_accuracy": 0.6,
"eval_loss": 1.4588954448699951,
"eval_runtime": 21.0777,
"eval_samples_per_second": 3.321,
"eval_steps_per_second": 0.854,
"step": 75
},
{
"epoch": 1.02,
"learning_rate": 4.074074074074074e-05,
"loss": 1.5467,
"step": 80
},
{
"epoch": 1.05,
"learning_rate": 3.888888888888889e-05,
"loss": 1.2117,
"step": 90
},
{
"epoch": 1.08,
"learning_rate": 3.7037037037037037e-05,
"loss": 1.0718,
"step": 100
},
{
"epoch": 1.12,
"learning_rate": 3.518518518518519e-05,
"loss": 0.9393,
"step": 110
},
{
"epoch": 1.15,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.9189,
"step": 120
},
{
"epoch": 1.18,
"learning_rate": 3.148148148148148e-05,
"loss": 0.6238,
"step": 130
},
{
"epoch": 1.22,
"learning_rate": 2.962962962962963e-05,
"loss": 0.6134,
"step": 140
},
{
"epoch": 1.25,
"learning_rate": 2.777777777777778e-05,
"loss": 0.949,
"step": 150
},
{
"epoch": 1.25,
"eval_accuracy": 0.8,
"eval_loss": 0.6212201714515686,
"eval_runtime": 20.4284,
"eval_samples_per_second": 3.427,
"eval_steps_per_second": 0.881,
"step": 150
},
{
"epoch": 2.03,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.3117,
"step": 160
},
{
"epoch": 2.07,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.5063,
"step": 170
},
{
"epoch": 2.1,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.4637,
"step": 180
},
{
"epoch": 2.13,
"learning_rate": 2.037037037037037e-05,
"loss": 0.2961,
"step": 190
},
{
"epoch": 2.17,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.3104,
"step": 200
},
{
"epoch": 2.2,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.2428,
"step": 210
},
{
"epoch": 2.23,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.3123,
"step": 220
},
{
"epoch": 2.25,
"eval_accuracy": 0.8714285714285714,
"eval_loss": 0.3479521572589874,
"eval_runtime": 20.8608,
"eval_samples_per_second": 3.356,
"eval_steps_per_second": 0.863,
"step": 225
},
{
"epoch": 3.02,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.2286,
"step": 230
},
{
"epoch": 3.05,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.1221,
"step": 240
},
{
"epoch": 3.08,
"learning_rate": 9.259259259259259e-06,
"loss": 0.1433,
"step": 250
},
{
"epoch": 3.12,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.1399,
"step": 260
},
{
"epoch": 3.15,
"learning_rate": 5.555555555555556e-06,
"loss": 0.1564,
"step": 270
},
{
"epoch": 3.18,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.1845,
"step": 280
},
{
"epoch": 3.22,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.2022,
"step": 290
},
{
"epoch": 3.25,
"learning_rate": 0.0,
"loss": 0.3394,
"step": 300
},
{
"epoch": 3.25,
"eval_accuracy": 0.9142857142857143,
"eval_loss": 0.18874740600585938,
"eval_runtime": 23.6778,
"eval_samples_per_second": 2.956,
"eval_steps_per_second": 0.76,
"step": 300
},
{
"epoch": 3.25,
"step": 300,
"total_flos": 1.495384188125184e+18,
"train_loss": 0.8839879536628723,
"train_runtime": 949.8263,
"train_samples_per_second": 1.263,
"train_steps_per_second": 0.316
},
{
"epoch": 3.25,
"eval_accuracy": 0.8903225806451613,
"eval_loss": 0.39073970913887024,
"eval_runtime": 41.2036,
"eval_samples_per_second": 3.762,
"eval_steps_per_second": 0.947,
"step": 300
},
{
"epoch": 3.25,
"eval_accuracy": 0.8903225806451613,
"eval_loss": 0.3907397389411926,
"eval_runtime": 37.7858,
"eval_samples_per_second": 4.102,
"eval_steps_per_second": 1.032,
"step": 300
}
],
"max_steps": 300,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.495384188125184e+18,
"trial_name": null,
"trial_params": null
}
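
A minimal sketch of how this trainer state can be inspected programmatically; it uses only the standard json module. The file path below is an assumption based on the best_model_checkpoint field above, not something the checkpoint itself guarantees.

import json

# Assumed location, taken from "best_model_checkpoint" above.
path = "videomae-base-finetuned-ucf101-subset/checkpoint-300/trainer_state.json"

with open(path) as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes training-loss entries and evaluation entries;
# evaluation entries are the ones carrying "eval_accuracy".
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

for e in eval_logs:
    print(f"step {e['step']:>3}: "
          f"eval_accuracy={e['eval_accuracy']:.4f}, "
          f"eval_loss={e['eval_loss']:.4f}")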