{
  "best_metric": 0.8428571428571429,
  "best_model_checkpoint": "videomae-base-finetuned-ucf101-subset\\checkpoint-148",
  "epoch": 3.22972972972973,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 10.565461158752441,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.676,
      "step": 10
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 9.007370948791504,
      "learning_rate": 4.81203007518797e-05,
      "loss": 1.3955,
      "step": 20
    },
    {
      "epoch": 0.20270270270270271,
      "grad_norm": 12.666777610778809,
      "learning_rate": 4.43609022556391e-05,
      "loss": 1.0504,
      "step": 30
    },
    {
      "epoch": 0.25675675675675674,
      "eval_accuracy": 0.7285714285714285,
      "eval_loss": 0.965540885925293,
      "eval_runtime": 218.999,
      "eval_samples_per_second": 0.32,
      "eval_steps_per_second": 0.041,
      "step": 38
    },
    {
      "epoch": 1.0135135135135136,
      "grad_norm": 5.676259994506836,
      "learning_rate": 4.0601503759398494e-05,
      "loss": 0.8752,
      "step": 40
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 13.729002952575684,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 0.5743,
      "step": 50
    },
    {
      "epoch": 1.1486486486486487,
      "grad_norm": 14.8609037399292,
      "learning_rate": 3.3082706766917295e-05,
      "loss": 0.6753,
      "step": 60
    },
    {
      "epoch": 1.2162162162162162,
      "grad_norm": 13.474194526672363,
      "learning_rate": 2.9323308270676693e-05,
      "loss": 0.5387,
      "step": 70
    },
    {
      "epoch": 1.2567567567567568,
      "eval_accuracy": 0.7571428571428571,
      "eval_loss": 0.5636643767356873,
      "eval_runtime": 215.3525,
      "eval_samples_per_second": 0.325,
      "eval_steps_per_second": 0.042,
      "step": 76
    },
    {
      "epoch": 2.027027027027027,
      "grad_norm": 8.333715438842773,
      "learning_rate": 2.556390977443609e-05,
      "loss": 0.292,
      "step": 80
    },
    {
      "epoch": 2.0945945945945947,
      "grad_norm": 12.702014923095703,
      "learning_rate": 2.1804511278195487e-05,
      "loss": 0.2774,
      "step": 90
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 22.819400787353516,
      "learning_rate": 1.8045112781954888e-05,
      "loss": 0.2559,
      "step": 100
    },
    {
      "epoch": 2.22972972972973,
      "grad_norm": 11.329200744628906,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.2298,
      "step": 110
    },
    {
      "epoch": 2.2567567567567566,
      "eval_accuracy": 0.8285714285714286,
      "eval_loss": 0.4615909159183502,
      "eval_runtime": 220.3855,
      "eval_samples_per_second": 0.318,
      "eval_steps_per_second": 0.041,
      "step": 114
    },
    {
      "epoch": 3.0405405405405403,
      "grad_norm": 24.391935348510742,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.2461,
      "step": 120
    },
    {
      "epoch": 3.108108108108108,
      "grad_norm": 1.8534562587738037,
      "learning_rate": 6.766917293233083e-06,
      "loss": 0.1163,
      "step": 130
    },
    {
      "epoch": 3.175675675675676,
      "grad_norm": 1.0798118114471436,
      "learning_rate": 3.007518796992481e-06,
      "loss": 0.13,
      "step": 140
    },
    {
      "epoch": 3.22972972972973,
      "eval_accuracy": 0.8428571428571429,
      "eval_loss": 0.4940463900566101,
      "eval_runtime": 216.8735,
      "eval_samples_per_second": 0.323,
      "eval_steps_per_second": 0.041,
      "step": 148
    },
    {
      "epoch": 3.22972972972973,
      "step": 148,
      "total_flos": 1.460491890402263e+18,
      "train_loss": 0.5682408096822532,
      "train_runtime": 21379.1283,
      "train_samples_per_second": 0.055,
      "train_steps_per_second": 0.007
    },
    {
      "epoch": 3.22972972972973,
      "eval_accuracy": 0.8451612903225807,
      "eval_loss": 0.4450737237930298,
      "eval_runtime": 471.2207,
      "eval_samples_per_second": 0.329,
      "eval_steps_per_second": 0.042,
      "step": 148
    },
    {
      "epoch": 3.22972972972973,
      "eval_accuracy": 0.8461538461538461,
      "eval_loss": 0.44542694091796875,
      "eval_runtime": 488.3429,
      "eval_samples_per_second": 0.319,
      "eval_steps_per_second": 0.041,
      "step": 148
    }
  ],
  "logging_steps": 10,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 1.460491890402263e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}