test_ViT-Masked_5 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 50.0,
"eval_steps": 500,
"global_step": 12950,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.9305019305019306,
"grad_norm": 2.738110065460205,
"learning_rate": 9.613899613899614e-06,
"loss": 0.0888,
"step": 500
},
{
"epoch": 1.9305019305019306,
"eval_runtime": 667.5755,
"eval_samples_per_second": 264.418,
"eval_steps_per_second": 16.527,
"step": 500
},
{
"epoch": 3.861003861003861,
"grad_norm": 4.450708389282227,
"learning_rate": 9.227799227799229e-06,
"loss": 0.0558,
"step": 1000
},
{
"epoch": 3.861003861003861,
"eval_runtime": 651.8038,
"eval_samples_per_second": 270.816,
"eval_steps_per_second": 16.927,
"step": 1000
},
{
"epoch": 5.7915057915057915,
"grad_norm": 5.049165725708008,
"learning_rate": 8.841698841698842e-06,
"loss": 0.0409,
"step": 1500
},
{
"epoch": 5.7915057915057915,
"eval_runtime": 650.4821,
"eval_samples_per_second": 271.366,
"eval_steps_per_second": 16.961,
"step": 1500
},
{
"epoch": 7.722007722007722,
"grad_norm": 3.824125051498413,
"learning_rate": 8.455598455598457e-06,
"loss": 0.0319,
"step": 2000
},
{
"epoch": 7.722007722007722,
"eval_runtime": 652.7164,
"eval_samples_per_second": 270.438,
"eval_steps_per_second": 16.903,
"step": 2000
},
{
"epoch": 9.652509652509652,
"grad_norm": 3.7455878257751465,
"learning_rate": 8.06949806949807e-06,
"loss": 0.0277,
"step": 2500
},
{
"epoch": 9.652509652509652,
"eval_runtime": 652.469,
"eval_samples_per_second": 270.54,
"eval_steps_per_second": 16.91,
"step": 2500
},
{
"epoch": 11.583011583011583,
"grad_norm": 2.825524091720581,
"learning_rate": 7.683397683397685e-06,
"loss": 0.0249,
"step": 3000
},
{
"epoch": 11.583011583011583,
"eval_runtime": 724.8262,
"eval_samples_per_second": 243.533,
"eval_steps_per_second": 15.222,
"step": 3000
},
{
"epoch": 13.513513513513514,
"grad_norm": 1.5855698585510254,
"learning_rate": 7.297297297297298e-06,
"loss": 0.0233,
"step": 3500
},
{
"epoch": 13.513513513513514,
"eval_runtime": 721.7805,
"eval_samples_per_second": 244.56,
"eval_steps_per_second": 15.286,
"step": 3500
},
{
"epoch": 15.444015444015443,
"grad_norm": 2.516690254211426,
"learning_rate": 6.911196911196911e-06,
"loss": 0.0217,
"step": 4000
},
{
"epoch": 15.444015444015443,
"eval_runtime": 723.8804,
"eval_samples_per_second": 243.851,
"eval_steps_per_second": 15.241,
"step": 4000
},
{
"epoch": 17.374517374517374,
"grad_norm": 2.6874454021453857,
"learning_rate": 6.525096525096526e-06,
"loss": 0.0207,
"step": 4500
},
{
"epoch": 17.374517374517374,
"eval_runtime": 726.2467,
"eval_samples_per_second": 243.057,
"eval_steps_per_second": 15.192,
"step": 4500
},
{
"epoch": 19.305019305019304,
"grad_norm": 2.5405662059783936,
"learning_rate": 6.13899613899614e-06,
"loss": 0.0198,
"step": 5000
},
{
"epoch": 19.305019305019304,
"eval_runtime": 725.831,
"eval_samples_per_second": 243.196,
"eval_steps_per_second": 15.201,
"step": 5000
},
{
"epoch": 21.235521235521237,
"grad_norm": 2.5149359703063965,
"learning_rate": 5.752895752895753e-06,
"loss": 0.0191,
"step": 5500
},
{
"epoch": 21.235521235521237,
"eval_runtime": 724.4353,
"eval_samples_per_second": 243.664,
"eval_steps_per_second": 15.23,
"step": 5500
},
{
"epoch": 23.166023166023166,
"grad_norm": 1.5971148014068604,
"learning_rate": 5.366795366795368e-06,
"loss": 0.0183,
"step": 6000
},
{
"epoch": 23.166023166023166,
"eval_runtime": 725.1569,
"eval_samples_per_second": 243.422,
"eval_steps_per_second": 15.215,
"step": 6000
},
{
"epoch": 25.096525096525095,
"grad_norm": 2.0284860134124756,
"learning_rate": 4.980694980694981e-06,
"loss": 0.0178,
"step": 6500
},
{
"epoch": 25.096525096525095,
"eval_runtime": 726.8832,
"eval_samples_per_second": 242.844,
"eval_steps_per_second": 15.179,
"step": 6500
},
{
"epoch": 27.027027027027028,
"grad_norm": 2.005959987640381,
"learning_rate": 4.594594594594596e-06,
"loss": 0.0174,
"step": 7000
},
{
"epoch": 27.027027027027028,
"eval_runtime": 725.3449,
"eval_samples_per_second": 243.359,
"eval_steps_per_second": 15.211,
"step": 7000
},
{
"epoch": 28.957528957528957,
"grad_norm": 1.96770441532135,
"learning_rate": 4.208494208494209e-06,
"loss": 0.0168,
"step": 7500
},
{
"epoch": 28.957528957528957,
"eval_runtime": 765.8326,
"eval_samples_per_second": 230.493,
"eval_steps_per_second": 14.407,
"step": 7500
},
{
"epoch": 30.888030888030887,
"grad_norm": 1.844897747039795,
"learning_rate": 3.822393822393823e-06,
"loss": 0.0165,
"step": 8000
},
{
"epoch": 30.888030888030887,
"eval_runtime": 819.9764,
"eval_samples_per_second": 215.273,
"eval_steps_per_second": 13.455,
"step": 8000
},
{
"epoch": 32.818532818532816,
"grad_norm": 1.615881323814392,
"learning_rate": 3.4362934362934363e-06,
"loss": 0.0162,
"step": 8500
},
{
"epoch": 32.818532818532816,
"eval_runtime": 821.2267,
"eval_samples_per_second": 214.946,
"eval_steps_per_second": 13.435,
"step": 8500
},
{
"epoch": 34.74903474903475,
"grad_norm": 1.6435213088989258,
"learning_rate": 3.0501930501930503e-06,
"loss": 0.0158,
"step": 9000
},
{
"epoch": 34.74903474903475,
"eval_runtime": 819.0429,
"eval_samples_per_second": 215.519,
"eval_steps_per_second": 13.471,
"step": 9000
},
{
"epoch": 36.67953667953668,
"grad_norm": 1.7934831380844116,
"learning_rate": 2.6640926640926647e-06,
"loss": 0.0155,
"step": 9500
},
{
"epoch": 36.67953667953668,
"eval_runtime": 820.1169,
"eval_samples_per_second": 215.236,
"eval_steps_per_second": 13.453,
"step": 9500
},
{
"epoch": 38.61003861003861,
"grad_norm": 1.1239484548568726,
"learning_rate": 2.2779922779922782e-06,
"loss": 0.0152,
"step": 10000
},
{
"epoch": 38.61003861003861,
"eval_runtime": 821.2669,
"eval_samples_per_second": 214.935,
"eval_steps_per_second": 13.434,
"step": 10000
},
{
"epoch": 40.54054054054054,
"grad_norm": 1.256516456604004,
"learning_rate": 1.8918918918918922e-06,
"loss": 0.015,
"step": 10500
},
{
"epoch": 40.54054054054054,
"eval_runtime": 818.9208,
"eval_samples_per_second": 215.551,
"eval_steps_per_second": 13.473,
"step": 10500
},
{
"epoch": 42.47104247104247,
"grad_norm": 0.4177967607975006,
"learning_rate": 1.505791505791506e-06,
"loss": 0.0148,
"step": 11000
},
{
"epoch": 42.47104247104247,
"eval_runtime": 823.9199,
"eval_samples_per_second": 214.243,
"eval_steps_per_second": 13.391,
"step": 11000
},
{
"epoch": 44.4015444015444,
"grad_norm": 0.3761753737926483,
"learning_rate": 1.1196911196911197e-06,
"loss": 0.0146,
"step": 11500
},
{
"epoch": 44.4015444015444,
"eval_runtime": 821.1987,
"eval_samples_per_second": 214.953,
"eval_steps_per_second": 13.435,
"step": 11500
},
{
"epoch": 46.33204633204633,
"grad_norm": 0.4973774552345276,
"learning_rate": 7.335907335907337e-07,
"loss": 0.0145,
"step": 12000
},
{
"epoch": 46.33204633204633,
"eval_runtime": 821.7593,
"eval_samples_per_second": 214.806,
"eval_steps_per_second": 13.426,
"step": 12000
},
{
"epoch": 48.262548262548265,
"grad_norm": 0.3773488998413086,
"learning_rate": 3.474903474903475e-07,
"loss": 0.0144,
"step": 12500
},
{
"epoch": 48.262548262548265,
"eval_runtime": 824.3743,
"eval_samples_per_second": 214.125,
"eval_steps_per_second": 13.383,
"step": 12500
},
{
"epoch": 50.0,
"step": 12950,
"total_flos": 5.544539360447693e+19,
"train_loss": 0.023562463738283135,
"train_runtime": 98807.7466,
"train_samples_per_second": 267.972,
"train_steps_per_second": 0.131
}
],
"logging_steps": 500,
"max_steps": 12950,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 100,
"total_flos": 5.544539360447693e+19,
"train_batch_size": 2048,
"trial_name": null,
"trial_params": null
}
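Note: the log_history array above interleaves two kinds of entries logged every 500 steps: training logs (loss, grad_norm, learning_rate) and evaluation logs (eval_runtime, eval_samples_per_second, eval_steps_per_second), followed by a single summary entry at epoch 50.0 (train_loss, train_runtime, total_flos). A minimal sketch for reading them apart is below; it assumes the file is saved locally as trainer_state.json, and the snippet itself is illustrative rather than part of the checkpoint.

import json

# Load the trainer state (assumed local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_runtime".
# The final summary entry (epoch 50.0) has neither, so it is excluded from both lists.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_runtime" in e]

for e in train_logs:
    print(f"step {e['step']:>6}  epoch {e['epoch']:6.2f}  "
          f"loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")

# The last entry is the end-of-training summary.
summary = state["log_history"][-1]
print("final train_loss:", summary.get("train_loss"))
print("train_runtime (s):", summary.get("train_runtime"))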