easyrec-large / trainer_state.json
{
"best_metric": 0.02853687417768363,
"best_model_checkpoint": "./checkpoints/easyrec-large",
"epoch": 2.588844433984467,
"eval_steps": 1000,
"global_step": 11000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11767474699929395,
"grad_norm": 99.0,
"learning_rate": 4.803875421667844e-05,
"loss": 2.8035,
"step": 500
},
{
"epoch": 0.2353494939985879,
"grad_norm": 120.5,
"learning_rate": 4.607750843335687e-05,
"loss": 2.3662,
"step": 1000
},
{
"epoch": 0.3530242409978819,
"grad_norm": 46.0,
"learning_rate": 4.411626265003531e-05,
"loss": 2.2534,
"step": 1500
},
{
"epoch": 0.4706989879971758,
"grad_norm": 41.5,
"learning_rate": 4.215501686671374e-05,
"loss": 2.1194,
"step": 2000
},
{
"epoch": 0.5883737349964697,
"grad_norm": 56.5,
"learning_rate": 4.019377108339217e-05,
"loss": 2.0655,
"step": 2500
},
{
"epoch": 0.7060484819957638,
"grad_norm": 56.25,
"learning_rate": 3.82325253000706e-05,
"loss": 2.084,
"step": 3000
},
{
"epoch": 0.8237232289950577,
"grad_norm": 44.0,
"learning_rate": 3.627127951674904e-05,
"loss": 2.0148,
"step": 3500
},
{
"epoch": 0.9413979759943516,
"grad_norm": 43.0,
"learning_rate": 3.431003373342748e-05,
"loss": 2.0612,
"step": 4000
},
{
"epoch": 1.0590727229936456,
"grad_norm": 80.5,
"learning_rate": 3.234878795010591e-05,
"loss": 1.9376,
"step": 4500
},
{
"epoch": 1.1767474699929394,
"grad_norm": 48.25,
"learning_rate": 3.0387542166784343e-05,
"loss": 1.9588,
"step": 5000
},
{
"epoch": 1.2944222169922335,
"grad_norm": 67.5,
"learning_rate": 2.8426296383462774e-05,
"loss": 1.9291,
"step": 5500
},
{
"epoch": 1.4120969639915275,
"grad_norm": 75.5,
"learning_rate": 2.6465050600141212e-05,
"loss": 1.9643,
"step": 6000
},
{
"epoch": 1.5297717109908213,
"grad_norm": 45.75,
"learning_rate": 2.4503804816819646e-05,
"loss": 1.9045,
"step": 6500
},
{
"epoch": 1.6474464579901154,
"grad_norm": 44.5,
"learning_rate": 2.2542559033498077e-05,
"loss": 1.9263,
"step": 7000
},
{
"epoch": 1.7651212049894092,
"grad_norm": 56.25,
"learning_rate": 2.0581313250176512e-05,
"loss": 1.9065,
"step": 7500
},
{
"epoch": 1.8827959519887032,
"grad_norm": 44.75,
"learning_rate": 1.862006746685495e-05,
"loss": 1.9187,
"step": 8000
},
{
"epoch": 2.0004706989879972,
"grad_norm": 57.0,
"learning_rate": 1.665882168353338e-05,
"loss": 1.9198,
"step": 8500
},
{
"epoch": 2.1181454459872913,
"grad_norm": 49.5,
"learning_rate": 1.4697575900211815e-05,
"loss": 1.9371,
"step": 9000
},
{
"epoch": 2.2358201929865853,
"grad_norm": 45.0,
"learning_rate": 1.273633011689025e-05,
"loss": 1.8927,
"step": 9500
},
{
"epoch": 2.353494939985879,
"grad_norm": 45.0,
"learning_rate": 1.0775084333568684e-05,
"loss": 1.8996,
"step": 10000
},
{
"epoch": 2.471169686985173,
"grad_norm": 34.75,
"learning_rate": 8.813838550247118e-06,
"loss": 1.8713,
"step": 10500
},
{
"epoch": 2.588844433984467,
"grad_norm": 37.75,
"learning_rate": 6.852592766925552e-06,
"loss": 1.9081,
"step": 11000
}
],
"logging_steps": 500,
"max_steps": 12747,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}