{
  "best_metric": 0.8883415435139573,
  "best_model_checkpoint": "efficientnet-b0-finetuned-noh\\checkpoint-184",
  "epoch": 9.577777777777778,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 8.235955238342285,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.6411,
      "step": 10
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 7.751129150390625,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.6146,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.729064039408867,
      "eval_loss": 0.6138808727264404,
      "eval_runtime": 88.9218,
      "eval_samples_per_second": 6.849,
      "eval_steps_per_second": 0.439,
      "step": 23
    },
    {
      "epoch": 1.3111111111111111,
      "grad_norm": 6.260272979736328,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.5289,
      "step": 30
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 5.691160202026367,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.5116,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8308702791461412,
      "eval_loss": 0.47041037678718567,
      "eval_runtime": 85.525,
      "eval_samples_per_second": 7.121,
      "eval_steps_per_second": 0.456,
      "step": 46
    },
    {
      "epoch": 2.1777777777777776,
      "grad_norm": 5.566213130950928,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.4151,
      "step": 50
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 5.660247325897217,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.4655,
      "step": 60
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8587848932676518,
      "eval_loss": 0.4233274757862091,
      "eval_runtime": 62.7576,
      "eval_samples_per_second": 9.704,
      "eval_steps_per_second": 0.621,
      "step": 69
    },
    {
      "epoch": 3.0444444444444443,
      "grad_norm": 5.13922119140625,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.4394,
      "step": 70
    },
    {
      "epoch": 3.488888888888889,
      "grad_norm": 5.63355827331543,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.4292,
      "step": 80
    },
    {
      "epoch": 3.9333333333333336,
      "grad_norm": 6.373378276824951,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.4331,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8604269293924466,
      "eval_loss": 0.41187092661857605,
      "eval_runtime": 62.2366,
      "eval_samples_per_second": 9.785,
      "eval_steps_per_second": 0.627,
      "step": 92
    },
    {
      "epoch": 4.355555555555555,
      "grad_norm": 5.6973114013671875,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.388,
      "step": 100
    },
    {
      "epoch": 4.8,
      "grad_norm": 6.741119861602783,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.4281,
      "step": 110
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8752052545155994,
      "eval_loss": 0.38966891169548035,
      "eval_runtime": 62.2405,
      "eval_samples_per_second": 9.785,
      "eval_steps_per_second": 0.627,
      "step": 115
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 5.320347785949707,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.3975,
      "step": 120
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 5.142993927001953,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.4001,
      "step": 130
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8719211822660099,
      "eval_loss": 0.4011945426464081,
      "eval_runtime": 63.0341,
      "eval_samples_per_second": 9.661,
      "eval_steps_per_second": 0.619,
      "step": 138
    },
    {
      "epoch": 6.088888888888889,
      "grad_norm": 4.859269142150879,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.34,
      "step": 140
    },
    {
      "epoch": 6.533333333333333,
      "grad_norm": 6.196608066558838,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.4207,
      "step": 150
    },
    {
      "epoch": 6.977777777777778,
      "grad_norm": 6.519267559051514,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.3721,
      "step": 160
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8817733990147784,
      "eval_loss": 0.38614729046821594,
      "eval_runtime": 62.6292,
      "eval_samples_per_second": 9.724,
      "eval_steps_per_second": 0.623,
      "step": 161
    },
    {
      "epoch": 7.4,
      "grad_norm": 4.774895668029785,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.3456,
      "step": 170
    },
    {
      "epoch": 7.844444444444444,
      "grad_norm": 6.236260890960693,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.3979,
      "step": 180
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8883415435139573,
      "eval_loss": 0.3784164488315582,
      "eval_runtime": 62.8885,
      "eval_samples_per_second": 9.684,
      "eval_steps_per_second": 0.62,
      "step": 184
    },
    {
      "epoch": 8.266666666666667,
      "grad_norm": 7.606906890869141,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.3784,
      "step": 190
    },
    {
      "epoch": 8.71111111111111,
      "grad_norm": 4.811132907867432,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.3376,
      "step": 200
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8604269293924466,
      "eval_loss": 0.4170529246330261,
      "eval_runtime": 62.9062,
      "eval_samples_per_second": 9.681,
      "eval_steps_per_second": 0.62,
      "step": 207
    },
    {
      "epoch": 9.133333333333333,
      "grad_norm": 5.669275760650635,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.3526,
      "step": 210
    },
    {
      "epoch": 9.577777777777778,
      "grad_norm": 6.406429767608643,
      "learning_rate": 0.0,
      "loss": 0.3984,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "eval_accuracy": 0.8620689655172413,
      "eval_loss": 0.4138999581336975,
      "eval_runtime": 63.2813,
      "eval_samples_per_second": 9.624,
      "eval_steps_per_second": 0.616,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "step": 220,
      "total_flos": 4.991931114153984e+16,
      "train_loss": 0.42888181643052536,
      "train_runtime": 2265.5727,
      "train_samples_per_second": 6.352,
      "train_steps_per_second": 0.097
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.991931114153984e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}