{
  "best_metric": 0.9017857142857143,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-Lego-v1-3ep/checkpoint-147",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 147,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 3.9616575241088867,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.7824,
      "step": 10
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 5.216483116149902,
      "learning_rate": 4.810606060606061e-05,
      "loss": 2.4565,
      "step": 20
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 8.999798774719238,
      "learning_rate": 4.431818181818182e-05,
      "loss": 1.7257,
      "step": 30
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 13.705337524414062,
      "learning_rate": 4.053030303030303e-05,
      "loss": 1.1779,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.75,
      "eval_loss": 0.7035489082336426,
      "eval_runtime": 103.5,
      "eval_samples_per_second": 1.082,
      "eval_steps_per_second": 0.039,
      "step": 49
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 10.42480754852295,
      "learning_rate": 3.6742424242424246e-05,
      "loss": 0.9019,
      "step": 50
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": 11.608043670654297,
      "learning_rate": 3.295454545454545e-05,
      "loss": 0.7082,
      "step": 60
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 9.341365814208984,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.6998,
      "step": 70
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 9.492186546325684,
      "learning_rate": 2.537878787878788e-05,
      "loss": 0.6349,
      "step": 80
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 10.111123085021973,
      "learning_rate": 2.1590909090909093e-05,
      "loss": 0.5591,
      "step": 90
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8482142857142857,
      "eval_loss": 0.38913723826408386,
      "eval_runtime": 0.7084,
      "eval_samples_per_second": 158.099,
      "eval_steps_per_second": 5.646,
      "step": 98
    },
    {
      "epoch": 2.0408163265306123,
      "grad_norm": 9.370523452758789,
      "learning_rate": 1.7803030303030303e-05,
      "loss": 0.4968,
      "step": 100
    },
    {
      "epoch": 2.2448979591836733,
      "grad_norm": 8.256933212280273,
      "learning_rate": 1.4015151515151515e-05,
      "loss": 0.4329,
      "step": 110
    },
    {
      "epoch": 2.4489795918367347,
      "grad_norm": 11.488386154174805,
      "learning_rate": 1.0227272727272729e-05,
      "loss": 0.4378,
      "step": 120
    },
    {
      "epoch": 2.6530612244897958,
      "grad_norm": 9.060242652893066,
      "learning_rate": 6.43939393939394e-06,
      "loss": 0.4341,
      "step": 130
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 8.625679016113281,
      "learning_rate": 2.651515151515152e-06,
      "loss": 0.4446,
      "step": 140
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9017857142857143,
      "eval_loss": 0.2857852876186371,
      "eval_runtime": 0.7016,
      "eval_samples_per_second": 159.624,
      "eval_steps_per_second": 5.701,
      "step": 147
    },
    {
      "epoch": 3.0,
      "step": 147,
      "total_flos": 4.6750026057231974e+17,
      "train_loss": 0.9658059327780795,
      "train_runtime": 6347.4478,
      "train_samples_per_second": 2.962,
      "train_steps_per_second": 0.023
    }
  ],
  "logging_steps": 10,
  "max_steps": 147,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4.6750026057231974e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}