{
"best_metric": 0.9715909090909091,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-batch8-nocrop/checkpoint-147",
"epoch": 2.984771573604061,
"eval_steps": 500,
"global_step": 147,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20304568527918782,
"grad_norm": 6.709445953369141,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.6831,
"step": 10
},
{
"epoch": 0.40609137055837563,
"grad_norm": 36.66501235961914,
"learning_rate": 4.810606060606061e-05,
"loss": 0.5802,
"step": 20
},
{
"epoch": 0.6091370558375635,
"grad_norm": 7.574514865875244,
"learning_rate": 4.431818181818182e-05,
"loss": 0.3748,
"step": 30
},
{
"epoch": 0.8121827411167513,
"grad_norm": 12.18085765838623,
"learning_rate": 4.053030303030303e-05,
"loss": 0.2052,
"step": 40
},
{
"epoch": 0.9949238578680203,
"eval_accuracy": 0.9602272727272727,
"eval_loss": 0.15112267434597015,
"eval_runtime": 1.4305,
"eval_samples_per_second": 123.032,
"eval_steps_per_second": 15.379,
"step": 49
},
{
"epoch": 1.015228426395939,
"grad_norm": 8.17941665649414,
"learning_rate": 3.6742424242424246e-05,
"loss": 0.1755,
"step": 50
},
{
"epoch": 1.218274111675127,
"grad_norm": 7.0626115798950195,
"learning_rate": 3.295454545454545e-05,
"loss": 0.0786,
"step": 60
},
{
"epoch": 1.4213197969543148,
"grad_norm": 25.362876892089844,
"learning_rate": 2.916666666666667e-05,
"loss": 0.1441,
"step": 70
},
{
"epoch": 1.6243654822335025,
"grad_norm": 15.33166790008545,
"learning_rate": 2.537878787878788e-05,
"loss": 0.0825,
"step": 80
},
{
"epoch": 1.8274111675126905,
"grad_norm": 2.483710527420044,
"learning_rate": 2.1590909090909093e-05,
"loss": 0.0852,
"step": 90
},
{
"epoch": 1.9898477157360406,
"eval_accuracy": 0.9488636363636364,
"eval_loss": 0.1786985695362091,
"eval_runtime": 1.4267,
"eval_samples_per_second": 123.365,
"eval_steps_per_second": 15.421,
"step": 98
},
{
"epoch": 2.030456852791878,
"grad_norm": 23.29215431213379,
"learning_rate": 1.7803030303030303e-05,
"loss": 0.0657,
"step": 100
},
{
"epoch": 2.233502538071066,
"grad_norm": 4.370492458343506,
"learning_rate": 1.4015151515151515e-05,
"loss": 0.0307,
"step": 110
},
{
"epoch": 2.436548223350254,
"grad_norm": 9.500088691711426,
"learning_rate": 1.0227272727272729e-05,
"loss": 0.0276,
"step": 120
},
{
"epoch": 2.6395939086294415,
"grad_norm": 6.448863506317139,
"learning_rate": 6.43939393939394e-06,
"loss": 0.0273,
"step": 130
},
{
"epoch": 2.8426395939086295,
"grad_norm": 7.503269672393799,
"learning_rate": 2.651515151515152e-06,
"loss": 0.0336,
"step": 140
},
{
"epoch": 2.984771573604061,
"eval_accuracy": 0.9715909090909091,
"eval_loss": 0.09903481602668762,
"eval_runtime": 1.4469,
"eval_samples_per_second": 121.636,
"eval_steps_per_second": 15.205,
"step": 147
},
{
"epoch": 2.984771573604061,
"step": 147,
"total_flos": 1.1692257218946662e+17,
"train_loss": 0.1783457891065247,
"train_runtime": 84.8256,
"train_samples_per_second": 55.738,
"train_steps_per_second": 1.733
}
],
"logging_steps": 10,
"max_steps": 147,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.1692257218946662e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}