{ "best_metric": 0.9772727272727273, "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-batch8/checkpoint-147", "epoch": 2.984771573604061, "eval_steps": 500, "global_step": 147, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.20304568527918782, "grad_norm": 9.754337310791016, "learning_rate": 3.3333333333333335e-05, "loss": 0.6886, "step": 10 }, { "epoch": 0.40609137055837563, "grad_norm": 13.60398006439209, "learning_rate": 4.810606060606061e-05, "loss": 0.5524, "step": 20 }, { "epoch": 0.6091370558375635, "grad_norm": 13.017239570617676, "learning_rate": 4.431818181818182e-05, "loss": 0.3516, "step": 30 }, { "epoch": 0.8121827411167513, "grad_norm": 10.370221138000488, "learning_rate": 4.053030303030303e-05, "loss": 0.2333, "step": 40 }, { "epoch": 0.9949238578680203, "eval_accuracy": 0.9659090909090909, "eval_loss": 0.07651708275079727, "eval_runtime": 1.4025, "eval_samples_per_second": 125.494, "eval_steps_per_second": 15.687, "step": 49 }, { "epoch": 1.015228426395939, "grad_norm": 22.6178035736084, "learning_rate": 3.6742424242424246e-05, "loss": 0.3668, "step": 50 }, { "epoch": 1.218274111675127, "grad_norm": 20.655942916870117, "learning_rate": 3.295454545454545e-05, "loss": 0.2372, "step": 60 }, { "epoch": 1.4213197969543148, "grad_norm": 21.389158248901367, "learning_rate": 2.916666666666667e-05, "loss": 0.1728, "step": 70 }, { "epoch": 1.6243654822335025, "grad_norm": 7.201972961425781, "learning_rate": 2.537878787878788e-05, "loss": 0.1676, "step": 80 }, { "epoch": 1.8274111675126905, "grad_norm": 15.233765602111816, "learning_rate": 2.1590909090909093e-05, "loss": 0.2171, "step": 90 }, { "epoch": 1.9898477157360406, "eval_accuracy": 0.9715909090909091, "eval_loss": 0.07618988305330276, "eval_runtime": 1.8486, "eval_samples_per_second": 95.208, "eval_steps_per_second": 11.901, "step": 98 }, { "epoch": 2.030456852791878, "grad_norm": 22.444684982299805, "learning_rate": 1.7803030303030303e-05, "loss": 0.2291, "step": 100 }, { "epoch": 2.233502538071066, "grad_norm": 17.800857543945312, "learning_rate": 1.4015151515151515e-05, "loss": 0.2027, "step": 110 }, { "epoch": 2.436548223350254, "grad_norm": 8.930273056030273, "learning_rate": 1.0227272727272729e-05, "loss": 0.1624, "step": 120 }, { "epoch": 2.6395939086294415, "grad_norm": 2.2625389099121094, "learning_rate": 6.43939393939394e-06, "loss": 0.1362, "step": 130 }, { "epoch": 2.8426395939086295, "grad_norm": 17.570310592651367, "learning_rate": 2.651515151515152e-06, "loss": 0.1713, "step": 140 }, { "epoch": 2.984771573604061, "eval_accuracy": 0.9772727272727273, "eval_loss": 0.05173008143901825, "eval_runtime": 1.4237, "eval_samples_per_second": 123.622, "eval_steps_per_second": 15.453, "step": 147 }, { "epoch": 2.984771573604061, "step": 147, "total_flos": 1.1692257218946662e+17, "train_loss": 0.2702838227051456, "train_runtime": 99.2204, "train_samples_per_second": 47.651, "train_steps_per_second": 1.482 } ], "logging_steps": 10, "max_steps": 147, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 1.1692257218946662e+17, "train_batch_size": 8, "trial_name": null, "trial_params": null }