{
  "best_metric": 0.7625570776255708,
  "best_model_checkpoint": "convnext-small-224-finetuned-piid/checkpoint-225",
  "epoch": 19.51219512195122,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.25e-05,
      "loss": 1.3915,
      "step": 10
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.5e-05,
      "loss": 1.3405,
      "step": 20
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.4155251141552511,
      "eval_loss": 1.3200979232788086,
      "eval_runtime": 1.9477,
      "eval_samples_per_second": 112.44,
      "eval_steps_per_second": 14.376,
      "step": 20
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.2711,
      "step": 30
    },
    {
      "epoch": 1.95,
      "learning_rate": 5e-05,
      "loss": 1.1715,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5707762557077626,
      "eval_loss": 1.1361732482910156,
      "eval_runtime": 2.0636,
      "eval_samples_per_second": 106.124,
      "eval_steps_per_second": 13.568,
      "step": 41
    },
    {
      "epoch": 2.44,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 1.0171,
      "step": 50
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.9231,
      "step": 60
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.6438356164383562,
      "eval_loss": 0.925467848777771,
      "eval_runtime": 1.9933,
      "eval_samples_per_second": 109.87,
      "eval_steps_per_second": 14.047,
      "step": 61
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.7776,
      "step": 70
    },
    {
      "epoch": 3.9,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.7128,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6986301369863014,
      "eval_loss": 0.7557970881462097,
      "eval_runtime": 2.1609,
      "eval_samples_per_second": 101.346,
      "eval_steps_per_second": 12.958,
      "step": 82
    },
    {
      "epoch": 4.39,
      "learning_rate": 4.305555555555556e-05,
      "loss": 0.6816,
      "step": 90
    },
    {
      "epoch": 4.88,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.6204,
      "step": 100
    },
    {
      "epoch": 4.98,
      "eval_accuracy": 0.7534246575342466,
      "eval_loss": 0.705605685710907,
      "eval_runtime": 2.1297,
      "eval_samples_per_second": 102.833,
      "eval_steps_per_second": 13.148,
      "step": 102
    },
    {
      "epoch": 5.37,
      "learning_rate": 4.027777777777778e-05,
      "loss": 0.5559,
      "step": 110
    },
    {
      "epoch": 5.85,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.5322,
      "step": 120
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7397260273972602,
      "eval_loss": 0.6609848141670227,
      "eval_runtime": 2.1711,
      "eval_samples_per_second": 100.869,
      "eval_steps_per_second": 12.896,
      "step": 123
    },
    {
      "epoch": 6.34,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.4642,
      "step": 130
    },
    {
      "epoch": 6.83,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.4403,
      "step": 140
    },
    {
      "epoch": 6.98,
      "eval_accuracy": 0.7442922374429224,
      "eval_loss": 0.6638585329055786,
      "eval_runtime": 2.0236,
      "eval_samples_per_second": 108.225,
      "eval_steps_per_second": 13.837,
      "step": 143
    },
    {
      "epoch": 7.32,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.4919,
      "step": 150
    },
    {
      "epoch": 7.8,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.4388,
      "step": 160
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.730593607305936,
      "eval_loss": 0.647175669670105,
      "eval_runtime": 2.0571,
      "eval_samples_per_second": 106.462,
      "eval_steps_per_second": 13.612,
      "step": 164
    },
    {
      "epoch": 8.29,
      "learning_rate": 3.194444444444444e-05,
      "loss": 0.3633,
      "step": 170
    },
    {
      "epoch": 8.78,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.3901,
      "step": 180
    },
    {
      "epoch": 8.98,
      "eval_accuracy": 0.7351598173515982,
      "eval_loss": 0.6684472560882568,
      "eval_runtime": 2.0915,
      "eval_samples_per_second": 104.711,
      "eval_steps_per_second": 13.388,
      "step": 184
    },
    {
      "epoch": 9.27,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.3475,
      "step": 190
    },
    {
      "epoch": 9.76,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.4202,
      "step": 200
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7397260273972602,
      "eval_loss": 0.5934084057807922,
      "eval_runtime": 2.2374,
      "eval_samples_per_second": 97.88,
      "eval_steps_per_second": 12.514,
      "step": 205
    },
    {
      "epoch": 10.24,
      "learning_rate": 2.6388888888888892e-05,
      "loss": 0.3367,
      "step": 210
    },
    {
      "epoch": 10.73,
      "learning_rate": 2.5e-05,
      "loss": 0.3784,
      "step": 220
    },
    {
      "epoch": 10.98,
      "eval_accuracy": 0.7625570776255708,
      "eval_loss": 0.5651447772979736,
      "eval_runtime": 2.0962,
      "eval_samples_per_second": 104.476,
      "eval_steps_per_second": 13.358,
      "step": 225
    },
    {
      "epoch": 11.22,
      "learning_rate": 2.361111111111111e-05,
      "loss": 0.2953,
      "step": 230
    },
    {
      "epoch": 11.71,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.2973,
      "step": 240
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.6438630819320679,
      "eval_runtime": 2.0644,
      "eval_samples_per_second": 106.084,
      "eval_steps_per_second": 13.563,
      "step": 246
    },
    {
      "epoch": 12.2,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.2978,
      "step": 250
    },
    {
      "epoch": 12.68,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.3614,
      "step": 260
    },
    {
      "epoch": 12.98,
      "eval_accuracy": 0.7534246575342466,
      "eval_loss": 0.5844311714172363,
      "eval_runtime": 2.0548,
      "eval_samples_per_second": 106.581,
      "eval_steps_per_second": 13.627,
      "step": 266
    },
    {
      "epoch": 13.17,
      "learning_rate": 1.8055555555555555e-05,
      "loss": 0.3089,
      "step": 270
    },
    {
      "epoch": 13.66,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.2795,
      "step": 280
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.730593607305936,
      "eval_loss": 0.6014636158943176,
      "eval_runtime": 2.1523,
      "eval_samples_per_second": 101.752,
      "eval_steps_per_second": 13.009,
      "step": 287
    },
    {
      "epoch": 14.15,
      "learning_rate": 1.527777777777778e-05,
      "loss": 0.3327,
      "step": 290
    },
    {
      "epoch": 14.63,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.2825,
      "step": 300
    },
    {
      "epoch": 14.98,
      "eval_accuracy": 0.7625570776255708,
      "eval_loss": 0.6030529141426086,
      "eval_runtime": 2.0651,
      "eval_samples_per_second": 106.049,
      "eval_steps_per_second": 13.559,
      "step": 307
    },
    {
      "epoch": 15.12,
      "learning_rate": 1.25e-05,
      "loss": 0.2496,
      "step": 310
    },
    {
      "epoch": 15.61,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.2364,
      "step": 320
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.7534246575342466,
      "eval_loss": 0.6248753666877747,
      "eval_runtime": 2.0546,
      "eval_samples_per_second": 106.592,
      "eval_steps_per_second": 13.628,
      "step": 328
    },
    {
      "epoch": 16.1,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.2543,
      "step": 330
    },
    {
      "epoch": 16.59,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.2162,
      "step": 340
    },
    {
      "epoch": 16.98,
      "eval_accuracy": 0.7625570776255708,
      "eval_loss": 0.6248474717140198,
      "eval_runtime": 2.1992,
      "eval_samples_per_second": 99.582,
      "eval_steps_per_second": 12.732,
      "step": 348
    },
    {
      "epoch": 17.07,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.2588,
      "step": 350
    },
    {
      "epoch": 17.56,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.2455,
      "step": 360
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.7488584474885844,
      "eval_loss": 0.6152719855308533,
      "eval_runtime": 2.0686,
      "eval_samples_per_second": 105.868,
      "eval_steps_per_second": 13.536,
      "step": 369
    },
    {
      "epoch": 18.05,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.3108,
      "step": 370
    },
    {
      "epoch": 18.54,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.2314,
      "step": 380
    },
    {
      "epoch": 18.98,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.6113206148147583,
      "eval_runtime": 2.1977,
      "eval_samples_per_second": 99.648,
      "eval_steps_per_second": 12.74,
      "step": 389
    },
    {
      "epoch": 19.02,
      "learning_rate": 1.388888888888889e-06,
      "loss": 0.2958,
      "step": 390
    },
    {
      "epoch": 19.51,
      "learning_rate": 0.0,
      "loss": 0.248,
      "step": 400
    },
    {
      "epoch": 19.51,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.6130588054656982,
      "eval_runtime": 2.18,
      "eval_samples_per_second": 100.46,
      "eval_steps_per_second": 12.844,
      "step": 400
    },
    {
      "epoch": 19.51,
      "step": 400,
      "total_flos": 5.6921285921491354e+17,
      "train_loss": 0.5017214328050613,
      "train_runtime": 493.2709,
      "train_samples_per_second": 26.476,
      "train_steps_per_second": 0.811
    }
  ],
  "logging_steps": 10,
  "max_steps": 400,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 5.6921285921491354e+17,
  "trial_name": null,
  "trial_params": null
}