{
  "best_metric": 0.7853881278538812,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-piid/checkpoint-123",
  "epoch": 19.51219512195122,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.25e-05,
      "loss": 1.3499,
      "step": 10
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.5e-05,
      "loss": 1.2088,
      "step": 20
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.4520547945205479,
      "eval_loss": 1.1661090850830078,
      "eval_runtime": 1.4325,
      "eval_samples_per_second": 152.877,
      "eval_steps_per_second": 19.546,
      "step": 20
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.9539,
      "step": 30
    },
    {
      "epoch": 1.95,
      "learning_rate": 5e-05,
      "loss": 0.7545,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6073059360730594,
      "eval_loss": 0.8865624070167542,
      "eval_runtime": 1.3434,
      "eval_samples_per_second": 163.02,
      "eval_steps_per_second": 20.843,
      "step": 41
    },
    {
      "epoch": 2.44,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.6405,
      "step": 50
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.6281,
      "step": 60
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.684931506849315,
      "eval_loss": 0.7788348197937012,
      "eval_runtime": 1.5459,
      "eval_samples_per_second": 141.667,
      "eval_steps_per_second": 18.113,
      "step": 61
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.5403,
      "step": 70
    },
    {
      "epoch": 3.9,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.5939,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7397260273972602,
      "eval_loss": 0.644279420375824,
      "eval_runtime": 1.4721,
      "eval_samples_per_second": 148.762,
      "eval_steps_per_second": 19.02,
      "step": 82
    },
    {
      "epoch": 4.39,
      "learning_rate": 4.305555555555556e-05,
      "loss": 0.5419,
      "step": 90
    },
    {
      "epoch": 4.88,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.5254,
      "step": 100
    },
    {
      "epoch": 4.98,
      "eval_accuracy": 0.7808219178082192,
      "eval_loss": 0.5097259879112244,
      "eval_runtime": 1.552,
      "eval_samples_per_second": 141.107,
      "eval_steps_per_second": 18.041,
      "step": 102
    },
    {
      "epoch": 5.37,
      "learning_rate": 4.027777777777778e-05,
      "loss": 0.5295,
      "step": 110
    },
    {
      "epoch": 5.85,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.5583,
      "step": 120
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7853881278538812,
      "eval_loss": 0.5714804530143738,
      "eval_runtime": 1.328,
      "eval_samples_per_second": 164.91,
      "eval_steps_per_second": 21.084,
      "step": 123
    },
    {
      "epoch": 6.34,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.4476,
      "step": 130
    },
    {
      "epoch": 6.83,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.3463,
      "step": 140
    },
    {
      "epoch": 6.98,
      "eval_accuracy": 0.7351598173515982,
      "eval_loss": 0.6163173317909241,
      "eval_runtime": 1.4078,
      "eval_samples_per_second": 155.566,
      "eval_steps_per_second": 19.89,
      "step": 143
    },
    {
      "epoch": 7.32,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.4396,
      "step": 150
    },
    {
      "epoch": 7.8,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.3878,
      "step": 160
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7671232876712328,
      "eval_loss": 0.5671306848526001,
      "eval_runtime": 1.3088,
      "eval_samples_per_second": 167.328,
      "eval_steps_per_second": 21.394,
      "step": 164
    },
    {
      "epoch": 8.29,
      "learning_rate": 3.194444444444444e-05,
      "loss": 0.3628,
      "step": 170
    },
    {
      "epoch": 8.78,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.3653,
      "step": 180
    },
    {
      "epoch": 8.98,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.5689936876296997,
      "eval_runtime": 1.3448,
      "eval_samples_per_second": 162.852,
      "eval_steps_per_second": 20.821,
      "step": 184
    },
    {
      "epoch": 9.27,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.3083,
      "step": 190
    },
    {
      "epoch": 9.76,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3529,
      "step": 200
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.5939760208129883,
      "eval_runtime": 1.3291,
      "eval_samples_per_second": 164.77,
      "eval_steps_per_second": 21.067,
      "step": 205
    },
    {
      "epoch": 10.24,
      "learning_rate": 2.6388888888888892e-05,
      "loss": 0.3382,
      "step": 210
    },
    {
      "epoch": 10.73,
      "learning_rate": 2.5e-05,
      "loss": 0.301,
      "step": 220
    },
    {
      "epoch": 10.98,
      "eval_accuracy": 0.7625570776255708,
      "eval_loss": 0.6303488612174988,
      "eval_runtime": 1.3837,
      "eval_samples_per_second": 158.272,
      "eval_steps_per_second": 20.236,
      "step": 225
    },
    {
      "epoch": 11.22,
      "learning_rate": 2.361111111111111e-05,
      "loss": 0.2672,
      "step": 230
    },
    {
      "epoch": 11.71,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.2639,
      "step": 240
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.776255707762557,
      "eval_loss": 0.5725071430206299,
      "eval_runtime": 1.3678,
      "eval_samples_per_second": 160.117,
      "eval_steps_per_second": 20.472,
      "step": 246
    },
    {
      "epoch": 12.2,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.2627,
      "step": 250
    },
    {
      "epoch": 12.68,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.2847,
      "step": 260
    },
    {
      "epoch": 12.98,
      "eval_accuracy": 0.771689497716895,
      "eval_loss": 0.6279605031013489,
      "eval_runtime": 1.3676,
      "eval_samples_per_second": 160.133,
      "eval_steps_per_second": 20.474,
      "step": 266
    },
    {
      "epoch": 13.17,
      "learning_rate": 1.8055555555555555e-05,
      "loss": 0.2534,
      "step": 270
    },
    {
      "epoch": 13.66,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.25,
      "step": 280
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.771689497716895,
      "eval_loss": 0.5975117683410645,
      "eval_runtime": 1.3451,
      "eval_samples_per_second": 162.816,
      "eval_steps_per_second": 20.817,
      "step": 287
    },
    {
      "epoch": 14.15,
      "learning_rate": 1.527777777777778e-05,
      "loss": 0.3332,
      "step": 290
    },
    {
      "epoch": 14.63,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.2472,
      "step": 300
    },
    {
      "epoch": 14.98,
      "eval_accuracy": 0.7671232876712328,
      "eval_loss": 0.5821006894111633,
      "eval_runtime": 1.3394,
      "eval_samples_per_second": 163.506,
      "eval_steps_per_second": 20.905,
      "step": 307
    },
    {
      "epoch": 15.12,
      "learning_rate": 1.25e-05,
      "loss": 0.2185,
      "step": 310
    },
    {
      "epoch": 15.61,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.1676,
      "step": 320
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.7625570776255708,
      "eval_loss": 0.6456273198127747,
      "eval_runtime": 1.3379,
      "eval_samples_per_second": 163.684,
      "eval_steps_per_second": 20.928,
      "step": 328
    },
    {
      "epoch": 16.1,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.1898,
      "step": 330
    },
    {
      "epoch": 16.59,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.1327,
      "step": 340
    },
    {
      "epoch": 16.98,
      "eval_accuracy": 0.7671232876712328,
      "eval_loss": 0.6117277145385742,
      "eval_runtime": 1.3936,
      "eval_samples_per_second": 157.15,
      "eval_steps_per_second": 20.092,
      "step": 348
    },
    {
      "epoch": 17.07,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.2238,
      "step": 350
    },
    {
      "epoch": 17.56,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.1977,
      "step": 360
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.7488584474885844,
      "eval_loss": 0.6987997889518738,
      "eval_runtime": 1.3339,
      "eval_samples_per_second": 164.179,
      "eval_steps_per_second": 20.991,
      "step": 369
    },
    {
      "epoch": 18.05,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.3006,
      "step": 370
    },
    {
      "epoch": 18.54,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.1602,
      "step": 380
    },
    {
      "epoch": 18.98,
      "eval_accuracy": 0.7671232876712328,
      "eval_loss": 0.6448192000389099,
      "eval_runtime": 1.3619,
      "eval_samples_per_second": 160.799,
      "eval_steps_per_second": 20.559,
      "step": 389
    },
    {
      "epoch": 19.02,
      "learning_rate": 1.388888888888889e-06,
      "loss": 0.231,
      "step": 390
    },
    {
      "epoch": 19.51,
      "learning_rate": 0.0,
      "loss": 0.1785,
      "step": 400
    },
    {
      "epoch": 19.51,
      "eval_accuracy": 0.771689497716895,
      "eval_loss": 0.633254885673523,
      "eval_runtime": 1.3431,
      "eval_samples_per_second": 163.06,
      "eval_steps_per_second": 20.848,
      "step": 400
    },
    {
      "epoch": 19.51,
      "step": 400,
      "total_flos": 3.167575685961523e+17,
      "train_loss": 0.4159336131811142,
      "train_runtime": 271.0168,
      "train_samples_per_second": 48.189,
      "train_steps_per_second": 1.476
    }
  ],
  "logging_steps": 10,
  "max_steps": 400,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 3.167575685961523e+17,
  "trial_name": null,
  "trial_params": null
}