{
"best_metric": 0.9744444444444444,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-570",
"epoch": 3.0,
"global_step": 570,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 8.771929824561403e-06,
"loss": 2.3138,
"step": 10
},
{
"epoch": 0.11,
"learning_rate": 1.7543859649122806e-05,
"loss": 2.0856,
"step": 20
},
{
"epoch": 0.16,
"learning_rate": 2.6315789473684212e-05,
"loss": 1.5983,
"step": 30
},
{
"epoch": 0.21,
"learning_rate": 3.508771929824561e-05,
"loss": 0.9471,
"step": 40
},
{
"epoch": 0.26,
"learning_rate": 4.3859649122807014e-05,
"loss": 0.5908,
"step": 50
},
{
"epoch": 0.32,
"learning_rate": 4.970760233918128e-05,
"loss": 0.5356,
"step": 60
},
{
"epoch": 0.37,
"learning_rate": 4.8732943469785574e-05,
"loss": 0.4591,
"step": 70
},
{
"epoch": 0.42,
"learning_rate": 4.7758284600389865e-05,
"loss": 0.4138,
"step": 80
},
{
"epoch": 0.47,
"learning_rate": 4.678362573099415e-05,
"loss": 0.3727,
"step": 90
},
{
"epoch": 0.53,
"learning_rate": 4.580896686159844e-05,
"loss": 0.3707,
"step": 100
},
{
"epoch": 0.58,
"learning_rate": 4.483430799220273e-05,
"loss": 0.3398,
"step": 110
},
{
"epoch": 0.63,
"learning_rate": 4.3859649122807014e-05,
"loss": 0.2767,
"step": 120
},
{
"epoch": 0.68,
"learning_rate": 4.2884990253411305e-05,
"loss": 0.2777,
"step": 130
},
{
"epoch": 0.74,
"learning_rate": 4.1910331384015596e-05,
"loss": 0.273,
"step": 140
},
{
"epoch": 0.79,
"learning_rate": 4.093567251461988e-05,
"loss": 0.3176,
"step": 150
},
{
"epoch": 0.84,
"learning_rate": 3.996101364522417e-05,
"loss": 0.2268,
"step": 160
},
{
"epoch": 0.89,
"learning_rate": 3.898635477582846e-05,
"loss": 0.2457,
"step": 170
},
{
"epoch": 0.95,
"learning_rate": 3.8011695906432746e-05,
"loss": 0.2458,
"step": 180
},
{
"epoch": 1.0,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.2621,
"step": 190
},
{
"epoch": 1.0,
"eval_accuracy": 0.9629629629629629,
"eval_loss": 0.10834366083145142,
"eval_runtime": 16.0607,
"eval_samples_per_second": 168.112,
"eval_steps_per_second": 5.292,
"step": 190
},
{
"epoch": 1.05,
"learning_rate": 3.606237816764133e-05,
"loss": 0.2176,
"step": 200
},
{
"epoch": 1.11,
"learning_rate": 3.508771929824561e-05,
"loss": 0.2057,
"step": 210
},
{
"epoch": 1.16,
"learning_rate": 3.41130604288499e-05,
"loss": 0.2165,
"step": 220
},
{
"epoch": 1.21,
"learning_rate": 3.313840155945419e-05,
"loss": 0.2012,
"step": 230
},
{
"epoch": 1.26,
"learning_rate": 3.216374269005848e-05,
"loss": 0.1931,
"step": 240
},
{
"epoch": 1.32,
"learning_rate": 3.118908382066277e-05,
"loss": 0.2,
"step": 250
},
{
"epoch": 1.37,
"learning_rate": 3.0214424951267055e-05,
"loss": 0.1827,
"step": 260
},
{
"epoch": 1.42,
"learning_rate": 2.9239766081871346e-05,
"loss": 0.2031,
"step": 270
},
{
"epoch": 1.47,
"learning_rate": 2.8265107212475634e-05,
"loss": 0.1992,
"step": 280
},
{
"epoch": 1.53,
"learning_rate": 2.729044834307992e-05,
"loss": 0.2035,
"step": 290
},
{
"epoch": 1.58,
"learning_rate": 2.6315789473684212e-05,
"loss": 0.1952,
"step": 300
},
{
"epoch": 1.63,
"learning_rate": 2.53411306042885e-05,
"loss": 0.2532,
"step": 310
},
{
"epoch": 1.68,
"learning_rate": 2.4366471734892787e-05,
"loss": 0.2098,
"step": 320
},
{
"epoch": 1.74,
"learning_rate": 2.3391812865497074e-05,
"loss": 0.172,
"step": 330
},
{
"epoch": 1.79,
"learning_rate": 2.2417153996101365e-05,
"loss": 0.1518,
"step": 340
},
{
"epoch": 1.84,
"learning_rate": 2.1442495126705653e-05,
"loss": 0.1614,
"step": 350
},
{
"epoch": 1.89,
"learning_rate": 2.046783625730994e-05,
"loss": 0.1936,
"step": 360
},
{
"epoch": 1.95,
"learning_rate": 1.949317738791423e-05,
"loss": 0.1667,
"step": 370
},
{
"epoch": 2.0,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.1769,
"step": 380
},
{
"epoch": 2.0,
"eval_accuracy": 0.95,
"eval_loss": 0.14253325760364532,
"eval_runtime": 16.2149,
"eval_samples_per_second": 166.513,
"eval_steps_per_second": 5.242,
"step": 380
},
{
"epoch": 2.05,
"learning_rate": 1.7543859649122806e-05,
"loss": 0.1545,
"step": 390
},
{
"epoch": 2.11,
"learning_rate": 1.6569200779727097e-05,
"loss": 0.1769,
"step": 400
},
{
"epoch": 2.16,
"learning_rate": 1.5594541910331384e-05,
"loss": 0.1611,
"step": 410
},
{
"epoch": 2.21,
"learning_rate": 1.4619883040935673e-05,
"loss": 0.1153,
"step": 420
},
{
"epoch": 2.26,
"learning_rate": 1.364522417153996e-05,
"loss": 0.1731,
"step": 430
},
{
"epoch": 2.32,
"learning_rate": 1.267056530214425e-05,
"loss": 0.1631,
"step": 440
},
{
"epoch": 2.37,
"learning_rate": 1.1695906432748537e-05,
"loss": 0.1348,
"step": 450
},
{
"epoch": 2.42,
"learning_rate": 1.0721247563352826e-05,
"loss": 0.1372,
"step": 460
},
{
"epoch": 2.47,
"learning_rate": 9.746588693957115e-06,
"loss": 0.1877,
"step": 470
},
{
"epoch": 2.53,
"learning_rate": 8.771929824561403e-06,
"loss": 0.1202,
"step": 480
},
{
"epoch": 2.58,
"learning_rate": 7.797270955165692e-06,
"loss": 0.1422,
"step": 490
},
{
"epoch": 2.63,
"learning_rate": 6.82261208576998e-06,
"loss": 0.1623,
"step": 500
},
{
"epoch": 2.68,
"learning_rate": 5.8479532163742686e-06,
"loss": 0.1338,
"step": 510
},
{
"epoch": 2.74,
"learning_rate": 4.873294346978558e-06,
"loss": 0.1458,
"step": 520
},
{
"epoch": 2.79,
"learning_rate": 3.898635477582846e-06,
"loss": 0.1411,
"step": 530
},
{
"epoch": 2.84,
"learning_rate": 2.9239766081871343e-06,
"loss": 0.1422,
"step": 540
},
{
"epoch": 2.89,
"learning_rate": 1.949317738791423e-06,
"loss": 0.1415,
"step": 550
},
{
"epoch": 2.95,
"learning_rate": 9.746588693957115e-07,
"loss": 0.1471,
"step": 560
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 0.1343,
"step": 570
},
{
"epoch": 3.0,
"eval_accuracy": 0.9744444444444444,
"eval_loss": 0.06644155085086823,
"eval_runtime": 16.0914,
"eval_samples_per_second": 167.791,
"eval_steps_per_second": 5.282,
"step": 570
},
{
"epoch": 3.0,
"step": 570,
"total_flos": 1.8124066505760768e+18,
"train_loss": 0.3275571787566469,
"train_runtime": 973.9194,
"train_samples_per_second": 74.852,
"train_steps_per_second": 0.585
}
],
"max_steps": 570,
"num_train_epochs": 3,
"total_flos": 1.8124066505760768e+18,
"trial_name": null,
"trial_params": null
}