{
"best_metric": 0.9732785200411099,
"best_model_checkpoint": "swinv2-large-patch4-window12to16-192to256-22kto1k-ft-finetuned-eurosat-50/checkpoint-549",
"epoch": 10.0,
"global_step": 610,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"learning_rate": 3.278688524590164e-07,
"loss": 2.1255,
"step": 10
},
{
"epoch": 0.33,
"learning_rate": 6.557377049180328e-07,
"loss": 2.045,
"step": 20
},
{
"epoch": 0.49,
"learning_rate": 9.836065573770493e-07,
"loss": 2.0287,
"step": 30
},
{
"epoch": 0.66,
"learning_rate": 1.3114754098360657e-06,
"loss": 2.0054,
"step": 40
},
{
"epoch": 0.82,
"learning_rate": 1.6393442622950819e-06,
"loss": 1.9418,
"step": 50
},
{
"epoch": 0.98,
"learning_rate": 1.9672131147540985e-06,
"loss": 1.9035,
"step": 60
},
{
"epoch": 1.0,
"eval_accuracy": 0.27132579650565264,
"eval_loss": 1.8945504426956177,
"eval_runtime": 12.371,
"eval_samples_per_second": 78.652,
"eval_steps_per_second": 4.931,
"step": 61
},
{
"epoch": 1.15,
"learning_rate": 2.295081967213115e-06,
"loss": 1.8717,
"step": 70
},
{
"epoch": 1.31,
"learning_rate": 2.6229508196721314e-06,
"loss": 1.8202,
"step": 80
},
{
"epoch": 1.48,
"learning_rate": 2.9508196721311478e-06,
"loss": 1.7631,
"step": 90
},
{
"epoch": 1.64,
"learning_rate": 3.2786885245901638e-06,
"loss": 1.7051,
"step": 100
},
{
"epoch": 1.8,
"learning_rate": 3.6065573770491806e-06,
"loss": 1.6242,
"step": 110
},
{
"epoch": 1.97,
"learning_rate": 3.934426229508197e-06,
"loss": 1.4731,
"step": 120
},
{
"epoch": 2.0,
"eval_accuracy": 0.5560123329907503,
"eval_loss": 1.2931360006332397,
"eval_runtime": 12.3906,
"eval_samples_per_second": 78.527,
"eval_steps_per_second": 4.923,
"step": 122
},
{
"epoch": 2.13,
"learning_rate": 4.2622950819672135e-06,
"loss": 1.3791,
"step": 130
},
{
"epoch": 2.3,
"learning_rate": 4.59016393442623e-06,
"loss": 1.2578,
"step": 140
},
{
"epoch": 2.46,
"learning_rate": 4.918032786885246e-06,
"loss": 1.1893,
"step": 150
},
{
"epoch": 2.62,
"learning_rate": 5.245901639344263e-06,
"loss": 1.037,
"step": 160
},
{
"epoch": 2.79,
"learning_rate": 5.573770491803278e-06,
"loss": 1.0718,
"step": 170
},
{
"epoch": 2.95,
"learning_rate": 5.9016393442622956e-06,
"loss": 0.9549,
"step": 180
},
{
"epoch": 3.0,
"eval_accuracy": 0.6998972250770812,
"eval_loss": 0.7530122399330139,
"eval_runtime": 12.2864,
"eval_samples_per_second": 79.193,
"eval_steps_per_second": 4.965,
"step": 183
},
{
"epoch": 3.11,
"learning_rate": 6.229508196721312e-06,
"loss": 1.0102,
"step": 190
},
{
"epoch": 3.28,
"learning_rate": 6.5573770491803276e-06,
"loss": 0.8888,
"step": 200
},
{
"epoch": 3.44,
"learning_rate": 6.885245901639345e-06,
"loss": 0.8389,
"step": 210
},
{
"epoch": 3.61,
"learning_rate": 7.213114754098361e-06,
"loss": 0.8072,
"step": 220
},
{
"epoch": 3.77,
"learning_rate": 7.540983606557377e-06,
"loss": 0.748,
"step": 230
},
{
"epoch": 3.93,
"learning_rate": 7.868852459016394e-06,
"loss": 0.7375,
"step": 240
},
{
"epoch": 4.0,
"eval_accuracy": 0.8129496402877698,
"eval_loss": 0.4988514482975006,
"eval_runtime": 12.1672,
"eval_samples_per_second": 79.969,
"eval_steps_per_second": 5.013,
"step": 244
},
{
"epoch": 4.1,
"learning_rate": 8.19672131147541e-06,
"loss": 0.7001,
"step": 250
},
{
"epoch": 4.26,
"learning_rate": 8.524590163934427e-06,
"loss": 0.6759,
"step": 260
},
{
"epoch": 4.43,
"learning_rate": 8.852459016393443e-06,
"loss": 0.6793,
"step": 270
},
{
"epoch": 4.59,
"learning_rate": 9.18032786885246e-06,
"loss": 0.6572,
"step": 280
},
{
"epoch": 4.75,
"learning_rate": 9.508196721311476e-06,
"loss": 0.6086,
"step": 290
},
{
"epoch": 4.92,
"learning_rate": 9.836065573770493e-06,
"loss": 0.615,
"step": 300
},
{
"epoch": 5.0,
"eval_accuracy": 0.8746145940390545,
"eval_loss": 0.3544515371322632,
"eval_runtime": 12.2633,
"eval_samples_per_second": 79.343,
"eval_steps_per_second": 4.974,
"step": 305
},
{
"epoch": 5.08,
"learning_rate": 9.836065573770493e-06,
"loss": 0.5495,
"step": 310
},
{
"epoch": 5.25,
"learning_rate": 9.508196721311476e-06,
"loss": 0.5625,
"step": 320
},
{
"epoch": 5.41,
"learning_rate": 9.18032786885246e-06,
"loss": 0.5274,
"step": 330
},
{
"epoch": 5.57,
"learning_rate": 8.852459016393443e-06,
"loss": 0.4914,
"step": 340
},
{
"epoch": 5.74,
"learning_rate": 8.524590163934427e-06,
"loss": 0.3984,
"step": 350
},
{
"epoch": 5.9,
"learning_rate": 8.19672131147541e-06,
"loss": 0.4751,
"step": 360
},
{
"epoch": 6.0,
"eval_accuracy": 0.9167523124357657,
"eval_loss": 0.2399262934923172,
"eval_runtime": 12.1531,
"eval_samples_per_second": 80.062,
"eval_steps_per_second": 5.019,
"step": 366
},
{
"epoch": 6.07,
"learning_rate": 7.868852459016394e-06,
"loss": 0.4427,
"step": 370
},
{
"epoch": 6.23,
"learning_rate": 7.540983606557377e-06,
"loss": 0.3884,
"step": 380
},
{
"epoch": 6.39,
"learning_rate": 7.213114754098361e-06,
"loss": 0.3789,
"step": 390
},
{
"epoch": 6.56,
"learning_rate": 6.885245901639345e-06,
"loss": 0.4063,
"step": 400
},
{
"epoch": 6.72,
"learning_rate": 6.5573770491803276e-06,
"loss": 0.3784,
"step": 410
},
{
"epoch": 6.89,
"learning_rate": 6.229508196721312e-06,
"loss": 0.3778,
"step": 420
},
{
"epoch": 7.0,
"eval_accuracy": 0.9558067831449126,
"eval_loss": 0.16275165975093842,
"eval_runtime": 12.3058,
"eval_samples_per_second": 79.068,
"eval_steps_per_second": 4.957,
"step": 427
},
{
"epoch": 7.05,
"learning_rate": 5.9016393442622956e-06,
"loss": 0.3681,
"step": 430
},
{
"epoch": 7.21,
"learning_rate": 5.573770491803278e-06,
"loss": 0.3397,
"step": 440
},
{
"epoch": 7.38,
"learning_rate": 5.245901639344263e-06,
"loss": 0.3429,
"step": 450
},
{
"epoch": 7.54,
"learning_rate": 4.918032786885246e-06,
"loss": 0.3384,
"step": 460
},
{
"epoch": 7.7,
"learning_rate": 4.59016393442623e-06,
"loss": 0.3018,
"step": 470
},
{
"epoch": 7.87,
"learning_rate": 4.2622950819672135e-06,
"loss": 0.3054,
"step": 480
},
{
"epoch": 8.0,
"eval_accuracy": 0.9619732785200411,
"eval_loss": 0.12017930299043655,
"eval_runtime": 12.2634,
"eval_samples_per_second": 79.342,
"eval_steps_per_second": 4.974,
"step": 488
},
{
"epoch": 8.03,
"learning_rate": 3.934426229508197e-06,
"loss": 0.3419,
"step": 490
},
{
"epoch": 8.2,
"learning_rate": 3.6065573770491806e-06,
"loss": 0.3126,
"step": 500
},
{
"epoch": 8.36,
"learning_rate": 3.2786885245901638e-06,
"loss": 0.2932,
"step": 510
},
{
"epoch": 8.52,
"learning_rate": 2.9508196721311478e-06,
"loss": 0.2949,
"step": 520
},
{
"epoch": 8.69,
"learning_rate": 2.6229508196721314e-06,
"loss": 0.327,
"step": 530
},
{
"epoch": 8.85,
"learning_rate": 2.295081967213115e-06,
"loss": 0.2787,
"step": 540
},
{
"epoch": 9.0,
"eval_accuracy": 0.9732785200411099,
"eval_loss": 0.0988311693072319,
"eval_runtime": 12.0955,
"eval_samples_per_second": 80.443,
"eval_steps_per_second": 5.043,
"step": 549
},
{
"epoch": 9.02,
"learning_rate": 1.9672131147540985e-06,
"loss": 0.2774,
"step": 550
},
{
"epoch": 9.18,
"learning_rate": 1.6393442622950819e-06,
"loss": 0.2333,
"step": 560
},
{
"epoch": 9.34,
"learning_rate": 1.3114754098360657e-06,
"loss": 0.2573,
"step": 570
},
{
"epoch": 9.51,
"learning_rate": 9.836065573770493e-07,
"loss": 0.2715,
"step": 580
},
{
"epoch": 9.67,
"learning_rate": 6.557377049180328e-07,
"loss": 0.2168,
"step": 590
},
{
"epoch": 9.84,
"learning_rate": 3.278688524590164e-07,
"loss": 0.3328,
"step": 600
},
{
"epoch": 10.0,
"learning_rate": 0.0,
"loss": 0.253,
"step": 610
},
{
"epoch": 10.0,
"eval_accuracy": 0.9722507708119219,
"eval_loss": 0.09661855548620224,
"eval_runtime": 12.2414,
"eval_samples_per_second": 79.484,
"eval_steps_per_second": 4.983,
"step": 610
},
{
"epoch": 10.0,
"step": 610,
"total_flos": 8.960328656655483e+18,
"train_loss": 0.807003553578111,
"train_runtime": 6787.6894,
"train_samples_per_second": 5.732,
"train_steps_per_second": 0.09
}
],
"max_steps": 610,
"num_train_epochs": 10,
"total_flos": 8.960328656655483e+18,
"trial_name": null,
"trial_params": null
}