{
"best_metric": 0.9716500553709856,
"best_model_checkpoint": "beit-large-patch16-224-finetuned-LungCancer-Classification-LC25000-AH-40-30-30-Shuffled-3rd/checkpoint-465",
"epoch": 4.96,
"eval_steps": 500,
"global_step": 465,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 1.072961373390558e-05,
"loss": 1.469,
"step": 5
},
{
"epoch": 0.11,
"learning_rate": 2.145922746781116e-05,
"loss": 0.6329,
"step": 10
},
{
"epoch": 0.16,
"learning_rate": 3.2188841201716734e-05,
"loss": 0.2408,
"step": 15
},
{
"epoch": 0.21,
"learning_rate": 4.291845493562232e-05,
"loss": 0.2413,
"step": 20
},
{
"epoch": 0.27,
"learning_rate": 5.36480686695279e-05,
"loss": 0.1766,
"step": 25
},
{
"epoch": 0.32,
"learning_rate": 6.437768240343347e-05,
"loss": 0.1375,
"step": 30
},
{
"epoch": 0.37,
"learning_rate": 7.510729613733907e-05,
"loss": 0.5266,
"step": 35
},
{
"epoch": 0.43,
"learning_rate": 8.583690987124464e-05,
"loss": 0.192,
"step": 40
},
{
"epoch": 0.48,
"learning_rate": 9.656652360515021e-05,
"loss": 0.2199,
"step": 45
},
{
"epoch": 0.53,
"learning_rate": 0.0001072961373390558,
"loss": 0.1779,
"step": 50
},
{
"epoch": 0.59,
"learning_rate": 0.00011802575107296138,
"loss": 0.1609,
"step": 55
},
{
"epoch": 0.64,
"learning_rate": 0.00012875536480686693,
"loss": 0.1332,
"step": 60
},
{
"epoch": 0.69,
"learning_rate": 0.00013948497854077252,
"loss": 0.122,
"step": 65
},
{
"epoch": 0.75,
"learning_rate": 0.00015021459227467814,
"loss": 0.1725,
"step": 70
},
{
"epoch": 0.8,
"learning_rate": 0.0001609442060085837,
"loss": 0.2223,
"step": 75
},
{
"epoch": 0.85,
"learning_rate": 0.00017167381974248928,
"loss": 0.2739,
"step": 80
},
{
"epoch": 0.91,
"learning_rate": 0.00018240343347639484,
"loss": 0.2776,
"step": 85
},
{
"epoch": 0.96,
"learning_rate": 0.00019313304721030043,
"loss": 0.3123,
"step": 90
},
{
"epoch": 0.99,
"eval_accuracy": 0.7769656699889258,
"eval_loss": 0.7103577852249146,
"eval_runtime": 1628.9879,
"eval_samples_per_second": 2.772,
"eval_steps_per_second": 0.174,
"step": 93
},
{
"epoch": 1.01,
"learning_rate": 0.00020386266094420602,
"loss": 0.387,
"step": 95
},
{
"epoch": 1.07,
"learning_rate": 0.0002145922746781116,
"loss": 0.1535,
"step": 100
},
{
"epoch": 1.12,
"learning_rate": 0.00022532188841201716,
"loss": 0.2921,
"step": 105
},
{
"epoch": 1.17,
"learning_rate": 0.00023605150214592275,
"loss": 0.1437,
"step": 110
},
{
"epoch": 1.23,
"learning_rate": 0.0002467811158798283,
"loss": 0.2731,
"step": 115
},
{
"epoch": 1.28,
"learning_rate": 0.00025751072961373387,
"loss": 0.2365,
"step": 120
},
{
"epoch": 1.33,
"learning_rate": 0.0002682403433476395,
"loss": 0.1692,
"step": 125
},
{
"epoch": 1.39,
"learning_rate": 0.00027896995708154504,
"loss": 0.3372,
"step": 130
},
{
"epoch": 1.44,
"learning_rate": 0.00028969957081545066,
"loss": 0.2857,
"step": 135
},
{
"epoch": 1.49,
"learning_rate": 0.00030042918454935627,
"loss": 0.2505,
"step": 140
},
{
"epoch": 1.55,
"learning_rate": 0.00031115879828326183,
"loss": 0.2508,
"step": 145
},
{
"epoch": 1.6,
"learning_rate": 0.0003218884120171674,
"loss": 0.172,
"step": 150
},
{
"epoch": 1.65,
"learning_rate": 0.00033261802575107295,
"loss": 0.6477,
"step": 155
},
{
"epoch": 1.71,
"learning_rate": 0.00034334763948497857,
"loss": 0.3342,
"step": 160
},
{
"epoch": 1.76,
"learning_rate": 0.0003540772532188841,
"loss": 0.2687,
"step": 165
},
{
"epoch": 1.81,
"learning_rate": 0.0003648068669527897,
"loss": 0.1948,
"step": 170
},
{
"epoch": 1.87,
"learning_rate": 0.00037553648068669525,
"loss": 0.2025,
"step": 175
},
{
"epoch": 1.92,
"learning_rate": 0.00038626609442060086,
"loss": 0.3164,
"step": 180
},
{
"epoch": 1.97,
"learning_rate": 0.0003969957081545064,
"loss": 0.261,
"step": 185
},
{
"epoch": 1.99,
"eval_accuracy": 0.8172757475083057,
"eval_loss": 0.4562360942363739,
"eval_runtime": 54.0995,
"eval_samples_per_second": 83.457,
"eval_steps_per_second": 5.231,
"step": 187
},
{
"epoch": 2.03,
"learning_rate": 0.00040772532188841203,
"loss": 0.4598,
"step": 190
},
{
"epoch": 2.08,
"learning_rate": 0.00041845493562231765,
"loss": 0.668,
"step": 195
},
{
"epoch": 2.13,
"learning_rate": 0.0004291845493562232,
"loss": 0.3559,
"step": 200
},
{
"epoch": 2.19,
"learning_rate": 0.00043991416309012877,
"loss": 0.3004,
"step": 205
},
{
"epoch": 2.24,
"learning_rate": 0.0004506437768240343,
"loss": 0.3047,
"step": 210
},
{
"epoch": 2.29,
"learning_rate": 0.00046137339055793994,
"loss": 0.2776,
"step": 215
},
{
"epoch": 2.35,
"learning_rate": 0.0004721030042918455,
"loss": 0.3193,
"step": 220
},
{
"epoch": 2.4,
"learning_rate": 0.00048283261802575106,
"loss": 0.25,
"step": 225
},
{
"epoch": 2.45,
"learning_rate": 0.0004935622317596566,
"loss": 0.2472,
"step": 230
},
{
"epoch": 2.51,
"learning_rate": 0.0004956896551724138,
"loss": 0.2343,
"step": 235
},
{
"epoch": 2.56,
"learning_rate": 0.0004849137931034483,
"loss": 0.1439,
"step": 240
},
{
"epoch": 2.61,
"learning_rate": 0.00047413793103448276,
"loss": 0.2208,
"step": 245
},
{
"epoch": 2.67,
"learning_rate": 0.00046336206896551726,
"loss": 0.2339,
"step": 250
},
{
"epoch": 2.72,
"learning_rate": 0.0004525862068965517,
"loss": 0.1704,
"step": 255
},
{
"epoch": 2.77,
"learning_rate": 0.0004418103448275862,
"loss": 0.2218,
"step": 260
},
{
"epoch": 2.83,
"learning_rate": 0.0004310344827586207,
"loss": 0.1341,
"step": 265
},
{
"epoch": 2.88,
"learning_rate": 0.0004202586206896552,
"loss": 0.2466,
"step": 270
},
{
"epoch": 2.93,
"learning_rate": 0.00040948275862068967,
"loss": 0.2176,
"step": 275
},
{
"epoch": 2.99,
"learning_rate": 0.00039870689655172416,
"loss": 0.2012,
"step": 280
},
{
"epoch": 3.0,
"eval_accuracy": 0.9508305647840531,
"eval_loss": 0.12906160950660706,
"eval_runtime": 54.2749,
"eval_samples_per_second": 83.188,
"eval_steps_per_second": 5.214,
"step": 281
},
{
"epoch": 3.04,
"learning_rate": 0.0003879310344827586,
"loss": 0.2101,
"step": 285
},
{
"epoch": 3.09,
"learning_rate": 0.0003771551724137931,
"loss": 0.1814,
"step": 290
},
{
"epoch": 3.15,
"learning_rate": 0.0003663793103448276,
"loss": 0.1757,
"step": 295
},
{
"epoch": 3.2,
"learning_rate": 0.00035560344827586203,
"loss": 0.1658,
"step": 300
},
{
"epoch": 3.25,
"learning_rate": 0.0003448275862068966,
"loss": 0.1867,
"step": 305
},
{
"epoch": 3.31,
"learning_rate": 0.0003340517241379311,
"loss": 0.189,
"step": 310
},
{
"epoch": 3.36,
"learning_rate": 0.0003232758620689655,
"loss": 0.2099,
"step": 315
},
{
"epoch": 3.41,
"learning_rate": 0.0003125,
"loss": 0.1683,
"step": 320
},
{
"epoch": 3.47,
"learning_rate": 0.0003017241379310345,
"loss": 0.2641,
"step": 325
},
{
"epoch": 3.52,
"learning_rate": 0.00029094827586206894,
"loss": 0.191,
"step": 330
},
{
"epoch": 3.57,
"learning_rate": 0.00028017241379310343,
"loss": 0.2154,
"step": 335
},
{
"epoch": 3.63,
"learning_rate": 0.000269396551724138,
"loss": 0.1961,
"step": 340
},
{
"epoch": 3.68,
"learning_rate": 0.0002586206896551724,
"loss": 0.193,
"step": 345
},
{
"epoch": 3.73,
"learning_rate": 0.0002478448275862069,
"loss": 0.2128,
"step": 350
},
{
"epoch": 3.79,
"learning_rate": 0.00023706896551724138,
"loss": 0.1744,
"step": 355
},
{
"epoch": 3.84,
"learning_rate": 0.00022629310344827585,
"loss": 0.1067,
"step": 360
},
{
"epoch": 3.89,
"learning_rate": 0.00021551724137931034,
"loss": 0.1643,
"step": 365
},
{
"epoch": 3.95,
"learning_rate": 0.00020474137931034484,
"loss": 0.2163,
"step": 370
},
{
"epoch": 4.0,
"learning_rate": 0.0001939655172413793,
"loss": 0.1424,
"step": 375
},
{
"epoch": 4.0,
"eval_accuracy": 0.9508305647840531,
"eval_loss": 0.13319291174411774,
"eval_runtime": 54.0858,
"eval_samples_per_second": 83.479,
"eval_steps_per_second": 5.232,
"step": 375
},
{
"epoch": 4.05,
"learning_rate": 0.0001831896551724138,
"loss": 0.1964,
"step": 380
},
{
"epoch": 4.11,
"learning_rate": 0.0001724137931034483,
"loss": 0.179,
"step": 385
},
{
"epoch": 4.16,
"learning_rate": 0.00016163793103448276,
"loss": 0.1167,
"step": 390
},
{
"epoch": 4.21,
"learning_rate": 0.00015086206896551725,
"loss": 0.1361,
"step": 395
},
{
"epoch": 4.27,
"learning_rate": 0.00014008620689655172,
"loss": 0.1418,
"step": 400
},
{
"epoch": 4.32,
"learning_rate": 0.0001293103448275862,
"loss": 0.1664,
"step": 405
},
{
"epoch": 4.37,
"learning_rate": 0.00011853448275862069,
"loss": 0.1295,
"step": 410
},
{
"epoch": 4.43,
"learning_rate": 0.00010775862068965517,
"loss": 0.1185,
"step": 415
},
{
"epoch": 4.48,
"learning_rate": 9.698275862068965e-05,
"loss": 0.1278,
"step": 420
},
{
"epoch": 4.53,
"learning_rate": 8.620689655172414e-05,
"loss": 0.1287,
"step": 425
},
{
"epoch": 4.59,
"learning_rate": 7.543103448275863e-05,
"loss": 0.1168,
"step": 430
},
{
"epoch": 4.64,
"learning_rate": 6.46551724137931e-05,
"loss": 0.1142,
"step": 435
},
{
"epoch": 4.69,
"learning_rate": 5.3879310344827585e-05,
"loss": 0.1276,
"step": 440
},
{
"epoch": 4.75,
"learning_rate": 4.310344827586207e-05,
"loss": 0.127,
"step": 445
},
{
"epoch": 4.8,
"learning_rate": 3.232758620689655e-05,
"loss": 0.1219,
"step": 450
},
{
"epoch": 4.85,
"learning_rate": 2.1551724137931036e-05,
"loss": 0.1023,
"step": 455
},
{
"epoch": 4.91,
"learning_rate": 1.0775862068965518e-05,
"loss": 0.1329,
"step": 460
},
{
"epoch": 4.96,
"learning_rate": 0.0,
"loss": 0.0949,
"step": 465
},
{
"epoch": 4.96,
"eval_accuracy": 0.9716500553709856,
"eval_loss": 0.0727071464061737,
"eval_runtime": 54.0022,
"eval_samples_per_second": 83.608,
"eval_steps_per_second": 5.241,
"step": 465
},
{
"epoch": 4.96,
"step": 465,
"total_flos": 8.138660625246413e+18,
"train_loss": 0.23672226468722027,
"train_runtime": 4894.1951,
"train_samples_per_second": 6.114,
"train_steps_per_second": 0.095
}
],
"logging_steps": 5,
"max_steps": 465,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 8.138660625246413e+18,
"trial_name": null,
"trial_params": null
}