{
"best_metric": 0.8097881078720093,
"best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_1e-5_5e-4_0.05/checkpoint-1926",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 2140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11682242990654206,
"grad_norm": 1344286.0,
"learning_rate": 5.000000000000001e-07,
"loss": 1.5975,
"step": 25
},
{
"epoch": 0.2336448598130841,
"grad_norm": 1232636.5,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.5637,
"step": 50
},
{
"epoch": 0.35046728971962615,
"grad_norm": 1367527.875,
"learning_rate": 1.5e-06,
"loss": 1.5906,
"step": 75
},
{
"epoch": 0.4672897196261682,
"grad_norm": 1119946.625,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5244,
"step": 100
},
{
"epoch": 0.5841121495327103,
"grad_norm": 957303.1875,
"learning_rate": 2.5e-06,
"loss": 1.4616,
"step": 125
},
{
"epoch": 0.7009345794392523,
"grad_norm": 554852.125,
"learning_rate": 3e-06,
"loss": 1.4292,
"step": 150
},
{
"epoch": 0.8177570093457944,
"grad_norm": 931890.375,
"learning_rate": 3.5e-06,
"loss": 1.3964,
"step": 175
},
{
"epoch": 0.9345794392523364,
"grad_norm": 709493.4375,
"learning_rate": 4.000000000000001e-06,
"loss": 1.4037,
"step": 200
},
{
"epoch": 1.0,
"eval_accuracy": 0.2728026533996683,
"eval_loss": 1.3978267908096313,
"eval_runtime": 14.8259,
"eval_samples_per_second": 81.344,
"eval_steps_per_second": 2.563,
"step": 214
},
{
"epoch": 1.0514018691588785,
"grad_norm": 470790.5,
"learning_rate": 4.5e-06,
"loss": 1.3872,
"step": 225
},
{
"epoch": 1.1682242990654206,
"grad_norm": 423176.3125,
"learning_rate": 5e-06,
"loss": 1.3614,
"step": 250
},
{
"epoch": 1.2850467289719627,
"grad_norm": 504318.0,
"learning_rate": 5.500000000000001e-06,
"loss": 1.3493,
"step": 275
},
{
"epoch": 1.4018691588785046,
"grad_norm": 467144.0625,
"learning_rate": 6e-06,
"loss": 1.3507,
"step": 300
},
{
"epoch": 1.5186915887850467,
"grad_norm": 794406.875,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.3447,
"step": 325
},
{
"epoch": 1.6355140186915889,
"grad_norm": 600493.1875,
"learning_rate": 7e-06,
"loss": 1.3342,
"step": 350
},
{
"epoch": 1.7523364485981308,
"grad_norm": 503615.8125,
"learning_rate": 7.500000000000001e-06,
"loss": 1.3125,
"step": 375
},
{
"epoch": 1.8691588785046729,
"grad_norm": 897004.6875,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2663,
"step": 400
},
{
"epoch": 1.985981308411215,
"grad_norm": 731026.0625,
"learning_rate": 8.5e-06,
"loss": 1.2477,
"step": 425
},
{
"epoch": 2.0,
"eval_accuracy": 0.494195688225539,
"eval_loss": 1.1721693277359009,
"eval_runtime": 14.8464,
"eval_samples_per_second": 81.232,
"eval_steps_per_second": 2.56,
"step": 428
},
{
"epoch": 2.102803738317757,
"grad_norm": 834097.25,
"learning_rate": 9e-06,
"loss": 1.2213,
"step": 450
},
{
"epoch": 2.2196261682242993,
"grad_norm": 589023.0625,
"learning_rate": 9.5e-06,
"loss": 1.1893,
"step": 475
},
{
"epoch": 2.336448598130841,
"grad_norm": 431312.71875,
"learning_rate": 1e-05,
"loss": 1.2029,
"step": 500
},
{
"epoch": 2.453271028037383,
"grad_norm": 615542.5625,
"learning_rate": 9.847560975609756e-06,
"loss": 1.1831,
"step": 525
},
{
"epoch": 2.5700934579439254,
"grad_norm": 450194.375,
"learning_rate": 9.695121951219513e-06,
"loss": 1.1495,
"step": 550
},
{
"epoch": 2.6869158878504673,
"grad_norm": 484475.25,
"learning_rate": 9.542682926829268e-06,
"loss": 1.1538,
"step": 575
},
{
"epoch": 2.803738317757009,
"grad_norm": 506196.09375,
"learning_rate": 9.390243902439025e-06,
"loss": 1.16,
"step": 600
},
{
"epoch": 2.9205607476635516,
"grad_norm": 615921.1875,
"learning_rate": 9.237804878048782e-06,
"loss": 1.1297,
"step": 625
},
{
"epoch": 3.0,
"eval_accuracy": 0.5970149253731343,
"eval_loss": 0.9763943552970886,
"eval_runtime": 14.869,
"eval_samples_per_second": 81.109,
"eval_steps_per_second": 2.556,
"step": 642
},
{
"epoch": 3.0373831775700935,
"grad_norm": 449741.21875,
"learning_rate": 9.085365853658538e-06,
"loss": 1.1258,
"step": 650
},
{
"epoch": 3.1542056074766354,
"grad_norm": 588529.75,
"learning_rate": 8.932926829268293e-06,
"loss": 1.0668,
"step": 675
},
{
"epoch": 3.2710280373831777,
"grad_norm": 489126.1875,
"learning_rate": 8.78048780487805e-06,
"loss": 1.0899,
"step": 700
},
{
"epoch": 3.3878504672897196,
"grad_norm": 679644.9375,
"learning_rate": 8.628048780487805e-06,
"loss": 1.106,
"step": 725
},
{
"epoch": 3.5046728971962615,
"grad_norm": 469723.8125,
"learning_rate": 8.475609756097562e-06,
"loss": 1.0807,
"step": 750
},
{
"epoch": 3.621495327102804,
"grad_norm": 566260.4375,
"learning_rate": 8.323170731707317e-06,
"loss": 1.0741,
"step": 775
},
{
"epoch": 3.7383177570093458,
"grad_norm": 636508.4375,
"learning_rate": 8.170731707317073e-06,
"loss": 1.0789,
"step": 800
},
{
"epoch": 3.8551401869158877,
"grad_norm": 584515.9375,
"learning_rate": 8.01829268292683e-06,
"loss": 1.058,
"step": 825
},
{
"epoch": 3.97196261682243,
"grad_norm": 511375.34375,
"learning_rate": 7.865853658536587e-06,
"loss": 1.0621,
"step": 850
},
{
"epoch": 4.0,
"eval_accuracy": 0.5912106135986733,
"eval_loss": 1.0269614458084106,
"eval_runtime": 15.3968,
"eval_samples_per_second": 78.328,
"eval_steps_per_second": 2.468,
"step": 856
},
{
"epoch": 4.088785046728972,
"grad_norm": 538369.125,
"learning_rate": 7.713414634146342e-06,
"loss": 1.0596,
"step": 875
},
{
"epoch": 4.205607476635514,
"grad_norm": 557098.75,
"learning_rate": 7.560975609756098e-06,
"loss": 1.0648,
"step": 900
},
{
"epoch": 4.322429906542056,
"grad_norm": 674418.9375,
"learning_rate": 7.408536585365854e-06,
"loss": 1.0151,
"step": 925
},
{
"epoch": 4.4392523364485985,
"grad_norm": 838321.1875,
"learning_rate": 7.25609756097561e-06,
"loss": 1.0045,
"step": 950
},
{
"epoch": 4.55607476635514,
"grad_norm": 595761.5625,
"learning_rate": 7.1036585365853665e-06,
"loss": 1.0178,
"step": 975
},
{
"epoch": 4.672897196261682,
"grad_norm": 595173.625,
"learning_rate": 6.951219512195122e-06,
"loss": 1.0028,
"step": 1000
},
{
"epoch": 4.789719626168225,
"grad_norm": 766422.8125,
"learning_rate": 6.798780487804879e-06,
"loss": 1.0042,
"step": 1025
},
{
"epoch": 4.906542056074766,
"grad_norm": 432231.9375,
"learning_rate": 6.646341463414635e-06,
"loss": 1.0244,
"step": 1050
},
{
"epoch": 5.0,
"eval_accuracy": 0.6625207296849088,
"eval_loss": 0.9042626619338989,
"eval_runtime": 14.8081,
"eval_samples_per_second": 81.442,
"eval_steps_per_second": 2.566,
"step": 1070
},
{
"epoch": 5.0233644859813085,
"grad_norm": 825474.0625,
"learning_rate": 6.493902439024391e-06,
"loss": 1.0353,
"step": 1075
},
{
"epoch": 5.140186915887851,
"grad_norm": 628319.1875,
"learning_rate": 6.341463414634147e-06,
"loss": 0.9928,
"step": 1100
},
{
"epoch": 5.257009345794392,
"grad_norm": 587063.25,
"learning_rate": 6.189024390243903e-06,
"loss": 0.9825,
"step": 1125
},
{
"epoch": 5.373831775700935,
"grad_norm": 642682.375,
"learning_rate": 6.0365853658536585e-06,
"loss": 1.0014,
"step": 1150
},
{
"epoch": 5.490654205607477,
"grad_norm": 723803.125,
"learning_rate": 5.884146341463415e-06,
"loss": 0.9637,
"step": 1175
},
{
"epoch": 5.607476635514018,
"grad_norm": 466309.0625,
"learning_rate": 5.731707317073171e-06,
"loss": 0.9845,
"step": 1200
},
{
"epoch": 5.724299065420561,
"grad_norm": 562217.5,
"learning_rate": 5.579268292682928e-06,
"loss": 1.0255,
"step": 1225
},
{
"epoch": 5.841121495327103,
"grad_norm": 470339.15625,
"learning_rate": 5.426829268292684e-06,
"loss": 0.9714,
"step": 1250
},
{
"epoch": 5.957943925233645,
"grad_norm": 607116.75,
"learning_rate": 5.27439024390244e-06,
"loss": 0.9623,
"step": 1275
},
{
"epoch": 6.0,
"eval_accuracy": 0.6824212271973465,
"eval_loss": 0.8453999161720276,
"eval_runtime": 14.9395,
"eval_samples_per_second": 80.726,
"eval_steps_per_second": 2.544,
"step": 1284
},
{
"epoch": 6.074766355140187,
"grad_norm": 988413.125,
"learning_rate": 5.121951219512195e-06,
"loss": 0.9296,
"step": 1300
},
{
"epoch": 6.191588785046729,
"grad_norm": 802783.125,
"learning_rate": 4.9695121951219515e-06,
"loss": 0.9449,
"step": 1325
},
{
"epoch": 6.308411214953271,
"grad_norm": 591705.125,
"learning_rate": 4.817073170731708e-06,
"loss": 0.9578,
"step": 1350
},
{
"epoch": 6.425233644859813,
"grad_norm": 625485.125,
"learning_rate": 4.664634146341464e-06,
"loss": 0.9165,
"step": 1375
},
{
"epoch": 6.542056074766355,
"grad_norm": 682533.0,
"learning_rate": 4.51219512195122e-06,
"loss": 0.9226,
"step": 1400
},
{
"epoch": 6.658878504672897,
"grad_norm": 809783.5,
"learning_rate": 4.359756097560976e-06,
"loss": 0.9039,
"step": 1425
},
{
"epoch": 6.775700934579439,
"grad_norm": 544206.0,
"learning_rate": 4.207317073170732e-06,
"loss": 0.9374,
"step": 1450
},
{
"epoch": 6.892523364485982,
"grad_norm": 631440.5625,
"learning_rate": 4.054878048780488e-06,
"loss": 0.8835,
"step": 1475
},
{
"epoch": 7.0,
"eval_accuracy": 0.685737976782753,
"eval_loss": 0.8481377363204956,
"eval_runtime": 14.9433,
"eval_samples_per_second": 80.705,
"eval_steps_per_second": 2.543,
"step": 1498
},
{
"epoch": 7.009345794392523,
"grad_norm": 695251.375,
"learning_rate": 3.902439024390244e-06,
"loss": 0.9291,
"step": 1500
},
{
"epoch": 7.126168224299065,
"grad_norm": 729127.375,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.8883,
"step": 1525
},
{
"epoch": 7.242990654205608,
"grad_norm": 485646.09375,
"learning_rate": 3.5975609756097562e-06,
"loss": 0.9578,
"step": 1550
},
{
"epoch": 7.359813084112149,
"grad_norm": 456878.625,
"learning_rate": 3.4451219512195124e-06,
"loss": 0.9008,
"step": 1575
},
{
"epoch": 7.4766355140186915,
"grad_norm": 747060.8125,
"learning_rate": 3.292682926829269e-06,
"loss": 0.901,
"step": 1600
},
{
"epoch": 7.593457943925234,
"grad_norm": 802504.8125,
"learning_rate": 3.1402439024390246e-06,
"loss": 0.9339,
"step": 1625
},
{
"epoch": 7.710280373831775,
"grad_norm": 555262.9375,
"learning_rate": 2.9878048780487808e-06,
"loss": 0.9446,
"step": 1650
},
{
"epoch": 7.827102803738318,
"grad_norm": 1096160.5,
"learning_rate": 2.8353658536585365e-06,
"loss": 0.8785,
"step": 1675
},
{
"epoch": 7.94392523364486,
"grad_norm": 594814.5625,
"learning_rate": 2.682926829268293e-06,
"loss": 0.8853,
"step": 1700
},
{
"epoch": 8.0,
"eval_accuracy": 0.7114427860696517,
"eval_loss": 0.8122716546058655,
"eval_runtime": 15.1283,
"eval_samples_per_second": 79.718,
"eval_steps_per_second": 2.512,
"step": 1712
},
{
"epoch": 8.060747663551401,
"grad_norm": 820075.875,
"learning_rate": 2.530487804878049e-06,
"loss": 0.8793,
"step": 1725
},
{
"epoch": 8.177570093457945,
"grad_norm": 754132.3125,
"learning_rate": 2.378048780487805e-06,
"loss": 0.8494,
"step": 1750
},
{
"epoch": 8.294392523364486,
"grad_norm": 783069.0,
"learning_rate": 2.225609756097561e-06,
"loss": 0.883,
"step": 1775
},
{
"epoch": 8.411214953271028,
"grad_norm": 731905.0625,
"learning_rate": 2.073170731707317e-06,
"loss": 0.853,
"step": 1800
},
{
"epoch": 8.52803738317757,
"grad_norm": 585234.9375,
"learning_rate": 1.9207317073170733e-06,
"loss": 0.8859,
"step": 1825
},
{
"epoch": 8.644859813084112,
"grad_norm": 584208.0625,
"learning_rate": 1.7682926829268294e-06,
"loss": 0.8694,
"step": 1850
},
{
"epoch": 8.761682242990654,
"grad_norm": 686404.625,
"learning_rate": 1.6158536585365855e-06,
"loss": 0.8791,
"step": 1875
},
{
"epoch": 8.878504672897197,
"grad_norm": 625846.0625,
"learning_rate": 1.4634146341463414e-06,
"loss": 0.8576,
"step": 1900
},
{
"epoch": 8.995327102803738,
"grad_norm": 641595.8125,
"learning_rate": 1.3109756097560978e-06,
"loss": 0.868,
"step": 1925
},
{
"epoch": 9.0,
"eval_accuracy": 0.714759535655058,
"eval_loss": 0.8097881078720093,
"eval_runtime": 14.8393,
"eval_samples_per_second": 81.271,
"eval_steps_per_second": 2.561,
"step": 1926
},
{
"epoch": 9.11214953271028,
"grad_norm": 709608.5625,
"learning_rate": 1.158536585365854e-06,
"loss": 0.8316,
"step": 1950
},
{
"epoch": 9.228971962616823,
"grad_norm": 639677.75,
"learning_rate": 1.0060975609756098e-06,
"loss": 0.9073,
"step": 1975
},
{
"epoch": 9.345794392523365,
"grad_norm": 656041.0625,
"learning_rate": 8.53658536585366e-07,
"loss": 0.8598,
"step": 2000
},
{
"epoch": 9.462616822429906,
"grad_norm": 579136.0,
"learning_rate": 7.012195121951221e-07,
"loss": 0.7945,
"step": 2025
},
{
"epoch": 9.57943925233645,
"grad_norm": 787287.3125,
"learning_rate": 5.487804878048781e-07,
"loss": 0.9198,
"step": 2050
},
{
"epoch": 9.69626168224299,
"grad_norm": 813869.8125,
"learning_rate": 3.963414634146342e-07,
"loss": 0.8805,
"step": 2075
},
{
"epoch": 9.813084112149532,
"grad_norm": 734961.1875,
"learning_rate": 2.439024390243903e-07,
"loss": 0.8613,
"step": 2100
},
{
"epoch": 9.929906542056075,
"grad_norm": 704054.75,
"learning_rate": 9.146341463414634e-08,
"loss": 0.8474,
"step": 2125
},
{
"epoch": 10.0,
"eval_accuracy": 0.7072968490878938,
"eval_loss": 0.8144016265869141,
"eval_runtime": 15.0203,
"eval_samples_per_second": 80.292,
"eval_steps_per_second": 2.53,
"step": 2140
},
{
"epoch": 10.0,
"step": 2140,
"total_flos": 5.510586115727032e+19,
"train_loss": 1.0635530485171023,
"train_runtime": 1931.0211,
"train_samples_per_second": 35.37,
"train_steps_per_second": 1.108
}
],
"logging_steps": 25,
"max_steps": 2140,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.510586115727032e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
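
A minimal sketch of how the log above could be read back, assuming it is saved locally as trainer_state.json (the filename is an assumption; this is the standard state file written by the Hugging Face Trainer). It separates the end-of-epoch evaluation records from the per-25-step training logs and prints the best checkpoint info, using only the standard library.

```python
import json

# Assumption: the JSON document above has been saved as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Records containing "eval_loss" are the end-of-epoch evaluation entries;
# the remaining entries are the per-25-step training logs and the final summary.
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

for r in eval_records:
    print(f"epoch {r['epoch']:>4}: "
          f"eval_loss={r['eval_loss']:.4f}  "
          f"eval_accuracy={r['eval_accuracy']:.4f}")

print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
```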