{
"best_metric": 1.040339469909668,
"best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_5e-5_5e-4_0.15/checkpoint-428",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 2140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11682242990654206,
"grad_norm": 1226482.25,
"learning_rate": 2.5e-06,
"loss": 1.5898,
"step": 25
},
{
"epoch": 0.2336448598130841,
"grad_norm": 883115.375,
"learning_rate": 5e-06,
"loss": 1.5063,
"step": 50
},
{
"epoch": 0.35046728971962615,
"grad_norm": 729111.75,
"learning_rate": 7.5e-06,
"loss": 1.4618,
"step": 75
},
{
"epoch": 0.4672897196261682,
"grad_norm": 401205.5,
"learning_rate": 1e-05,
"loss": 1.4029,
"step": 100
},
{
"epoch": 0.5841121495327103,
"grad_norm": 392407.21875,
"learning_rate": 1.25e-05,
"loss": 1.3853,
"step": 125
},
{
"epoch": 0.7009345794392523,
"grad_norm": 296861.40625,
"learning_rate": 1.5e-05,
"loss": 1.3715,
"step": 150
},
{
"epoch": 0.8177570093457944,
"grad_norm": 644126.5,
"learning_rate": 1.75e-05,
"loss": 1.344,
"step": 175
},
{
"epoch": 0.9345794392523364,
"grad_norm": 561771.25,
"learning_rate": 2e-05,
"loss": 1.3404,
"step": 200
},
{
"epoch": 1.0,
"eval_accuracy": 0.39800995024875624,
"eval_loss": 1.4185985326766968,
"eval_runtime": 15.0252,
"eval_samples_per_second": 80.265,
"eval_steps_per_second": 2.529,
"step": 214
},
{
"epoch": 1.0514018691588785,
"grad_norm": 450167.59375,
"learning_rate": 2.25e-05,
"loss": 1.2961,
"step": 225
},
{
"epoch": 1.1682242990654206,
"grad_norm": 275605.5625,
"learning_rate": 2.5e-05,
"loss": 1.2836,
"step": 250
},
{
"epoch": 1.2850467289719627,
"grad_norm": 417004.96875,
"learning_rate": 2.7500000000000004e-05,
"loss": 1.2374,
"step": 275
},
{
"epoch": 1.4018691588785046,
"grad_norm": 313505.09375,
"learning_rate": 3e-05,
"loss": 1.2688,
"step": 300
},
{
"epoch": 1.5186915887850467,
"grad_norm": 345996.875,
"learning_rate": 3.2500000000000004e-05,
"loss": 1.2501,
"step": 325
},
{
"epoch": 1.6355140186915889,
"grad_norm": 439750.0,
"learning_rate": 3.5e-05,
"loss": 1.2455,
"step": 350
},
{
"epoch": 1.7523364485981308,
"grad_norm": 192930.5625,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.2433,
"step": 375
},
{
"epoch": 1.8691588785046729,
"grad_norm": 403192.8125,
"learning_rate": 4e-05,
"loss": 1.1843,
"step": 400
},
{
"epoch": 1.985981308411215,
"grad_norm": 312608.1875,
"learning_rate": 4.25e-05,
"loss": 1.1899,
"step": 425
},
{
"epoch": 2.0,
"eval_accuracy": 0.6442786069651741,
"eval_loss": 1.040339469909668,
"eval_runtime": 15.0124,
"eval_samples_per_second": 80.333,
"eval_steps_per_second": 2.531,
"step": 428
},
{
"epoch": 2.102803738317757,
"grad_norm": 565702.9375,
"learning_rate": 4.5e-05,
"loss": 1.1768,
"step": 450
},
{
"epoch": 2.2196261682242993,
"grad_norm": 228019.625,
"learning_rate": 4.75e-05,
"loss": 1.3692,
"step": 475
},
{
"epoch": 2.336448598130841,
"grad_norm": 97234.1796875,
"learning_rate": 5e-05,
"loss": 1.4012,
"step": 500
},
{
"epoch": 2.453271028037383,
"grad_norm": 198352.921875,
"learning_rate": 4.923780487804878e-05,
"loss": 1.392,
"step": 525
},
{
"epoch": 2.5700934579439254,
"grad_norm": 124283.6953125,
"learning_rate": 4.847560975609756e-05,
"loss": 1.3919,
"step": 550
},
{
"epoch": 2.6869158878504673,
"grad_norm": 120723.7578125,
"learning_rate": 4.771341463414634e-05,
"loss": 1.3871,
"step": 575
},
{
"epoch": 2.803738317757009,
"grad_norm": 213803.59375,
"learning_rate": 4.695121951219512e-05,
"loss": 1.3944,
"step": 600
},
{
"epoch": 2.9205607476635516,
"grad_norm": 286424.5625,
"learning_rate": 4.618902439024391e-05,
"loss": 1.3962,
"step": 625
},
{
"epoch": 3.0,
"eval_accuracy": 0.2744610281923715,
"eval_loss": 1.3858356475830078,
"eval_runtime": 15.0104,
"eval_samples_per_second": 80.344,
"eval_steps_per_second": 2.532,
"step": 642
},
{
"epoch": 3.0373831775700935,
"grad_norm": 211193.328125,
"learning_rate": 4.542682926829269e-05,
"loss": 1.3901,
"step": 650
},
{
"epoch": 3.1542056074766354,
"grad_norm": 111855.8359375,
"learning_rate": 4.466463414634147e-05,
"loss": 1.3935,
"step": 675
},
{
"epoch": 3.2710280373831777,
"grad_norm": 157092.0625,
"learning_rate": 4.390243902439025e-05,
"loss": 1.3811,
"step": 700
},
{
"epoch": 3.3878504672897196,
"grad_norm": 127590.5703125,
"learning_rate": 4.314024390243903e-05,
"loss": 1.3921,
"step": 725
},
{
"epoch": 3.5046728971962615,
"grad_norm": 100796.25,
"learning_rate": 4.237804878048781e-05,
"loss": 1.3852,
"step": 750
},
{
"epoch": 3.621495327102804,
"grad_norm": 139129.4375,
"learning_rate": 4.161585365853659e-05,
"loss": 1.3856,
"step": 775
},
{
"epoch": 3.7383177570093458,
"grad_norm": 176988.171875,
"learning_rate": 4.085365853658537e-05,
"loss": 1.3844,
"step": 800
},
{
"epoch": 3.8551401869158877,
"grad_norm": 127246.34375,
"learning_rate": 4.0091463414634153e-05,
"loss": 1.3805,
"step": 825
},
{
"epoch": 3.97196261682243,
"grad_norm": 212300.828125,
"learning_rate": 3.932926829268293e-05,
"loss": 1.3914,
"step": 850
},
{
"epoch": 4.0,
"eval_accuracy": 0.2744610281923715,
"eval_loss": 1.3880783319473267,
"eval_runtime": 15.2034,
"eval_samples_per_second": 79.324,
"eval_steps_per_second": 2.499,
"step": 856
},
{
"epoch": 4.088785046728972,
"grad_norm": 61669.88671875,
"learning_rate": 3.856707317073171e-05,
"loss": 1.3905,
"step": 875
},
{
"epoch": 4.205607476635514,
"grad_norm": 223599.234375,
"learning_rate": 3.780487804878049e-05,
"loss": 1.3906,
"step": 900
},
{
"epoch": 4.322429906542056,
"grad_norm": 247944.15625,
"learning_rate": 3.704268292682927e-05,
"loss": 1.3902,
"step": 925
},
{
"epoch": 4.4392523364485985,
"grad_norm": 136334.375,
"learning_rate": 3.628048780487805e-05,
"loss": 1.3901,
"step": 950
},
{
"epoch": 4.55607476635514,
"grad_norm": 161834.765625,
"learning_rate": 3.551829268292683e-05,
"loss": 1.3842,
"step": 975
},
{
"epoch": 4.672897196261682,
"grad_norm": 160751.734375,
"learning_rate": 3.475609756097561e-05,
"loss": 1.3856,
"step": 1000
},
{
"epoch": 4.789719626168225,
"grad_norm": 152146.84375,
"learning_rate": 3.399390243902439e-05,
"loss": 1.3823,
"step": 1025
},
{
"epoch": 4.906542056074766,
"grad_norm": 193127.203125,
"learning_rate": 3.323170731707317e-05,
"loss": 1.3868,
"step": 1050
},
{
"epoch": 5.0,
"eval_accuracy": 0.27611940298507465,
"eval_loss": 1.3848323822021484,
"eval_runtime": 15.4933,
"eval_samples_per_second": 77.84,
"eval_steps_per_second": 2.453,
"step": 1070
},
{
"epoch": 5.0233644859813085,
"grad_norm": 238072.984375,
"learning_rate": 3.246951219512195e-05,
"loss": 1.3888,
"step": 1075
},
{
"epoch": 5.140186915887851,
"grad_norm": 133656.28125,
"learning_rate": 3.170731707317073e-05,
"loss": 1.3863,
"step": 1100
},
{
"epoch": 5.257009345794392,
"grad_norm": 120263.9453125,
"learning_rate": 3.094512195121951e-05,
"loss": 1.3856,
"step": 1125
},
{
"epoch": 5.373831775700935,
"grad_norm": 167475.484375,
"learning_rate": 3.0182926829268294e-05,
"loss": 1.3885,
"step": 1150
},
{
"epoch": 5.490654205607477,
"grad_norm": 197263.59375,
"learning_rate": 2.9420731707317074e-05,
"loss": 1.385,
"step": 1175
},
{
"epoch": 5.607476635514018,
"grad_norm": 145337.453125,
"learning_rate": 2.8658536585365854e-05,
"loss": 1.3807,
"step": 1200
},
{
"epoch": 5.724299065420561,
"grad_norm": 119471.03125,
"learning_rate": 2.7896341463414637e-05,
"loss": 1.3806,
"step": 1225
},
{
"epoch": 5.841121495327103,
"grad_norm": 139454.765625,
"learning_rate": 2.7134146341463417e-05,
"loss": 1.3843,
"step": 1250
},
{
"epoch": 5.957943925233645,
"grad_norm": 147728.828125,
"learning_rate": 2.6371951219512197e-05,
"loss": 1.3895,
"step": 1275
},
{
"epoch": 6.0,
"eval_accuracy": 0.2744610281923715,
"eval_loss": 1.386699914932251,
"eval_runtime": 14.8197,
"eval_samples_per_second": 81.378,
"eval_steps_per_second": 2.564,
"step": 1284
},
{
"epoch": 6.074766355140187,
"grad_norm": 217932.9375,
"learning_rate": 2.5609756097560977e-05,
"loss": 1.3858,
"step": 1300
},
{
"epoch": 6.191588785046729,
"grad_norm": 103804.140625,
"learning_rate": 2.4847560975609756e-05,
"loss": 1.3839,
"step": 1325
},
{
"epoch": 6.308411214953271,
"grad_norm": 144548.296875,
"learning_rate": 2.4085365853658536e-05,
"loss": 1.3757,
"step": 1350
},
{
"epoch": 6.425233644859813,
"grad_norm": 116841.2421875,
"learning_rate": 2.332317073170732e-05,
"loss": 1.3832,
"step": 1375
},
{
"epoch": 6.542056074766355,
"grad_norm": 60290.0859375,
"learning_rate": 2.25609756097561e-05,
"loss": 1.3866,
"step": 1400
},
{
"epoch": 6.658878504672897,
"grad_norm": 142147.796875,
"learning_rate": 2.179878048780488e-05,
"loss": 1.3812,
"step": 1425
},
{
"epoch": 6.775700934579439,
"grad_norm": 77919.5390625,
"learning_rate": 2.103658536585366e-05,
"loss": 1.3852,
"step": 1450
},
{
"epoch": 6.892523364485982,
"grad_norm": 112284.5234375,
"learning_rate": 2.0274390243902442e-05,
"loss": 1.3871,
"step": 1475
},
{
"epoch": 7.0,
"eval_accuracy": 0.2744610281923715,
"eval_loss": 1.3830299377441406,
"eval_runtime": 15.5048,
"eval_samples_per_second": 77.782,
"eval_steps_per_second": 2.451,
"step": 1498
},
{
"epoch": 7.009345794392523,
"grad_norm": 219239.40625,
"learning_rate": 1.9512195121951222e-05,
"loss": 1.3871,
"step": 1500
},
{
"epoch": 7.126168224299065,
"grad_norm": 139572.75,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.3849,
"step": 1525
},
{
"epoch": 7.242990654205608,
"grad_norm": 71637.71875,
"learning_rate": 1.798780487804878e-05,
"loss": 1.3832,
"step": 1550
},
{
"epoch": 7.359813084112149,
"grad_norm": 134367.84375,
"learning_rate": 1.722560975609756e-05,
"loss": 1.3789,
"step": 1575
},
{
"epoch": 7.4766355140186915,
"grad_norm": 146302.65625,
"learning_rate": 1.6463414634146345e-05,
"loss": 1.3838,
"step": 1600
},
{
"epoch": 7.593457943925234,
"grad_norm": 65325.31640625,
"learning_rate": 1.5701219512195124e-05,
"loss": 1.3808,
"step": 1625
},
{
"epoch": 7.710280373831775,
"grad_norm": 105734.6328125,
"learning_rate": 1.4939024390243902e-05,
"loss": 1.3811,
"step": 1650
},
{
"epoch": 7.827102803738318,
"grad_norm": 158432.8125,
"learning_rate": 1.4176829268292682e-05,
"loss": 1.3782,
"step": 1675
},
{
"epoch": 7.94392523364486,
"grad_norm": 109856.3671875,
"learning_rate": 1.3414634146341466e-05,
"loss": 1.3802,
"step": 1700
},
{
"epoch": 8.0,
"eval_accuracy": 0.29270315091210614,
"eval_loss": 1.378788709640503,
"eval_runtime": 15.2702,
"eval_samples_per_second": 78.977,
"eval_steps_per_second": 2.489,
"step": 1712
},
{
"epoch": 8.060747663551401,
"grad_norm": 131191.734375,
"learning_rate": 1.2652439024390245e-05,
"loss": 1.3796,
"step": 1725
},
{
"epoch": 8.177570093457945,
"grad_norm": 111280.453125,
"learning_rate": 1.1890243902439025e-05,
"loss": 1.3764,
"step": 1750
},
{
"epoch": 8.294392523364486,
"grad_norm": 53637.3828125,
"learning_rate": 1.1128048780487805e-05,
"loss": 1.3815,
"step": 1775
},
{
"epoch": 8.411214953271028,
"grad_norm": 67744.1640625,
"learning_rate": 1.0365853658536585e-05,
"loss": 1.3667,
"step": 1800
},
{
"epoch": 8.52803738317757,
"grad_norm": 88018.4921875,
"learning_rate": 9.603658536585366e-06,
"loss": 1.369,
"step": 1825
},
{
"epoch": 8.644859813084112,
"grad_norm": 92662.328125,
"learning_rate": 8.841463414634146e-06,
"loss": 1.3732,
"step": 1850
},
{
"epoch": 8.761682242990654,
"grad_norm": 146536.6875,
"learning_rate": 8.079268292682928e-06,
"loss": 1.3732,
"step": 1875
},
{
"epoch": 8.878504672897197,
"grad_norm": 132991.578125,
"learning_rate": 7.317073170731707e-06,
"loss": 1.3685,
"step": 1900
},
{
"epoch": 8.995327102803738,
"grad_norm": 134634.984375,
"learning_rate": 6.554878048780488e-06,
"loss": 1.3745,
"step": 1925
},
{
"epoch": 9.0,
"eval_accuracy": 0.3200663349917081,
"eval_loss": 1.367501974105835,
"eval_runtime": 15.3432,
"eval_samples_per_second": 78.602,
"eval_steps_per_second": 2.477,
"step": 1926
},
{
"epoch": 9.11214953271028,
"grad_norm": 111594.0234375,
"learning_rate": 5.792682926829269e-06,
"loss": 1.3629,
"step": 1950
},
{
"epoch": 9.228971962616823,
"grad_norm": 260942.34375,
"learning_rate": 5.030487804878049e-06,
"loss": 1.3681,
"step": 1975
},
{
"epoch": 9.345794392523365,
"grad_norm": 132676.8125,
"learning_rate": 4.26829268292683e-06,
"loss": 1.3568,
"step": 2000
},
{
"epoch": 9.462616822429906,
"grad_norm": 137239.046875,
"learning_rate": 3.5060975609756102e-06,
"loss": 1.3566,
"step": 2025
},
{
"epoch": 9.57943925233645,
"grad_norm": 167074.9375,
"learning_rate": 2.7439024390243905e-06,
"loss": 1.3556,
"step": 2050
},
{
"epoch": 9.69626168224299,
"grad_norm": 89201.1015625,
"learning_rate": 1.9817073170731707e-06,
"loss": 1.3697,
"step": 2075
},
{
"epoch": 9.813084112149532,
"grad_norm": 104514.7109375,
"learning_rate": 1.2195121951219514e-06,
"loss": 1.346,
"step": 2100
},
{
"epoch": 9.929906542056075,
"grad_norm": 137668.4375,
"learning_rate": 4.573170731707317e-07,
"loss": 1.356,
"step": 2125
},
{
"epoch": 10.0,
"eval_accuracy": 0.3383084577114428,
"eval_loss": 1.3566746711730957,
"eval_runtime": 14.6136,
"eval_samples_per_second": 82.526,
"eval_steps_per_second": 2.6,
"step": 2140
},
{
"epoch": 10.0,
"step": 2140,
"total_flos": 5.510586115727032e+19,
"train_loss": 1.3681953902556518,
"train_runtime": 1910.7588,
"train_samples_per_second": 35.745,
"train_steps_per_second": 1.12
}
],
"logging_steps": 25,
"max_steps": 2140,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.510586115727032e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}