{
"best_metric": 0.7130943536758423,
"best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_5e-5_1e-4_0.05/checkpoint-1712",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 2140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11682242990654206,
"grad_norm": 1340459.625,
"learning_rate": 2.5e-06,
"loss": 1.5852,
"step": 25
},
{
"epoch": 0.2336448598130841,
"grad_norm": 994590.0,
"learning_rate": 5e-06,
"loss": 1.5062,
"step": 50
},
{
"epoch": 0.35046728971962615,
"grad_norm": 835627.5,
"learning_rate": 7.5e-06,
"loss": 1.4702,
"step": 75
},
{
"epoch": 0.4672897196261682,
"grad_norm": 708100.8125,
"learning_rate": 1e-05,
"loss": 1.405,
"step": 100
},
{
"epoch": 0.5841121495327103,
"grad_norm": 19124280.0,
"learning_rate": 1.25e-05,
"loss": 1.3983,
"step": 125
},
{
"epoch": 0.7009345794392523,
"grad_norm": 281499.28125,
"learning_rate": 1.5e-05,
"loss": 1.3889,
"step": 150
},
{
"epoch": 0.8177570093457944,
"grad_norm": 727612.8125,
"learning_rate": 1.75e-05,
"loss": 1.3625,
"step": 175
},
{
"epoch": 0.9345794392523364,
"grad_norm": 628275.8125,
"learning_rate": 2e-05,
"loss": 1.3649,
"step": 200
},
{
"epoch": 1.0,
"eval_accuracy": 0.36401326699834163,
"eval_loss": 1.5214860439300537,
"eval_runtime": 15.5708,
"eval_samples_per_second": 77.453,
"eval_steps_per_second": 2.44,
"step": 214
},
{
"epoch": 1.0514018691588785,
"grad_norm": 456936.21875,
"learning_rate": 2.25e-05,
"loss": 1.3042,
"step": 225
},
{
"epoch": 1.1682242990654206,
"grad_norm": 314467.59375,
"learning_rate": 2.5e-05,
"loss": 1.275,
"step": 250
},
{
"epoch": 1.2850467289719627,
"grad_norm": 409308.0625,
"learning_rate": 2.7500000000000004e-05,
"loss": 1.2176,
"step": 275
},
{
"epoch": 1.4018691588785046,
"grad_norm": 343987.34375,
"learning_rate": 3e-05,
"loss": 1.2691,
"step": 300
},
{
"epoch": 1.5186915887850467,
"grad_norm": 415037.3125,
"learning_rate": 3.2500000000000004e-05,
"loss": 1.2335,
"step": 325
},
{
"epoch": 1.6355140186915889,
"grad_norm": 369134.59375,
"learning_rate": 3.5e-05,
"loss": 1.2058,
"step": 350
},
{
"epoch": 1.7523364485981308,
"grad_norm": 246889.234375,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.2045,
"step": 375
},
{
"epoch": 1.8691588785046729,
"grad_norm": 388048.875,
"learning_rate": 4e-05,
"loss": 1.143,
"step": 400
},
{
"epoch": 1.985981308411215,
"grad_norm": 442224.96875,
"learning_rate": 4.25e-05,
"loss": 1.1297,
"step": 425
},
{
"epoch": 2.0,
"eval_accuracy": 0.6003316749585407,
"eval_loss": 1.0013911724090576,
"eval_runtime": 15.593,
"eval_samples_per_second": 77.343,
"eval_steps_per_second": 2.437,
"step": 428
},
{
"epoch": 2.102803738317757,
"grad_norm": 518648.0,
"learning_rate": 4.5e-05,
"loss": 1.1438,
"step": 450
},
{
"epoch": 2.2196261682242993,
"grad_norm": 496547.5625,
"learning_rate": 4.75e-05,
"loss": 1.1246,
"step": 475
},
{
"epoch": 2.336448598130841,
"grad_norm": 306726.9375,
"learning_rate": 5e-05,
"loss": 1.1061,
"step": 500
},
{
"epoch": 2.453271028037383,
"grad_norm": 327747.03125,
"learning_rate": 4.923780487804878e-05,
"loss": 1.1146,
"step": 525
},
{
"epoch": 2.5700934579439254,
"grad_norm": 250996.6875,
"learning_rate": 4.847560975609756e-05,
"loss": 1.0954,
"step": 550
},
{
"epoch": 2.6869158878504673,
"grad_norm": 292352.34375,
"learning_rate": 4.771341463414634e-05,
"loss": 1.129,
"step": 575
},
{
"epoch": 2.803738317757009,
"grad_norm": 299764.5,
"learning_rate": 4.695121951219512e-05,
"loss": 1.0878,
"step": 600
},
{
"epoch": 2.9205607476635516,
"grad_norm": 263247.34375,
"learning_rate": 4.618902439024391e-05,
"loss": 1.0881,
"step": 625
},
{
"epoch": 3.0,
"eval_accuracy": 0.6558872305140961,
"eval_loss": 0.9018123745918274,
"eval_runtime": 15.6284,
"eval_samples_per_second": 77.167,
"eval_steps_per_second": 2.431,
"step": 642
},
{
"epoch": 3.0373831775700935,
"grad_norm": 281239.8125,
"learning_rate": 4.542682926829269e-05,
"loss": 1.094,
"step": 650
},
{
"epoch": 3.1542056074766354,
"grad_norm": 263740.625,
"learning_rate": 4.466463414634147e-05,
"loss": 1.036,
"step": 675
},
{
"epoch": 3.2710280373831777,
"grad_norm": 345132.46875,
"learning_rate": 4.390243902439025e-05,
"loss": 1.0542,
"step": 700
},
{
"epoch": 3.3878504672897196,
"grad_norm": 281706.03125,
"learning_rate": 4.314024390243903e-05,
"loss": 1.0712,
"step": 725
},
{
"epoch": 3.5046728971962615,
"grad_norm": 256964.125,
"learning_rate": 4.237804878048781e-05,
"loss": 1.0417,
"step": 750
},
{
"epoch": 3.621495327102804,
"grad_norm": 426164.21875,
"learning_rate": 4.161585365853659e-05,
"loss": 1.0363,
"step": 775
},
{
"epoch": 3.7383177570093458,
"grad_norm": 366963.625,
"learning_rate": 4.085365853658537e-05,
"loss": 1.0382,
"step": 800
},
{
"epoch": 3.8551401869158877,
"grad_norm": 314130.34375,
"learning_rate": 4.0091463414634153e-05,
"loss": 1.0255,
"step": 825
},
{
"epoch": 3.97196261682243,
"grad_norm": 343246.59375,
"learning_rate": 3.932926829268293e-05,
"loss": 1.0065,
"step": 850
},
{
"epoch": 4.0,
"eval_accuracy": 0.599502487562189,
"eval_loss": 0.9687954187393188,
"eval_runtime": 15.5027,
"eval_samples_per_second": 77.793,
"eval_steps_per_second": 2.451,
"step": 856
},
{
"epoch": 4.088785046728972,
"grad_norm": 355268.8125,
"learning_rate": 3.856707317073171e-05,
"loss": 0.9995,
"step": 875
},
{
"epoch": 4.205607476635514,
"grad_norm": 266058.53125,
"learning_rate": 3.780487804878049e-05,
"loss": 0.9972,
"step": 900
},
{
"epoch": 4.322429906542056,
"grad_norm": 447425.875,
"learning_rate": 3.704268292682927e-05,
"loss": 1.004,
"step": 925
},
{
"epoch": 4.4392523364485985,
"grad_norm": 382122.4375,
"learning_rate": 3.628048780487805e-05,
"loss": 0.967,
"step": 950
},
{
"epoch": 4.55607476635514,
"grad_norm": 379657.3125,
"learning_rate": 3.551829268292683e-05,
"loss": 0.9769,
"step": 975
},
{
"epoch": 4.672897196261682,
"grad_norm": 281210.5625,
"learning_rate": 3.475609756097561e-05,
"loss": 0.9521,
"step": 1000
},
{
"epoch": 4.789719626168225,
"grad_norm": 337339.375,
"learning_rate": 3.399390243902439e-05,
"loss": 0.9654,
"step": 1025
},
{
"epoch": 4.906542056074766,
"grad_norm": 227255.0625,
"learning_rate": 3.323170731707317e-05,
"loss": 1.0028,
"step": 1050
},
{
"epoch": 5.0,
"eval_accuracy": 0.7014925373134329,
"eval_loss": 0.8239747285842896,
"eval_runtime": 15.7023,
"eval_samples_per_second": 76.804,
"eval_steps_per_second": 2.42,
"step": 1070
},
{
"epoch": 5.0233644859813085,
"grad_norm": 396188.9375,
"learning_rate": 3.246951219512195e-05,
"loss": 1.002,
"step": 1075
},
{
"epoch": 5.140186915887851,
"grad_norm": 331960.46875,
"learning_rate": 3.170731707317073e-05,
"loss": 0.9421,
"step": 1100
},
{
"epoch": 5.257009345794392,
"grad_norm": 390978.8125,
"learning_rate": 3.094512195121951e-05,
"loss": 0.9338,
"step": 1125
},
{
"epoch": 5.373831775700935,
"grad_norm": 321306.375,
"learning_rate": 3.0182926829268294e-05,
"loss": 0.9425,
"step": 1150
},
{
"epoch": 5.490654205607477,
"grad_norm": 363200.09375,
"learning_rate": 2.9420731707317074e-05,
"loss": 0.9054,
"step": 1175
},
{
"epoch": 5.607476635514018,
"grad_norm": 315464.1875,
"learning_rate": 2.8658536585365854e-05,
"loss": 0.9164,
"step": 1200
},
{
"epoch": 5.724299065420561,
"grad_norm": 257387.84375,
"learning_rate": 2.7896341463414637e-05,
"loss": 0.9579,
"step": 1225
},
{
"epoch": 5.841121495327103,
"grad_norm": 268010.53125,
"learning_rate": 2.7134146341463417e-05,
"loss": 0.9108,
"step": 1250
},
{
"epoch": 5.957943925233645,
"grad_norm": 313754.125,
"learning_rate": 2.6371951219512197e-05,
"loss": 0.9225,
"step": 1275
},
{
"epoch": 6.0,
"eval_accuracy": 0.7520729684908789,
"eval_loss": 0.735476553440094,
"eval_runtime": 15.6884,
"eval_samples_per_second": 76.872,
"eval_steps_per_second": 2.422,
"step": 1284
},
{
"epoch": 6.074766355140187,
"grad_norm": 319647.28125,
"learning_rate": 2.5609756097560977e-05,
"loss": 0.8836,
"step": 1300
},
{
"epoch": 6.191588785046729,
"grad_norm": 440299.0,
"learning_rate": 2.4847560975609756e-05,
"loss": 0.8731,
"step": 1325
},
{
"epoch": 6.308411214953271,
"grad_norm": 294800.8125,
"learning_rate": 2.4085365853658536e-05,
"loss": 0.8843,
"step": 1350
},
{
"epoch": 6.425233644859813,
"grad_norm": 390740.28125,
"learning_rate": 2.332317073170732e-05,
"loss": 0.8199,
"step": 1375
},
{
"epoch": 6.542056074766355,
"grad_norm": 393681.1875,
"learning_rate": 2.25609756097561e-05,
"loss": 0.8497,
"step": 1400
},
{
"epoch": 6.658878504672897,
"grad_norm": 378513.21875,
"learning_rate": 2.179878048780488e-05,
"loss": 0.8445,
"step": 1425
},
{
"epoch": 6.775700934579439,
"grad_norm": 330039.0625,
"learning_rate": 2.103658536585366e-05,
"loss": 0.8562,
"step": 1450
},
{
"epoch": 6.892523364485982,
"grad_norm": 318586.625,
"learning_rate": 2.0274390243902442e-05,
"loss": 0.8522,
"step": 1475
},
{
"epoch": 7.0,
"eval_accuracy": 0.746268656716418,
"eval_loss": 0.76929771900177,
"eval_runtime": 15.5157,
"eval_samples_per_second": 77.728,
"eval_steps_per_second": 2.449,
"step": 1498
},
{
"epoch": 7.009345794392523,
"grad_norm": 331889.5,
"learning_rate": 1.9512195121951222e-05,
"loss": 0.8529,
"step": 1500
},
{
"epoch": 7.126168224299065,
"grad_norm": 381441.40625,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.7812,
"step": 1525
},
{
"epoch": 7.242990654205608,
"grad_norm": 248858.21875,
"learning_rate": 1.798780487804878e-05,
"loss": 0.8792,
"step": 1550
},
{
"epoch": 7.359813084112149,
"grad_norm": 339380.375,
"learning_rate": 1.722560975609756e-05,
"loss": 0.8191,
"step": 1575
},
{
"epoch": 7.4766355140186915,
"grad_norm": 363421.21875,
"learning_rate": 1.6463414634146345e-05,
"loss": 0.7828,
"step": 1600
},
{
"epoch": 7.593457943925234,
"grad_norm": 332095.78125,
"learning_rate": 1.5701219512195124e-05,
"loss": 0.8328,
"step": 1625
},
{
"epoch": 7.710280373831775,
"grad_norm": 267183.625,
"learning_rate": 1.4939024390243902e-05,
"loss": 0.8255,
"step": 1650
},
{
"epoch": 7.827102803738318,
"grad_norm": 463742.25,
"learning_rate": 1.4176829268292682e-05,
"loss": 0.792,
"step": 1675
},
{
"epoch": 7.94392523364486,
"grad_norm": 254509.796875,
"learning_rate": 1.3414634146341466e-05,
"loss": 0.821,
"step": 1700
},
{
"epoch": 8.0,
"eval_accuracy": 0.7678275290215588,
"eval_loss": 0.7130943536758423,
"eval_runtime": 15.4772,
"eval_samples_per_second": 77.921,
"eval_steps_per_second": 2.455,
"step": 1712
},
{
"epoch": 8.060747663551401,
"grad_norm": 432679.84375,
"learning_rate": 1.2652439024390245e-05,
"loss": 0.7849,
"step": 1725
},
{
"epoch": 8.177570093457945,
"grad_norm": 361022.125,
"learning_rate": 1.1890243902439025e-05,
"loss": 0.7447,
"step": 1750
},
{
"epoch": 8.294392523364486,
"grad_norm": 416379.84375,
"learning_rate": 1.1128048780487805e-05,
"loss": 0.7898,
"step": 1775
},
{
"epoch": 8.411214953271028,
"grad_norm": 386266.90625,
"learning_rate": 1.0365853658536585e-05,
"loss": 0.742,
"step": 1800
},
{
"epoch": 8.52803738317757,
"grad_norm": 321598.21875,
"learning_rate": 9.603658536585366e-06,
"loss": 0.7681,
"step": 1825
},
{
"epoch": 8.644859813084112,
"grad_norm": 386202.53125,
"learning_rate": 8.841463414634146e-06,
"loss": 0.7725,
"step": 1850
},
{
"epoch": 8.761682242990654,
"grad_norm": 319017.71875,
"learning_rate": 8.079268292682928e-06,
"loss": 0.7394,
"step": 1875
},
{
"epoch": 8.878504672897197,
"grad_norm": 388873.65625,
"learning_rate": 7.317073170731707e-06,
"loss": 0.7435,
"step": 1900
},
{
"epoch": 8.995327102803738,
"grad_norm": 302018.6875,
"learning_rate": 6.554878048780488e-06,
"loss": 0.735,
"step": 1925
},
{
"epoch": 9.0,
"eval_accuracy": 0.7761194029850746,
"eval_loss": 0.7315632104873657,
"eval_runtime": 15.8083,
"eval_samples_per_second": 76.289,
"eval_steps_per_second": 2.404,
"step": 1926
},
{
"epoch": 9.11214953271028,
"grad_norm": 400649.4375,
"learning_rate": 5.792682926829269e-06,
"loss": 0.6635,
"step": 1950
},
{
"epoch": 9.228971962616823,
"grad_norm": 330898.75,
"learning_rate": 5.030487804878049e-06,
"loss": 0.7556,
"step": 1975
},
{
"epoch": 9.345794392523365,
"grad_norm": 298238.21875,
"learning_rate": 4.26829268292683e-06,
"loss": 0.6982,
"step": 2000
},
{
"epoch": 9.462616822429906,
"grad_norm": 244765.453125,
"learning_rate": 3.5060975609756102e-06,
"loss": 0.657,
"step": 2025
},
{
"epoch": 9.57943925233645,
"grad_norm": 357814.375,
"learning_rate": 2.7439024390243905e-06,
"loss": 0.7641,
"step": 2050
},
{
"epoch": 9.69626168224299,
"grad_norm": 500304.03125,
"learning_rate": 1.9817073170731707e-06,
"loss": 0.7561,
"step": 2075
},
{
"epoch": 9.813084112149532,
"grad_norm": 457811.4375,
"learning_rate": 1.2195121951219514e-06,
"loss": 0.7183,
"step": 2100
},
{
"epoch": 9.929906542056075,
"grad_norm": 392788.8125,
"learning_rate": 4.573170731707317e-07,
"loss": 0.7123,
"step": 2125
},
{
"epoch": 10.0,
"eval_accuracy": 0.7777777777777778,
"eval_loss": 0.7300788760185242,
"eval_runtime": 15.6127,
"eval_samples_per_second": 77.245,
"eval_steps_per_second": 2.434,
"step": 2140
},
{
"epoch": 10.0,
"step": 2140,
"total_flos": 5.510586115727032e+19,
"train_loss": 0.9844673860852964,
"train_runtime": 1946.7718,
"train_samples_per_second": 35.084,
"train_steps_per_second": 1.099
}
],
"logging_steps": 25,
"max_steps": 2140,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.510586115727032e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}