{ "best_metric": 1.3534746170043945, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_5e-5_1e-3_0.15/checkpoint-2140", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1226482.25, "learning_rate": 2.5e-06, "loss": 1.5898, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 883115.375, "learning_rate": 5e-06, "loss": 1.5063, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 729111.75, "learning_rate": 7.5e-06, "loss": 1.4618, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 401205.5, "learning_rate": 1e-05, "loss": 1.4029, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 392407.21875, "learning_rate": 1.25e-05, "loss": 1.3853, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 296861.40625, "learning_rate": 1.5e-05, "loss": 1.3715, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 644126.5, "learning_rate": 1.75e-05, "loss": 1.344, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 561771.25, "learning_rate": 2e-05, "loss": 1.3404, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.39800995024875624, "eval_loss": 1.4185985326766968, "eval_runtime": 15.0137, "eval_samples_per_second": 80.326, "eval_steps_per_second": 2.531, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 450167.59375, "learning_rate": 2.25e-05, "loss": 1.2961, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 275605.5625, "learning_rate": 2.5e-05, "loss": 1.2836, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 417004.96875, "learning_rate": 2.7500000000000004e-05, "loss": 1.2374, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 313505.09375, "learning_rate": 3e-05, "loss": 1.2688, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 345997.34375, "learning_rate": 3.2500000000000004e-05, "loss": 1.2501, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 95028.7734375, "learning_rate": 3.5e-05, "loss": 1.33, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 115577.6171875, "learning_rate": 3.7500000000000003e-05, "loss": 1.3968, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 259988.078125, "learning_rate": 4e-05, "loss": 1.3944, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 61302.6796875, "learning_rate": 4.25e-05, "loss": 1.3839, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.2703150912106136, "eval_loss": 1.3841294050216675, "eval_runtime": 14.7215, "eval_samples_per_second": 81.921, "eval_steps_per_second": 2.581, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 278573.625, "learning_rate": 4.5e-05, "loss": 1.3805, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 224828.75, "learning_rate": 4.75e-05, "loss": 1.3793, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 104505.984375, "learning_rate": 5e-05, "loss": 1.391, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 198013.84375, "learning_rate": 4.923780487804878e-05, "loss": 1.3894, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 148076.09375, "learning_rate": 4.847560975609756e-05, "loss": 1.3911, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 113111.859375, "learning_rate": 4.771341463414634e-05, "loss": 1.3855, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 224499.625, "learning_rate": 4.695121951219512e-05, "loss": 1.3932, "step": 600 
}, { "epoch": 2.9205607476635516, "grad_norm": 271534.34375, "learning_rate": 4.618902439024391e-05, "loss": 1.3931, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.2744610281923715, "eval_loss": 1.386692762374878, "eval_runtime": 15.8236, "eval_samples_per_second": 76.215, "eval_steps_per_second": 2.401, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 219634.640625, "learning_rate": 4.542682926829269e-05, "loss": 1.3891, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 111519.75, "learning_rate": 4.466463414634147e-05, "loss": 1.392, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 172780.359375, "learning_rate": 4.390243902439025e-05, "loss": 1.3815, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 132606.328125, "learning_rate": 4.314024390243903e-05, "loss": 1.3917, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 99811.65625, "learning_rate": 4.237804878048781e-05, "loss": 1.3847, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 143071.3125, "learning_rate": 4.161585365853659e-05, "loss": 1.3851, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 184519.203125, "learning_rate": 4.085365853658537e-05, "loss": 1.3833, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 125933.3671875, "learning_rate": 4.0091463414634153e-05, "loss": 1.3809, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 208119.21875, "learning_rate": 3.932926829268293e-05, "loss": 1.3889, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.2744610281923715, "eval_loss": 1.3884413242340088, "eval_runtime": 14.7098, "eval_samples_per_second": 81.986, "eval_steps_per_second": 2.583, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 59083.99609375, "learning_rate": 3.856707317073171e-05, "loss": 1.3896, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 232480.921875, "learning_rate": 3.780487804878049e-05, "loss": 1.3892, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 259920.0, "learning_rate": 3.704268292682927e-05, "loss": 1.3894, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 141156.375, "learning_rate": 3.628048780487805e-05, "loss": 1.3894, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 160513.3125, "learning_rate": 3.551829268292683e-05, "loss": 1.3835, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 165691.796875, "learning_rate": 3.475609756097561e-05, "loss": 1.3853, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 148144.03125, "learning_rate": 3.399390243902439e-05, "loss": 1.3829, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 192222.125, "learning_rate": 3.323170731707317e-05, "loss": 1.3864, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.27611940298507465, "eval_loss": 1.384167194366455, "eval_runtime": 14.7587, "eval_samples_per_second": 81.715, "eval_steps_per_second": 2.575, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 248749.515625, "learning_rate": 3.246951219512195e-05, "loss": 1.3877, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 126036.1015625, "learning_rate": 3.170731707317073e-05, "loss": 1.3842, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 133823.828125, "learning_rate": 3.094512195121951e-05, "loss": 1.3842, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 150464.3125, "learning_rate": 3.0182926829268294e-05, "loss": 1.3873, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 198916.296875, "learning_rate": 2.9420731707317074e-05, "loss": 1.3853, "step": 1175 }, { "epoch": 
5.607476635514018, "grad_norm": 128553.09375, "learning_rate": 2.8658536585365854e-05, "loss": 1.3773, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 121626.6171875, "learning_rate": 2.7896341463414637e-05, "loss": 1.3763, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 126122.3828125, "learning_rate": 2.7134146341463417e-05, "loss": 1.3797, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 167410.4375, "learning_rate": 2.6371951219512197e-05, "loss": 1.3892, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.2877280265339967, "eval_loss": 1.3801555633544922, "eval_runtime": 14.9729, "eval_samples_per_second": 80.545, "eval_steps_per_second": 2.538, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 204724.40625, "learning_rate": 2.5609756097560977e-05, "loss": 1.3823, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 119223.4296875, "learning_rate": 2.4847560975609756e-05, "loss": 1.371, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 141183.390625, "learning_rate": 2.4085365853658536e-05, "loss": 1.3678, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 126927.609375, "learning_rate": 2.332317073170732e-05, "loss": 1.3762, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 60989.0625, "learning_rate": 2.25609756097561e-05, "loss": 1.3783, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 99197.3203125, "learning_rate": 2.179878048780488e-05, "loss": 1.3652, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 94052.6015625, "learning_rate": 2.103658536585366e-05, "loss": 1.3785, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 114499.0703125, "learning_rate": 2.0274390243902442e-05, "loss": 1.369, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.3142620232172471, "eval_loss": 1.3726152181625366, "eval_runtime": 14.9337, "eval_samples_per_second": 80.757, "eval_steps_per_second": 2.545, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 197232.515625, "learning_rate": 1.9512195121951222e-05, "loss": 1.3783, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 143105.90625, "learning_rate": 1.8750000000000002e-05, "loss": 1.3774, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 89834.8125, "learning_rate": 1.798780487804878e-05, "loss": 1.3641, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 140115.375, "learning_rate": 1.722560975609756e-05, "loss": 1.3509, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 159272.546875, "learning_rate": 1.6463414634146345e-05, "loss": 1.3614, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 77884.375, "learning_rate": 1.5701219512195124e-05, "loss": 1.3649, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 124513.578125, "learning_rate": 1.4939024390243902e-05, "loss": 1.3747, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 152671.921875, "learning_rate": 1.4176829268292682e-05, "loss": 1.3626, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 127847.6640625, "learning_rate": 1.3414634146341466e-05, "loss": 1.3545, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.3275290215588723, "eval_loss": 1.3627326488494873, "eval_runtime": 15.0566, "eval_samples_per_second": 80.098, "eval_steps_per_second": 2.524, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 178315.984375, "learning_rate": 1.2652439024390245e-05, "loss": 1.3554, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 133679.765625, "learning_rate": 1.1890243902439025e-05, "loss": 1.364, "step": 
1750 }, { "epoch": 8.294392523364486, "grad_norm": 107949.953125, "learning_rate": 1.1128048780487805e-05, "loss": 1.3625, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 88464.8046875, "learning_rate": 1.0365853658536585e-05, "loss": 1.3447, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 87399.6171875, "learning_rate": 9.603658536585366e-06, "loss": 1.3497, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 99827.875, "learning_rate": 8.841463414634146e-06, "loss": 1.3633, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 136905.25, "learning_rate": 8.079268292682928e-06, "loss": 1.3559, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 92091.828125, "learning_rate": 7.317073170731707e-06, "loss": 1.3568, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 122825.078125, "learning_rate": 6.554878048780488e-06, "loss": 1.3626, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.33913764510779437, "eval_loss": 1.3594319820404053, "eval_runtime": 15.4853, "eval_samples_per_second": 77.881, "eval_steps_per_second": 2.454, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 186335.28125, "learning_rate": 5.792682926829269e-06, "loss": 1.351, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 269060.90625, "learning_rate": 5.030487804878049e-06, "loss": 1.3576, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 141771.921875, "learning_rate": 4.26829268292683e-06, "loss": 1.3473, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 164899.0625, "learning_rate": 3.5060975609756102e-06, "loss": 1.3503, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 198283.234375, "learning_rate": 2.7439024390243905e-06, "loss": 1.3505, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 83706.109375, "learning_rate": 1.9817073170731707e-06, "loss": 1.362, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 101845.78125, "learning_rate": 1.2195121951219514e-06, "loss": 1.3364, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 134814.96875, "learning_rate": 4.573170731707317e-07, "loss": 1.3464, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.3482587064676617, "eval_loss": 1.3534746170043945, "eval_runtime": 15.5044, "eval_samples_per_second": 77.784, "eval_steps_per_second": 2.451, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 1.3723564032082245, "train_runtime": 1908.126, "train_samples_per_second": 35.794, "train_steps_per_second": 1.122 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }