{ "best_metric": 0.8748477697372437, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_1e-5_1e-4_0.1/checkpoint-1926", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1288282.125, "learning_rate": 5.000000000000001e-07, "loss": 1.6003, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 1179080.75, "learning_rate": 1.0000000000000002e-06, "loss": 1.5656, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 1302664.0, "learning_rate": 1.5e-06, "loss": 1.5881, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 1064078.25, "learning_rate": 2.0000000000000003e-06, "loss": 1.5212, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 891409.25, "learning_rate": 2.5e-06, "loss": 1.4607, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 506389.46875, "learning_rate": 3e-06, "loss": 1.4285, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 858052.0, "learning_rate": 3.5e-06, "loss": 1.3972, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 647255.5, "learning_rate": 4.000000000000001e-06, "loss": 1.4023, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.2744610281923715, "eval_loss": 1.3974584341049194, "eval_runtime": 15.3016, "eval_samples_per_second": 78.815, "eval_steps_per_second": 2.483, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 432902.65625, "learning_rate": 4.5e-06, "loss": 1.3877, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 387082.78125, "learning_rate": 5e-06, "loss": 1.3642, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 460995.21875, "learning_rate": 5.500000000000001e-06, "loss": 1.3537, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 426754.125, "learning_rate": 6e-06, "loss": 1.3536, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 689839.1875, "learning_rate": 6.5000000000000004e-06, "loss": 1.3486, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 525583.1875, "learning_rate": 7e-06, "loss": 1.3369, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 458157.90625, "learning_rate": 7.500000000000001e-06, "loss": 1.3238, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 716735.5, "learning_rate": 8.000000000000001e-06, "loss": 1.2738, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 547995.0625, "learning_rate": 8.5e-06, "loss": 1.2554, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.49087893864013266, "eval_loss": 1.1979795694351196, "eval_runtime": 15.2164, "eval_samples_per_second": 79.257, "eval_steps_per_second": 2.497, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 715070.1875, "learning_rate": 9e-06, "loss": 1.2346, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 568589.125, "learning_rate": 9.5e-06, "loss": 1.2079, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 374537.625, "learning_rate": 1e-05, "loss": 1.2209, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 539741.6875, "learning_rate": 9.847560975609756e-06, "loss": 1.2072, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 434154.53125, "learning_rate": 9.695121951219513e-06, "loss": 1.1796, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 416858.96875, "learning_rate": 9.542682926829268e-06, "loss": 1.182, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 434543.6875, "learning_rate": 
9.390243902439025e-06, "loss": 1.1822, "step": 600 }, { "epoch": 2.9205607476635516, "grad_norm": 560064.375, "learning_rate": 9.237804878048782e-06, "loss": 1.1636, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.5978441127694859, "eval_loss": 1.027214765548706, "eval_runtime": 15.3221, "eval_samples_per_second": 78.71, "eval_steps_per_second": 2.48, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 413180.0, "learning_rate": 9.085365853658538e-06, "loss": 1.1574, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 562978.6875, "learning_rate": 8.932926829268293e-06, "loss": 1.0971, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 429750.75, "learning_rate": 8.78048780487805e-06, "loss": 1.1231, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 592197.9375, "learning_rate": 8.628048780487805e-06, "loss": 1.1382, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 402836.8125, "learning_rate": 8.475609756097562e-06, "loss": 1.1188, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 506094.375, "learning_rate": 8.323170731707317e-06, "loss": 1.1118, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 520306.875, "learning_rate": 8.170731707317073e-06, "loss": 1.1209, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 517659.96875, "learning_rate": 8.01829268292683e-06, "loss": 1.0998, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 463775.65625, "learning_rate": 7.865853658536587e-06, "loss": 1.0988, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.5953565505804311, "eval_loss": 1.0476573705673218, "eval_runtime": 15.1491, "eval_samples_per_second": 79.609, "eval_steps_per_second": 2.508, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 501856.03125, "learning_rate": 7.713414634146342e-06, "loss": 1.0932, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 517486.46875, "learning_rate": 7.560975609756098e-06, "loss": 1.103, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 595251.0, "learning_rate": 7.408536585365854e-06, "loss": 1.0561, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 681244.0625, "learning_rate": 7.25609756097561e-06, "loss": 1.0518, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 482226.96875, "learning_rate": 7.1036585365853665e-06, "loss": 1.0594, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 491086.5625, "learning_rate": 6.951219512195122e-06, "loss": 1.0495, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 682658.25, "learning_rate": 6.798780487804879e-06, "loss": 1.05, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 371352.9375, "learning_rate": 6.646341463414635e-06, "loss": 1.068, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.6592039800995025, "eval_loss": 0.9599226713180542, "eval_runtime": 15.1792, "eval_samples_per_second": 79.451, "eval_steps_per_second": 2.503, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 737796.0625, "learning_rate": 6.493902439024391e-06, "loss": 1.0747, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 544087.5625, "learning_rate": 6.341463414634147e-06, "loss": 1.042, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 505016.96875, "learning_rate": 6.189024390243903e-06, "loss": 1.0356, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 552820.9375, "learning_rate": 6.0365853658536585e-06, "loss": 1.0499, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 599422.0625, "learning_rate": 5.884146341463415e-06, "loss": 1.0164, "step": 1175 }, 
{ "epoch": 5.607476635514018, "grad_norm": 393778.09375, "learning_rate": 5.731707317073171e-06, "loss": 1.0345, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 477965.125, "learning_rate": 5.579268292682928e-06, "loss": 1.0708, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 424191.53125, "learning_rate": 5.426829268292684e-06, "loss": 1.0142, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 540424.5625, "learning_rate": 5.27439024390244e-06, "loss": 1.0159, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.6807628524046434, "eval_loss": 0.9090853333473206, "eval_runtime": 15.2965, "eval_samples_per_second": 78.842, "eval_steps_per_second": 2.484, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 677101.5, "learning_rate": 5.121951219512195e-06, "loss": 0.9873, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 694898.8125, "learning_rate": 4.9695121951219515e-06, "loss": 1.0045, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 514024.75, "learning_rate": 4.817073170731708e-06, "loss": 1.0045, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 590044.625, "learning_rate": 4.664634146341464e-06, "loss": 0.9731, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 611512.4375, "learning_rate": 4.51219512195122e-06, "loss": 0.9752, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 688492.6875, "learning_rate": 4.359756097560976e-06, "loss": 0.9653, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 507507.28125, "learning_rate": 4.207317073170732e-06, "loss": 0.9916, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 526057.875, "learning_rate": 4.054878048780488e-06, "loss": 0.9484, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.6890547263681592, "eval_loss": 0.9250107407569885, "eval_runtime": 15.1589, "eval_samples_per_second": 79.557, "eval_steps_per_second": 2.507, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 584858.375, "learning_rate": 3.902439024390244e-06, "loss": 0.9819, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 642905.0, "learning_rate": 3.7500000000000005e-06, "loss": 0.9513, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 444722.8125, "learning_rate": 3.5975609756097562e-06, "loss": 1.0087, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 418072.0, "learning_rate": 3.4451219512195124e-06, "loss": 0.9674, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 616286.125, "learning_rate": 3.292682926829269e-06, "loss": 0.9598, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 660370.9375, "learning_rate": 3.1402439024390246e-06, "loss": 0.9899, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 474009.0, "learning_rate": 2.9878048780487808e-06, "loss": 0.9953, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 893681.125, "learning_rate": 2.8353658536585365e-06, "loss": 0.9445, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 500253.84375, "learning_rate": 2.682926829268293e-06, "loss": 0.9464, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.714759535655058, "eval_loss": 0.8801141977310181, "eval_runtime": 14.8458, "eval_samples_per_second": 81.235, "eval_steps_per_second": 2.56, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 714507.3125, "learning_rate": 2.530487804878049e-06, "loss": 0.941, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 655121.4375, "learning_rate": 2.378048780487805e-06, "loss": 0.9218, "step": 1750 }, { "epoch": 8.294392523364486, "grad_norm": 
656624.75, "learning_rate": 2.225609756097561e-06, "loss": 0.9422, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 643071.5, "learning_rate": 2.073170731707317e-06, "loss": 0.9206, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 470872.96875, "learning_rate": 1.9207317073170733e-06, "loss": 0.9458, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 497745.5, "learning_rate": 1.7682926829268294e-06, "loss": 0.9301, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 596386.875, "learning_rate": 1.6158536585365855e-06, "loss": 0.9394, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 537784.75, "learning_rate": 1.4634146341463414e-06, "loss": 0.9246, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 554460.5625, "learning_rate": 1.3109756097560978e-06, "loss": 0.9361, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.7155887230514096, "eval_loss": 0.8748477697372437, "eval_runtime": 15.4428, "eval_samples_per_second": 78.094, "eval_steps_per_second": 2.461, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 641775.6875, "learning_rate": 1.158536585365854e-06, "loss": 0.9021, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 552990.4375, "learning_rate": 1.0060975609756098e-06, "loss": 0.9592, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 560462.5, "learning_rate": 8.53658536585366e-07, "loss": 0.924, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 517514.09375, "learning_rate": 7.012195121951221e-07, "loss": 0.8702, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 668150.0, "learning_rate": 5.487804878048781e-07, "loss": 0.9793, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 738988.9375, "learning_rate": 3.963414634146342e-07, "loss": 0.9452, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 622479.125, "learning_rate": 2.439024390243903e-07, "loss": 0.9314, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 633154.1875, "learning_rate": 9.146341463414634e-08, "loss": 0.919, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.7056384742951907, "eval_loss": 0.8807059526443481, "eval_runtime": 15.7901, "eval_samples_per_second": 76.377, "eval_steps_per_second": 2.407, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 1.103826135118431, "train_runtime": 1935.7344, "train_samples_per_second": 35.284, "train_steps_per_second": 1.106 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }