{ "best_metric": 0.9358813166618347, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_1e-5_1e-4_0.15/checkpoint-1926", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1232665.625, "learning_rate": 5.000000000000001e-07, "loss": 1.603, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 1125859.125, "learning_rate": 1.0000000000000002e-06, "loss": 1.5674, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 1237422.625, "learning_rate": 1.5e-06, "loss": 1.5855, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 1008492.875, "learning_rate": 2.0000000000000003e-06, "loss": 1.5179, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 825557.9375, "learning_rate": 2.5e-06, "loss": 1.4596, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 461811.875, "learning_rate": 3e-06, "loss": 1.4276, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 785723.125, "learning_rate": 3.5e-06, "loss": 1.3977, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 589829.1875, "learning_rate": 4.000000000000001e-06, "loss": 1.4012, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.2777777777777778, "eval_loss": 1.3970400094985962, "eval_runtime": 15.4524, "eval_samples_per_second": 78.046, "eval_steps_per_second": 2.459, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 396606.21875, "learning_rate": 4.5e-06, "loss": 1.3881, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 353690.125, "learning_rate": 5e-06, "loss": 1.3671, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 419103.875, "learning_rate": 5.500000000000001e-06, "loss": 1.3582, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 374088.375, "learning_rate": 6e-06, "loss": 1.3592, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 600778.9375, "learning_rate": 6.5000000000000004e-06, "loss": 1.3563, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 5453174.5, "learning_rate": 7e-06, "loss": 1.3424, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 657756.375, "learning_rate": 7.500000000000001e-06, "loss": 1.355, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 560229.8125, "learning_rate": 8.000000000000001e-06, "loss": 1.3133, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 596723.375, "learning_rate": 8.5e-06, "loss": 1.2956, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.4709784411276949, "eval_loss": 1.2437535524368286, "eval_runtime": 15.2261, "eval_samples_per_second": 79.206, "eval_steps_per_second": 2.496, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 661517.125, "learning_rate": 9e-06, "loss": 1.278, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 499854.875, "learning_rate": 9.5e-06, "loss": 1.2474, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 353308.375, "learning_rate": 1e-05, "loss": 1.2492, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 568008.8125, "learning_rate": 9.847560975609756e-06, "loss": 1.2333, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 377403.0625, "learning_rate": 9.695121951219513e-06, "loss": 1.2086, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 376984.78125, "learning_rate": 9.542682926829268e-06, "loss": 1.2084, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 424205.8125, "learning_rate": 
9.390243902439025e-06, "loss": 1.2112, "step": 600 }, { "epoch": 2.9205607476635516, "grad_norm": 518474.75, "learning_rate": 9.237804878048782e-06, "loss": 1.1902, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.5895522388059702, "eval_loss": 1.0810370445251465, "eval_runtime": 14.8541, "eval_samples_per_second": 81.19, "eval_steps_per_second": 2.558, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 363418.0625, "learning_rate": 9.085365853658538e-06, "loss": 1.1903, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 472692.21875, "learning_rate": 8.932926829268293e-06, "loss": 1.1369, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 389273.5, "learning_rate": 8.78048780487805e-06, "loss": 1.1591, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 536079.4375, "learning_rate": 8.628048780487805e-06, "loss": 1.1742, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 369198.15625, "learning_rate": 8.475609756097562e-06, "loss": 1.157, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 459633.09375, "learning_rate": 8.323170731707317e-06, "loss": 1.1494, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 457643.6875, "learning_rate": 8.170731707317073e-06, "loss": 1.1559, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 459124.59375, "learning_rate": 8.01829268292683e-06, "loss": 1.1376, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 416160.96875, "learning_rate": 7.865853658536587e-06, "loss": 1.1316, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.6144278606965174, "eval_loss": 1.085984230041504, "eval_runtime": 15.259, "eval_samples_per_second": 79.035, "eval_steps_per_second": 2.49, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 415968.6875, "learning_rate": 7.713414634146342e-06, "loss": 1.1317, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 417537.21875, "learning_rate": 7.560975609756098e-06, "loss": 1.1403, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 560744.375, "learning_rate": 7.408536585365854e-06, "loss": 1.1016, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 597802.5, "learning_rate": 7.25609756097561e-06, "loss": 1.0959, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 442672.09375, "learning_rate": 7.1036585365853665e-06, "loss": 1.103, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 427007.0625, "learning_rate": 6.951219512195122e-06, "loss": 1.092, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 545753.3125, "learning_rate": 6.798780487804879e-06, "loss": 1.0949, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 332691.28125, "learning_rate": 6.646341463414635e-06, "loss": 1.1133, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.6475953565505804, "eval_loss": 1.0152363777160645, "eval_runtime": 14.8608, "eval_samples_per_second": 81.153, "eval_steps_per_second": 2.557, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 608418.625, "learning_rate": 6.493902439024391e-06, "loss": 1.1168, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 500188.65625, "learning_rate": 6.341463414634147e-06, "loss": 1.0857, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 472268.6875, "learning_rate": 6.189024390243903e-06, "loss": 1.0784, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 512482.09375, "learning_rate": 6.0365853658536585e-06, "loss": 1.0912, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 553851.8125, "learning_rate": 5.884146341463415e-06, "loss": 1.0615, "step": 
1175 }, { "epoch": 5.607476635514018, "grad_norm": 362454.03125, "learning_rate": 5.731707317073171e-06, "loss": 1.0757, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 439604.0625, "learning_rate": 5.579268292682928e-06, "loss": 1.1072, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 363162.625, "learning_rate": 5.426829268292684e-06, "loss": 1.0625, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 481210.59375, "learning_rate": 5.27439024390244e-06, "loss": 1.0626, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.6890547263681592, "eval_loss": 0.9663113355636597, "eval_runtime": 15.345, "eval_samples_per_second": 78.592, "eval_steps_per_second": 2.476, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 537217.875, "learning_rate": 5.121951219512195e-06, "loss": 1.0343, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 622598.75, "learning_rate": 4.9695121951219515e-06, "loss": 1.0544, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 461880.90625, "learning_rate": 4.817073170731708e-06, "loss": 1.0571, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 483674.78125, "learning_rate": 4.664634146341464e-06, "loss": 1.0273, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 507309.1875, "learning_rate": 4.51219512195122e-06, "loss": 1.0243, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 572243.0, "learning_rate": 4.359756097560976e-06, "loss": 1.0237, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 447492.46875, "learning_rate": 4.207317073170732e-06, "loss": 1.0387, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 471134.8125, "learning_rate": 4.054878048780488e-06, "loss": 1.0039, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.6923714759535655, "eval_loss": 0.9811031818389893, "eval_runtime": 14.9676, "eval_samples_per_second": 80.574, "eval_steps_per_second": 2.539, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 484021.0, "learning_rate": 3.902439024390244e-06, "loss": 1.0382, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 545706.5, "learning_rate": 3.7500000000000005e-06, "loss": 1.0079, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 385661.21875, "learning_rate": 3.5975609756097562e-06, "loss": 1.0593, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 389834.625, "learning_rate": 3.4451219512195124e-06, "loss": 1.0175, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 565496.25, "learning_rate": 3.292682926829269e-06, "loss": 1.0171, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 588504.4375, "learning_rate": 3.1402439024390246e-06, "loss": 1.0401, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 439602.84375, "learning_rate": 2.9878048780487808e-06, "loss": 1.0515, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 750778.125, "learning_rate": 2.8353658536585365e-06, "loss": 1.0046, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 399670.3125, "learning_rate": 2.682926829268293e-06, "loss": 1.0062, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.7205638474295191, "eval_loss": 0.9383352398872375, "eval_runtime": 15.5518, "eval_samples_per_second": 77.547, "eval_steps_per_second": 2.443, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 632884.5, "learning_rate": 2.530487804878049e-06, "loss": 0.9951, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 539285.375, "learning_rate": 2.378048780487805e-06, "loss": 0.9834, "step": 1750 }, { "epoch": 8.294392523364486, 
"grad_norm": 564263.875, "learning_rate": 2.225609756097561e-06, "loss": 1.0018, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 510646.3125, "learning_rate": 2.073170731707317e-06, "loss": 0.9775, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 443133.09375, "learning_rate": 1.9207317073170733e-06, "loss": 1.0046, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 459672.0625, "learning_rate": 1.7682926829268294e-06, "loss": 0.9931, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 494908.71875, "learning_rate": 1.6158536585365855e-06, "loss": 0.9988, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 472669.21875, "learning_rate": 1.4634146341463414e-06, "loss": 0.9798, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 473329.9375, "learning_rate": 1.3109756097560978e-06, "loss": 0.9948, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.7131011608623549, "eval_loss": 0.9358813166618347, "eval_runtime": 15.7094, "eval_samples_per_second": 76.769, "eval_steps_per_second": 2.419, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 530739.5625, "learning_rate": 1.158536585365854e-06, "loss": 0.966, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 483324.6875, "learning_rate": 1.0060975609756098e-06, "loss": 1.015, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 518854.625, "learning_rate": 8.53658536585366e-07, "loss": 0.985, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 473431.46875, "learning_rate": 7.012195121951221e-07, "loss": 0.9399, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 576602.0625, "learning_rate": 5.487804878048781e-07, "loss": 1.0286, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 585901.25, "learning_rate": 3.963414634146342e-07, "loss": 1.0004, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 543298.6875, "learning_rate": 2.439024390243903e-07, "loss": 0.9931, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 524303.25, "learning_rate": 9.146341463414634e-08, "loss": 0.9825, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.7072968490878938, "eval_loss": 0.9387073516845703, "eval_runtime": 15.6915, "eval_samples_per_second": 76.857, "eval_steps_per_second": 2.422, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 1.1433691055975228, "train_runtime": 1939.6937, "train_samples_per_second": 35.212, "train_steps_per_second": 1.103 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }