{ "best_metric": 0.87679523229599, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_1e-4_5e-4_0.15/checkpoint-2140", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1159652.375, "learning_rate": 5e-06, "loss": 1.5842, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 665793.625, "learning_rate": 1e-05, "loss": 1.4698, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 641848.25, "learning_rate": 1.5e-05, "loss": 1.4099, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 361838.5625, "learning_rate": 2e-05, "loss": 1.4008, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 444083.375, "learning_rate": 2.5e-05, "loss": 1.3763, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 672154.375, "learning_rate": 3e-05, "loss": 1.3726, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 3515615.75, "learning_rate": 3.5e-05, "loss": 1.3777, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 1527419.75, "learning_rate": 4e-05, "loss": 1.3768, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.2769485903814262, "eval_loss": 1.456185221672058, "eval_runtime": 15.2495, "eval_samples_per_second": 79.084, "eval_steps_per_second": 2.492, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 375564.53125, "learning_rate": 4.5e-05, "loss": 1.3807, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 285680.1875, "learning_rate": 5e-05, "loss": 1.367, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 428490.6875, "learning_rate": 5.500000000000001e-05, "loss": 1.3338, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 184152.671875, "learning_rate": 6e-05, "loss": 1.3453, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 180530.9375, "learning_rate": 6.500000000000001e-05, "loss": 1.2934, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 219887.5, "learning_rate": 7e-05, "loss": 1.3062, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 325509.625, "learning_rate": 7.500000000000001e-05, "loss": 1.2702, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 285242.25, "learning_rate": 8e-05, "loss": 1.2373, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 233574.765625, "learning_rate": 8.5e-05, "loss": 1.2642, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.5439469320066335, "eval_loss": 1.149614930152893, "eval_runtime": 15.5858, "eval_samples_per_second": 77.378, "eval_steps_per_second": 2.438, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 361034.4375, "learning_rate": 9e-05, "loss": 1.2327, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 268217.28125, "learning_rate": 9.5e-05, "loss": 1.2506, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 183026.546875, "learning_rate": 0.0001, "loss": 1.2404, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 292976.4375, "learning_rate": 9.847560975609756e-05, "loss": 1.2289, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 165711.53125, "learning_rate": 9.695121951219512e-05, "loss": 1.2293, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 171990.359375, "learning_rate": 9.542682926829268e-05, "loss": 1.2409, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 192889.6875, "learning_rate": 9.390243902439024e-05, "loss": 1.2113, "step": 600 }, { "epoch": 
2.9205607476635516, "grad_norm": 192437.53125, "learning_rate": 9.237804878048782e-05, "loss": 1.2246, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.6351575456053068, "eval_loss": 1.0498127937316895, "eval_runtime": 14.89, "eval_samples_per_second": 80.994, "eval_steps_per_second": 2.552, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 164391.703125, "learning_rate": 9.085365853658538e-05, "loss": 1.1982, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 165748.15625, "learning_rate": 8.932926829268294e-05, "loss": 1.1914, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 208878.25, "learning_rate": 8.78048780487805e-05, "loss": 1.1913, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 203566.75, "learning_rate": 8.628048780487805e-05, "loss": 1.194, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 148600.484375, "learning_rate": 8.475609756097561e-05, "loss": 1.1976, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 173605.671875, "learning_rate": 8.323170731707317e-05, "loss": 1.1612, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 163473.796875, "learning_rate": 8.170731707317073e-05, "loss": 1.1965, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 173422.8125, "learning_rate": 8.018292682926831e-05, "loss": 1.1541, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 193907.46875, "learning_rate": 7.865853658536587e-05, "loss": 1.1502, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.6011608623548922, "eval_loss": 1.087031602859497, "eval_runtime": 15.7814, "eval_samples_per_second": 76.419, "eval_steps_per_second": 2.408, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 196479.796875, "learning_rate": 7.713414634146343e-05, "loss": 1.1351, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 123101.3671875, "learning_rate": 7.560975609756099e-05, "loss": 1.1463, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 232118.515625, "learning_rate": 7.408536585365855e-05, "loss": 1.1351, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 227418.765625, "learning_rate": 7.25609756097561e-05, "loss": 1.1155, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 145508.546875, "learning_rate": 7.103658536585366e-05, "loss": 1.121, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 148252.265625, "learning_rate": 6.951219512195122e-05, "loss": 1.0959, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 191745.4375, "learning_rate": 6.798780487804878e-05, "loss": 1.1182, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 164514.484375, "learning_rate": 6.646341463414634e-05, "loss": 1.1322, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.693200663349917, "eval_loss": 0.9698711037635803, "eval_runtime": 15.748, "eval_samples_per_second": 76.581, "eval_steps_per_second": 2.413, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 210620.15625, "learning_rate": 6.49390243902439e-05, "loss": 1.1317, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 192808.234375, "learning_rate": 6.341463414634146e-05, "loss": 1.0846, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 239944.546875, "learning_rate": 6.189024390243902e-05, "loss": 1.0816, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 150305.15625, "learning_rate": 6.036585365853659e-05, "loss": 1.0886, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 221186.4375, "learning_rate": 5.884146341463415e-05, "loss": 1.0788, "step": 1175 }, { "epoch": 5.607476635514018, 
"grad_norm": 163830.375, "learning_rate": 5.731707317073171e-05, "loss": 1.1041, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 155212.859375, "learning_rate": 5.5792682926829274e-05, "loss": 1.1092, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 161311.390625, "learning_rate": 5.4268292682926834e-05, "loss": 1.0742, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 125866.4296875, "learning_rate": 5.2743902439024394e-05, "loss": 1.0696, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.7039800995024875, "eval_loss": 0.9538396000862122, "eval_runtime": 15.035, "eval_samples_per_second": 80.213, "eval_steps_per_second": 2.527, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 206136.15625, "learning_rate": 5.121951219512195e-05, "loss": 1.054, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 201560.484375, "learning_rate": 4.969512195121951e-05, "loss": 1.0383, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 151493.71875, "learning_rate": 4.817073170731707e-05, "loss": 1.059, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 221694.390625, "learning_rate": 4.664634146341464e-05, "loss": 1.005, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 171523.03125, "learning_rate": 4.51219512195122e-05, "loss": 1.0513, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 194379.046875, "learning_rate": 4.359756097560976e-05, "loss": 1.0218, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 157827.703125, "learning_rate": 4.207317073170732e-05, "loss": 1.0534, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 189870.078125, "learning_rate": 4.0548780487804884e-05, "loss": 1.0189, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.7172470978441128, "eval_loss": 0.9233611226081848, "eval_runtime": 14.8072, "eval_samples_per_second": 81.447, "eval_steps_per_second": 2.566, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 170561.5625, "learning_rate": 3.9024390243902444e-05, "loss": 1.0216, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 199614.921875, "learning_rate": 3.7500000000000003e-05, "loss": 0.9941, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 168135.65625, "learning_rate": 3.597560975609756e-05, "loss": 1.0454, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 185285.3125, "learning_rate": 3.445121951219512e-05, "loss": 0.9979, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 165282.078125, "learning_rate": 3.292682926829269e-05, "loss": 0.9869, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 169526.53125, "learning_rate": 3.140243902439025e-05, "loss": 1.0213, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 149986.765625, "learning_rate": 2.9878048780487805e-05, "loss": 1.02, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 248981.28125, "learning_rate": 2.8353658536585365e-05, "loss": 0.9901, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 132692.34375, "learning_rate": 2.682926829268293e-05, "loss": 0.9874, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.74212271973466, "eval_loss": 0.9064252972602844, "eval_runtime": 14.7597, "eval_samples_per_second": 81.709, "eval_steps_per_second": 2.575, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 256718.34375, "learning_rate": 2.530487804878049e-05, "loss": 0.9658, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 187598.078125, "learning_rate": 2.378048780487805e-05, "loss": 0.9418, "step": 1750 }, { "epoch": 
8.294392523364486, "grad_norm": 274552.46875, "learning_rate": 2.225609756097561e-05, "loss": 0.9644, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 212352.921875, "learning_rate": 2.073170731707317e-05, "loss": 0.946, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 171896.203125, "learning_rate": 1.9207317073170733e-05, "loss": 0.9599, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 168001.296875, "learning_rate": 1.7682926829268292e-05, "loss": 0.9624, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 150168.40625, "learning_rate": 1.6158536585365855e-05, "loss": 0.9326, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 228498.40625, "learning_rate": 1.4634146341463415e-05, "loss": 0.9507, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 153940.3125, "learning_rate": 1.3109756097560976e-05, "loss": 0.9228, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.7562189054726368, "eval_loss": 0.8793113231658936, "eval_runtime": 14.9438, "eval_samples_per_second": 80.702, "eval_steps_per_second": 2.543, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 203478.21875, "learning_rate": 1.1585365853658537e-05, "loss": 0.8734, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 196428.75, "learning_rate": 1.0060975609756099e-05, "loss": 0.939, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 162436.5, "learning_rate": 8.53658536585366e-06, "loss": 0.9088, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 194883.640625, "learning_rate": 7.0121951219512205e-06, "loss": 0.8676, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 221290.5, "learning_rate": 5.487804878048781e-06, "loss": 0.9405, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 246742.265625, "learning_rate": 3.9634146341463414e-06, "loss": 0.9241, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 194605.296875, "learning_rate": 2.4390243902439027e-06, "loss": 0.9265, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 242350.640625, "learning_rate": 9.146341463414634e-07, "loss": 0.9201, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.763681592039801, "eval_loss": 0.87679523229599, "eval_runtime": 15.2158, "eval_samples_per_second": 79.26, "eval_steps_per_second": 2.497, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 1.1243137065495286, "train_runtime": 1922.3745, "train_samples_per_second": 35.529, "train_steps_per_second": 1.113 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }
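A minimal sketch of how this state file can be consumed, assuming it is saved under the usual name "trainer_state.json" inside the checkpoint directory; it only reads keys that appear above ("log_history", "best_model_checkpoint", "best_metric") and prints the end-of-epoch eval entries, which are the log records carrying an "eval_accuracy" field.

# Sketch: summarize per-epoch eval metrics from the trainer state shown above.
# The filename "trainer_state.json" is an assumption about where this JSON lives.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes periodic training-loss records and end-of-epoch eval records;
# the eval records are distinguished by the presence of "eval_accuracy".
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]:>4}: '
              f'eval_accuracy={entry["eval_accuracy"]:.4f}, '
              f'eval_loss={entry["eval_loss"]:.4f}')

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])

Run against this file, the loop would print one line per epoch (accuracy rising from roughly 0.277 at epoch 1 to 0.764 at epoch 10), and the final two lines report checkpoint-2140 and 0.87679523229599; the best metric here matches the epoch-10 eval_loss, consistent with the checkpoint selection recorded above.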