{ "best_metric": 0.8104314804077148, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_1e-4_1e-3_0.1/checkpoint-2140", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1217842.375, "learning_rate": 5e-06, "loss": 1.5829, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 743310.1875, "learning_rate": 1e-05, "loss": 1.4705, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 651590.4375, "learning_rate": 1.5e-05, "loss": 1.411, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 442779.90625, "learning_rate": 2e-05, "loss": 1.4027, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 468140.84375, "learning_rate": 2.5e-05, "loss": 1.3717, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 354567.5625, "learning_rate": 3e-05, "loss": 1.3302, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 512739.0625, "learning_rate": 3.5e-05, "loss": 1.3142, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 557678.25, "learning_rate": 4e-05, "loss": 1.2934, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.45854063018242125, "eval_loss": 1.310935139656067, "eval_runtime": 14.7436, "eval_samples_per_second": 81.798, "eval_steps_per_second": 2.577, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 342119.03125, "learning_rate": 4.5e-05, "loss": 1.2604, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 267202.78125, "learning_rate": 5e-05, "loss": 1.2483, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 231700.59375, "learning_rate": 5.500000000000001e-05, "loss": 1.2028, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 240836.390625, "learning_rate": 6e-05, "loss": 1.2499, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 343488.6875, "learning_rate": 6.500000000000001e-05, "loss": 1.2375, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 235778.375, "learning_rate": 7e-05, "loss": 1.2126, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 217530.28125, "learning_rate": 7.500000000000001e-05, "loss": 1.2085, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 488959.4375, "learning_rate": 8e-05, "loss": 1.1709, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 343188.84375, "learning_rate": 8.5e-05, "loss": 1.1806, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.5563847429519071, "eval_loss": 1.109222650527954, "eval_runtime": 15.1212, "eval_samples_per_second": 79.756, "eval_steps_per_second": 2.513, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 510198.4375, "learning_rate": 9e-05, "loss": 1.223, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 249117.484375, "learning_rate": 9.5e-05, "loss": 1.1548, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 222429.15625, "learning_rate": 0.0001, "loss": 1.1652, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 283511.53125, "learning_rate": 9.847560975609756e-05, "loss": 1.2161, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 183689.015625, "learning_rate": 9.695121951219512e-05, "loss": 1.2039, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 217182.84375, "learning_rate": 9.542682926829268e-05, "loss": 1.1824, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 574460.0625, "learning_rate": 9.390243902439024e-05, "loss": 1.1651, "step": 600 }, { 
"epoch": 2.9205607476635516, "grad_norm": 194637.703125, "learning_rate": 9.237804878048782e-05, "loss": 1.181, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.6077943615257048, "eval_loss": 1.038699746131897, "eval_runtime": 14.808, "eval_samples_per_second": 81.442, "eval_steps_per_second": 2.566, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 174755.5, "learning_rate": 9.085365853658538e-05, "loss": 1.1829, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 206334.171875, "learning_rate": 8.932926829268294e-05, "loss": 1.1063, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 214731.515625, "learning_rate": 8.78048780487805e-05, "loss": 1.1616, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 247518.84375, "learning_rate": 8.628048780487805e-05, "loss": 1.1424, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 206443.25, "learning_rate": 8.475609756097561e-05, "loss": 1.152, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 189756.421875, "learning_rate": 8.323170731707317e-05, "loss": 1.114, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 202614.625, "learning_rate": 8.170731707317073e-05, "loss": 1.1359, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 240795.953125, "learning_rate": 8.018292682926831e-05, "loss": 1.1248, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 222365.71875, "learning_rate": 7.865853658536587e-05, "loss": 1.1188, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.6666666666666666, "eval_loss": 0.9512582421302795, "eval_runtime": 15.1388, "eval_samples_per_second": 79.663, "eval_steps_per_second": 2.51, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 259224.40625, "learning_rate": 7.713414634146343e-05, "loss": 1.0856, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 197973.890625, "learning_rate": 7.560975609756099e-05, "loss": 1.1044, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 331749.875, "learning_rate": 7.408536585365855e-05, "loss": 1.083, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 229539.890625, "learning_rate": 7.25609756097561e-05, "loss": 1.0673, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 199168.28125, "learning_rate": 7.103658536585366e-05, "loss": 1.0665, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 162672.40625, "learning_rate": 6.951219512195122e-05, "loss": 1.0367, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 198779.140625, "learning_rate": 6.798780487804878e-05, "loss": 1.0551, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 198510.765625, "learning_rate": 6.646341463414634e-05, "loss": 1.0883, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.6849087893864013, "eval_loss": 0.9217574000358582, "eval_runtime": 15.6174, "eval_samples_per_second": 77.222, "eval_steps_per_second": 2.433, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 330993.65625, "learning_rate": 6.49390243902439e-05, "loss": 1.0981, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 271345.6875, "learning_rate": 6.341463414634146e-05, "loss": 1.0346, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 257730.21875, "learning_rate": 6.189024390243902e-05, "loss": 1.0354, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 214900.15625, "learning_rate": 6.036585365853659e-05, "loss": 1.0315, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 271099.71875, "learning_rate": 5.884146341463415e-05, "loss": 1.0092, "step": 1175 }, { "epoch": 5.607476635514018, 
"grad_norm": 213789.734375, "learning_rate": 5.731707317073171e-05, "loss": 1.0221, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 232312.421875, "learning_rate": 5.5792682926829274e-05, "loss": 1.0395, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 144164.546875, "learning_rate": 5.4268292682926834e-05, "loss": 1.0129, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 169016.9375, "learning_rate": 5.2743902439024394e-05, "loss": 1.0148, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.7106135986733002, "eval_loss": 0.8751299381256104, "eval_runtime": 15.15, "eval_samples_per_second": 79.604, "eval_steps_per_second": 2.508, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 314712.125, "learning_rate": 5.121951219512195e-05, "loss": 0.9848, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 259893.6875, "learning_rate": 4.969512195121951e-05, "loss": 1.0045, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 160624.953125, "learning_rate": 4.817073170731707e-05, "loss": 1.0013, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 298028.3125, "learning_rate": 4.664634146341464e-05, "loss": 0.9589, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 221959.015625, "learning_rate": 4.51219512195122e-05, "loss": 0.9671, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 219538.75, "learning_rate": 4.359756097560976e-05, "loss": 0.9495, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 188839.3125, "learning_rate": 4.207317073170732e-05, "loss": 0.9806, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 242229.4375, "learning_rate": 4.0548780487804884e-05, "loss": 0.9767, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.746268656716418, "eval_loss": 0.8361870646476746, "eval_runtime": 15.1552, "eval_samples_per_second": 79.576, "eval_steps_per_second": 2.507, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 210763.171875, "learning_rate": 3.9024390243902444e-05, "loss": 0.963, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 201932.671875, "learning_rate": 3.7500000000000003e-05, "loss": 0.9168, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 167061.890625, "learning_rate": 3.597560975609756e-05, "loss": 0.9903, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 317654.1875, "learning_rate": 3.445121951219512e-05, "loss": 0.9235, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 163850.625, "learning_rate": 3.292682926829269e-05, "loss": 0.9153, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 166939.59375, "learning_rate": 3.140243902439025e-05, "loss": 0.9669, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 187720.734375, "learning_rate": 2.9878048780487805e-05, "loss": 0.9397, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 279596.0625, "learning_rate": 2.8353658536585365e-05, "loss": 0.9164, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 190616.0625, "learning_rate": 2.682926829268293e-05, "loss": 0.9218, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.746268656716418, "eval_loss": 0.8222536444664001, "eval_runtime": 15.1444, "eval_samples_per_second": 79.633, "eval_steps_per_second": 2.509, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 212519.125, "learning_rate": 2.530487804878049e-05, "loss": 0.9206, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 177413.6875, "learning_rate": 2.378048780487805e-05, "loss": 0.8684, "step": 1750 }, { "epoch": 8.294392523364486, "grad_norm": 
223749.203125, "learning_rate": 2.225609756097561e-05, "loss": 0.9026, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 237978.796875, "learning_rate": 2.073170731707317e-05, "loss": 0.8628, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 195313.34375, "learning_rate": 1.9207317073170733e-05, "loss": 0.8996, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 206904.25, "learning_rate": 1.7682926829268292e-05, "loss": 0.8929, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 185694.890625, "learning_rate": 1.6158536585365855e-05, "loss": 0.8662, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 235396.53125, "learning_rate": 1.4634146341463415e-05, "loss": 0.8701, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 163277.984375, "learning_rate": 1.3109756097560976e-05, "loss": 0.8507, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.7620232172470979, "eval_loss": 0.8148385286331177, "eval_runtime": 15.03, "eval_samples_per_second": 80.24, "eval_steps_per_second": 2.528, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 202752.34375, "learning_rate": 1.1585365853658537e-05, "loss": 0.7827, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 229156.890625, "learning_rate": 1.0060975609756099e-05, "loss": 0.8734, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 162009.6875, "learning_rate": 8.53658536585366e-06, "loss": 0.8241, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 203122.65625, "learning_rate": 7.0121951219512205e-06, "loss": 0.772, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 201357.0625, "learning_rate": 5.487804878048781e-06, "loss": 0.8671, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 326030.46875, "learning_rate": 3.9634146341463414e-06, "loss": 0.8559, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 297982.5625, "learning_rate": 2.4390243902439027e-06, "loss": 0.8573, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 219615.546875, "learning_rate": 9.146341463414634e-07, "loss": 0.8348, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.7628524046434494, "eval_loss": 0.8104314804077148, "eval_runtime": 15.5758, "eval_samples_per_second": 77.428, "eval_steps_per_second": 2.44, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 1.064676717062977, "train_runtime": 1932.3079, "train_samples_per_second": 35.346, "train_steps_per_second": 1.107 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }