{ "best_metric": 0.7102004289627075, "best_model_checkpoint": "/media/mldrive/kcardenas/limb_classification_person_crop/beit-large-patch16-384/8_5e-5_1e-3_0.05/checkpoint-1712", "epoch": 10.0, "eval_steps": 500, "global_step": 2140, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11682242990654206, "grad_norm": 1340459.625, "learning_rate": 2.5e-06, "loss": 1.5852, "step": 25 }, { "epoch": 0.2336448598130841, "grad_norm": 994590.0, "learning_rate": 5e-06, "loss": 1.5062, "step": 50 }, { "epoch": 0.35046728971962615, "grad_norm": 835627.5, "learning_rate": 7.5e-06, "loss": 1.4702, "step": 75 }, { "epoch": 0.4672897196261682, "grad_norm": 708100.8125, "learning_rate": 1e-05, "loss": 1.405, "step": 100 }, { "epoch": 0.5841121495327103, "grad_norm": 19124280.0, "learning_rate": 1.25e-05, "loss": 1.3983, "step": 125 }, { "epoch": 0.7009345794392523, "grad_norm": 281499.28125, "learning_rate": 1.5e-05, "loss": 1.3889, "step": 150 }, { "epoch": 0.8177570093457944, "grad_norm": 727612.8125, "learning_rate": 1.75e-05, "loss": 1.3625, "step": 175 }, { "epoch": 0.9345794392523364, "grad_norm": 628275.8125, "learning_rate": 2e-05, "loss": 1.3649, "step": 200 }, { "epoch": 1.0, "eval_accuracy": 0.36401326699834163, "eval_loss": 1.5214860439300537, "eval_runtime": 15.6296, "eval_samples_per_second": 77.161, "eval_steps_per_second": 2.431, "step": 214 }, { "epoch": 1.0514018691588785, "grad_norm": 456936.21875, "learning_rate": 2.25e-05, "loss": 1.3042, "step": 225 }, { "epoch": 1.1682242990654206, "grad_norm": 314467.59375, "learning_rate": 2.5e-05, "loss": 1.275, "step": 250 }, { "epoch": 1.2850467289719627, "grad_norm": 409308.0625, "learning_rate": 2.7500000000000004e-05, "loss": 1.2176, "step": 275 }, { "epoch": 1.4018691588785046, "grad_norm": 343987.34375, "learning_rate": 3e-05, "loss": 1.2691, "step": 300 }, { "epoch": 1.5186915887850467, "grad_norm": 415036.9375, "learning_rate": 3.2500000000000004e-05, "loss": 1.2335, "step": 325 }, { "epoch": 1.6355140186915889, "grad_norm": 380739.28125, "learning_rate": 3.5e-05, "loss": 1.2052, "step": 350 }, { "epoch": 1.7523364485981308, "grad_norm": 245421.921875, "learning_rate": 3.7500000000000003e-05, "loss": 1.2038, "step": 375 }, { "epoch": 1.8691588785046729, "grad_norm": 417655.78125, "learning_rate": 4e-05, "loss": 1.1433, "step": 400 }, { "epoch": 1.985981308411215, "grad_norm": 438838.375, "learning_rate": 4.25e-05, "loss": 1.1236, "step": 425 }, { "epoch": 2.0, "eval_accuracy": 0.6202321724709784, "eval_loss": 0.9691005945205688, "eval_runtime": 15.6266, "eval_samples_per_second": 77.176, "eval_steps_per_second": 2.432, "step": 428 }, { "epoch": 2.102803738317757, "grad_norm": 471901.0625, "learning_rate": 4.5e-05, "loss": 1.1464, "step": 450 }, { "epoch": 2.2196261682242993, "grad_norm": 506105.5625, "learning_rate": 4.75e-05, "loss": 1.1179, "step": 475 }, { "epoch": 2.336448598130841, "grad_norm": 272071.0, "learning_rate": 5e-05, "loss": 1.1158, "step": 500 }, { "epoch": 2.453271028037383, "grad_norm": 458421.875, "learning_rate": 4.923780487804878e-05, "loss": 1.1191, "step": 525 }, { "epoch": 2.5700934579439254, "grad_norm": 182668.578125, "learning_rate": 4.847560975609756e-05, "loss": 1.1037, "step": 550 }, { "epoch": 2.6869158878504673, "grad_norm": 325595.375, "learning_rate": 4.771341463414634e-05, "loss": 1.1321, "step": 575 }, { "epoch": 2.803738317757009, "grad_norm": 282449.25, "learning_rate": 4.695121951219512e-05, "loss": 1.1108, "step": 600 }, { 
"epoch": 2.9205607476635516, "grad_norm": 329149.875, "learning_rate": 4.618902439024391e-05, "loss": 1.0837, "step": 625 }, { "epoch": 3.0, "eval_accuracy": 0.6666666666666666, "eval_loss": 0.8731465935707092, "eval_runtime": 15.4021, "eval_samples_per_second": 78.301, "eval_steps_per_second": 2.467, "step": 642 }, { "epoch": 3.0373831775700935, "grad_norm": 305588.1875, "learning_rate": 4.542682926829269e-05, "loss": 1.0819, "step": 650 }, { "epoch": 3.1542056074766354, "grad_norm": 260019.390625, "learning_rate": 4.466463414634147e-05, "loss": 1.0121, "step": 675 }, { "epoch": 3.2710280373831777, "grad_norm": 338644.4375, "learning_rate": 4.390243902439025e-05, "loss": 1.0588, "step": 700 }, { "epoch": 3.3878504672897196, "grad_norm": 325710.0, "learning_rate": 4.314024390243903e-05, "loss": 1.064, "step": 725 }, { "epoch": 3.5046728971962615, "grad_norm": 216358.59375, "learning_rate": 4.237804878048781e-05, "loss": 1.0566, "step": 750 }, { "epoch": 3.621495327102804, "grad_norm": 399771.96875, "learning_rate": 4.161585365853659e-05, "loss": 1.0209, "step": 775 }, { "epoch": 3.7383177570093458, "grad_norm": 407317.3125, "learning_rate": 4.085365853658537e-05, "loss": 1.0414, "step": 800 }, { "epoch": 3.8551401869158877, "grad_norm": 309527.8125, "learning_rate": 4.0091463414634153e-05, "loss": 1.059, "step": 825 }, { "epoch": 3.97196261682243, "grad_norm": 386142.875, "learning_rate": 3.932926829268293e-05, "loss": 1.0056, "step": 850 }, { "epoch": 4.0, "eval_accuracy": 0.6218905472636815, "eval_loss": 0.9375925064086914, "eval_runtime": 15.283, "eval_samples_per_second": 78.911, "eval_steps_per_second": 2.486, "step": 856 }, { "epoch": 4.088785046728972, "grad_norm": 424022.59375, "learning_rate": 3.856707317073171e-05, "loss": 0.9983, "step": 875 }, { "epoch": 4.205607476635514, "grad_norm": 270931.78125, "learning_rate": 3.780487804878049e-05, "loss": 0.9991, "step": 900 }, { "epoch": 4.322429906542056, "grad_norm": 416708.1875, "learning_rate": 3.704268292682927e-05, "loss": 0.9898, "step": 925 }, { "epoch": 4.4392523364485985, "grad_norm": 363340.96875, "learning_rate": 3.628048780487805e-05, "loss": 0.9574, "step": 950 }, { "epoch": 4.55607476635514, "grad_norm": 410850.46875, "learning_rate": 3.551829268292683e-05, "loss": 0.9555, "step": 975 }, { "epoch": 4.672897196261682, "grad_norm": 234931.4375, "learning_rate": 3.475609756097561e-05, "loss": 0.9532, "step": 1000 }, { "epoch": 4.789719626168225, "grad_norm": 313037.1875, "learning_rate": 3.399390243902439e-05, "loss": 0.9628, "step": 1025 }, { "epoch": 4.906542056074766, "grad_norm": 222636.90625, "learning_rate": 3.323170731707317e-05, "loss": 1.004, "step": 1050 }, { "epoch": 5.0, "eval_accuracy": 0.6915422885572139, "eval_loss": 0.8376840353012085, "eval_runtime": 15.0782, "eval_samples_per_second": 79.983, "eval_steps_per_second": 2.52, "step": 1070 }, { "epoch": 5.0233644859813085, "grad_norm": 351343.6875, "learning_rate": 3.246951219512195e-05, "loss": 1.0001, "step": 1075 }, { "epoch": 5.140186915887851, "grad_norm": 344688.3125, "learning_rate": 3.170731707317073e-05, "loss": 0.942, "step": 1100 }, { "epoch": 5.257009345794392, "grad_norm": 417808.71875, "learning_rate": 3.094512195121951e-05, "loss": 0.94, "step": 1125 }, { "epoch": 5.373831775700935, "grad_norm": 323364.71875, "learning_rate": 3.0182926829268294e-05, "loss": 0.926, "step": 1150 }, { "epoch": 5.490654205607477, "grad_norm": 406241.34375, "learning_rate": 2.9420731707317074e-05, "loss": 0.9038, "step": 1175 }, { "epoch": 5.607476635514018, 
"grad_norm": 326464.375, "learning_rate": 2.8658536585365854e-05, "loss": 0.9106, "step": 1200 }, { "epoch": 5.724299065420561, "grad_norm": 271519.625, "learning_rate": 2.7896341463414637e-05, "loss": 0.9567, "step": 1225 }, { "epoch": 5.841121495327103, "grad_norm": 235729.03125, "learning_rate": 2.7134146341463417e-05, "loss": 0.9158, "step": 1250 }, { "epoch": 5.957943925233645, "grad_norm": 294801.71875, "learning_rate": 2.6371951219512197e-05, "loss": 0.9203, "step": 1275 }, { "epoch": 6.0, "eval_accuracy": 0.7520729684908789, "eval_loss": 0.7408699989318848, "eval_runtime": 15.7054, "eval_samples_per_second": 76.789, "eval_steps_per_second": 2.42, "step": 1284 }, { "epoch": 6.074766355140187, "grad_norm": 335762.1875, "learning_rate": 2.5609756097560977e-05, "loss": 0.8907, "step": 1300 }, { "epoch": 6.191588785046729, "grad_norm": 436610.6875, "learning_rate": 2.4847560975609756e-05, "loss": 0.8671, "step": 1325 }, { "epoch": 6.308411214953271, "grad_norm": 305450.125, "learning_rate": 2.4085365853658536e-05, "loss": 0.8857, "step": 1350 }, { "epoch": 6.425233644859813, "grad_norm": 402883.78125, "learning_rate": 2.332317073170732e-05, "loss": 0.8171, "step": 1375 }, { "epoch": 6.542056074766355, "grad_norm": 338518.78125, "learning_rate": 2.25609756097561e-05, "loss": 0.8644, "step": 1400 }, { "epoch": 6.658878504672897, "grad_norm": 378284.9375, "learning_rate": 2.179878048780488e-05, "loss": 0.84, "step": 1425 }, { "epoch": 6.775700934579439, "grad_norm": 333087.625, "learning_rate": 2.103658536585366e-05, "loss": 0.8815, "step": 1450 }, { "epoch": 6.892523364485982, "grad_norm": 313590.78125, "learning_rate": 2.0274390243902442e-05, "loss": 0.8422, "step": 1475 }, { "epoch": 7.0, "eval_accuracy": 0.7412935323383084, "eval_loss": 0.7496287822723389, "eval_runtime": 15.2615, "eval_samples_per_second": 79.022, "eval_steps_per_second": 2.49, "step": 1498 }, { "epoch": 7.009345794392523, "grad_norm": 331659.71875, "learning_rate": 1.9512195121951222e-05, "loss": 0.8347, "step": 1500 }, { "epoch": 7.126168224299065, "grad_norm": 388029.40625, "learning_rate": 1.8750000000000002e-05, "loss": 0.795, "step": 1525 }, { "epoch": 7.242990654205608, "grad_norm": 285133.5625, "learning_rate": 1.798780487804878e-05, "loss": 0.8667, "step": 1550 }, { "epoch": 7.359813084112149, "grad_norm": 287052.90625, "learning_rate": 1.722560975609756e-05, "loss": 0.8106, "step": 1575 }, { "epoch": 7.4766355140186915, "grad_norm": 372351.96875, "learning_rate": 1.6463414634146345e-05, "loss": 0.7963, "step": 1600 }, { "epoch": 7.593457943925234, "grad_norm": 356293.3125, "learning_rate": 1.5701219512195124e-05, "loss": 0.8258, "step": 1625 }, { "epoch": 7.710280373831775, "grad_norm": 309102.75, "learning_rate": 1.4939024390243902e-05, "loss": 0.8195, "step": 1650 }, { "epoch": 7.827102803738318, "grad_norm": 369304.375, "learning_rate": 1.4176829268292682e-05, "loss": 0.7889, "step": 1675 }, { "epoch": 7.94392523364486, "grad_norm": 252997.21875, "learning_rate": 1.3414634146341466e-05, "loss": 0.8044, "step": 1700 }, { "epoch": 8.0, "eval_accuracy": 0.7628524046434494, "eval_loss": 0.7102004289627075, "eval_runtime": 15.1276, "eval_samples_per_second": 79.722, "eval_steps_per_second": 2.512, "step": 1712 }, { "epoch": 8.060747663551401, "grad_norm": 389864.21875, "learning_rate": 1.2652439024390245e-05, "loss": 0.7904, "step": 1725 }, { "epoch": 8.177570093457945, "grad_norm": 404623.25, "learning_rate": 1.1890243902439025e-05, "loss": 0.7354, "step": 1750 }, { "epoch": 8.294392523364486, "grad_norm": 
407148.1875, "learning_rate": 1.1128048780487805e-05, "loss": 0.796, "step": 1775 }, { "epoch": 8.411214953271028, "grad_norm": 382684.03125, "learning_rate": 1.0365853658536585e-05, "loss": 0.7286, "step": 1800 }, { "epoch": 8.52803738317757, "grad_norm": 344973.09375, "learning_rate": 9.603658536585366e-06, "loss": 0.7714, "step": 1825 }, { "epoch": 8.644859813084112, "grad_norm": 346708.125, "learning_rate": 8.841463414634146e-06, "loss": 0.7752, "step": 1850 }, { "epoch": 8.761682242990654, "grad_norm": 269587.84375, "learning_rate": 8.079268292682928e-06, "loss": 0.7341, "step": 1875 }, { "epoch": 8.878504672897197, "grad_norm": 428276.75, "learning_rate": 7.317073170731707e-06, "loss": 0.7378, "step": 1900 }, { "epoch": 8.995327102803738, "grad_norm": 326870.46875, "learning_rate": 6.554878048780488e-06, "loss": 0.7268, "step": 1925 }, { "epoch": 9.0, "eval_accuracy": 0.7678275290215588, "eval_loss": 0.7323490381240845, "eval_runtime": 15.0448, "eval_samples_per_second": 80.161, "eval_steps_per_second": 2.526, "step": 1926 }, { "epoch": 9.11214953271028, "grad_norm": 456910.90625, "learning_rate": 5.792682926829269e-06, "loss": 0.6645, "step": 1950 }, { "epoch": 9.228971962616823, "grad_norm": 425251.84375, "learning_rate": 5.030487804878049e-06, "loss": 0.7721, "step": 1975 }, { "epoch": 9.345794392523365, "grad_norm": 267279.1875, "learning_rate": 4.26829268292683e-06, "loss": 0.7056, "step": 2000 }, { "epoch": 9.462616822429906, "grad_norm": 254717.8125, "learning_rate": 3.5060975609756102e-06, "loss": 0.6624, "step": 2025 }, { "epoch": 9.57943925233645, "grad_norm": 399249.125, "learning_rate": 2.7439024390243905e-06, "loss": 0.7665, "step": 2050 }, { "epoch": 9.69626168224299, "grad_norm": 563710.0, "learning_rate": 1.9817073170731707e-06, "loss": 0.7461, "step": 2075 }, { "epoch": 9.813084112149532, "grad_norm": 427442.15625, "learning_rate": 1.2195121951219514e-06, "loss": 0.7337, "step": 2100 }, { "epoch": 9.929906542056075, "grad_norm": 411388.09375, "learning_rate": 4.573170731707317e-07, "loss": 0.711, "step": 2125 }, { "epoch": 10.0, "eval_accuracy": 0.7719734660033167, "eval_loss": 0.7325395345687866, "eval_runtime": 15.4676, "eval_samples_per_second": 77.969, "eval_steps_per_second": 2.457, "step": 2140 }, { "epoch": 10.0, "step": 2140, "total_flos": 5.510586115727032e+19, "train_loss": 0.984144314204421, "train_runtime": 1941.5588, "train_samples_per_second": 35.178, "train_steps_per_second": 1.102 } ], "logging_steps": 25, "max_steps": 2140, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.510586115727032e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }