{ "best_metric": 0.3966299593448639, "best_model_checkpoint": "mikhail-panzo/ceb_b128_le4_s8000/checkpoint-2000", "epoch": 396.03960396039605, "eval_steps": 500, "global_step": 5000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 3.9603960396039604, "grad_norm": 2.95651912689209, "learning_rate": 2.5e-06, "loss": 0.7707, "step": 50 }, { "epoch": 7.920792079207921, "grad_norm": 1.1273679733276367, "learning_rate": 5e-06, "loss": 0.6934, "step": 100 }, { "epoch": 11.881188118811881, "grad_norm": 2.1830034255981445, "learning_rate": 7.5e-06, "loss": 0.5983, "step": 150 }, { "epoch": 15.841584158415841, "grad_norm": 1.0733364820480347, "learning_rate": 1e-05, "loss": 0.5094, "step": 200 }, { "epoch": 19.801980198019802, "grad_norm": 1.2824387550354004, "learning_rate": 1.25e-05, "loss": 0.4913, "step": 250 }, { "epoch": 23.762376237623762, "grad_norm": 0.9164418578147888, "learning_rate": 1.5e-05, "loss": 0.4764, "step": 300 }, { "epoch": 27.722772277227723, "grad_norm": 0.8938279747962952, "learning_rate": 1.75e-05, "loss": 0.4648, "step": 350 }, { "epoch": 31.683168316831683, "grad_norm": 0.7855221033096313, "learning_rate": 2e-05, "loss": 0.459, "step": 400 }, { "epoch": 35.64356435643565, "grad_norm": 0.9247761964797974, "learning_rate": 2.25e-05, "loss": 0.4519, "step": 450 }, { "epoch": 39.603960396039604, "grad_norm": 1.1086606979370117, "learning_rate": 2.5e-05, "loss": 0.4432, "step": 500 }, { "epoch": 39.603960396039604, "eval_loss": 0.4058004915714264, "eval_runtime": 7.6054, "eval_samples_per_second": 23.667, "eval_steps_per_second": 3.024, "step": 500 }, { "epoch": 43.56435643564357, "grad_norm": 1.3994853496551514, "learning_rate": 2.7500000000000004e-05, "loss": 0.4438, "step": 550 }, { "epoch": 47.524752475247524, "grad_norm": 1.1104933023452759, "learning_rate": 3e-05, "loss": 0.4367, "step": 600 }, { "epoch": 51.48514851485149, "grad_norm": 0.7456745505332947, "learning_rate": 3.2500000000000004e-05, "loss": 0.4315, "step": 650 }, { "epoch": 55.445544554455445, "grad_norm": 1.0349308252334595, "learning_rate": 3.5e-05, "loss": 0.4294, "step": 700 }, { "epoch": 59.40594059405941, "grad_norm": 0.5290626287460327, "learning_rate": 3.7500000000000003e-05, "loss": 0.4234, "step": 750 }, { "epoch": 63.366336633663366, "grad_norm": 1.054995059967041, "learning_rate": 4e-05, "loss": 0.4231, "step": 800 }, { "epoch": 67.32673267326733, "grad_norm": 1.1774096488952637, "learning_rate": 4.25e-05, "loss": 0.4198, "step": 850 }, { "epoch": 71.2871287128713, "grad_norm": 0.7513113617897034, "learning_rate": 4.5e-05, "loss": 0.4182, "step": 900 }, { "epoch": 75.24752475247524, "grad_norm": 0.8465940952301025, "learning_rate": 4.75e-05, "loss": 0.4179, "step": 950 }, { "epoch": 79.20792079207921, "grad_norm": 1.601589322090149, "learning_rate": 5e-05, "loss": 0.4129, "step": 1000 }, { "epoch": 79.20792079207921, "eval_loss": 0.3995668292045593, "eval_runtime": 8.0229, "eval_samples_per_second": 22.436, "eval_steps_per_second": 2.867, "step": 1000 }, { "epoch": 83.16831683168317, "grad_norm": 2.049264669418335, "learning_rate": 5.25e-05, "loss": 0.4129, "step": 1050 }, { "epoch": 87.12871287128714, "grad_norm": 1.0590511560440063, "learning_rate": 5.500000000000001e-05, "loss": 0.4118, "step": 1100 }, { "epoch": 91.08910891089108, "grad_norm": 1.015751600265503, "learning_rate": 5.7499999999999995e-05, "loss": 0.4074, "step": 1150 }, { "epoch": 95.04950495049505, "grad_norm": 1.295837640762329, "learning_rate": 6e-05, 
"loss": 0.4056, "step": 1200 }, { "epoch": 99.00990099009901, "grad_norm": 1.49947988986969, "learning_rate": 6.25e-05, "loss": 0.4052, "step": 1250 }, { "epoch": 102.97029702970298, "grad_norm": 1.3930383920669556, "learning_rate": 6.500000000000001e-05, "loss": 0.4072, "step": 1300 }, { "epoch": 106.93069306930693, "grad_norm": 1.1130273342132568, "learning_rate": 6.750000000000001e-05, "loss": 0.4015, "step": 1350 }, { "epoch": 110.89108910891089, "grad_norm": 2.1075382232666016, "learning_rate": 7e-05, "loss": 0.3994, "step": 1400 }, { "epoch": 114.85148514851485, "grad_norm": 1.8470244407653809, "learning_rate": 7.25e-05, "loss": 0.397, "step": 1450 }, { "epoch": 118.81188118811882, "grad_norm": 2.0715270042419434, "learning_rate": 7.500000000000001e-05, "loss": 0.3992, "step": 1500 }, { "epoch": 118.81188118811882, "eval_loss": 0.39855822920799255, "eval_runtime": 7.8631, "eval_samples_per_second": 22.892, "eval_steps_per_second": 2.925, "step": 1500 }, { "epoch": 122.77227722772277, "grad_norm": 0.9546501636505127, "learning_rate": 7.75e-05, "loss": 0.3934, "step": 1550 }, { "epoch": 126.73267326732673, "grad_norm": 1.1335992813110352, "learning_rate": 8e-05, "loss": 0.3927, "step": 1600 }, { "epoch": 130.69306930693068, "grad_norm": 1.905771017074585, "learning_rate": 8.25e-05, "loss": 0.3895, "step": 1650 }, { "epoch": 134.65346534653466, "grad_norm": 1.129404067993164, "learning_rate": 8.5e-05, "loss": 0.3897, "step": 1700 }, { "epoch": 138.6138613861386, "grad_norm": 0.9640969038009644, "learning_rate": 8.75e-05, "loss": 0.3894, "step": 1750 }, { "epoch": 142.5742574257426, "grad_norm": 0.9840293526649475, "learning_rate": 9e-05, "loss": 0.3862, "step": 1800 }, { "epoch": 146.53465346534654, "grad_norm": 2.770336627960205, "learning_rate": 9.250000000000001e-05, "loss": 0.3861, "step": 1850 }, { "epoch": 150.4950495049505, "grad_norm": 1.7189514636993408, "learning_rate": 9.5e-05, "loss": 0.3914, "step": 1900 }, { "epoch": 154.45544554455446, "grad_norm": 2.454707622528076, "learning_rate": 9.75e-05, "loss": 0.3848, "step": 1950 }, { "epoch": 158.41584158415841, "grad_norm": 2.1555542945861816, "learning_rate": 0.0001, "loss": 0.3814, "step": 2000 }, { "epoch": 158.41584158415841, "eval_loss": 0.3966299593448639, "eval_runtime": 7.7159, "eval_samples_per_second": 23.329, "eval_steps_per_second": 2.981, "step": 2000 }, { "epoch": 162.37623762376236, "grad_norm": 1.1228975057601929, "learning_rate": 9.916666666666667e-05, "loss": 0.3817, "step": 2050 }, { "epoch": 166.33663366336634, "grad_norm": 1.629294991493225, "learning_rate": 9.833333333333333e-05, "loss": 0.3813, "step": 2100 }, { "epoch": 170.2970297029703, "grad_norm": 0.9008976221084595, "learning_rate": 9.75e-05, "loss": 0.377, "step": 2150 }, { "epoch": 174.25742574257427, "grad_norm": 2.174643039703369, "learning_rate": 9.666666666666667e-05, "loss": 0.3737, "step": 2200 }, { "epoch": 178.21782178217822, "grad_norm": 1.617219090461731, "learning_rate": 9.583333333333334e-05, "loss": 0.3736, "step": 2250 }, { "epoch": 182.17821782178217, "grad_norm": 1.292871117591858, "learning_rate": 9.5e-05, "loss": 0.3786, "step": 2300 }, { "epoch": 186.13861386138615, "grad_norm": 1.5530415773391724, "learning_rate": 9.416666666666667e-05, "loss": 0.3726, "step": 2350 }, { "epoch": 190.0990099009901, "grad_norm": 0.9837754964828491, "learning_rate": 9.333333333333334e-05, "loss": 0.3713, "step": 2400 }, { "epoch": 194.05940594059405, "grad_norm": 0.8343392610549927, "learning_rate": 9.250000000000001e-05, "loss": 0.3727, "step": 
2450 }, { "epoch": 198.01980198019803, "grad_norm": 2.9330906867980957, "learning_rate": 9.166666666666667e-05, "loss": 0.3688, "step": 2500 }, { "epoch": 198.01980198019803, "eval_loss": 0.40214845538139343, "eval_runtime": 7.9273, "eval_samples_per_second": 22.706, "eval_steps_per_second": 2.901, "step": 2500 }, { "epoch": 201.98019801980197, "grad_norm": 2.427091598510742, "learning_rate": 9.083333333333334e-05, "loss": 0.3699, "step": 2550 }, { "epoch": 205.94059405940595, "grad_norm": 0.680686891078949, "learning_rate": 9e-05, "loss": 0.3659, "step": 2600 }, { "epoch": 209.9009900990099, "grad_norm": 2.0860631465911865, "learning_rate": 8.916666666666667e-05, "loss": 0.3631, "step": 2650 }, { "epoch": 213.86138613861385, "grad_norm": 0.7303352355957031, "learning_rate": 8.833333333333333e-05, "loss": 0.3656, "step": 2700 }, { "epoch": 217.82178217821783, "grad_norm": 1.2716140747070312, "learning_rate": 8.75e-05, "loss": 0.3627, "step": 2750 }, { "epoch": 221.78217821782178, "grad_norm": 1.5935007333755493, "learning_rate": 8.666666666666667e-05, "loss": 0.3607, "step": 2800 }, { "epoch": 225.74257425742573, "grad_norm": 1.156733512878418, "learning_rate": 8.583333333333334e-05, "loss": 0.3594, "step": 2850 }, { "epoch": 229.7029702970297, "grad_norm": 1.1708168983459473, "learning_rate": 8.5e-05, "loss": 0.3596, "step": 2900 }, { "epoch": 233.66336633663366, "grad_norm": 1.4882978200912476, "learning_rate": 8.416666666666668e-05, "loss": 0.3613, "step": 2950 }, { "epoch": 237.62376237623764, "grad_norm": 1.0416059494018555, "learning_rate": 8.333333333333334e-05, "loss": 0.3574, "step": 3000 }, { "epoch": 237.62376237623764, "eval_loss": 0.39999938011169434, "eval_runtime": 7.4331, "eval_samples_per_second": 24.216, "eval_steps_per_second": 3.094, "step": 3000 }, { "epoch": 241.58415841584159, "grad_norm": 0.9737104773521423, "learning_rate": 8.25e-05, "loss": 0.3555, "step": 3050 }, { "epoch": 245.54455445544554, "grad_norm": 2.4858767986297607, "learning_rate": 8.166666666666667e-05, "loss": 0.3585, "step": 3100 }, { "epoch": 249.5049504950495, "grad_norm": 0.7558372616767883, "learning_rate": 8.083333333333334e-05, "loss": 0.3562, "step": 3150 }, { "epoch": 253.46534653465346, "grad_norm": 0.5738223195075989, "learning_rate": 8e-05, "loss": 0.3515, "step": 3200 }, { "epoch": 257.4257425742574, "grad_norm": 0.8245187401771545, "learning_rate": 7.916666666666666e-05, "loss": 0.3557, "step": 3250 }, { "epoch": 261.38613861386136, "grad_norm": 0.8837783336639404, "learning_rate": 7.833333333333333e-05, "loss": 0.3556, "step": 3300 }, { "epoch": 265.34653465346537, "grad_norm": 1.0626522302627563, "learning_rate": 7.75e-05, "loss": 0.3531, "step": 3350 }, { "epoch": 269.3069306930693, "grad_norm": 0.9158534407615662, "learning_rate": 7.666666666666667e-05, "loss": 0.3504, "step": 3400 }, { "epoch": 273.26732673267327, "grad_norm": 1.1604119539260864, "learning_rate": 7.583333333333334e-05, "loss": 0.3502, "step": 3450 }, { "epoch": 277.2277227722772, "grad_norm": 0.5547149181365967, "learning_rate": 7.500000000000001e-05, "loss": 0.3482, "step": 3500 }, { "epoch": 277.2277227722772, "eval_loss": 0.39900416135787964, "eval_runtime": 7.7665, "eval_samples_per_second": 23.177, "eval_steps_per_second": 2.961, "step": 3500 }, { "epoch": 281.18811881188117, "grad_norm": 1.213918924331665, "learning_rate": 7.416666666666668e-05, "loss": 0.3465, "step": 3550 }, { "epoch": 285.1485148514852, "grad_norm": 1.0824240446090698, "learning_rate": 7.333333333333333e-05, "loss": 0.3467, "step": 3600 }, 
{ "epoch": 289.1089108910891, "grad_norm": 0.8079837560653687, "learning_rate": 7.25e-05, "loss": 0.3457, "step": 3650 }, { "epoch": 293.0693069306931, "grad_norm": 1.1617168188095093, "learning_rate": 7.166666666666667e-05, "loss": 0.3457, "step": 3700 }, { "epoch": 297.029702970297, "grad_norm": 1.0282728672027588, "learning_rate": 7.083333333333334e-05, "loss": 0.3491, "step": 3750 }, { "epoch": 300.990099009901, "grad_norm": 0.8772862553596497, "learning_rate": 7e-05, "loss": 0.3494, "step": 3800 }, { "epoch": 304.9504950495049, "grad_norm": 1.0405845642089844, "learning_rate": 6.916666666666666e-05, "loss": 0.3469, "step": 3850 }, { "epoch": 308.91089108910893, "grad_norm": 0.7217592000961304, "learning_rate": 6.833333333333333e-05, "loss": 0.3449, "step": 3900 }, { "epoch": 312.8712871287129, "grad_norm": 0.7694669365882874, "learning_rate": 6.750000000000001e-05, "loss": 0.341, "step": 3950 }, { "epoch": 316.83168316831683, "grad_norm": 0.6203835010528564, "learning_rate": 6.666666666666667e-05, "loss": 0.3421, "step": 4000 }, { "epoch": 316.83168316831683, "eval_loss": 0.4013901650905609, "eval_runtime": 8.6905, "eval_samples_per_second": 20.712, "eval_steps_per_second": 2.647, "step": 4000 }, { "epoch": 320.7920792079208, "grad_norm": 0.7539447546005249, "learning_rate": 6.583333333333334e-05, "loss": 0.3445, "step": 4050 }, { "epoch": 324.7524752475247, "grad_norm": 0.9108504056930542, "learning_rate": 6.500000000000001e-05, "loss": 0.341, "step": 4100 }, { "epoch": 328.71287128712873, "grad_norm": 0.5291554927825928, "learning_rate": 6.416666666666668e-05, "loss": 0.3412, "step": 4150 }, { "epoch": 332.6732673267327, "grad_norm": 1.2332825660705566, "learning_rate": 6.333333333333333e-05, "loss": 0.3408, "step": 4200 }, { "epoch": 336.63366336633663, "grad_norm": 0.5890567302703857, "learning_rate": 6.25e-05, "loss": 0.339, "step": 4250 }, { "epoch": 340.5940594059406, "grad_norm": 1.05361807346344, "learning_rate": 6.168333333333333e-05, "loss": 0.3388, "step": 4300 }, { "epoch": 344.55445544554453, "grad_norm": 1.6529746055603027, "learning_rate": 6.085000000000001e-05, "loss": 0.3423, "step": 4350 }, { "epoch": 348.51485148514854, "grad_norm": 0.6428557634353638, "learning_rate": 6.0016666666666664e-05, "loss": 0.3406, "step": 4400 }, { "epoch": 352.4752475247525, "grad_norm": 0.832099437713623, "learning_rate": 5.918333333333333e-05, "loss": 0.3399, "step": 4450 }, { "epoch": 356.43564356435644, "grad_norm": 1.4958369731903076, "learning_rate": 5.835e-05, "loss": 0.3414, "step": 4500 }, { "epoch": 356.43564356435644, "eval_loss": 0.4059315621852875, "eval_runtime": 7.3212, "eval_samples_per_second": 24.586, "eval_steps_per_second": 3.142, "step": 4500 }, { "epoch": 360.3960396039604, "grad_norm": 0.7380837798118591, "learning_rate": 5.751666666666667e-05, "loss": 0.3363, "step": 4550 }, { "epoch": 364.35643564356434, "grad_norm": 1.1721248626708984, "learning_rate": 5.668333333333333e-05, "loss": 0.3385, "step": 4600 }, { "epoch": 368.3168316831683, "grad_norm": 0.6880260109901428, "learning_rate": 5.585e-05, "loss": 0.3355, "step": 4650 }, { "epoch": 372.2772277227723, "grad_norm": 0.614639401435852, "learning_rate": 5.501666666666667e-05, "loss": 0.3378, "step": 4700 }, { "epoch": 376.23762376237624, "grad_norm": 0.6579806208610535, "learning_rate": 5.4183333333333334e-05, "loss": 0.3342, "step": 4750 }, { "epoch": 380.1980198019802, "grad_norm": 0.7675396203994751, "learning_rate": 5.335e-05, "loss": 0.3333, "step": 4800 }, { "epoch": 384.15841584158414, "grad_norm": 
0.6529810428619385, "learning_rate": 5.251666666666667e-05, "loss": 0.3348, "step": 4850 }, { "epoch": 388.1188118811881, "grad_norm": 0.6606137752532959, "learning_rate": 5.168333333333334e-05, "loss": 0.3351, "step": 4900 }, { "epoch": 392.0792079207921, "grad_norm": 0.5794258713722229, "learning_rate": 5.0849999999999996e-05, "loss": 0.3319, "step": 4950 }, { "epoch": 396.03960396039605, "grad_norm": 0.8365175127983093, "learning_rate": 5.0016666666666665e-05, "loss": 0.3328, "step": 5000 }, { "epoch": 396.03960396039605, "eval_loss": 0.40650251507759094, "eval_runtime": 7.4758, "eval_samples_per_second": 24.078, "eval_steps_per_second": 3.077, "step": 5000 } ], "logging_steps": 50, "max_steps": 8000, "num_input_tokens_seen": 0, "num_train_epochs": 667, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.080405925015103e+17, "train_batch_size": 16, "trial_name": null, "trial_params": null }
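
The object above follows the standard `trainer_state.json` layout that the Hugging Face `Trainer` writes alongside each checkpoint: scalar run-level fields plus a `log_history` list in which training entries carry `loss` and evaluation entries carry `eval_loss`. As a minimal sketch (not part of the original file), the Python below shows one way to read such a state file and extract the loss curves; the local filename `trainer_state.json`, the output filename `loss_curves.png`, and the use of `matplotlib` are assumptions for illustration.

```python
# Sketch only: load a trainer_state.json like the one above and plot its loss curves.
# Assumes the JSON is saved locally as "trainer_state.json" and matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Training log entries contain "loss"; evaluation entries contain "eval_loss".
train_steps = [e["step"] for e in history if "loss" in e]
train_loss = [e["loss"] for e in history if "loss" in e]
eval_steps = [e["step"] for e in history if "eval_loss" in e]
eval_loss = [e["eval_loss"] for e in history if "eval_loss" in e]

# Run-level summary fields recorded by the Trainer.
print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
print("steps logged:", state["global_step"], "of", state["max_steps"])

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```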