{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.867383512544803, "eval_steps": 2000, "global_step": 4000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.07, "grad_norm": 2.5333876609802246, "learning_rate": 9.9e-07, "loss": 0.9981, "step": 100 }, { "epoch": 0.14, "grad_norm": 2.1556735038757324, "learning_rate": 9.9e-07, "loss": 0.7461, "step": 200 }, { "epoch": 0.22, "grad_norm": 2.5188281536102295, "learning_rate": 9.7989898989899e-07, "loss": 0.6707, "step": 300 }, { "epoch": 0.29, "grad_norm": 2.559631824493408, "learning_rate": 9.697979797979798e-07, "loss": 0.6281, "step": 400 }, { "epoch": 0.36, "grad_norm": 3.3940675258636475, "learning_rate": 9.596969696969696e-07, "loss": 0.6437, "step": 500 }, { "epoch": 0.43, "grad_norm": 2.0749378204345703, "learning_rate": 9.495959595959595e-07, "loss": 0.6319, "step": 600 }, { "epoch": 0.5, "grad_norm": 2.3027114868164062, "learning_rate": 9.394949494949495e-07, "loss": 0.6477, "step": 700 }, { "epoch": 0.57, "grad_norm": 1.6429717540740967, "learning_rate": 9.293939393939394e-07, "loss": 0.6564, "step": 800 }, { "epoch": 0.65, "grad_norm": 2.391716718673706, "learning_rate": 9.192929292929292e-07, "loss": 0.6263, "step": 900 }, { "epoch": 0.72, "grad_norm": 2.9794795513153076, "learning_rate": 9.091919191919192e-07, "loss": 0.6422, "step": 1000 }, { "epoch": 0.79, "grad_norm": 2.853337049484253, "learning_rate": 8.99090909090909e-07, "loss": 0.6043, "step": 1100 }, { "epoch": 0.86, "grad_norm": 2.453305244445801, "learning_rate": 8.88989898989899e-07, "loss": 0.5914, "step": 1200 }, { "epoch": 0.93, "grad_norm": 2.768860340118408, "learning_rate": 8.788888888888889e-07, "loss": 0.6194, "step": 1300 }, { "epoch": 1.0, "grad_norm": 4.065279483795166, "learning_rate": 8.687878787878787e-07, "loss": 0.6038, "step": 1400 }, { "epoch": 1.08, "grad_norm": 3.8275232315063477, "learning_rate": 8.586868686868687e-07, "loss": 0.6038, "step": 1500 }, { "epoch": 1.15, "grad_norm": 2.6090586185455322, "learning_rate": 8.485858585858586e-07, "loss": 0.592, "step": 1600 }, { "epoch": 1.22, "grad_norm": 1.9870526790618896, "learning_rate": 8.384848484848484e-07, "loss": 0.5885, "step": 1700 }, { "epoch": 1.29, "grad_norm": 2.2942471504211426, "learning_rate": 8.283838383838383e-07, "loss": 0.622, "step": 1800 }, { "epoch": 1.36, "grad_norm": 2.0904691219329834, "learning_rate": 8.182828282828283e-07, "loss": 0.5857, "step": 1900 }, { "epoch": 1.43, "grad_norm": 2.909604072570801, "learning_rate": 8.081818181818182e-07, "loss": 0.5841, "step": 2000 }, { "epoch": 1.43, "eval_loss": 0.7236512303352356, "eval_runtime": 192.8239, "eval_samples_per_second": 5.186, "eval_steps_per_second": 0.648, "step": 2000 }, { "epoch": 1.51, "grad_norm": 2.645550012588501, "learning_rate": 7.98080808080808e-07, "loss": 0.6013, "step": 2100 }, { "epoch": 1.58, "grad_norm": 2.5540435314178467, "learning_rate": 7.88080808080808e-07, "loss": 0.6049, "step": 2200 }, { "epoch": 1.65, "grad_norm": 2.3379604816436768, "learning_rate": 7.779797979797979e-07, "loss": 0.5794, "step": 2300 }, { "epoch": 1.72, "grad_norm": 2.4884698390960693, "learning_rate": 7.678787878787878e-07, "loss": 0.5832, "step": 2400 }, { "epoch": 1.79, "grad_norm": 2.4760003089904785, "learning_rate": 7.577777777777778e-07, "loss": 0.5852, "step": 2500 }, { "epoch": 1.86, "grad_norm": 2.334510087966919, "learning_rate": 7.476767676767676e-07, "loss": 0.5825, "step": 2600 }, { "epoch": 1.94, "grad_norm": 
3.3062024116516113, "learning_rate": 7.375757575757575e-07, "loss": 0.5739, "step": 2700 }, { "epoch": 2.01, "grad_norm": 3.0897557735443115, "learning_rate": 7.274747474747475e-07, "loss": 0.5823, "step": 2800 }, { "epoch": 2.08, "grad_norm": 3.0633504390716553, "learning_rate": 7.173737373737373e-07, "loss": 0.5625, "step": 2900 }, { "epoch": 2.15, "grad_norm": 3.3426527976989746, "learning_rate": 7.072727272727272e-07, "loss": 0.5484, "step": 3000 }, { "epoch": 2.22, "grad_norm": 2.7486729621887207, "learning_rate": 6.971717171717172e-07, "loss": 0.5853, "step": 3100 }, { "epoch": 2.29, "grad_norm": 2.5889084339141846, "learning_rate": 6.87070707070707e-07, "loss": 0.568, "step": 3200 }, { "epoch": 2.37, "grad_norm": 2.474393606185913, "learning_rate": 6.76969696969697e-07, "loss": 0.5699, "step": 3300 }, { "epoch": 2.44, "grad_norm": 2.99786114692688, "learning_rate": 6.668686868686869e-07, "loss": 0.5723, "step": 3400 }, { "epoch": 2.51, "grad_norm": 2.888652801513672, "learning_rate": 6.567676767676767e-07, "loss": 0.5942, "step": 3500 }, { "epoch": 2.58, "grad_norm": 3.20930552482605, "learning_rate": 6.466666666666666e-07, "loss": 0.5827, "step": 3600 }, { "epoch": 2.65, "grad_norm": 2.312199592590332, "learning_rate": 6.365656565656565e-07, "loss": 0.5644, "step": 3700 }, { "epoch": 2.72, "grad_norm": 3.1894469261169434, "learning_rate": 6.264646464646465e-07, "loss": 0.5409, "step": 3800 }, { "epoch": 2.8, "grad_norm": 2.740865468978882, "learning_rate": 6.163636363636363e-07, "loss": 0.5518, "step": 3900 }, { "epoch": 2.87, "grad_norm": 4.003296375274658, "learning_rate": 6.062626262626262e-07, "loss": 0.5796, "step": 4000 }, { "epoch": 2.87, "eval_loss": 0.7056564688682556, "eval_runtime": 192.8797, "eval_samples_per_second": 5.185, "eval_steps_per_second": 0.648, "step": 4000 } ], "logging_steps": 100, "max_steps": 10000, "num_input_tokens_seen": 0, "num_train_epochs": 8, "save_steps": 2000, "total_flos": 7.539739430769132e+17, "train_batch_size": 8, "trial_name": null, "trial_params": null }