{ "best_metric": null, "best_model_checkpoint": null, "epoch": 4.88786658999425, "eval_steps": 500, "global_step": 8500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.2875215641173088, "grad_norm": 423.2573547363281, "learning_rate": 4.712478435882691e-05, "loss": 17.6035, "step": 500 }, { "epoch": 0.5750431282346176, "grad_norm": 9.149832725524902, "learning_rate": 4.4249568717653824e-05, "loss": 2.5022, "step": 1000 }, { "epoch": 0.8625646923519263, "grad_norm": 3.246171236038208, "learning_rate": 4.137435307648074e-05, "loss": 1.3833, "step": 1500 }, { "epoch": 1.1500862564692351, "grad_norm": 5.592076778411865, "learning_rate": 3.8499137435307647e-05, "loss": 1.0705, "step": 2000 }, { "epoch": 1.437607820586544, "grad_norm": 12.64777660369873, "learning_rate": 3.562392179413456e-05, "loss": 0.9577, "step": 2500 }, { "epoch": 1.7251293847038527, "grad_norm": 0.7939251065254211, "learning_rate": 3.2748706152961475e-05, "loss": 0.919, "step": 3000 }, { "epoch": 2.0126509488211615, "grad_norm": 0.4556220769882202, "learning_rate": 2.9873490511788386e-05, "loss": 0.8873, "step": 3500 }, { "epoch": 2.3001725129384702, "grad_norm": 0.45749151706695557, "learning_rate": 2.6998274870615297e-05, "loss": 0.8553, "step": 4000 }, { "epoch": 2.587694077055779, "grad_norm": 0.6427181363105774, "learning_rate": 2.4123059229442212e-05, "loss": 0.8427, "step": 4500 }, { "epoch": 2.875215641173088, "grad_norm": 0.35242655873298645, "learning_rate": 2.124784358826912e-05, "loss": 0.8227, "step": 5000 }, { "epoch": 3.1627372052903966, "grad_norm": 0.5690515041351318, "learning_rate": 1.8372627947096034e-05, "loss": 0.808, "step": 5500 }, { "epoch": 3.4502587694077054, "grad_norm": 0.46968725323677063, "learning_rate": 1.5497412305922945e-05, "loss": 0.8198, "step": 6000 }, { "epoch": 3.7377803335250146, "grad_norm": 0.4196397662162781, "learning_rate": 1.2622196664749858e-05, "loss": 0.8029, "step": 6500 }, { "epoch": 4.025301897642323, "grad_norm": 0.5571260452270508, "learning_rate": 9.746981023576769e-06, "loss": 0.8049, "step": 7000 }, { "epoch": 4.312823461759632, "grad_norm": 0.5123286247253418, "learning_rate": 6.871765382403681e-06, "loss": 0.7934, "step": 7500 }, { "epoch": 4.6003450258769405, "grad_norm": 0.3241867423057556, "learning_rate": 3.9965497412305925e-06, "loss": 0.796, "step": 8000 }, { "epoch": 4.88786658999425, "grad_norm": 0.4111732542514801, "learning_rate": 1.1213341000575044e-06, "loss": 0.7851, "step": 8500 } ], "logging_steps": 500, "max_steps": 8695, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 4492786123407360.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }