{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.4922118380062304, "eval_steps": 500, "global_step": 800, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.1557632398753894, "grad_norm": 5.09375, "learning_rate": 3.3333333333333335e-05, "loss": 2.6073, "step": 50 }, { "epoch": 0.3115264797507788, "grad_norm": 3.53125, "learning_rate": 6.666666666666667e-05, "loss": 0.8752, "step": 100 }, { "epoch": 0.4672897196261682, "grad_norm": 1.546875, "learning_rate": 0.0001, "loss": 0.5161, "step": 150 }, { "epoch": 0.6230529595015576, "grad_norm": 2.3125, "learning_rate": 0.00013333333333333334, "loss": 0.3785, "step": 200 }, { "epoch": 0.778816199376947, "grad_norm": 2.125, "learning_rate": 0.0001666666666666667, "loss": 0.293, "step": 250 }, { "epoch": 0.9345794392523364, "grad_norm": 2.0, "learning_rate": 0.0002, "loss": 0.252, "step": 300 }, { "epoch": 1.0903426791277258, "grad_norm": 1.9921875, "learning_rate": 0.00018491704374057317, "loss": 0.2341, "step": 350 }, { "epoch": 1.2461059190031152, "grad_norm": 2.015625, "learning_rate": 0.0001698340874811463, "loss": 0.183, "step": 400 }, { "epoch": 1.4018691588785046, "grad_norm": 1.8046875, "learning_rate": 0.00015475113122171948, "loss": 0.1659, "step": 450 }, { "epoch": 1.557632398753894, "grad_norm": 1.5390625, "learning_rate": 0.0001396681749622926, "loss": 0.1368, "step": 500 }, { "epoch": 1.7133956386292835, "grad_norm": 0.408203125, "learning_rate": 0.00012458521870286577, "loss": 0.104, "step": 550 }, { "epoch": 1.8691588785046729, "grad_norm": 2.671875, "learning_rate": 0.00010950226244343893, "loss": 0.088, "step": 600 }, { "epoch": 2.0249221183800623, "grad_norm": 0.91015625, "learning_rate": 9.441930618401207e-05, "loss": 0.0903, "step": 650 }, { "epoch": 2.1806853582554515, "grad_norm": 1.8046875, "learning_rate": 7.933634992458521e-05, "loss": 0.0713, "step": 700 }, { "epoch": 2.336448598130841, "grad_norm": 0.2392578125, "learning_rate": 6.425339366515838e-05, "loss": 0.061, "step": 750 }, { "epoch": 2.4922118380062304, "grad_norm": 1.0, "learning_rate": 4.917043740573152e-05, "loss": 0.0581, "step": 800 } ], "logging_steps": 50, "max_steps": 963, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 200, "total_flos": 0.0, "train_batch_size": 64, "trial_name": null, "trial_params": null }