{ "best_metric": null, "best_model_checkpoint": null, "epoch": 20.0, "eval_steps": 500, "global_step": 300, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.3937278985977173, "learning_rate": 0.00019, "loss": 1.0622, "step": 15 }, { "epoch": 2.0, "grad_norm": 1.5334441661834717, "learning_rate": 0.00018, "loss": 0.8896, "step": 30 }, { "epoch": 3.0, "grad_norm": 1.8579671382904053, "learning_rate": 0.00017, "loss": 0.7804, "step": 45 }, { "epoch": 4.0, "grad_norm": 1.8313461542129517, "learning_rate": 0.00016, "loss": 0.6891, "step": 60 }, { "epoch": 5.0, "grad_norm": 1.5978763103485107, "learning_rate": 0.00015000000000000001, "loss": 0.5876, "step": 75 }, { "epoch": 6.0, "grad_norm": 1.7626159191131592, "learning_rate": 0.00014, "loss": 0.5168, "step": 90 }, { "epoch": 7.0, "grad_norm": 1.6013137102127075, "learning_rate": 0.00013000000000000002, "loss": 0.444, "step": 105 }, { "epoch": 8.0, "grad_norm": 1.9760432243347168, "learning_rate": 0.00012, "loss": 0.3858, "step": 120 }, { "epoch": 9.0, "grad_norm": 1.9180957078933716, "learning_rate": 0.00011000000000000002, "loss": 0.3373, "step": 135 }, { "epoch": 10.0, "grad_norm": 1.409602403640747, "learning_rate": 0.0001, "loss": 0.2891, "step": 150 }, { "epoch": 11.0, "grad_norm": 1.8191059827804565, "learning_rate": 9e-05, "loss": 0.2519, "step": 165 }, { "epoch": 12.0, "grad_norm": 1.5950895547866821, "learning_rate": 8e-05, "loss": 0.2237, "step": 180 }, { "epoch": 13.0, "grad_norm": 1.4105021953582764, "learning_rate": 7e-05, "loss": 0.1976, "step": 195 }, { "epoch": 14.0, "grad_norm": 2.034846305847168, "learning_rate": 6e-05, "loss": 0.1792, "step": 210 }, { "epoch": 15.0, "grad_norm": 1.1015961170196533, "learning_rate": 5e-05, "loss": 0.1618, "step": 225 }, { "epoch": 16.0, "grad_norm": 1.245694637298584, "learning_rate": 4e-05, "loss": 0.1421, "step": 240 }, { "epoch": 17.0, "grad_norm": 1.5578792095184326, "learning_rate": 3e-05, "loss": 0.1361, "step": 255 }, { "epoch": 18.0, "grad_norm": 1.452083706855774, "learning_rate": 2e-05, "loss": 0.1245, "step": 270 }, { "epoch": 19.0, "grad_norm": 1.1172585487365723, "learning_rate": 1e-05, "loss": 0.1173, "step": 285 }, { "epoch": 20.0, "grad_norm": 1.3190027475357056, "learning_rate": 0.0, "loss": 0.1142, "step": 300 } ], "logging_steps": 500, "max_steps": 300, "num_input_tokens_seen": 0, "num_train_epochs": 20, "save_steps": 500, "total_flos": 4538269544755200.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }