{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.4375, "eval_steps": 2500, "global_step": 17500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03, "grad_norm": 2.9973244667053223, "learning_rate": 1.971072463768116e-05, "loss": 1.6969, "step": 1000 }, { "epoch": 0.05, "grad_norm": 7.261388778686523, "learning_rate": 1.9131014492753626e-05, "loss": 1.5602, "step": 2000 }, { "epoch": 0.06, "eval_loss": 1.513683795928955, "eval_runtime": 202.2713, "eval_samples_per_second": 4.944, "eval_steps_per_second": 4.944, "step": 2500 }, { "epoch": 0.07, "grad_norm": 3.314629316329956, "learning_rate": 1.8551304347826088e-05, "loss": 1.5222, "step": 3000 }, { "epoch": 0.1, "grad_norm": 3.6707098484039307, "learning_rate": 1.7971594202898553e-05, "loss": 1.4933, "step": 4000 }, { "epoch": 0.12, "grad_norm": 1.160583257675171, "learning_rate": 1.7392463768115943e-05, "loss": 1.5056, "step": 5000 }, { "epoch": 0.12, "eval_loss": 1.4657692909240723, "eval_runtime": 201.9515, "eval_samples_per_second": 4.952, "eval_steps_per_second": 4.952, "step": 5000 }, { "epoch": 0.15, "grad_norm": 2.666102170944214, "learning_rate": 1.6812753623188408e-05, "loss": 1.4754, "step": 6000 }, { "epoch": 0.17, "grad_norm": 2.95894455909729, "learning_rate": 1.623304347826087e-05, "loss": 1.4816, "step": 7000 }, { "epoch": 0.19, "eval_loss": 1.4514989852905273, "eval_runtime": 201.9852, "eval_samples_per_second": 4.951, "eval_steps_per_second": 4.951, "step": 7500 }, { "epoch": 0.2, "grad_norm": 6.616883277893066, "learning_rate": 1.5653333333333335e-05, "loss": 1.4414, "step": 8000 }, { "epoch": 0.23, "grad_norm": 4.114346027374268, "learning_rate": 1.5074202898550725e-05, "loss": 1.4392, "step": 9000 }, { "epoch": 0.25, "grad_norm": 9.38364028930664, "learning_rate": 1.4495072463768116e-05, "loss": 1.4307, "step": 10000 }, { "epoch": 0.25, "eval_loss": 1.4246548414230347, "eval_runtime": 201.9169, "eval_samples_per_second": 4.953, "eval_steps_per_second": 4.953, "step": 10000 }, { "epoch": 0.28, "grad_norm": 3.899681806564331, "learning_rate": 1.3915362318840582e-05, "loss": 1.4313, "step": 11000 }, { "epoch": 0.3, "grad_norm": 2.839545965194702, "learning_rate": 1.3336231884057973e-05, "loss": 1.415, "step": 12000 }, { "epoch": 0.31, "eval_loss": 1.4323452711105347, "eval_runtime": 202.4132, "eval_samples_per_second": 4.94, "eval_steps_per_second": 4.94, "step": 12500 }, { "epoch": 0.33, "grad_norm": 3.805086612701416, "learning_rate": 1.2756521739130436e-05, "loss": 1.4003, "step": 13000 }, { "epoch": 0.35, "grad_norm": 6.0652241706848145, "learning_rate": 1.21768115942029e-05, "loss": 1.3952, "step": 14000 }, { "epoch": 0.38, "grad_norm": 8.923527717590332, "learning_rate": 1.1597681159420292e-05, "loss": 1.3933, "step": 15000 }, { "epoch": 0.38, "eval_loss": 1.3821483850479126, "eval_runtime": 201.8609, "eval_samples_per_second": 4.954, "eval_steps_per_second": 4.954, "step": 15000 }, { "epoch": 0.4, "grad_norm": 5.487946033477783, "learning_rate": 1.1017971014492755e-05, "loss": 1.3825, "step": 16000 }, { "epoch": 0.42, "grad_norm": 2.9339723587036133, "learning_rate": 1.0438260869565218e-05, "loss": 1.373, "step": 17000 }, { "epoch": 0.44, "eval_loss": 1.371016263961792, "eval_runtime": 201.6518, "eval_samples_per_second": 4.959, "eval_steps_per_second": 4.959, "step": 17500 } ], "logging_steps": 1000, "max_steps": 35000, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 2500, "total_flos": 2.8178720489472e+17, 
"train_batch_size": 1, "trial_name": null, "trial_params": null }