{ "best_metric": null, "best_model_checkpoint": null, "epoch": 7.861635220125786, "eval_steps": 500, "global_step": 2500, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "eval_accuracy": 0.6267741935483871, "eval_loss": 0.19981417059898376, "eval_runtime": 5.7482, "eval_samples_per_second": 539.296, "eval_steps_per_second": 11.308, "step": 318 }, { "epoch": 1.57, "learning_rate": 1.685534591194969e-05, "loss": 0.324, "step": 500 }, { "epoch": 2.0, "eval_accuracy": 0.8287096774193549, "eval_loss": 0.09645497798919678, "eval_runtime": 5.8849, "eval_samples_per_second": 526.769, "eval_steps_per_second": 11.045, "step": 636 }, { "epoch": 3.0, "eval_accuracy": 0.8841935483870967, "eval_loss": 0.061595458537340164, "eval_runtime": 5.6926, "eval_samples_per_second": 544.564, "eval_steps_per_second": 11.418, "step": 954 }, { "epoch": 3.14, "learning_rate": 1.371069182389937e-05, "loss": 0.1107, "step": 1000 }, { "epoch": 4.0, "eval_accuracy": 0.9035483870967742, "eval_loss": 0.046646881848573685, "eval_runtime": 5.7741, "eval_samples_per_second": 536.883, "eval_steps_per_second": 11.257, "step": 1272 }, { "epoch": 4.72, "learning_rate": 1.0566037735849058e-05, "loss": 0.0692, "step": 1500 }, { "epoch": 5.0, "eval_accuracy": 0.9096774193548387, "eval_loss": 0.03824981674551964, "eval_runtime": 5.8988, "eval_samples_per_second": 525.532, "eval_steps_per_second": 11.019, "step": 1590 }, { "epoch": 6.0, "eval_accuracy": 0.9193548387096774, "eval_loss": 0.033315952867269516, "eval_runtime": 5.6959, "eval_samples_per_second": 544.253, "eval_steps_per_second": 11.412, "step": 1908 }, { "epoch": 6.29, "learning_rate": 7.421383647798742e-06, "loss": 0.0539, "step": 2000 }, { "epoch": 7.0, "eval_accuracy": 0.9235483870967742, "eval_loss": 0.03077365830540657, "eval_runtime": 5.7622, "eval_samples_per_second": 537.993, "eval_steps_per_second": 11.28, "step": 2226 }, { "epoch": 7.86, "learning_rate": 4.276729559748428e-06, "loss": 0.0464, "step": 2500 } ], "logging_steps": 500, "max_steps": 3180, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "total_flos": 708042017121348.0, "train_batch_size": 48, "trial_name": null, "trial_params": { "alpha": 0.8067006596645674, "num_train_epochs": 10, "temperature": 10 } }