{ "best_metric": 1.0493139028549194, "best_model_checkpoint": "out/checkpoint-7", "epoch": 0.009992862241256246, "eval_steps": 1, "global_step": 7, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 8.742387771606445, "learning_rate": 0.0002, "loss": 2.8568, "step": 1 }, { "epoch": 0.0, "eval_loss": 2.0093040466308594, "eval_runtime": 1917.976, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 1 }, { "epoch": 0.0, "grad_norm": 4.034029006958008, "learning_rate": 0.0002, "loss": 2.0005, "step": 2 }, { "epoch": 0.0, "eval_loss": 1.51231050491333, "eval_runtime": 1918.2314, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 2 }, { "epoch": 0.0, "grad_norm": 3.6795763969421387, "learning_rate": 0.0002, "loss": 1.5174, "step": 3 }, { "epoch": 0.0, "eval_loss": 1.2310951948165894, "eval_runtime": 1918.2692, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 3 }, { "epoch": 0.01, "grad_norm": 1.265252709388733, "learning_rate": 0.0002, "loss": 1.1661, "step": 4 }, { "epoch": 0.01, "eval_loss": 1.1966514587402344, "eval_runtime": 1918.1163, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 4 }, { "epoch": 0.01, "grad_norm": 3.3897781372070312, "learning_rate": 0.0002, "loss": 1.1807, "step": 5 }, { "epoch": 0.01, "eval_loss": 1.1253794431686401, "eval_runtime": 1918.161, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 5 }, { "epoch": 0.01, "grad_norm": 0.9615206718444824, "learning_rate": 0.0002, "loss": 1.1417, "step": 6 }, { "epoch": 0.01, "eval_loss": 1.085837721824646, "eval_runtime": 1918.0897, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 6 }, { "epoch": 0.01, "grad_norm": 0.8961493372917175, "learning_rate": 0.0002, "loss": 1.1129, "step": 7 }, { "epoch": 0.01, "eval_loss": 1.0493139028549194, "eval_runtime": 1918.1075, "eval_samples_per_second": 0.365, "eval_steps_per_second": 0.365, "step": 7 } ], "logging_steps": 1, "max_steps": 10, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 1, "total_flos": 1226868850163712.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }