{ "best_metric": 2.3333284854888916, "best_model_checkpoint": "./output/training_results/C019_random_sample_Meta-Llama-3-8B_pretrain_20240726_033210/checkpoint-40700", "epoch": 4.0, "eval_steps": 4070, "global_step": 40700, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 9.828009828009828e-05, "grad_norm": 0.0, "learning_rate": 0.0, "loss": 2.7384, "step": 1 }, { "epoch": 0.2, "grad_norm": 2.155671963962152, "learning_rate": 1.9927939731411727e-06, "loss": 2.4775, "step": 2035 }, { "epoch": 0.4, "grad_norm": 2.0610799346731716, "learning_rate": 2.238606233181306e-06, "loss": 2.404, "step": 4070 }, { "epoch": 0.4, "eval_loss": 2.388622283935547, "eval_runtime": 386.8721, "eval_samples_per_second": 187.02, "eval_steps_per_second": 1.463, "step": 4070 }, { "epoch": 0.6, "grad_norm": 1.852185161682542, "learning_rate": 1.2183995585808034e-06, "loss": 2.3695, "step": 6105 }, { "epoch": 0.8, "grad_norm": 1.963949683462878, "learning_rate": 6.503021712782451e-07, "loss": 2.3519, "step": 8140 }, { "epoch": 0.8, "eval_loss": 2.347768545150757, "eval_runtime": 353.4879, "eval_samples_per_second": 204.683, "eval_steps_per_second": 1.601, "step": 8140 }, { "epoch": 1.0, "grad_norm": 1.8257205248825334, "learning_rate": 3.454633956957804e-07, "loss": 2.341, "step": 10175 }, { "epoch": 1.2, "grad_norm": 1.934651438097829, "learning_rate": 1.8843776358550853e-07, "loss": 2.272, "step": 12210 }, { "epoch": 1.2, "eval_loss": 2.3399369716644287, "eval_runtime": 353.2432, "eval_samples_per_second": 204.825, "eval_steps_per_second": 1.602, "step": 12210 }, { "epoch": 1.4, "grad_norm": 1.9087851196083852, "learning_rate": 1.1134838176940004e-07, "loss": 2.2758, "step": 14245 }, { "epoch": 1.6, "grad_norm": 1.9434977796166109, "learning_rate": 7.549690625694449e-08, "loss": 2.2737, "step": 16280 }, { "epoch": 1.6, "eval_loss": 2.3373029232025146, "eval_runtime": 353.221, "eval_samples_per_second": 204.838, "eval_steps_per_second": 1.602, "step": 16280 }, { "epoch": 1.8, "grad_norm": 1.8796392127857566, "learning_rate": 5.980471884190691e-08, "loss": 2.2725, "step": 18315 }, { "epoch": 2.0, "grad_norm": 1.9345766812761183, "learning_rate": 5.3444261786522375e-08, "loss": 2.2735, "step": 20350 }, { "epoch": 2.0, "eval_loss": 2.3360562324523926, "eval_runtime": 353.5294, "eval_samples_per_second": 204.659, "eval_steps_per_second": 1.601, "step": 20350 }, { "epoch": 2.2, "grad_norm": 1.941719791382991, "learning_rate": 5.108383189787308e-08, "loss": 2.2617, "step": 22385 }, { "epoch": 2.4, "grad_norm": 1.9268741466862327, "learning_rate": 5.029810361474273e-08, "loss": 2.2629, "step": 24420 }, { "epoch": 2.4, "eval_loss": 2.3360707759857178, "eval_runtime": 353.1028, "eval_samples_per_second": 204.906, "eval_steps_per_second": 1.603, "step": 24420 }, { "epoch": 2.6, "grad_norm": 1.9707963592912294, "learning_rate": 5.006893878600473e-08, "loss": 2.2617, "step": 26455 }, { "epoch": 2.8, "grad_norm": 1.9597319665023907, "learning_rate": 5.001272696084112e-08, "loss": 2.2655, "step": 28490 }, { "epoch": 2.8, "eval_loss": 2.3352677822113037, "eval_runtime": 353.4276, "eval_samples_per_second": 204.718, "eval_steps_per_second": 1.601, "step": 28490 }, { "epoch": 3.0, "grad_norm": 34.02276184361365, "learning_rate": 5.0001729586514593e-08, "loss": 2.2615, "step": 30525 }, { "epoch": 3.2, "grad_norm": 2.124382265375981, "learning_rate": 5.0000150744726626e-08, "loss": 2.2567, "step": 32560 }, { "epoch": 3.2, "eval_loss": 2.334810733795166, 
"eval_runtime": 353.1613, "eval_samples_per_second": 204.872, "eval_steps_per_second": 1.603, "step": 32560 }, { "epoch": 3.4, "grad_norm": 1.9867886821885041, "learning_rate": 5.000000648758777e-08, "loss": 2.2555, "step": 34595 }, { "epoch": 3.6, "grad_norm": 2.0413183498941763, "learning_rate": 5.000000007849755e-08, "loss": 2.2581, "step": 36630 }, { "epoch": 3.6, "eval_loss": 2.3342058658599854, "eval_runtime": 353.3438, "eval_samples_per_second": 204.767, "eval_steps_per_second": 1.602, "step": 36630 }, { "epoch": 3.8, "grad_norm": 1.954005554664922, "learning_rate": 5.000000000004389e-08, "loss": 2.2592, "step": 38665 }, { "epoch": 4.0, "grad_norm": 1.9904895703350474, "learning_rate": 5e-08, "loss": 2.2607, "step": 40700 }, { "epoch": 4.0, "eval_loss": 2.3333284854888916, "eval_runtime": 353.3086, "eval_samples_per_second": 204.787, "eval_steps_per_second": 1.602, "step": 40700 }, { "epoch": 4.0, "step": 40700, "total_flos": 4255641501696000.0, "train_loss": 2.2957492570853644, "train_runtime": 58446.3006, "train_samples_per_second": 44.566, "train_steps_per_second": 0.696 } ], "logging_steps": 2035, "max_steps": 40700, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 4070, "total_flos": 4255641501696000.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }