{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.06431281754453663, "eval_steps": 1000, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 58.440006256103516, "learning_rate": 1e-08, "loss": 1.3813, "step": 25 }, { "epoch": 0.0, "grad_norm": 59.934757232666016, "learning_rate": 2.2e-08, "loss": 1.469, "step": 50 }, { "epoch": 0.0, "grad_norm": 31.011018753051758, "learning_rate": 3.4500000000000005e-08, "loss": 1.2226, "step": 75 }, { "epoch": 0.01, "grad_norm": 59.818233489990234, "learning_rate": 4.7e-08, "loss": 1.2458, "step": 100 }, { "epoch": 0.01, "grad_norm": 60.51572036743164, "learning_rate": 5.95e-08, "loss": 1.2781, "step": 125 }, { "epoch": 0.01, "grad_norm": 51.360103607177734, "learning_rate": 7.2e-08, "loss": 1.4055, "step": 150 }, { "epoch": 0.01, "grad_norm": 73.35002136230469, "learning_rate": 8.45e-08, "loss": 1.3354, "step": 175 }, { "epoch": 0.01, "grad_norm": 69.32823944091797, "learning_rate": 9.7e-08, "loss": 1.2005, "step": 200 }, { "epoch": 0.01, "grad_norm": 51.02174377441406, "learning_rate": 1.095e-07, "loss": 1.3853, "step": 225 }, { "epoch": 0.02, "grad_norm": 72.20179748535156, "learning_rate": 1.2199999999999998e-07, "loss": 1.4476, "step": 250 }, { "epoch": 0.02, "grad_norm": 108.30382537841797, "learning_rate": 1.345e-07, "loss": 1.2339, "step": 275 }, { "epoch": 0.02, "grad_norm": 66.15994262695312, "learning_rate": 1.4699999999999998e-07, "loss": 1.379, "step": 300 }, { "epoch": 0.02, "grad_norm": 47.82923126220703, "learning_rate": 1.595e-07, "loss": 1.1467, "step": 325 }, { "epoch": 0.02, "grad_norm": 85.7218246459961, "learning_rate": 1.7199999999999998e-07, "loss": 1.1622, "step": 350 }, { "epoch": 0.02, "grad_norm": 68.25504302978516, "learning_rate": 1.845e-07, "loss": 1.1413, "step": 375 }, { "epoch": 0.03, "grad_norm": 106.06077575683594, "learning_rate": 1.97e-07, "loss": 1.0855, "step": 400 }, { "epoch": 0.03, "grad_norm": 79.60690307617188, "learning_rate": 2.095e-07, "loss": 0.929, "step": 425 }, { "epoch": 0.03, "grad_norm": 42.14814376831055, "learning_rate": 2.22e-07, "loss": 0.8728, "step": 450 }, { "epoch": 0.03, "grad_norm": 37.4913444519043, "learning_rate": 2.3449999999999996e-07, "loss": 0.6651, "step": 475 }, { "epoch": 0.03, "grad_norm": 41.89991760253906, "learning_rate": 2.47e-07, "loss": 0.5875, "step": 500 }, { "epoch": 0.03, "grad_norm": 75.21453094482422, "learning_rate": 2.595e-07, "loss": 0.6868, "step": 525 }, { "epoch": 0.04, "grad_norm": 21.09180450439453, "learning_rate": 2.72e-07, "loss": 0.741, "step": 550 }, { "epoch": 0.04, "grad_norm": 44.54707336425781, "learning_rate": 2.845e-07, "loss": 0.3898, "step": 575 }, { "epoch": 0.04, "grad_norm": 31.656843185424805, "learning_rate": 2.9699999999999997e-07, "loss": 0.422, "step": 600 }, { "epoch": 0.04, "grad_norm": 56.28642654418945, "learning_rate": 3.0949999999999996e-07, "loss": 0.3803, "step": 625 }, { "epoch": 0.04, "grad_norm": 38.66410827636719, "learning_rate": 3.22e-07, "loss": 0.5062, "step": 650 }, { "epoch": 0.04, "grad_norm": 31.183727264404297, "learning_rate": 3.345e-07, "loss": 0.4075, "step": 675 }, { "epoch": 0.05, "grad_norm": 23.618703842163086, "learning_rate": 3.4699999999999997e-07, "loss": 0.3627, "step": 700 }, { "epoch": 0.05, "grad_norm": 70.09487915039062, "learning_rate": 3.5949999999999996e-07, "loss": 0.3087, "step": 725 }, { "epoch": 0.05, "grad_norm": 74.42188262939453, "learning_rate": 3.72e-07, "loss": 0.4021, 
"step": 750 }, { "epoch": 0.05, "grad_norm": 44.99939727783203, "learning_rate": 3.845e-07, "loss": 0.3203, "step": 775 }, { "epoch": 0.05, "grad_norm": 42.77998352050781, "learning_rate": 3.97e-07, "loss": 0.3797, "step": 800 }, { "epoch": 0.05, "grad_norm": 64.61412811279297, "learning_rate": 4.0949999999999995e-07, "loss": 0.3403, "step": 825 }, { "epoch": 0.05, "grad_norm": 29.286806106567383, "learning_rate": 4.2199999999999994e-07, "loss": 0.2879, "step": 850 }, { "epoch": 0.06, "grad_norm": 58.146263122558594, "learning_rate": 4.345e-07, "loss": 0.4017, "step": 875 }, { "epoch": 0.06, "grad_norm": 44.624202728271484, "learning_rate": 4.4699999999999997e-07, "loss": 0.3698, "step": 900 }, { "epoch": 0.06, "grad_norm": 47.91656494140625, "learning_rate": 4.595e-07, "loss": 0.4008, "step": 925 }, { "epoch": 0.06, "grad_norm": 36.263668060302734, "learning_rate": 4.7199999999999994e-07, "loss": 0.2041, "step": 950 }, { "epoch": 0.06, "grad_norm": 12.398943901062012, "learning_rate": 4.845e-07, "loss": 0.2978, "step": 975 }, { "epoch": 0.06, "grad_norm": 4.42283821105957, "learning_rate": 4.97e-07, "loss": 0.2614, "step": 1000 }, { "epoch": 0.06, "eval_loss": 0.29864633083343506, "eval_runtime": 7667.7674, "eval_samples_per_second": 1.228, "eval_steps_per_second": 0.614, "eval_wer": 0.14664944291942517, "step": 1000 } ], "logging_steps": 25, "max_steps": 5000, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 1000, "total_flos": 2.0412098362212352e+18, "train_batch_size": 1, "trial_name": null, "trial_params": null }