{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 37, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02702702702702703, "grad_norm": 34.43118667602539, "learning_rate": 9.729729729729731e-05, "loss": 1.2122, "step": 1 }, { "epoch": 0.05405405405405406, "grad_norm": 62.55014419555664, "learning_rate": 9.45945945945946e-05, "loss": 1.6651, "step": 2 }, { "epoch": 0.08108108108108109, "grad_norm": 61.55048370361328, "learning_rate": 9.18918918918919e-05, "loss": 3.0987, "step": 3 }, { "epoch": 0.10810810810810811, "grad_norm": 51.39985656738281, "learning_rate": 8.918918918918919e-05, "loss": 1.4937, "step": 4 }, { "epoch": 0.13513513513513514, "grad_norm": 40.16260528564453, "learning_rate": 8.64864864864865e-05, "loss": 1.6081, "step": 5 }, { "epoch": 0.16216216216216217, "grad_norm": 43.81644821166992, "learning_rate": 8.378378378378379e-05, "loss": 0.7173, "step": 6 }, { "epoch": 0.1891891891891892, "grad_norm": 69.73033905029297, "learning_rate": 8.108108108108109e-05, "loss": 2.9437, "step": 7 }, { "epoch": 0.21621621621621623, "grad_norm": 35.83936309814453, "learning_rate": 7.837837837837838e-05, "loss": 0.4272, "step": 8 }, { "epoch": 0.24324324324324326, "grad_norm": 71.4704818725586, "learning_rate": 7.567567567567568e-05, "loss": 0.9679, "step": 9 }, { "epoch": 0.2702702702702703, "grad_norm": 45.756526947021484, "learning_rate": 7.297297297297297e-05, "loss": 0.85, "step": 10 }, { "epoch": 0.2972972972972973, "grad_norm": 54.53055953979492, "learning_rate": 7.027027027027028e-05, "loss": 1.2797, "step": 11 }, { "epoch": 0.32432432432432434, "grad_norm": 36.27718734741211, "learning_rate": 6.756756756756757e-05, "loss": 1.2453, "step": 12 }, { "epoch": 0.35135135135135137, "grad_norm": 37.95497512817383, "learning_rate": 6.486486486486487e-05, "loss": 1.0391, "step": 13 }, { "epoch": 0.3783783783783784, "grad_norm": 32.62294006347656, "learning_rate": 6.216216216216216e-05, "loss": 0.6115, "step": 14 }, { "epoch": 0.40540540540540543, "grad_norm": 46.16029357910156, "learning_rate": 5.9459459459459466e-05, "loss": 1.5827, "step": 15 }, { "epoch": 0.43243243243243246, "grad_norm": 36.99343490600586, "learning_rate": 5.6756756756756757e-05, "loss": 0.8718, "step": 16 }, { "epoch": 0.4594594594594595, "grad_norm": 38.82675552368164, "learning_rate": 5.405405405405406e-05, "loss": 0.777, "step": 17 }, { "epoch": 0.4864864864864865, "grad_norm": 52.7644157409668, "learning_rate": 5.135135135135135e-05, "loss": 1.8544, "step": 18 }, { "epoch": 0.5135135135135135, "grad_norm": 22.739355087280273, "learning_rate": 4.8648648648648654e-05, "loss": 0.3267, "step": 19 }, { "epoch": 0.5405405405405406, "grad_norm": 43.35242462158203, "learning_rate": 4.594594594594595e-05, "loss": 0.7851, "step": 20 }, { "epoch": 0.5675675675675675, "grad_norm": 27.8333683013916, "learning_rate": 4.324324324324325e-05, "loss": 0.5171, "step": 21 }, { "epoch": 0.5945945945945946, "grad_norm": 35.44032287597656, "learning_rate": 4.0540540540540545e-05, "loss": 1.2803, "step": 22 }, { "epoch": 0.6216216216216216, "grad_norm": 25.532207489013672, "learning_rate": 3.783783783783784e-05, "loss": 0.6257, "step": 23 }, { "epoch": 0.6486486486486487, "grad_norm": 38.8851318359375, "learning_rate": 3.513513513513514e-05, "loss": 1.1882, "step": 24 }, { "epoch": 0.6756756756756757, "grad_norm": 20.330860137939453, "learning_rate": 3.2432432432432436e-05, "loss": 0.8143, "step": 25 }, { "epoch": 
0.7027027027027027, "grad_norm": 35.819618225097656, "learning_rate": 2.9729729729729733e-05, "loss": 1.1736, "step": 26 }, { "epoch": 0.7297297297297297, "grad_norm": 53.788230895996094, "learning_rate": 2.702702702702703e-05, "loss": 1.9501, "step": 27 }, { "epoch": 0.7567567567567568, "grad_norm": 47.79997634887695, "learning_rate": 2.4324324324324327e-05, "loss": 1.7516, "step": 28 }, { "epoch": 0.7837837837837838, "grad_norm": 27.36383819580078, "learning_rate": 2.1621621621621624e-05, "loss": 0.5194, "step": 29 }, { "epoch": 0.8108108108108109, "grad_norm": 29.560096740722656, "learning_rate": 1.891891891891892e-05, "loss": 0.8976, "step": 30 }, { "epoch": 0.8378378378378378, "grad_norm": 56.96413040161133, "learning_rate": 1.6216216216216218e-05, "loss": 1.266, "step": 31 }, { "epoch": 0.8648648648648649, "grad_norm": 38.90911102294922, "learning_rate": 1.3513513513513515e-05, "loss": 1.5255, "step": 32 }, { "epoch": 0.8918918918918919, "grad_norm": 30.898990631103516, "learning_rate": 1.0810810810810812e-05, "loss": 0.7743, "step": 33 }, { "epoch": 0.918918918918919, "grad_norm": 47.93951416015625, "learning_rate": 8.108108108108109e-06, "loss": 1.4269, "step": 34 }, { "epoch": 0.9459459459459459, "grad_norm": 32.6657829284668, "learning_rate": 5.405405405405406e-06, "loss": 1.0057, "step": 35 }, { "epoch": 0.972972972972973, "grad_norm": 32.33424377441406, "learning_rate": 2.702702702702703e-06, "loss": 0.3576, "step": 36 }, { "epoch": 1.0, "grad_norm": 38.6456413269043, "learning_rate": 0.0, "loss": 0.8796, "step": 37 }, { "epoch": 1.0, "step": 37, "total_flos": 6359033384534016.0, "train_loss": 1.1705604413071193, "train_runtime": 1777.143, "train_samples_per_second": 0.167, "train_steps_per_second": 0.021 } ], "logging_steps": 1, "max_steps": 37, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "total_flos": 6359033384534016.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }