{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 351, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.08547008547008547, "grad_norm": 1.4181480407714844, "learning_rate": 9.992791852820709e-06, "loss": 0.665, "step": 10 }, { "epoch": 0.17094017094017094, "grad_norm": 1.27070951461792, "learning_rate": 9.971188194237141e-06, "loss": 0.473, "step": 20 }, { "epoch": 0.2564102564102564, "grad_norm": 1.1482155323028564, "learning_rate": 9.935251313189564e-06, "loss": 0.449, "step": 30 }, { "epoch": 0.3418803418803419, "grad_norm": 1.2000395059585571, "learning_rate": 9.885084825009085e-06, "loss": 0.4306, "step": 40 }, { "epoch": 0.42735042735042733, "grad_norm": 1.1685457229614258, "learning_rate": 9.820833372667813e-06, "loss": 0.4141, "step": 50 }, { "epoch": 0.5128205128205128, "grad_norm": 1.1243082284927368, "learning_rate": 9.742682209735727e-06, "loss": 0.4107, "step": 60 }, { "epoch": 0.5982905982905983, "grad_norm": 1.0574721097946167, "learning_rate": 9.650856666246693e-06, "loss": 0.4056, "step": 70 }, { "epoch": 0.6837606837606838, "grad_norm": 1.2004579305648804, "learning_rate": 9.54562149901362e-06, "loss": 0.4012, "step": 80 }, { "epoch": 0.7692307692307693, "grad_norm": 0.9274908304214478, "learning_rate": 9.427280128266049e-06, "loss": 0.3855, "step": 90 }, { "epoch": 0.8547008547008547, "grad_norm": 1.248368263244629, "learning_rate": 9.296173762811084e-06, "loss": 0.3987, "step": 100 }, { "epoch": 0.9401709401709402, "grad_norm": 1.0070111751556396, "learning_rate": 9.152680416240059e-06, "loss": 0.3871, "step": 110 }, { "epoch": 1.0256410256410255, "grad_norm": 0.8592380881309509, "learning_rate": 8.997213817017508e-06, "loss": 0.3453, "step": 120 }, { "epoch": 1.1111111111111112, "grad_norm": 1.1061818599700928, "learning_rate": 8.83022221559489e-06, "loss": 0.26, "step": 130 }, { "epoch": 1.1965811965811965, "grad_norm": 1.1157325506210327, "learning_rate": 8.652187091988516e-06, "loss": 0.2432, "step": 140 }, { "epoch": 1.282051282051282, "grad_norm": 1.1478718519210815, "learning_rate": 8.463621767547998e-06, "loss": 0.2481, "step": 150 }, { "epoch": 1.3675213675213675, "grad_norm": 1.0965734720230103, "learning_rate": 8.265069924917925e-06, "loss": 0.2392, "step": 160 }, { "epoch": 1.452991452991453, "grad_norm": 1.0359482765197754, "learning_rate": 8.057104040460062e-06, "loss": 0.2345, "step": 170 }, { "epoch": 1.5384615384615383, "grad_norm": 1.0444822311401367, "learning_rate": 7.84032373365578e-06, "loss": 0.2372, "step": 180 }, { "epoch": 1.623931623931624, "grad_norm": 1.166808009147644, "learning_rate": 7.615354038247889e-06, "loss": 0.2373, "step": 190 }, { "epoch": 1.7094017094017095, "grad_norm": 1.1571974754333496, "learning_rate": 7.382843600106539e-06, "loss": 0.2404, "step": 200 }, { "epoch": 1.7948717948717947, "grad_norm": 1.1317440271377563, "learning_rate": 7.143462807015271e-06, "loss": 0.2394, "step": 210 }, { "epoch": 1.8803418803418803, "grad_norm": 1.1420925855636597, "learning_rate": 6.897901855769483e-06, "loss": 0.2341, "step": 220 }, { "epoch": 1.965811965811966, "grad_norm": 1.0969715118408203, "learning_rate": 6.646868762160399e-06, "loss": 0.2363, "step": 230 }, { "epoch": 2.051282051282051, "grad_norm": 1.1839579343795776, "learning_rate": 6.391087319582264e-06, "loss": 0.1572, "step": 240 }, { "epoch": 2.1367521367521367, "grad_norm": 1.3952598571777344, "learning_rate": 6.131295012148613e-06, "loss": 0.0963, 
"step": 250 }, { "epoch": 2.2222222222222223, "grad_norm": 1.2655587196350098, "learning_rate": 5.8682408883346535e-06, "loss": 0.0934, "step": 260 }, { "epoch": 2.3076923076923075, "grad_norm": 1.3720885515213013, "learning_rate": 5.6026834012766155e-06, "loss": 0.0897, "step": 270 }, { "epoch": 2.393162393162393, "grad_norm": 1.4065742492675781, "learning_rate": 5.335388221955012e-06, "loss": 0.0919, "step": 280 }, { "epoch": 2.4786324786324787, "grad_norm": 1.3926897048950195, "learning_rate": 5.067126031566988e-06, "loss": 0.0892, "step": 290 }, { "epoch": 2.564102564102564, "grad_norm": 1.1955878734588623, "learning_rate": 4.798670299452926e-06, "loss": 0.0877, "step": 300 }, { "epoch": 2.6495726495726495, "grad_norm": 1.3691728115081787, "learning_rate": 4.530795052984104e-06, "loss": 0.0901, "step": 310 }, { "epoch": 2.735042735042735, "grad_norm": 1.1833082437515259, "learning_rate": 4.264272645841419e-06, "loss": 0.0891, "step": 320 }, { "epoch": 2.8205128205128203, "grad_norm": 1.3318713903427124, "learning_rate": 3.999871531119779e-06, "loss": 0.0846, "step": 330 }, { "epoch": 2.905982905982906, "grad_norm": 1.2540022134780884, "learning_rate": 3.7383540456788915e-06, "loss": 0.0856, "step": 340 }, { "epoch": 2.9914529914529915, "grad_norm": 1.1811027526855469, "learning_rate": 3.480474212128766e-06, "loss": 0.081, "step": 350 } ], "logging_steps": 10, "max_steps": 585, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 50.0, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.398524460433408e+17, "train_batch_size": 16, "trial_name": null, "trial_params": null }