{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.09269162210338681, "eval_steps": 62, "global_step": 30, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0030897207367795603, "grad_norm": 9338.8359375, "learning_rate": 4e-12, "loss": 13.2844, "step": 1 }, { "epoch": 0.006179441473559121, "grad_norm": 9144.1259765625, "learning_rate": 8e-12, "loss": 12.994, "step": 2 }, { "epoch": 0.009269162210338681, "grad_norm": 9721.224609375, "learning_rate": 1.1999999999999999e-11, "loss": 12.6655, "step": 3 }, { "epoch": 0.012358882947118241, "grad_norm": 8743.544921875, "learning_rate": 1.6e-11, "loss": 13.5894, "step": 4 }, { "epoch": 0.015448603683897801, "grad_norm": 9426.9677734375, "learning_rate": 2e-11, "loss": 12.8587, "step": 5 }, { "epoch": 0.018538324420677363, "grad_norm": 9453.294921875, "learning_rate": 1.998481516433316e-11, "loss": 12.4593, "step": 6 }, { "epoch": 0.02162804515745692, "grad_norm": 9499.736328125, "learning_rate": 1.9939306773179494e-11, "loss": 12.7375, "step": 7 }, { "epoch": 0.024717765894236483, "grad_norm": 9644.2998046875, "learning_rate": 1.9863613034027223e-11, "loss": 12.5361, "step": 8 }, { "epoch": 0.027807486631016044, "grad_norm": 9461.044921875, "learning_rate": 1.9757963826274354e-11, "loss": 12.4943, "step": 9 }, { "epoch": 0.030897207367795602, "grad_norm": 10506.08203125, "learning_rate": 1.96226800030925e-11, "loss": 11.0811, "step": 10 }, { "epoch": 0.03398692810457516, "grad_norm": 9562.708984375, "learning_rate": 1.9458172417006344e-11, "loss": 12.1782, "step": 11 }, { "epoch": 0.037076648841354726, "grad_norm": 8141.35791015625, "learning_rate": 1.9264940672148017e-11, "loss": 14.1307, "step": 12 }, { "epoch": 0.040166369578134284, "grad_norm": 10255.396484375, "learning_rate": 1.9043571606975776e-11, "loss": 12.0333, "step": 13 }, { "epoch": 0.04325609031491384, "grad_norm": 9540.162109375, "learning_rate": 1.879473751206489e-11, "loss": 12.4084, "step": 14 }, { "epoch": 0.04634581105169341, "grad_norm": 9645.5986328125, "learning_rate": 1.851919408838327e-11, "loss": 13.0228, "step": 15 }, { "epoch": 0.049435531788472965, "grad_norm": 10008.0263671875, "learning_rate": 1.821777815225245e-11, "loss": 11.6848, "step": 16 }, { "epoch": 0.052525252525252523, "grad_norm": 10316.1103515625, "learning_rate": 1.7891405093963936e-11, "loss": 11.8311, "step": 17 }, { "epoch": 0.05561497326203209, "grad_norm": 8724.0751953125, "learning_rate": 1.754106609776896e-11, "loss": 13.4673, "step": 18 }, { "epoch": 0.05870469399881165, "grad_norm": 9629.205078125, "learning_rate": 1.7167825131684512e-11, "loss": 12.7426, "step": 19 }, { "epoch": 0.061794414735591205, "grad_norm": 9684.396484375, "learning_rate": 1.6772815716257412e-11, "loss": 11.9996, "step": 20 }, { "epoch": 0.06488413547237076, "grad_norm": 9191.28125, "learning_rate": 1.6357237482099683e-11, "loss": 13.1134, "step": 21 }, { "epoch": 0.06797385620915032, "grad_norm": 9772.142578125, "learning_rate": 1.59223525266498e-11, "loss": 12.5008, "step": 22 }, { "epoch": 0.0710635769459299, "grad_norm": 9289.20703125, "learning_rate": 1.546948158122427e-11, "loss": 12.5963, "step": 23 }, { "epoch": 0.07415329768270945, "grad_norm": 9196.2001953125, "learning_rate": 1.5e-11, "loss": 12.7144, "step": 24 }, { "epoch": 0.07724301841948901, "grad_norm": 10201.185546875, "learning_rate": 1.4515333583108892e-11, "loss": 12.3746, "step": 25 }, { "epoch": 0.08033273915626857, "grad_norm": 10000.76953125, "learning_rate": 
1.4016954246529695e-11, "loss": 12.836, "step": 26 }, { "epoch": 0.08342245989304813, "grad_norm": 9503.5068359375, "learning_rate": 1.3506375551927544e-11, "loss": 12.4537, "step": 27 }, { "epoch": 0.08651218062982768, "grad_norm": 9471.9248046875, "learning_rate": 1.2985148110016946e-11, "loss": 13.0039, "step": 28 }, { "epoch": 0.08960190136660724, "grad_norm": 9412.03515625, "learning_rate": 1.245485487140799e-11, "loss": 12.4377, "step": 29 }, { "epoch": 0.09269162210338681, "grad_norm": 9012.6328125, "learning_rate": 1.1917106319237383e-11, "loss": 13.4764, "step": 30 } ], "logging_steps": 1, "max_steps": 62, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 10, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.978137921748992e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }