{ "best_metric": 0.4116077423095703, "best_model_checkpoint": "mikhail-panzo/zlm_b64_le4_s4000/checkpoint-1000", "epoch": 0.837696335078534, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.041884816753926704, "grad_norm": 3.0341947078704834, "learning_rate": 2.4000000000000003e-06, "loss": 1.0529, "step": 50 }, { "epoch": 0.08376963350785341, "grad_norm": 3.274244785308838, "learning_rate": 4.9000000000000005e-06, "loss": 0.8525, "step": 100 }, { "epoch": 0.1256544502617801, "grad_norm": 7.409469127655029, "learning_rate": 7.4e-06, "loss": 0.7528, "step": 150 }, { "epoch": 0.16753926701570682, "grad_norm": 3.476987600326538, "learning_rate": 9.900000000000002e-06, "loss": 0.689, "step": 200 }, { "epoch": 0.2094240837696335, "grad_norm": 2.8871872425079346, "learning_rate": 1.24e-05, "loss": 0.6403, "step": 250 }, { "epoch": 0.2513089005235602, "grad_norm": 2.007760763168335, "learning_rate": 1.49e-05, "loss": 0.6108, "step": 300 }, { "epoch": 0.2931937172774869, "grad_norm": 1.8842569589614868, "learning_rate": 1.74e-05, "loss": 0.5696, "step": 350 }, { "epoch": 0.33507853403141363, "grad_norm": 3.3749866485595703, "learning_rate": 1.9900000000000003e-05, "loss": 0.5544, "step": 400 }, { "epoch": 0.3769633507853403, "grad_norm": 3.5075504779815674, "learning_rate": 2.2400000000000002e-05, "loss": 0.5441, "step": 450 }, { "epoch": 0.418848167539267, "grad_norm": 3.789675712585449, "learning_rate": 2.4900000000000002e-05, "loss": 0.5277, "step": 500 }, { "epoch": 0.418848167539267, "eval_loss": 0.4806458353996277, "eval_runtime": 268.4116, "eval_samples_per_second": 31.627, "eval_steps_per_second": 3.957, "step": 500 }, { "epoch": 0.4607329842931937, "grad_norm": 7.912121772766113, "learning_rate": 2.7400000000000002e-05, "loss": 0.527, "step": 550 }, { "epoch": 0.5026178010471204, "grad_norm": 1.7653332948684692, "learning_rate": 2.9900000000000002e-05, "loss": 0.5152, "step": 600 }, { "epoch": 0.5445026178010471, "grad_norm": 2.491381883621216, "learning_rate": 3.24e-05, "loss": 0.5163, "step": 650 }, { "epoch": 0.5863874345549738, "grad_norm": 3.3132810592651367, "learning_rate": 3.49e-05, "loss": 0.4987, "step": 700 }, { "epoch": 0.6282722513089005, "grad_norm": 3.01517915725708, "learning_rate": 3.74e-05, "loss": 0.4909, "step": 750 }, { "epoch": 0.6701570680628273, "grad_norm": 3.681798219680786, "learning_rate": 3.99e-05, "loss": 0.4856, "step": 800 }, { "epoch": 0.7120418848167539, "grad_norm": 2.726991653442383, "learning_rate": 4.235e-05, "loss": 0.4901, "step": 850 }, { "epoch": 0.7539267015706806, "grad_norm": 1.8803215026855469, "learning_rate": 4.4850000000000006e-05, "loss": 0.4686, "step": 900 }, { "epoch": 0.7958115183246073, "grad_norm": 2.8432154655456543, "learning_rate": 4.735e-05, "loss": 0.4782, "step": 950 }, { "epoch": 0.837696335078534, "grad_norm": 2.698971748352051, "learning_rate": 4.9850000000000006e-05, "loss": 0.4582, "step": 1000 }, { "epoch": 0.837696335078534, "eval_loss": 0.4116077423095703, "eval_runtime": 270.0841, "eval_samples_per_second": 31.431, "eval_steps_per_second": 3.932, "step": 1000 } ], "logging_steps": 50, "max_steps": 4000, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 8963407491426432.0, 
"train_batch_size": 16, "trial_name": null, "trial_params": null }