{ "best_metric": 0.4093586802482605, "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s12000/checkpoint-1000", "epoch": 1.675392670157068, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.08376963350785341, "grad_norm": 2.469545841217041, "learning_rate": 2.4500000000000003e-06, "loss": 1.0402, "step": 50 }, { "epoch": 0.16753926701570682, "grad_norm": 3.3642780780792236, "learning_rate": 4.950000000000001e-06, "loss": 0.8472, "step": 100 }, { "epoch": 0.2513089005235602, "grad_norm": 1.8375087976455688, "learning_rate": 7.45e-06, "loss": 0.7331, "step": 150 }, { "epoch": 0.33507853403141363, "grad_norm": 3.700824737548828, "learning_rate": 9.950000000000001e-06, "loss": 0.6472, "step": 200 }, { "epoch": 0.418848167539267, "grad_norm": 1.7632888555526733, "learning_rate": 1.2450000000000001e-05, "loss": 0.6112, "step": 250 }, { "epoch": 0.5026178010471204, "grad_norm": 3.1861908435821533, "learning_rate": 1.4950000000000001e-05, "loss": 0.5865, "step": 300 }, { "epoch": 0.5863874345549738, "grad_norm": 2.2845046520233154, "learning_rate": 1.745e-05, "loss": 0.5682, "step": 350 }, { "epoch": 0.6701570680628273, "grad_norm": 2.079210042953491, "learning_rate": 1.995e-05, "loss": 0.5465, "step": 400 }, { "epoch": 0.7539267015706806, "grad_norm": 3.669891119003296, "learning_rate": 2.245e-05, "loss": 0.5302, "step": 450 }, { "epoch": 0.837696335078534, "grad_norm": 2.4679417610168457, "learning_rate": 2.495e-05, "loss": 0.5137, "step": 500 }, { "epoch": 0.837696335078534, "eval_loss": 0.4513999819755554, "eval_runtime": 277.1074, "eval_samples_per_second": 30.634, "eval_steps_per_second": 3.832, "step": 500 }, { "epoch": 0.9214659685863874, "grad_norm": 2.703871726989746, "learning_rate": 2.7450000000000003e-05, "loss": 0.5071, "step": 550 }, { "epoch": 1.0052356020942408, "grad_norm": 3.8184635639190674, "learning_rate": 2.995e-05, "loss": 0.4971, "step": 600 }, { "epoch": 1.0890052356020943, "grad_norm": 2.2857866287231445, "learning_rate": 3.245e-05, "loss": 0.4955, "step": 650 }, { "epoch": 1.1727748691099475, "grad_norm": 3.5974085330963135, "learning_rate": 3.495e-05, "loss": 0.4935, "step": 700 }, { "epoch": 1.256544502617801, "grad_norm": 1.720818281173706, "learning_rate": 3.745e-05, "loss": 0.4763, "step": 750 }, { "epoch": 1.3403141361256545, "grad_norm": 3.080139636993408, "learning_rate": 3.995e-05, "loss": 0.4735, "step": 800 }, { "epoch": 1.4240837696335078, "grad_norm": 4.877579212188721, "learning_rate": 4.245e-05, "loss": 0.4654, "step": 850 }, { "epoch": 1.5078534031413613, "grad_norm": 3.383965253829956, "learning_rate": 4.495e-05, "loss": 0.4628, "step": 900 }, { "epoch": 1.5916230366492146, "grad_norm": 3.3636982440948486, "learning_rate": 4.745e-05, "loss": 0.4541, "step": 950 }, { "epoch": 1.675392670157068, "grad_norm": 1.666568398475647, "learning_rate": 4.995e-05, "loss": 0.4565, "step": 1000 }, { "epoch": 1.675392670157068, "eval_loss": 0.4093586802482605, "eval_runtime": 269.4005, "eval_samples_per_second": 31.511, "eval_steps_per_second": 3.942, "step": 1000 } ], "logging_steps": 50, "max_steps": 12000, "num_input_tokens_seen": 0, "num_train_epochs": 21, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.7957703358412928e+16, "train_batch_size": 16, 
"trial_name": null, "trial_params": null }