{ "best_metric": 0.3477603793144226, "best_model_checkpoint": "mikhail-panzo/zlm_b64_le4_s4000/checkpoint-3000", "epoch": 2.513089005235602, "eval_steps": 500, "global_step": 3000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.041884816753926704, "grad_norm": 3.0341947078704834, "learning_rate": 2.4000000000000003e-06, "loss": 1.0529, "step": 50 }, { "epoch": 0.08376963350785341, "grad_norm": 3.274244785308838, "learning_rate": 4.9000000000000005e-06, "loss": 0.8525, "step": 100 }, { "epoch": 0.1256544502617801, "grad_norm": 7.409469127655029, "learning_rate": 7.4e-06, "loss": 0.7528, "step": 150 }, { "epoch": 0.16753926701570682, "grad_norm": 3.476987600326538, "learning_rate": 9.900000000000002e-06, "loss": 0.689, "step": 200 }, { "epoch": 0.2094240837696335, "grad_norm": 2.8871872425079346, "learning_rate": 1.24e-05, "loss": 0.6403, "step": 250 }, { "epoch": 0.2513089005235602, "grad_norm": 2.007760763168335, "learning_rate": 1.49e-05, "loss": 0.6108, "step": 300 }, { "epoch": 0.2931937172774869, "grad_norm": 1.8842569589614868, "learning_rate": 1.74e-05, "loss": 0.5696, "step": 350 }, { "epoch": 0.33507853403141363, "grad_norm": 3.3749866485595703, "learning_rate": 1.9900000000000003e-05, "loss": 0.5544, "step": 400 }, { "epoch": 0.3769633507853403, "grad_norm": 3.5075504779815674, "learning_rate": 2.2400000000000002e-05, "loss": 0.5441, "step": 450 }, { "epoch": 0.418848167539267, "grad_norm": 3.789675712585449, "learning_rate": 2.4900000000000002e-05, "loss": 0.5277, "step": 500 }, { "epoch": 0.418848167539267, "eval_loss": 0.4806458353996277, "eval_runtime": 268.4116, "eval_samples_per_second": 31.627, "eval_steps_per_second": 3.957, "step": 500 }, { "epoch": 0.4607329842931937, "grad_norm": 7.912121772766113, "learning_rate": 2.7400000000000002e-05, "loss": 0.527, "step": 550 }, { "epoch": 0.5026178010471204, "grad_norm": 1.7653332948684692, "learning_rate": 2.9900000000000002e-05, "loss": 0.5152, "step": 600 }, { "epoch": 0.5445026178010471, "grad_norm": 2.491381883621216, "learning_rate": 3.24e-05, "loss": 0.5163, "step": 650 }, { "epoch": 0.5863874345549738, "grad_norm": 3.3132810592651367, "learning_rate": 3.49e-05, "loss": 0.4987, "step": 700 }, { "epoch": 0.6282722513089005, "grad_norm": 3.01517915725708, "learning_rate": 3.74e-05, "loss": 0.4909, "step": 750 }, { "epoch": 0.6701570680628273, "grad_norm": 3.681798219680786, "learning_rate": 3.99e-05, "loss": 0.4856, "step": 800 }, { "epoch": 0.7120418848167539, "grad_norm": 2.726991653442383, "learning_rate": 4.235e-05, "loss": 0.4901, "step": 850 }, { "epoch": 0.7539267015706806, "grad_norm": 1.8803215026855469, "learning_rate": 4.4850000000000006e-05, "loss": 0.4686, "step": 900 }, { "epoch": 0.7958115183246073, "grad_norm": 2.8432154655456543, "learning_rate": 4.735e-05, "loss": 0.4782, "step": 950 }, { "epoch": 0.837696335078534, "grad_norm": 2.698971748352051, "learning_rate": 4.9850000000000006e-05, "loss": 0.4582, "step": 1000 }, { "epoch": 0.837696335078534, "eval_loss": 0.4116077423095703, "eval_runtime": 270.0841, "eval_samples_per_second": 31.431, "eval_steps_per_second": 3.932, "step": 1000 }, { "epoch": 0.8795811518324608, "grad_norm": 2.2148396968841553, "learning_rate": 5.235e-05, "loss": 0.4678, "step": 1050 }, { "epoch": 0.9214659685863874, "grad_norm": 2.0481760501861572, "learning_rate": 5.485e-05, "loss": 0.4423, "step": 1100 }, { "epoch": 0.9633507853403142, "grad_norm": 3.5668869018554688, "learning_rate": 
5.7350000000000005e-05, "loss": 0.4484, "step": 1150 }, { "epoch": 1.0052356020942408, "grad_norm": 1.8339941501617432, "learning_rate": 5.9850000000000005e-05, "loss": 0.4421, "step": 1200 }, { "epoch": 1.0471204188481675, "grad_norm": 1.6828802824020386, "learning_rate": 6.235000000000001e-05, "loss": 0.4449, "step": 1250 }, { "epoch": 1.0890052356020943, "grad_norm": 5.551531791687012, "learning_rate": 6.485e-05, "loss": 0.4428, "step": 1300 }, { "epoch": 1.130890052356021, "grad_norm": 2.2175750732421875, "learning_rate": 6.735e-05, "loss": 0.451, "step": 1350 }, { "epoch": 1.1727748691099475, "grad_norm": 3.919278383255005, "learning_rate": 6.985e-05, "loss": 0.4396, "step": 1400 }, { "epoch": 1.2146596858638743, "grad_norm": 2.8837966918945312, "learning_rate": 7.235000000000001e-05, "loss": 0.4327, "step": 1450 }, { "epoch": 1.256544502617801, "grad_norm": 2.3643736839294434, "learning_rate": 7.485e-05, "loss": 0.4312, "step": 1500 }, { "epoch": 1.256544502617801, "eval_loss": 0.39514681696891785, "eval_runtime": 274.9071, "eval_samples_per_second": 30.88, "eval_steps_per_second": 3.863, "step": 1500 }, { "epoch": 1.2984293193717278, "grad_norm": 1.5905354022979736, "learning_rate": 7.735e-05, "loss": 0.4341, "step": 1550 }, { "epoch": 1.3403141361256545, "grad_norm": 1.1960793733596802, "learning_rate": 7.985e-05, "loss": 0.4305, "step": 1600 }, { "epoch": 1.3821989528795813, "grad_norm": 3.3077008724212646, "learning_rate": 8.235000000000001e-05, "loss": 0.4302, "step": 1650 }, { "epoch": 1.4240837696335078, "grad_norm": 1.5816391706466675, "learning_rate": 8.485e-05, "loss": 0.4249, "step": 1700 }, { "epoch": 1.4659685863874345, "grad_norm": 3.135552167892456, "learning_rate": 8.735000000000001e-05, "loss": 0.4251, "step": 1750 }, { "epoch": 1.5078534031413613, "grad_norm": 2.1097774505615234, "learning_rate": 8.985e-05, "loss": 0.4209, "step": 1800 }, { "epoch": 1.5497382198952878, "grad_norm": 1.6095610857009888, "learning_rate": 9.235000000000001e-05, "loss": 0.4155, "step": 1850 }, { "epoch": 1.5916230366492146, "grad_norm": 2.1539840698242188, "learning_rate": 9.485e-05, "loss": 0.4155, "step": 1900 }, { "epoch": 1.6335078534031413, "grad_norm": 2.5837864875793457, "learning_rate": 9.735000000000001e-05, "loss": 0.4174, "step": 1950 }, { "epoch": 1.675392670157068, "grad_norm": 2.201157569885254, "learning_rate": 9.985000000000001e-05, "loss": 0.4122, "step": 2000 }, { "epoch": 1.675392670157068, "eval_loss": 0.3768020272254944, "eval_runtime": 274.0798, "eval_samples_per_second": 30.973, "eval_steps_per_second": 3.875, "step": 2000 }, { "epoch": 1.7172774869109948, "grad_norm": 1.8704116344451904, "learning_rate": 9.765e-05, "loss": 0.4098, "step": 2050 }, { "epoch": 1.7591623036649215, "grad_norm": 2.613638401031494, "learning_rate": 9.515000000000001e-05, "loss": 0.4104, "step": 2100 }, { "epoch": 1.8010471204188483, "grad_norm": 1.6951849460601807, "learning_rate": 9.265e-05, "loss": 0.4082, "step": 2150 }, { "epoch": 1.8429319371727748, "grad_norm": 2.618356704711914, "learning_rate": 9.015e-05, "loss": 0.4055, "step": 2200 }, { "epoch": 1.8848167539267016, "grad_norm": 2.2430710792541504, "learning_rate": 8.765e-05, "loss": 0.4052, "step": 2250 }, { "epoch": 1.9267015706806283, "grad_norm": 1.266263723373413, "learning_rate": 8.515000000000001e-05, "loss": 0.4039, "step": 2300 }, { "epoch": 1.9685863874345548, "grad_norm": 1.5831375122070312, "learning_rate": 8.265e-05, "loss": 0.4009, "step": 2350 }, { "epoch": 2.0104712041884816, "grad_norm": 1.1862380504608154, 
"learning_rate": 8.015e-05, "loss": 0.3932, "step": 2400 }, { "epoch": 2.0523560209424083, "grad_norm": 2.1208019256591797, "learning_rate": 7.765e-05, "loss": 0.3971, "step": 2450 }, { "epoch": 2.094240837696335, "grad_norm": 2.24529767036438, "learning_rate": 7.515e-05, "loss": 0.3985, "step": 2500 }, { "epoch": 2.094240837696335, "eval_loss": 0.359864205121994, "eval_runtime": 268.8203, "eval_samples_per_second": 31.579, "eval_steps_per_second": 3.951, "step": 2500 }, { "epoch": 2.136125654450262, "grad_norm": 1.0572404861450195, "learning_rate": 7.265e-05, "loss": 0.3972, "step": 2550 }, { "epoch": 2.1780104712041886, "grad_norm": 1.1414527893066406, "learning_rate": 7.015000000000001e-05, "loss": 0.3988, "step": 2600 }, { "epoch": 2.2198952879581153, "grad_norm": 1.8701543807983398, "learning_rate": 6.765e-05, "loss": 0.3899, "step": 2650 }, { "epoch": 2.261780104712042, "grad_norm": 1.2148722410202026, "learning_rate": 6.515e-05, "loss": 0.3914, "step": 2700 }, { "epoch": 2.303664921465969, "grad_norm": 1.6119493246078491, "learning_rate": 6.264999999999999e-05, "loss": 0.3936, "step": 2750 }, { "epoch": 2.345549738219895, "grad_norm": 1.5899128913879395, "learning_rate": 6.0150000000000005e-05, "loss": 0.3845, "step": 2800 }, { "epoch": 2.387434554973822, "grad_norm": 1.4083961248397827, "learning_rate": 5.7650000000000005e-05, "loss": 0.3871, "step": 2850 }, { "epoch": 2.4293193717277486, "grad_norm": 1.6995341777801514, "learning_rate": 5.515e-05, "loss": 0.397, "step": 2900 }, { "epoch": 2.4712041884816753, "grad_norm": 2.964442729949951, "learning_rate": 5.265e-05, "loss": 0.3882, "step": 2950 }, { "epoch": 2.513089005235602, "grad_norm": 1.7540944814682007, "learning_rate": 5.015e-05, "loss": 0.3873, "step": 3000 }, { "epoch": 2.513089005235602, "eval_loss": 0.3477603793144226, "eval_runtime": 267.3162, "eval_samples_per_second": 31.756, "eval_steps_per_second": 3.973, "step": 3000 } ], "logging_steps": 50, "max_steps": 4000, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.686672014814656e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }