{ "best_metric": 0.4111216962337494, "best_model_checkpoint": "mikhail_panzo/zlm-fil_b64_le5_s8000/checkpoint-3500", "epoch": 152.17391304347825, "eval_steps": 500, "global_step": 3500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 2.1739130434782608, "grad_norm": 5.992666721343994, "learning_rate": 2.5000000000000004e-07, "loss": 0.9248, "step": 50 }, { "epoch": 4.3478260869565215, "grad_norm": 3.4944448471069336, "learning_rate": 4.95e-07, "loss": 0.8709, "step": 100 }, { "epoch": 6.521739130434782, "grad_norm": 1.9902064800262451, "learning_rate": 7.450000000000001e-07, "loss": 0.8005, "step": 150 }, { "epoch": 8.695652173913043, "grad_norm": 1.4649031162261963, "learning_rate": 9.950000000000002e-07, "loss": 0.733, "step": 200 }, { "epoch": 10.869565217391305, "grad_norm": 1.4645055532455444, "learning_rate": 1.2450000000000002e-06, "loss": 0.6758, "step": 250 }, { "epoch": 13.043478260869565, "grad_norm": 1.5862590074539185, "learning_rate": 1.495e-06, "loss": 0.6246, "step": 300 }, { "epoch": 15.217391304347826, "grad_norm": 1.198890209197998, "learning_rate": 1.745e-06, "loss": 0.5917, "step": 350 }, { "epoch": 17.391304347826086, "grad_norm": 1.324212908744812, "learning_rate": 1.9950000000000004e-06, "loss": 0.5721, "step": 400 }, { "epoch": 19.565217391304348, "grad_norm": 1.1638662815093994, "learning_rate": 2.245e-06, "loss": 0.5635, "step": 450 }, { "epoch": 21.73913043478261, "grad_norm": 0.9293428063392639, "learning_rate": 2.4950000000000003e-06, "loss": 0.5541, "step": 500 }, { "epoch": 21.73913043478261, "eval_loss": 0.49769046902656555, "eval_runtime": 12.8521, "eval_samples_per_second": 12.527, "eval_steps_per_second": 1.634, "step": 500 }, { "epoch": 23.91304347826087, "grad_norm": 0.7982977628707886, "learning_rate": 2.7450000000000004e-06, "loss": 0.5487, "step": 550 }, { "epoch": 26.08695652173913, "grad_norm": 0.8491495251655579, "learning_rate": 2.995e-06, "loss": 0.5373, "step": 600 }, { "epoch": 28.26086956521739, "grad_norm": 0.7158030867576599, "learning_rate": 3.2450000000000003e-06, "loss": 0.5271, "step": 650 }, { "epoch": 30.434782608695652, "grad_norm": 1.2020411491394043, "learning_rate": 3.495e-06, "loss": 0.5201, "step": 700 }, { "epoch": 32.608695652173914, "grad_norm": 0.7339745163917542, "learning_rate": 3.745e-06, "loss": 0.5126, "step": 750 }, { "epoch": 34.78260869565217, "grad_norm": 1.1129218339920044, "learning_rate": 3.995000000000001e-06, "loss": 0.5136, "step": 800 }, { "epoch": 36.95652173913044, "grad_norm": 0.7733961939811707, "learning_rate": 4.245e-06, "loss": 0.5099, "step": 850 }, { "epoch": 39.130434782608695, "grad_norm": 1.1559356451034546, "learning_rate": 4.495e-06, "loss": 0.5047, "step": 900 }, { "epoch": 41.30434782608695, "grad_norm": 0.873518705368042, "learning_rate": 4.745e-06, "loss": 0.4955, "step": 950 }, { "epoch": 43.47826086956522, "grad_norm": 0.703760027885437, "learning_rate": 4.9950000000000005e-06, "loss": 0.4931, "step": 1000 }, { "epoch": 43.47826086956522, "eval_loss": 0.4528730511665344, "eval_runtime": 12.5404, "eval_samples_per_second": 12.838, "eval_steps_per_second": 1.675, "step": 1000 }, { "epoch": 45.65217391304348, "grad_norm": 0.9317522644996643, "learning_rate": 5.245e-06, "loss": 0.4922, "step": 1050 }, { "epoch": 47.82608695652174, "grad_norm": 0.9118973016738892, "learning_rate": 5.495000000000001e-06, "loss": 0.4855, "step": 1100 }, { "epoch": 50.0, "grad_norm": 2.0197670459747314, "learning_rate": 
5.745000000000001e-06, "loss": 0.485, "step": 1150 }, { "epoch": 52.17391304347826, "grad_norm": 1.1067094802856445, "learning_rate": 5.995000000000001e-06, "loss": 0.4872, "step": 1200 }, { "epoch": 54.34782608695652, "grad_norm": 1.20389723777771, "learning_rate": 6.245000000000001e-06, "loss": 0.4836, "step": 1250 }, { "epoch": 56.52173913043478, "grad_norm": 0.9784926176071167, "learning_rate": 6.4950000000000005e-06, "loss": 0.4793, "step": 1300 }, { "epoch": 58.69565217391305, "grad_norm": 1.4276039600372314, "learning_rate": 6.745000000000001e-06, "loss": 0.4774, "step": 1350 }, { "epoch": 60.869565217391305, "grad_norm": 1.117233157157898, "learning_rate": 6.995000000000001e-06, "loss": 0.4776, "step": 1400 }, { "epoch": 63.04347826086956, "grad_norm": 0.9267759919166565, "learning_rate": 7.245000000000001e-06, "loss": 0.4682, "step": 1450 }, { "epoch": 65.21739130434783, "grad_norm": 0.8514929413795471, "learning_rate": 7.495000000000001e-06, "loss": 0.4695, "step": 1500 }, { "epoch": 65.21739130434783, "eval_loss": 0.43296581506729126, "eval_runtime": 12.858, "eval_samples_per_second": 12.521, "eval_steps_per_second": 1.633, "step": 1500 }, { "epoch": 67.3913043478261, "grad_norm": 1.6638849973678589, "learning_rate": 7.745e-06, "loss": 0.4674, "step": 1550 }, { "epoch": 69.56521739130434, "grad_norm": 0.8030112981796265, "learning_rate": 7.995e-06, "loss": 0.4652, "step": 1600 }, { "epoch": 71.73913043478261, "grad_norm": 1.2407337427139282, "learning_rate": 8.245000000000002e-06, "loss": 0.4638, "step": 1650 }, { "epoch": 73.91304347826087, "grad_norm": 1.0331224203109741, "learning_rate": 8.495e-06, "loss": 0.4685, "step": 1700 }, { "epoch": 76.08695652173913, "grad_norm": 2.4681615829467773, "learning_rate": 8.745000000000002e-06, "loss": 0.4627, "step": 1750 }, { "epoch": 78.26086956521739, "grad_norm": 0.9344178438186646, "learning_rate": 8.995000000000001e-06, "loss": 0.4575, "step": 1800 }, { "epoch": 80.43478260869566, "grad_norm": 1.0561341047286987, "learning_rate": 9.245e-06, "loss": 0.4621, "step": 1850 }, { "epoch": 82.6086956521739, "grad_norm": 0.9816989302635193, "learning_rate": 9.495000000000001e-06, "loss": 0.4544, "step": 1900 }, { "epoch": 84.78260869565217, "grad_norm": 1.4779256582260132, "learning_rate": 9.745e-06, "loss": 0.4579, "step": 1950 }, { "epoch": 86.95652173913044, "grad_norm": 1.1634950637817383, "learning_rate": 9.995000000000002e-06, "loss": 0.4518, "step": 2000 }, { "epoch": 86.95652173913044, "eval_loss": 0.42301931977272034, "eval_runtime": 12.725, "eval_samples_per_second": 12.652, "eval_steps_per_second": 1.65, "step": 2000 }, { "epoch": 89.1304347826087, "grad_norm": 1.0197867155075073, "learning_rate": 9.918333333333335e-06, "loss": 0.4501, "step": 2050 }, { "epoch": 91.30434782608695, "grad_norm": 1.2543045282363892, "learning_rate": 9.835000000000002e-06, "loss": 0.4499, "step": 2100 }, { "epoch": 93.47826086956522, "grad_norm": 0.8574827909469604, "learning_rate": 9.751666666666667e-06, "loss": 0.4485, "step": 2150 }, { "epoch": 95.65217391304348, "grad_norm": 1.3784998655319214, "learning_rate": 9.668333333333334e-06, "loss": 0.4507, "step": 2200 }, { "epoch": 97.82608695652173, "grad_norm": 0.9518347978591919, "learning_rate": 9.585e-06, "loss": 0.4486, "step": 2250 }, { "epoch": 100.0, "grad_norm": 2.422320604324341, "learning_rate": 9.501666666666667e-06, "loss": 0.4455, "step": 2300 }, { "epoch": 102.17391304347827, "grad_norm": 1.3441377878189087, "learning_rate": 9.418333333333334e-06, "loss": 0.45, "step": 2350 }, { 
"epoch": 104.34782608695652, "grad_norm": 0.969160795211792, "learning_rate": 9.335000000000001e-06, "loss": 0.4454, "step": 2400 }, { "epoch": 106.52173913043478, "grad_norm": 2.1275179386138916, "learning_rate": 9.251666666666668e-06, "loss": 0.4459, "step": 2450 }, { "epoch": 108.69565217391305, "grad_norm": 1.3673362731933594, "learning_rate": 9.168333333333333e-06, "loss": 0.4442, "step": 2500 }, { "epoch": 108.69565217391305, "eval_loss": 0.4178585410118103, "eval_runtime": 13.0534, "eval_samples_per_second": 12.334, "eval_steps_per_second": 1.609, "step": 2500 }, { "epoch": 110.8695652173913, "grad_norm": 1.3769398927688599, "learning_rate": 9.085e-06, "loss": 0.4406, "step": 2550 }, { "epoch": 113.04347826086956, "grad_norm": 0.9913681745529175, "learning_rate": 9.001666666666667e-06, "loss": 0.4389, "step": 2600 }, { "epoch": 115.21739130434783, "grad_norm": 1.1747106313705444, "learning_rate": 8.918333333333334e-06, "loss": 0.4386, "step": 2650 }, { "epoch": 117.3913043478261, "grad_norm": 1.0514781475067139, "learning_rate": 8.835000000000001e-06, "loss": 0.4393, "step": 2700 }, { "epoch": 119.56521739130434, "grad_norm": 1.8967915773391724, "learning_rate": 8.751666666666668e-06, "loss": 0.4421, "step": 2750 }, { "epoch": 121.73913043478261, "grad_norm": 0.8795832395553589, "learning_rate": 8.668333333333335e-06, "loss": 0.436, "step": 2800 }, { "epoch": 123.91304347826087, "grad_norm": 0.7928704023361206, "learning_rate": 8.585000000000002e-06, "loss": 0.4333, "step": 2850 }, { "epoch": 126.08695652173913, "grad_norm": 1.2805510759353638, "learning_rate": 8.501666666666667e-06, "loss": 0.4326, "step": 2900 }, { "epoch": 128.2608695652174, "grad_norm": 1.420920968055725, "learning_rate": 8.418333333333334e-06, "loss": 0.4317, "step": 2950 }, { "epoch": 130.43478260869566, "grad_norm": 0.7063888907432556, "learning_rate": 8.335e-06, "loss": 0.4344, "step": 3000 }, { "epoch": 130.43478260869566, "eval_loss": 0.41350311040878296, "eval_runtime": 12.8066, "eval_samples_per_second": 12.572, "eval_steps_per_second": 1.64, "step": 3000 }, { "epoch": 132.6086956521739, "grad_norm": 1.8065855503082275, "learning_rate": 8.251666666666668e-06, "loss": 0.4361, "step": 3050 }, { "epoch": 134.7826086956522, "grad_norm": 0.8073704838752747, "learning_rate": 8.168333333333334e-06, "loss": 0.4339, "step": 3100 }, { "epoch": 136.95652173913044, "grad_norm": 1.2890065908432007, "learning_rate": 8.085000000000001e-06, "loss": 0.4325, "step": 3150 }, { "epoch": 139.1304347826087, "grad_norm": 1.336401104927063, "learning_rate": 8.001666666666668e-06, "loss": 0.4334, "step": 3200 }, { "epoch": 141.30434782608697, "grad_norm": 1.2965891361236572, "learning_rate": 7.918333333333333e-06, "loss": 0.4298, "step": 3250 }, { "epoch": 143.47826086956522, "grad_norm": 0.8761409521102905, "learning_rate": 7.835e-06, "loss": 0.4231, "step": 3300 }, { "epoch": 145.65217391304347, "grad_norm": 1.1475930213928223, "learning_rate": 7.751666666666667e-06, "loss": 0.4316, "step": 3350 }, { "epoch": 147.82608695652175, "grad_norm": 0.8305974006652832, "learning_rate": 7.668333333333334e-06, "loss": 0.4277, "step": 3400 }, { "epoch": 150.0, "grad_norm": 1.6335935592651367, "learning_rate": 7.585e-06, "loss": 0.4248, "step": 3450 }, { "epoch": 152.17391304347825, "grad_norm": 1.1171984672546387, "learning_rate": 7.501666666666667e-06, "loss": 0.4318, "step": 3500 }, { "epoch": 152.17391304347825, "eval_loss": 0.4111216962337494, "eval_runtime": 13.0991, "eval_samples_per_second": 12.291, "eval_steps_per_second": 1.603, 
"step": 3500 } ], "logging_steps": 50, "max_steps": 8000, "num_input_tokens_seen": 0, "num_train_epochs": 348, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.241687452614216e+16, "train_batch_size": 32, "trial_name": null, "trial_params": null }