{ "best_metric": 0.3909551799297333, "best_model_checkpoint": "mikhail-panzo/zlm_b32_le4_s4000/checkpoint-2500", "epoch": 1.0471204188481675, "eval_steps": 500, "global_step": 2500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.020942408376963352, "grad_norm": 3.1775436401367188, "learning_rate": 2.35e-06, "loss": 1.1228, "step": 50 }, { "epoch": 0.041884816753926704, "grad_norm": 4.18980073928833, "learning_rate": 4.85e-06, "loss": 0.8304, "step": 100 }, { "epoch": 0.06282722513089005, "grad_norm": 4.5355024337768555, "learning_rate": 7.35e-06, "loss": 0.7701, "step": 150 }, { "epoch": 0.08376963350785341, "grad_norm": 3.9590957164764404, "learning_rate": 9.85e-06, "loss": 0.7096, "step": 200 }, { "epoch": 0.10471204188481675, "grad_norm": 5.419675350189209, "learning_rate": 1.235e-05, "loss": 0.6518, "step": 250 }, { "epoch": 0.1256544502617801, "grad_norm": 3.1052777767181396, "learning_rate": 1.485e-05, "loss": 0.6326, "step": 300 }, { "epoch": 0.14659685863874344, "grad_norm": 3.4649453163146973, "learning_rate": 1.7349999999999998e-05, "loss": 0.6241, "step": 350 }, { "epoch": 0.16753926701570682, "grad_norm": 4.962624549865723, "learning_rate": 1.985e-05, "loss": 0.5723, "step": 400 }, { "epoch": 0.18848167539267016, "grad_norm": 2.7586095333099365, "learning_rate": 2.235e-05, "loss": 0.5764, "step": 450 }, { "epoch": 0.2094240837696335, "grad_norm": 3.5610299110412598, "learning_rate": 2.485e-05, "loss": 0.5552, "step": 500 }, { "epoch": 0.2094240837696335, "eval_loss": 0.48825809359550476, "eval_runtime": 275.2604, "eval_samples_per_second": 30.84, "eval_steps_per_second": 3.858, "step": 500 }, { "epoch": 0.23036649214659685, "grad_norm": 3.3351809978485107, "learning_rate": 2.7350000000000004e-05, "loss": 0.5322, "step": 550 }, { "epoch": 0.2513089005235602, "grad_norm": 3.7222373485565186, "learning_rate": 2.985e-05, "loss": 0.5186, "step": 600 }, { "epoch": 0.27225130890052357, "grad_norm": 3.1811156272888184, "learning_rate": 3.235e-05, "loss": 0.5157, "step": 650 }, { "epoch": 0.2931937172774869, "grad_norm": 2.2585642337799072, "learning_rate": 3.485e-05, "loss": 0.5089, "step": 700 }, { "epoch": 0.31413612565445026, "grad_norm": 3.999460220336914, "learning_rate": 3.735e-05, "loss": 0.5117, "step": 750 }, { "epoch": 0.33507853403141363, "grad_norm": 3.739990472793579, "learning_rate": 3.9850000000000006e-05, "loss": 0.5031, "step": 800 }, { "epoch": 0.35602094240837695, "grad_norm": 4.251980781555176, "learning_rate": 4.235e-05, "loss": 0.5064, "step": 850 }, { "epoch": 0.3769633507853403, "grad_norm": 2.770602226257324, "learning_rate": 4.4850000000000006e-05, "loss": 0.4921, "step": 900 }, { "epoch": 0.39790575916230364, "grad_norm": 2.506974220275879, "learning_rate": 4.735e-05, "loss": 0.4839, "step": 950 }, { "epoch": 0.418848167539267, "grad_norm": 2.2666189670562744, "learning_rate": 4.9850000000000006e-05, "loss": 0.4913, "step": 1000 }, { "epoch": 0.418848167539267, "eval_loss": 0.4265913963317871, "eval_runtime": 273.9661, "eval_samples_per_second": 30.986, "eval_steps_per_second": 3.876, "step": 1000 }, { "epoch": 0.4397905759162304, "grad_norm": 3.1451058387756348, "learning_rate": 5.235e-05, "loss": 0.4722, "step": 1050 }, { "epoch": 0.4607329842931937, "grad_norm": 3.197997570037842, "learning_rate": 5.485e-05, "loss": 0.4692, "step": 1100 }, { "epoch": 0.4816753926701571, "grad_norm": 2.9112601280212402, "learning_rate": 5.7350000000000005e-05, "loss": 0.4738, "step": 1150 }, { 
"epoch": 0.5026178010471204, "grad_norm": 3.036731243133545, "learning_rate": 5.9850000000000005e-05, "loss": 0.4521, "step": 1200 }, { "epoch": 0.5235602094240838, "grad_norm": 5.021958351135254, "learning_rate": 6.235000000000001e-05, "loss": 0.4666, "step": 1250 }, { "epoch": 0.5445026178010471, "grad_norm": 3.302204132080078, "learning_rate": 6.485e-05, "loss": 0.4625, "step": 1300 }, { "epoch": 0.5654450261780105, "grad_norm": 3.2643635272979736, "learning_rate": 6.735e-05, "loss": 0.4683, "step": 1350 }, { "epoch": 0.5863874345549738, "grad_norm": 1.7499467134475708, "learning_rate": 6.985e-05, "loss": 0.449, "step": 1400 }, { "epoch": 0.6073298429319371, "grad_norm": 1.3616622686386108, "learning_rate": 7.235000000000001e-05, "loss": 0.4523, "step": 1450 }, { "epoch": 0.6282722513089005, "grad_norm": 2.5826191902160645, "learning_rate": 7.485e-05, "loss": 0.446, "step": 1500 }, { "epoch": 0.6282722513089005, "eval_loss": 0.3975289463996887, "eval_runtime": 266.7188, "eval_samples_per_second": 31.828, "eval_steps_per_second": 3.982, "step": 1500 }, { "epoch": 0.6492146596858639, "grad_norm": 3.6052303314208984, "learning_rate": 7.735e-05, "loss": 0.4449, "step": 1550 }, { "epoch": 0.6701570680628273, "grad_norm": 3.4120566844940186, "learning_rate": 7.985e-05, "loss": 0.4477, "step": 1600 }, { "epoch": 0.6910994764397905, "grad_norm": 2.187040090560913, "learning_rate": 8.235000000000001e-05, "loss": 0.4522, "step": 1650 }, { "epoch": 0.7120418848167539, "grad_norm": 1.718518853187561, "learning_rate": 8.485e-05, "loss": 0.4431, "step": 1700 }, { "epoch": 0.7329842931937173, "grad_norm": 1.8248894214630127, "learning_rate": 8.735000000000001e-05, "loss": 0.4496, "step": 1750 }, { "epoch": 0.7539267015706806, "grad_norm": 2.733355760574341, "learning_rate": 8.985e-05, "loss": 0.4297, "step": 1800 }, { "epoch": 0.774869109947644, "grad_norm": 2.5899884700775146, "learning_rate": 9.235000000000001e-05, "loss": 0.4588, "step": 1850 }, { "epoch": 0.7958115183246073, "grad_norm": 1.5704914331436157, "learning_rate": 9.485e-05, "loss": 0.4345, "step": 1900 }, { "epoch": 0.8167539267015707, "grad_norm": 5.18487548828125, "learning_rate": 9.735000000000001e-05, "loss": 0.4325, "step": 1950 }, { "epoch": 0.837696335078534, "grad_norm": 2.4392499923706055, "learning_rate": 9.985000000000001e-05, "loss": 0.4222, "step": 2000 }, { "epoch": 0.837696335078534, "eval_loss": 0.39489272236824036, "eval_runtime": 270.1429, "eval_samples_per_second": 31.424, "eval_steps_per_second": 3.931, "step": 2000 }, { "epoch": 0.8586387434554974, "grad_norm": 2.3119680881500244, "learning_rate": 9.765e-05, "loss": 0.4441, "step": 2050 }, { "epoch": 0.8795811518324608, "grad_norm": 2.3084118366241455, "learning_rate": 9.515000000000001e-05, "loss": 0.4202, "step": 2100 }, { "epoch": 0.900523560209424, "grad_norm": 2.7656476497650146, "learning_rate": 9.265e-05, "loss": 0.428, "step": 2150 }, { "epoch": 0.9214659685863874, "grad_norm": 9.909614562988281, "learning_rate": 9.015e-05, "loss": 0.4252, "step": 2200 }, { "epoch": 0.9424083769633508, "grad_norm": 1.4575620889663696, "learning_rate": 8.765e-05, "loss": 0.4257, "step": 2250 }, { "epoch": 0.9633507853403142, "grad_norm": 1.5492056608200073, "learning_rate": 8.515000000000001e-05, "loss": 0.4285, "step": 2300 }, { "epoch": 0.9842931937172775, "grad_norm": 1.4776012897491455, "learning_rate": 8.265e-05, "loss": 0.4259, "step": 2350 }, { "epoch": 1.0052356020942408, "grad_norm": 1.9230506420135498, "learning_rate": 8.015e-05, "loss": 0.4077, "step": 2400 }, { 
"epoch": 1.0261780104712042, "grad_norm": 2.333806037902832, "learning_rate": 7.765e-05, "loss": 0.4201, "step": 2450 }, { "epoch": 1.0471204188481675, "grad_norm": 2.3305835723876953, "learning_rate": 7.515e-05, "loss": 0.4227, "step": 2500 }, { "epoch": 1.0471204188481675, "eval_loss": 0.3909551799297333, "eval_runtime": 265.2762, "eval_samples_per_second": 32.001, "eval_steps_per_second": 4.003, "step": 2500 } ], "logging_steps": 50, "max_steps": 4000, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.1191222917756288e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }