| { |
| "best_global_step": null, |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 1.9910313901345291, |
| "eval_steps": 500, |
| "global_step": 444, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.02242152466367713, |
| "grad_norm": 2.15625, |
| "learning_rate": 2.305093438544906e-05, |
| "loss": 0.9112, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.04484304932735426, |
| "grad_norm": 1.4765625, |
| "learning_rate": 5.1864602367260384e-05, |
| "loss": 0.7142, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.06726457399103139, |
| "grad_norm": 1.09375, |
| "learning_rate": 8.067827034907172e-05, |
| "loss": 0.6696, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.08968609865470852, |
| "grad_norm": 0.9921875, |
| "learning_rate": 0.00010949193833088303, |
| "loss": 0.652, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.11210762331838565, |
| "grad_norm": 0.87109375, |
| "learning_rate": 0.00013830560631269436, |
| "loss": 0.6583, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.13452914798206278, |
| "grad_norm": 1.2578125, |
| "learning_rate": 0.00016711927429450567, |
| "loss": 0.6354, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.15695067264573992, |
| "grad_norm": 1.1875, |
| "learning_rate": 0.000195932942276317, |
| "loss": 0.6436, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.17937219730941703, |
| "grad_norm": 1.1484375, |
| "learning_rate": 0.00022474661025812833, |
| "loss": 0.6563, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.20179372197309417, |
| "grad_norm": 9.5, |
| "learning_rate": 0.00025356027823993967, |
| "loss": 0.8176, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.2242152466367713, |
| "grad_norm": 1.2890625, |
| "learning_rate": 0.00028237394622175095, |
| "loss": 0.8045, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.24663677130044842, |
| "grad_norm": 1.3125, |
| "learning_rate": 0.00028811441478558746, |
| "loss": 0.7267, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.26905829596412556, |
| "grad_norm": 1.2578125, |
| "learning_rate": 0.0002880239788169296, |
| "loss": 0.7135, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.2914798206278027, |
| "grad_norm": 0.6953125, |
| "learning_rate": 0.0002878640385367635, |
| "loss": 0.6942, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.31390134529147984, |
| "grad_norm": 0.640625, |
| "learning_rate": 0.0002876346969346014, |
| "loss": 0.6893, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.336322869955157, |
| "grad_norm": 0.66015625, |
| "learning_rate": 0.0002873361016891883, |
| "loss": 0.6921, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.35874439461883406, |
| "grad_norm": 0.60546875, |
| "learning_rate": 0.000286968445073407, |
| "loss": 0.6859, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.3811659192825112, |
| "grad_norm": 0.57421875, |
| "learning_rate": 0.00028653196383046967, |
| "loss": 0.6619, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.40358744394618834, |
| "grad_norm": 0.54296875, |
| "learning_rate": 0.0002860269390214723, |
| "loss": 0.6673, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.4260089686098655, |
| "grad_norm": 0.55859375, |
| "learning_rate": 0.00028545369584441287, |
| "loss": 0.6669, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.4484304932735426, |
| "grad_norm": 0.6328125, |
| "learning_rate": 0.00028481260342478823, |
| "loss": 0.6705, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.47085201793721976, |
| "grad_norm": 0.53515625, |
| "learning_rate": 0.000284104074577905, |
| "loss": 0.6551, |
| "step": 105 |
| }, |
| { |
| "epoch": 0.49327354260089684, |
| "grad_norm": 0.5390625, |
| "learning_rate": 0.00028332856554305765, |
| "loss": 0.6536, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.515695067264574, |
| "grad_norm": 0.484375, |
| "learning_rate": 0.0002824865756897446, |
| "loss": 0.6496, |
| "step": 115 |
| }, |
| { |
| "epoch": 0.5381165919282511, |
| "grad_norm": 0.5, |
| "learning_rate": 0.0002815786471961118, |
| "loss": 0.6421, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.5605381165919282, |
| "grad_norm": 0.44921875, |
| "learning_rate": 0.00028060536469983084, |
| "loss": 0.6333, |
| "step": 125 |
| }, |
| { |
| "epoch": 0.5829596412556054, |
| "grad_norm": 0.46484375, |
| "learning_rate": 0.0002795673549216364, |
| "loss": 0.6506, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.6053811659192825, |
| "grad_norm": 0.515625, |
| "learning_rate": 0.0002784652862617649, |
| "loss": 0.6242, |
| "step": 135 |
| }, |
| { |
| "epoch": 0.6278026905829597, |
| "grad_norm": 0.5234375, |
| "learning_rate": 0.0002772998683695552, |
| "loss": 0.6467, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.6502242152466368, |
| "grad_norm": 0.43359375, |
| "learning_rate": 0.00027607185168648785, |
| "loss": 0.634, |
| "step": 145 |
| }, |
| { |
| "epoch": 0.672645739910314, |
| "grad_norm": 0.5078125, |
| "learning_rate": 0.0002747820269629572, |
| "loss": 0.6367, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.695067264573991, |
| "grad_norm": 0.478515625, |
| "learning_rate": 0.0002734312247490874, |
| "loss": 0.6091, |
| "step": 155 |
| }, |
| { |
| "epoch": 0.7174887892376681, |
| "grad_norm": 0.421875, |
| "learning_rate": 0.0002720203148599208, |
| "loss": 0.6293, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.7399103139013453, |
| "grad_norm": 0.4140625, |
| "learning_rate": 0.00027055020581532246, |
| "loss": 0.6168, |
| "step": 165 |
| }, |
| { |
| "epoch": 0.7623318385650224, |
| "grad_norm": 0.462890625, |
| "learning_rate": 0.00026902184425496155, |
| "loss": 0.6284, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.7847533632286996, |
| "grad_norm": 0.435546875, |
| "learning_rate": 0.0002674362143287467, |
| "loss": 0.6163, |
| "step": 175 |
| }, |
| { |
| "epoch": 0.8071748878923767, |
| "grad_norm": 0.47265625, |
| "learning_rate": 0.0002657943370631075, |
| "loss": 0.6099, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.8295964125560538, |
| "grad_norm": 0.53125, |
| "learning_rate": 0.00026409726970353, |
| "loss": 0.6159, |
| "step": 185 |
| }, |
| { |
| "epoch": 0.852017937219731, |
| "grad_norm": 0.48828125, |
| "learning_rate": 0.00026234610503377063, |
| "loss": 0.6001, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.874439461883408, |
| "grad_norm": 0.41796875, |
| "learning_rate": 0.00026054197067218514, |
| "loss": 0.5967, |
| "step": 195 |
| }, |
| { |
| "epoch": 0.8968609865470852, |
| "grad_norm": 0.435546875, |
| "learning_rate": 0.0002586860283456274, |
| "loss": 0.6301, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.9192825112107623, |
| "grad_norm": 0.447265625, |
| "learning_rate": 0.00025677947314138464, |
| "loss": 0.6061, |
| "step": 205 |
| }, |
| { |
| "epoch": 0.9417040358744395, |
| "grad_norm": 0.447265625, |
| "learning_rate": 0.00025482353273763113, |
| "loss": 0.5923, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.9641255605381166, |
| "grad_norm": 0.494140625, |
| "learning_rate": 0.0002528194666128958, |
| "loss": 0.6004, |
| "step": 215 |
| }, |
| { |
| "epoch": 0.9865470852017937, |
| "grad_norm": 0.419921875, |
| "learning_rate": 0.0002507685652350527, |
| "loss": 0.612, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.9955156950672646, |
| "eval_loss": 0.6039933562278748, |
| "eval_runtime": 2.2333, |
| "eval_samples_per_second": 18.806, |
| "eval_steps_per_second": 18.806, |
| "step": 222 |
| }, |
| { |
| "epoch": 1.0089686098654709, |
| "grad_norm": 0.55859375, |
| "learning_rate": 0.0002486721492303566, |
| "loss": 0.533, |
| "step": 225 |
| }, |
| { |
| "epoch": 1.031390134529148, |
| "grad_norm": 0.451171875, |
| "learning_rate": 0.0002465315685330595, |
| "loss": 0.4381, |
| "step": 230 |
| }, |
| { |
| "epoch": 1.053811659192825, |
| "grad_norm": 0.466796875, |
| "learning_rate": 0.0002443482015161539, |
| "loss": 0.4257, |
| "step": 235 |
| }, |
| { |
| "epoch": 1.0762331838565022, |
| "grad_norm": 0.490234375, |
| "learning_rate": 0.0002421234541038045, |
| "loss": 0.4324, |
| "step": 240 |
| }, |
| { |
| "epoch": 1.0986547085201794, |
| "grad_norm": 0.404296875, |
| "learning_rate": 0.00023985875886603888, |
| "loss": 0.4228, |
| "step": 245 |
| }, |
| { |
| "epoch": 1.1210762331838564, |
| "grad_norm": 0.40234375, |
| "learning_rate": 0.00023755557409627998, |
| "loss": 0.4219, |
| "step": 250 |
| }, |
| { |
| "epoch": 1.1434977578475336, |
| "grad_norm": 0.373046875, |
| "learning_rate": 0.00023521538287231476, |
| "loss": 0.4365, |
| "step": 255 |
| }, |
| { |
| "epoch": 1.1659192825112108, |
| "grad_norm": 0.42578125, |
| "learning_rate": 0.0002328396921013038, |
| "loss": 0.4315, |
| "step": 260 |
| }, |
| { |
| "epoch": 1.188340807174888, |
| "grad_norm": 0.384765625, |
| "learning_rate": 0.00023043003154944643, |
| "loss": 0.428, |
| "step": 265 |
| }, |
| { |
| "epoch": 1.210762331838565, |
| "grad_norm": 0.41796875, |
| "learning_rate": 0.000227987952856926, |
| "loss": 0.4248, |
| "step": 270 |
| }, |
| { |
| "epoch": 1.2331838565022422, |
| "grad_norm": 0.365234375, |
| "learning_rate": 0.00022551502853877082, |
| "loss": 0.4306, |
| "step": 275 |
| }, |
| { |
| "epoch": 1.2556053811659194, |
| "grad_norm": 0.376953125, |
| "learning_rate": 0.00022301285097227208, |
| "loss": 0.4423, |
| "step": 280 |
| }, |
| { |
| "epoch": 1.2780269058295963, |
| "grad_norm": 0.39453125, |
| "learning_rate": 0.00022048303137161342, |
| "loss": 0.4351, |
| "step": 285 |
| }, |
| { |
| "epoch": 1.3004484304932735, |
| "grad_norm": 0.3828125, |
| "learning_rate": 0.00021792719875036935, |
| "loss": 0.4371, |
| "step": 290 |
| }, |
| { |
| "epoch": 1.3228699551569507, |
| "grad_norm": 0.380859375, |
| "learning_rate": 0.00021534699887254367, |
| "loss": 0.434, |
| "step": 295 |
| }, |
| { |
| "epoch": 1.3452914798206277, |
| "grad_norm": 0.375, |
| "learning_rate": 0.00021274409319282082, |
| "loss": 0.4292, |
| "step": 300 |
| }, |
| { |
| "epoch": 1.3677130044843049, |
| "grad_norm": 0.37890625, |
| "learning_rate": 0.00021012015778671412, |
| "loss": 0.4345, |
| "step": 305 |
| }, |
| { |
| "epoch": 1.390134529147982, |
| "grad_norm": 0.390625, |
| "learning_rate": 0.00020747688227129932, |
| "loss": 0.4313, |
| "step": 310 |
| }, |
| { |
| "epoch": 1.4125560538116593, |
| "grad_norm": 0.3671875, |
| "learning_rate": 0.00020481596871722803, |
| "loss": 0.4319, |
| "step": 315 |
| }, |
| { |
| "epoch": 1.4349775784753362, |
| "grad_norm": 0.42578125, |
| "learning_rate": 0.0002021391305527223, |
| "loss": 0.4306, |
| "step": 320 |
| }, |
| { |
| "epoch": 1.4573991031390134, |
| "grad_norm": 0.359375, |
| "learning_rate": 0.00019944809146025586, |
| "loss": 0.4415, |
| "step": 325 |
| }, |
| { |
| "epoch": 1.4798206278026906, |
| "grad_norm": 0.376953125, |
| "learning_rate": 0.00019674458426663204, |
| "loss": 0.4288, |
| "step": 330 |
| }, |
| { |
| "epoch": 1.5022421524663678, |
| "grad_norm": 0.365234375, |
| "learning_rate": 0.0001940303498271737, |
| "loss": 0.4263, |
| "step": 335 |
| }, |
| { |
| "epoch": 1.5246636771300448, |
| "grad_norm": 0.361328125, |
| "learning_rate": 0.00019130713590474326, |
| "loss": 0.429, |
| "step": 340 |
| }, |
| { |
| "epoch": 1.547085201793722, |
| "grad_norm": 0.3671875, |
| "learning_rate": 0.00018857669604431496, |
| "loss": 0.4147, |
| "step": 345 |
| }, |
| { |
| "epoch": 1.5695067264573992, |
| "grad_norm": 0.369140625, |
| "learning_rate": 0.0001858407884438237, |
| "loss": 0.4215, |
| "step": 350 |
| }, |
| { |
| "epoch": 1.5919282511210762, |
| "grad_norm": 0.373046875, |
| "learning_rate": 0.0001831011748220177, |
| "loss": 0.4232, |
| "step": 355 |
| }, |
| { |
| "epoch": 1.6143497757847534, |
| "grad_norm": 0.359375, |
| "learning_rate": 0.00018035961928404432, |
| "loss": 0.4133, |
| "step": 360 |
| }, |
| { |
| "epoch": 1.6367713004484306, |
| "grad_norm": 0.3515625, |
| "learning_rate": 0.0001776178871854989, |
| "loss": 0.4198, |
| "step": 365 |
| }, |
| { |
| "epoch": 1.6591928251121075, |
| "grad_norm": 0.35546875, |
| "learning_rate": 0.00017487774399566828, |
| "loss": 0.4253, |
| "step": 370 |
| }, |
| { |
| "epoch": 1.6816143497757847, |
| "grad_norm": 0.36328125, |
| "learning_rate": 0.00017214095416070196, |
| "loss": 0.4247, |
| "step": 375 |
| }, |
| { |
| "epoch": 1.704035874439462, |
| "grad_norm": 0.3515625, |
| "learning_rate": 0.00016940927996744084, |
| "loss": 0.4173, |
| "step": 380 |
| }, |
| { |
| "epoch": 1.726457399103139, |
| "grad_norm": 0.337890625, |
| "learning_rate": 0.00016668448040863722, |
| "loss": 0.4197, |
| "step": 385 |
| }, |
| { |
| "epoch": 1.7488789237668163, |
| "grad_norm": 0.357421875, |
| "learning_rate": 0.000163968310050296, |
| "loss": 0.4171, |
| "step": 390 |
| }, |
| { |
| "epoch": 1.7713004484304933, |
| "grad_norm": 0.33203125, |
| "learning_rate": 0.00016126251790186578, |
| "loss": 0.3982, |
| "step": 395 |
| }, |
| { |
| "epoch": 1.7937219730941703, |
| "grad_norm": 0.330078125, |
| "learning_rate": 0.0001585688462900084, |
| "loss": 0.4102, |
| "step": 400 |
| }, |
| { |
| "epoch": 1.8161434977578477, |
| "grad_norm": 0.341796875, |
| "learning_rate": 0.0001558890297366718, |
| "loss": 0.4096, |
| "step": 405 |
| }, |
| { |
| "epoch": 1.8385650224215246, |
| "grad_norm": 0.330078125, |
| "learning_rate": 0.000153224793842188, |
| "loss": 0.4118, |
| "step": 410 |
| }, |
| { |
| "epoch": 1.8609865470852018, |
| "grad_norm": 0.328125, |
| "learning_rate": 0.0001505778541741166, |
| "loss": 0.3986, |
| "step": 415 |
| }, |
| { |
| "epoch": 1.883408071748879, |
| "grad_norm": 0.341796875, |
| "learning_rate": 0.00014794991516254793, |
| "loss": 0.399, |
| "step": 420 |
| }, |
| { |
| "epoch": 1.905829596412556, |
| "grad_norm": 0.345703125, |
| "learning_rate": 0.0001453426690025785, |
| "loss": 0.402, |
| "step": 425 |
| }, |
| { |
| "epoch": 1.9282511210762332, |
| "grad_norm": 0.34765625, |
| "learning_rate": 0.00014275779456466455, |
| "loss": 0.4102, |
| "step": 430 |
| }, |
| { |
| "epoch": 1.9506726457399104, |
| "grad_norm": 0.32421875, |
| "learning_rate": 0.00014019695631355567, |
| "loss": 0.3942, |
| "step": 435 |
| }, |
| { |
| "epoch": 1.9730941704035874, |
| "grad_norm": 0.333984375, |
| "learning_rate": 0.0001376618032365048, |
| "loss": 0.4017, |
| "step": 440 |
| }, |
| { |
| "epoch": 1.9910313901345291, |
| "eval_loss": 0.5547934770584106, |
| "eval_runtime": 2.0196, |
| "eval_samples_per_second": 20.796, |
| "eval_steps_per_second": 20.796, |
| "step": 444 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 669, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 500, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": false |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 7.145669325918044e+17, |
| "train_batch_size": 100, |
| "trial_name": null, |
| "trial_params": null |
| } |