{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 9.952153110047847, |
|
"eval_steps": 500, |
|
"global_step": 1040, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.009569377990430622, |
|
"grad_norm": 452.0, |
|
"learning_rate": 1.9230769230769234e-06, |
|
"loss": 52.8047, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.04784688995215311, |
|
"grad_norm": 348.0, |
|
"learning_rate": 9.615384615384616e-06, |
|
"loss": 50.9309, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.09569377990430622, |
|
"grad_norm": 151.0, |
|
"learning_rate": 1.923076923076923e-05, |
|
"loss": 41.6311, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.14354066985645933, |
|
"grad_norm": 38.5, |
|
"learning_rate": 2.8846153846153845e-05, |
|
"loss": 30.0661, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.19138755980861244, |
|
"grad_norm": 21.625, |
|
"learning_rate": 3.846153846153846e-05, |
|
"loss": 25.0223, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.23923444976076555, |
|
"grad_norm": 9.5, |
|
"learning_rate": 4.8076923076923084e-05, |
|
"loss": 22.4207, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.28708133971291866, |
|
"grad_norm": 5.21875, |
|
"learning_rate": 5.769230769230769e-05, |
|
"loss": 20.4665, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.3349282296650718, |
|
"grad_norm": 5.25, |
|
"learning_rate": 6.730769230769232e-05, |
|
"loss": 19.3811, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.3827751196172249, |
|
"grad_norm": 7.28125, |
|
"learning_rate": 7.692307692307693e-05, |
|
"loss": 18.3511, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.430622009569378, |
|
"grad_norm": 14.6875, |
|
"learning_rate": 8.653846153846155e-05, |
|
"loss": 16.8341, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.4784688995215311, |
|
"grad_norm": 28.25, |
|
"learning_rate": 9.615384615384617e-05, |
|
"loss": 13.7818, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.5263157894736842, |
|
"grad_norm": 28.375, |
|
"learning_rate": 0.00010576923076923077, |
|
"loss": 7.7465, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.5741626794258373, |
|
"grad_norm": 8.25, |
|
"learning_rate": 0.00011538461538461538, |
|
"loss": 3.0182, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.6220095693779905, |
|
"grad_norm": 6.90625, |
|
"learning_rate": 0.000125, |
|
"loss": 2.2631, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.6698564593301436, |
|
"grad_norm": 16.125, |
|
"learning_rate": 0.00013461538461538464, |
|
"loss": 1.8651, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.7177033492822966, |
|
"grad_norm": 0.83203125, |
|
"learning_rate": 0.00014423076923076924, |
|
"loss": 1.6482, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.7655502392344498, |
|
"grad_norm": 1.3125, |
|
"learning_rate": 0.00015384615384615385, |
|
"loss": 1.4966, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.8133971291866029, |
|
"grad_norm": 1.09375, |
|
"learning_rate": 0.00016346153846153846, |
|
"loss": 1.4308, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.861244019138756, |
|
"grad_norm": 1.0625, |
|
"learning_rate": 0.0001730769230769231, |
|
"loss": 1.3404, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.9090909090909091, |
|
"grad_norm": 1.828125, |
|
"learning_rate": 0.0001826923076923077, |
|
"loss": 1.2829, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.9569377990430622, |
|
"grad_norm": 0.72265625, |
|
"learning_rate": 0.00019230769230769233, |
|
"loss": 1.2495, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.9952153110047847, |
|
"eval_loss": 2.6542556285858154, |
|
"eval_runtime": 0.2797, |
|
"eval_samples_per_second": 35.754, |
|
"eval_steps_per_second": 3.575, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 1.0047846889952152, |
|
"grad_norm": 0.90234375, |
|
"learning_rate": 0.0001999994367286727, |
|
"loss": 1.2336, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 1.0526315789473684, |
|
"grad_norm": 2.046875, |
|
"learning_rate": 0.00019997972289848503, |
|
"loss": 1.1928, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.1004784688995215, |
|
"grad_norm": 9.25, |
|
"learning_rate": 0.00019993185184710165, |
|
"loss": 1.1702, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.1483253588516746, |
|
"grad_norm": 1.1015625, |
|
"learning_rate": 0.00019985583705641418, |
|
"loss": 1.1684, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.1961722488038278, |
|
"grad_norm": 2.859375, |
|
"learning_rate": 0.00019975169993441627, |
|
"loss": 1.125, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.244019138755981, |
|
"grad_norm": 0.76171875, |
|
"learning_rate": 0.00019961946980917456, |
|
"loss": 1.1078, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.291866028708134, |
|
"grad_norm": 1.546875, |
|
"learning_rate": 0.0001994591839205691, |
|
"loss": 1.1369, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.339712918660287, |
|
"grad_norm": 3.328125, |
|
"learning_rate": 0.0001992708874098054, |
|
"loss": 1.144, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.38755980861244, |
|
"grad_norm": 2.53125, |
|
"learning_rate": 0.00019905463330670143, |
|
"loss": 1.1301, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.4354066985645932, |
|
"grad_norm": 2.890625, |
|
"learning_rate": 0.0001988104825147528, |
|
"loss": 1.146, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.4832535885167464, |
|
"grad_norm": 2.453125, |
|
"learning_rate": 0.0001985385037939806, |
|
"loss": 1.0953, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.5311004784688995, |
|
"grad_norm": 2.1875, |
|
"learning_rate": 0.00019823877374156647, |
|
"loss": 1.0699, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.5789473684210527, |
|
"grad_norm": 1.984375, |
|
"learning_rate": 0.00019791137677028082, |
|
"loss": 1.0699, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.6267942583732058, |
|
"grad_norm": 2.296875, |
|
"learning_rate": 0.00019755640508470942, |
|
"loss": 1.0545, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.674641148325359, |
|
"grad_norm": 1.8828125, |
|
"learning_rate": 0.00019717395865528602, |
|
"loss": 1.0404, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.722488038277512, |
|
"grad_norm": 2.015625, |
|
"learning_rate": 0.00019676414519013781, |
|
"loss": 1.0512, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.7703349282296652, |
|
"grad_norm": 6.75, |
|
"learning_rate": 0.00019632708010475165, |
|
"loss": 1.0382, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.8181818181818183, |
|
"grad_norm": 0.8046875, |
|
"learning_rate": 0.00019586288648946947, |
|
"loss": 1.0383, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.8660287081339713, |
|
"grad_norm": 0.875, |
|
"learning_rate": 0.0001953716950748227, |
|
"loss": 1.0356, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.9138755980861244, |
|
"grad_norm": 6.03125, |
|
"learning_rate": 0.00019485364419471454, |
|
"loss": 1.0354, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.9617224880382775, |
|
"grad_norm": 1.640625, |
|
"learning_rate": 0.0001943088797474612, |
|
"loss": 1.0258, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": 2.5231521129608154, |
|
"eval_runtime": 0.2369, |
|
"eval_samples_per_second": 42.205, |
|
"eval_steps_per_second": 4.22, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 2.0095693779904304, |
|
"grad_norm": 1.578125, |
|
"learning_rate": 0.00019373755515470254, |
|
"loss": 1.0034, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 2.0574162679425836, |
|
"grad_norm": 1.5703125, |
|
"learning_rate": 0.00019313983131819407, |
|
"loss": 0.9855, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 2.1052631578947367, |
|
"grad_norm": 4.4375, |
|
"learning_rate": 0.00019251587657449236, |
|
"loss": 0.9769, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.15311004784689, |
|
"grad_norm": 1.8359375, |
|
"learning_rate": 0.0001918658666475465, |
|
"loss": 0.9829, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 2.200956937799043, |
|
"grad_norm": 0.7421875, |
|
"learning_rate": 0.00019118998459920902, |
|
"loss": 0.9843, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 2.248803827751196, |
|
"grad_norm": 2.578125, |
|
"learning_rate": 0.0001904884207776804, |
|
"loss": 0.9876, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 2.2966507177033493, |
|
"grad_norm": 1.5390625, |
|
"learning_rate": 0.0001897613727639014, |
|
"loss": 0.9713, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.3444976076555024, |
|
"grad_norm": 1.9609375, |
|
"learning_rate": 0.00018900904531590846, |
|
"loss": 0.9706, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 2.3923444976076556, |
|
"grad_norm": 2.453125, |
|
"learning_rate": 0.0001882316503111678, |
|
"loss": 0.9635, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.4401913875598087, |
|
"grad_norm": 1.6484375, |
|
"learning_rate": 0.00018742940668690464, |
|
"loss": 0.9608, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 2.488038277511962, |
|
"grad_norm": 1.46875, |
|
"learning_rate": 0.00018660254037844388, |
|
"loss": 0.9498, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.535885167464115, |
|
"grad_norm": 4.34375, |
|
"learning_rate": 0.00018575128425558023, |
|
"loss": 0.9539, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 2.583732057416268, |
|
"grad_norm": 2.90625, |
|
"learning_rate": 0.00018487587805699526, |
|
"loss": 0.96, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.6315789473684212, |
|
"grad_norm": 0.92578125, |
|
"learning_rate": 0.0001839765683227398, |
|
"loss": 0.9431, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 2.679425837320574, |
|
"grad_norm": 4.375, |
|
"learning_rate": 0.00018305360832480117, |
|
"loss": 0.9457, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.7272727272727275, |
|
"grad_norm": 11.1875, |
|
"learning_rate": 0.00018210725799577439, |
|
"loss": 0.9594, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 2.77511961722488, |
|
"grad_norm": 1.078125, |
|
"learning_rate": 0.00018113778385565733, |
|
"loss": 0.9542, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.8229665071770333, |
|
"grad_norm": 3.734375, |
|
"learning_rate": 0.00018014545893679115, |
|
"loss": 0.9407, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 2.8708133971291865, |
|
"grad_norm": 1.9765625, |
|
"learning_rate": 0.0001791305627069662, |
|
"loss": 0.9418, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.9186602870813396, |
|
"grad_norm": 0.88671875, |
|
"learning_rate": 0.00017809338099071577, |
|
"loss": 0.9357, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.9665071770334928, |
|
"grad_norm": 1.046875, |
|
"learning_rate": 0.00017703420588881946, |
|
"loss": 0.9351, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.9952153110047846, |
|
"eval_loss": 2.466839551925659, |
|
"eval_runtime": 0.257, |
|
"eval_samples_per_second": 38.912, |
|
"eval_steps_per_second": 3.891, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 3.014354066985646, |
|
"grad_norm": 1.7109375, |
|
"learning_rate": 0.0001759533356960391, |
|
"loss": 0.9283, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 3.062200956937799, |
|
"grad_norm": 2.0625, |
|
"learning_rate": 0.00017485107481711012, |
|
"loss": 0.9187, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 3.110047846889952, |
|
"grad_norm": 1.8125, |
|
"learning_rate": 0.0001737277336810124, |
|
"loss": 0.9121, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 3.1578947368421053, |
|
"grad_norm": 3.6875, |
|
"learning_rate": 0.00017258362865354426, |
|
"loss": 0.9481, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 3.2057416267942584, |
|
"grad_norm": 3.84375, |
|
"learning_rate": 0.00017141908194822446, |
|
"loss": 0.9215, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 3.2535885167464116, |
|
"grad_norm": 8.625, |
|
"learning_rate": 0.00017023442153554777, |
|
"loss": 0.9082, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 3.3014354066985647, |
|
"grad_norm": 0.94140625, |
|
"learning_rate": 0.00016902998105061844, |
|
"loss": 0.9035, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 3.349282296650718, |
|
"grad_norm": 0.9921875, |
|
"learning_rate": 0.0001678060996991891, |
|
"loss": 0.9084, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 3.397129186602871, |
|
"grad_norm": 1.1484375, |
|
"learning_rate": 0.00016656312216213034, |
|
"loss": 0.8981, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 3.444976076555024, |
|
"grad_norm": 1.0859375, |
|
"learning_rate": 0.0001653013984983585, |
|
"loss": 0.9024, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 3.492822966507177, |
|
"grad_norm": 0.80078125, |
|
"learning_rate": 0.00016402128404624882, |
|
"loss": 0.9027, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 3.5406698564593304, |
|
"grad_norm": 0.8125, |
|
"learning_rate": 0.00016272313932356162, |
|
"loss": 0.8924, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.588516746411483, |
|
"grad_norm": 0.99609375, |
|
"learning_rate": 0.0001614073299259101, |
|
"loss": 0.8943, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 3.6363636363636362, |
|
"grad_norm": 1.6796875, |
|
"learning_rate": 0.0001600742264237979, |
|
"loss": 0.9152, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.6842105263157894, |
|
"grad_norm": 1.9375, |
|
"learning_rate": 0.0001587242042582554, |
|
"loss": 0.9076, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 3.7320574162679425, |
|
"grad_norm": 1.859375, |
|
"learning_rate": 0.0001573576436351046, |
|
"loss": 0.8935, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.7799043062200957, |
|
"grad_norm": 1.1328125, |
|
"learning_rate": 0.00015597492941788222, |
|
"loss": 0.8895, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 3.827751196172249, |
|
"grad_norm": 0.89453125, |
|
"learning_rate": 0.00015457645101945046, |
|
"loss": 0.8767, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 3.875598086124402, |
|
"grad_norm": 3.609375, |
|
"learning_rate": 0.00015316260229232727, |
|
"loss": 0.8795, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 3.923444976076555, |
|
"grad_norm": 2.40625, |
|
"learning_rate": 0.00015173378141776568, |
|
"loss": 0.8847, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 3.971291866028708, |
|
"grad_norm": 4.0, |
|
"learning_rate": 0.00015029039079361448, |
|
"loss": 0.8914, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 2.4789059162139893, |
|
"eval_runtime": 0.2403, |
|
"eval_samples_per_second": 41.62, |
|
"eval_steps_per_second": 4.162, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 4.019138755980861, |
|
"grad_norm": 2.28125, |
|
"learning_rate": 0.00014883283692099112, |
|
"loss": 0.8795, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 4.0669856459330145, |
|
"grad_norm": 1.890625, |
|
"learning_rate": 0.00014736153028979893, |
|
"loss": 0.8598, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 4.114832535885167, |
|
"grad_norm": 2.984375, |
|
"learning_rate": 0.00014587688526312143, |
|
"loss": 0.8533, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 4.162679425837321, |
|
"grad_norm": 2.453125, |
|
"learning_rate": 0.00014437931996052518, |
|
"loss": 0.8544, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 4.2105263157894735, |
|
"grad_norm": 8.4375, |
|
"learning_rate": 0.00014286925614030542, |
|
"loss": 0.8635, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 4.258373205741627, |
|
"grad_norm": 4.34375, |
|
"learning_rate": 0.00014134711908070631, |
|
"loss": 0.8664, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 4.30622009569378, |
|
"grad_norm": 0.83984375, |
|
"learning_rate": 0.0001398133374601501, |
|
"loss": 0.8567, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 4.354066985645933, |
|
"grad_norm": 0.73828125, |
|
"learning_rate": 0.000138268343236509, |
|
"loss": 0.8579, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 4.401913875598086, |
|
"grad_norm": 1.3203125, |
|
"learning_rate": 0.00013671257152545277, |
|
"loss": 0.8562, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 4.44976076555024, |
|
"grad_norm": 1.140625, |
|
"learning_rate": 0.00013514646047790775, |
|
"loss": 0.8527, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 4.497607655502392, |
|
"grad_norm": 0.80078125, |
|
"learning_rate": 0.0001335704511566605, |
|
"loss": 0.8474, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 4.545454545454545, |
|
"grad_norm": 2.625, |
|
"learning_rate": 0.00013198498741214166, |
|
"loss": 0.856, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 4.5933014354066986, |
|
"grad_norm": 0.91015625, |
|
"learning_rate": 0.0001303905157574247, |
|
"loss": 0.8586, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 4.641148325358852, |
|
"grad_norm": 1.28125, |
|
"learning_rate": 0.00012878748524247462, |
|
"loss": 0.8519, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 4.688995215311005, |
|
"grad_norm": 1.609375, |
|
"learning_rate": 0.00012717634732768243, |
|
"loss": 0.8483, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 4.7368421052631575, |
|
"grad_norm": 1.6171875, |
|
"learning_rate": 0.0001255575557567207, |
|
"loss": 0.8663, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 4.784688995215311, |
|
"grad_norm": 0.890625, |
|
"learning_rate": 0.0001239315664287558, |
|
"loss": 0.8684, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 4.832535885167464, |
|
"grad_norm": 1.3828125, |
|
"learning_rate": 0.00012229883727005365, |
|
"loss": 0.8391, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 4.880382775119617, |
|
"grad_norm": 1.21875, |
|
"learning_rate": 0.00012065982810501404, |
|
"loss": 0.8491, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 4.92822966507177, |
|
"grad_norm": 3.765625, |
|
"learning_rate": 0.00011901500052667068, |
|
"loss": 0.8435, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 4.976076555023924, |
|
"grad_norm": 6.75, |
|
"learning_rate": 0.00011736481776669306, |
|
"loss": 0.8487, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 4.9952153110047846, |
|
"eval_loss": 2.475181818008423, |
|
"eval_runtime": 0.2635, |
|
"eval_samples_per_second": 37.955, |
|
"eval_steps_per_second": 3.796, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 5.023923444976076, |
|
"grad_norm": 6.28125, |
|
"learning_rate": 0.00011570974456492678, |
|
"loss": 0.845, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 5.07177033492823, |
|
"grad_norm": 12.1875, |
|
"learning_rate": 0.00011405024703850929, |
|
"loss": 0.8426, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 5.119617224880383, |
|
"grad_norm": 7.71875, |
|
"learning_rate": 0.00011238679255059752, |
|
"loss": 0.8348, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 5.167464114832536, |
|
"grad_norm": 6.3125, |
|
"learning_rate": 0.00011071984957874479, |
|
"loss": 0.829, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 5.215311004784689, |
|
"grad_norm": 0.578125, |
|
"learning_rate": 0.0001090498875829638, |
|
"loss": 0.8233, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 5.2631578947368425, |
|
"grad_norm": 2.03125, |
|
"learning_rate": 0.00010737737687351284, |
|
"loss": 0.8409, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 5.311004784688995, |
|
"grad_norm": 0.984375, |
|
"learning_rate": 0.00010570278847844275, |
|
"loss": 0.8217, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 5.358851674641148, |
|
"grad_norm": 1.4609375, |
|
"learning_rate": 0.00010402659401094152, |
|
"loss": 0.8108, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 5.4066985645933014, |
|
"grad_norm": 1.2421875, |
|
"learning_rate": 0.00010234926553651422, |
|
"loss": 0.8351, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 5.454545454545454, |
|
"grad_norm": 1.140625, |
|
"learning_rate": 0.00010067127544003563, |
|
"loss": 0.8194, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 5.502392344497608, |
|
"grad_norm": 0.98046875, |
|
"learning_rate": 9.899309629271246e-05, |
|
"loss": 0.8161, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 5.55023923444976, |
|
"grad_norm": 1.1875, |
|
"learning_rate": 9.73152007189939e-05, |
|
"loss": 0.8156, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 5.598086124401914, |
|
"grad_norm": 1.4375, |
|
"learning_rate": 9.563806126346642e-05, |
|
"loss": 0.8183, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 5.645933014354067, |
|
"grad_norm": 0.8359375, |
|
"learning_rate": 9.396215025777139e-05, |
|
"loss": 0.8079, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 5.69377990430622, |
|
"grad_norm": 1.234375, |
|
"learning_rate": 9.22879396875828e-05, |
|
"loss": 0.821, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 5.741626794258373, |
|
"grad_norm": 0.85546875, |
|
"learning_rate": 9.061590105968208e-05, |
|
"loss": 0.8078, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 5.7894736842105265, |
|
"grad_norm": 1.25, |
|
"learning_rate": 8.894650526916803e-05, |
|
"loss": 0.8042, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 5.837320574162679, |
|
"grad_norm": 1.046875, |
|
"learning_rate": 8.728022246683894e-05, |
|
"loss": 0.8298, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 5.885167464114833, |
|
"grad_norm": 2.34375, |
|
"learning_rate": 8.561752192678443e-05, |
|
"loss": 0.8122, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 5.9330143540669855, |
|
"grad_norm": 12.75, |
|
"learning_rate": 8.395887191422397e-05, |
|
"loss": 0.819, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 5.980861244019139, |
|
"grad_norm": 1.5703125, |
|
"learning_rate": 8.23047395536298e-05, |
|
"loss": 0.8222, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_loss": 2.4928200244903564, |
|
"eval_runtime": 0.2377, |
|
"eval_samples_per_second": 42.076, |
|
"eval_steps_per_second": 4.208, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 6.028708133971292, |
|
"grad_norm": 2.8125, |
|
"learning_rate": 8.065559069717088e-05, |
|
"loss": 0.8044, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 6.076555023923445, |
|
"grad_norm": 12.0, |
|
"learning_rate": 7.901188979351526e-05, |
|
"loss": 0.8055, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 6.124401913875598, |
|
"grad_norm": 1.46875, |
|
"learning_rate": 7.73740997570278e-05, |
|
"loss": 0.7972, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 6.172248803827751, |
|
"grad_norm": 5.34375, |
|
"learning_rate": 7.574268183739989e-05, |
|
"loss": 0.8078, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 6.220095693779904, |
|
"grad_norm": 4.5, |
|
"learning_rate": 7.411809548974792e-05, |
|
"loss": 0.7981, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 6.267942583732057, |
|
"grad_norm": 1.1328125, |
|
"learning_rate": 7.250079824521743e-05, |
|
"loss": 0.8073, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 6.315789473684211, |
|
"grad_norm": 5.21875, |
|
"learning_rate": 7.089124558212871e-05, |
|
"loss": 0.8028, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 6.363636363636363, |
|
"grad_norm": 6.84375, |
|
"learning_rate": 6.928989079770094e-05, |
|
"loss": 0.7954, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 6.411483253588517, |
|
"grad_norm": 1.1640625, |
|
"learning_rate": 6.769718488039023e-05, |
|
"loss": 0.7899, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 6.45933014354067, |
|
"grad_norm": 7.21875, |
|
"learning_rate": 6.611357638287823e-05, |
|
"loss": 0.8025, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 6.507177033492823, |
|
"grad_norm": 5.1875, |
|
"learning_rate": 6.453951129574644e-05, |
|
"loss": 0.7917, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 6.555023923444976, |
|
"grad_norm": 0.7421875, |
|
"learning_rate": 6.297543292187215e-05, |
|
"loss": 0.7903, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 6.6028708133971294, |
|
"grad_norm": 2.390625, |
|
"learning_rate": 6.142178175158149e-05, |
|
"loss": 0.7894, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 6.650717703349282, |
|
"grad_norm": 2.46875, |
|
"learning_rate": 5.9878995338594224e-05, |
|
"loss": 0.7887, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 6.698564593301436, |
|
"grad_norm": 0.52734375, |
|
"learning_rate": 5.834750817679606e-05, |
|
"loss": 0.7866, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 6.746411483253588, |
|
"grad_norm": 1.140625, |
|
"learning_rate": 5.682775157787213e-05, |
|
"loss": 0.7845, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 6.794258373205742, |
|
"grad_norm": 0.9296875, |
|
"learning_rate": 5.5320153549837415e-05, |
|
"loss": 0.7874, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 6.842105263157895, |
|
"grad_norm": 0.98046875, |
|
"learning_rate": 5.382513867649663e-05, |
|
"loss": 0.7865, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 6.889952153110048, |
|
"grad_norm": 0.8515625, |
|
"learning_rate": 5.234312799786921e-05, |
|
"loss": 0.7866, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 6.937799043062201, |
|
"grad_norm": 3.40625, |
|
"learning_rate": 5.087453889161229e-05, |
|
"loss": 0.7742, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 6.985645933014354, |
|
"grad_norm": 0.546875, |
|
"learning_rate": 4.9419784955474524e-05, |
|
"loss": 0.7746, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 6.9952153110047846, |
|
"eval_loss": 2.4925007820129395, |
|
"eval_runtime": 0.2584, |
|
"eval_samples_per_second": 38.704, |
|
"eval_steps_per_second": 3.87, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 7.033492822966507, |
|
"grad_norm": 0.95703125, |
|
"learning_rate": 4.797927589081509e-05, |
|
"loss": 0.7674, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 7.08133971291866, |
|
"grad_norm": 0.5, |
|
"learning_rate": 4.6553417387219886e-05, |
|
"loss": 0.7691, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 7.1291866028708135, |
|
"grad_norm": 0.5234375, |
|
"learning_rate": 4.514261100824709e-05, |
|
"loss": 0.7602, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 7.177033492822966, |
|
"grad_norm": 0.41015625, |
|
"learning_rate": 4.374725407833532e-05, |
|
"loss": 0.7656, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 7.22488038277512, |
|
"grad_norm": 0.6171875, |
|
"learning_rate": 4.236773957090548e-05, |
|
"loss": 0.7673, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 7.2727272727272725, |
|
"grad_norm": 0.53515625, |
|
"learning_rate": 4.100445599768774e-05, |
|
"loss": 0.7712, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 7.320574162679426, |
|
"grad_norm": 0.546875, |
|
"learning_rate": 3.96577872993053e-05, |
|
"loss": 0.7658, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 7.368421052631579, |
|
"grad_norm": 0.671875, |
|
"learning_rate": 3.832811273714569e-05, |
|
"loss": 0.7612, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 7.416267942583732, |
|
"grad_norm": 0.90234375, |
|
"learning_rate": 3.701580678654925e-05, |
|
"loss": 0.7695, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 7.464114832535885, |
|
"grad_norm": 0.54296875, |
|
"learning_rate": 3.5721239031346066e-05, |
|
"loss": 0.7702, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 7.511961722488039, |
|
"grad_norm": 0.494140625, |
|
"learning_rate": 3.4444774059770536e-05, |
|
"loss": 0.7654, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 7.559808612440191, |
|
"grad_norm": 0.61328125, |
|
"learning_rate": 3.318677136178228e-05, |
|
"loss": 0.763, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 7.607655502392344, |
|
"grad_norm": 0.7265625, |
|
"learning_rate": 3.1947585227823394e-05, |
|
"loss": 0.772, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 7.655502392344498, |
|
"grad_norm": 0.396484375, |
|
"learning_rate": 3.072756464904006e-05, |
|
"loss": 0.7763, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 7.703349282296651, |
|
"grad_norm": 0.400390625, |
|
"learning_rate": 2.9527053218996037e-05, |
|
"loss": 0.7663, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 7.751196172248804, |
|
"grad_norm": 0.427734375, |
|
"learning_rate": 2.8346389036906828e-05, |
|
"loss": 0.7635, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 7.7990430622009566, |
|
"grad_norm": 0.466796875, |
|
"learning_rate": 2.7185904612421176e-05, |
|
"loss": 0.7679, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 7.84688995215311, |
|
"grad_norm": 0.61328125, |
|
"learning_rate": 2.6045926771976303e-05, |
|
"loss": 0.7752, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 7.894736842105263, |
|
"grad_norm": 0.462890625, |
|
"learning_rate": 2.492677656675414e-05, |
|
"loss": 0.7733, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 7.942583732057416, |
|
"grad_norm": 0.392578125, |
|
"learning_rate": 2.382876918226409e-05, |
|
"loss": 0.7713, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 7.990430622009569, |
|
"grad_norm": 0.5078125, |
|
"learning_rate": 2.2752213849577188e-05, |
|
"loss": 0.7644, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 2.5051019191741943, |
|
"eval_runtime": 0.2382, |
|
"eval_samples_per_second": 41.986, |
|
"eval_steps_per_second": 4.199, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 8.038277511961722, |
|
"grad_norm": 0.400390625, |
|
"learning_rate": 2.1697413758237784e-05, |
|
"loss": 0.7573, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 8.086124401913876, |
|
"grad_norm": 0.451171875, |
|
"learning_rate": 2.0664665970876496e-05, |
|
"loss": 0.7533, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 8.133971291866029, |
|
"grad_norm": 0.494140625, |
|
"learning_rate": 1.965426133954854e-05, |
|
"loss": 0.7638, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 8.181818181818182, |
|
"grad_norm": 0.55078125, |
|
"learning_rate": 1.8666484423821373e-05, |
|
"loss": 0.7594, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 8.229665071770334, |
|
"grad_norm": 0.6015625, |
|
"learning_rate": 1.7701613410634365e-05, |
|
"loss": 0.758, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 8.277511961722489, |
|
"grad_norm": 0.55859375, |
|
"learning_rate": 1.6759920035953093e-05, |
|
"loss": 0.7619, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 8.325358851674642, |
|
"grad_norm": 0.3828125, |
|
"learning_rate": 1.584166950824061e-05, |
|
"loss": 0.7578, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 8.373205741626794, |
|
"grad_norm": 0.380859375, |
|
"learning_rate": 1.4947120433767047e-05, |
|
"loss": 0.7544, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 8.421052631578947, |
|
"grad_norm": 0.388671875, |
|
"learning_rate": 1.4076524743778319e-05, |
|
"loss": 0.7606, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 8.4688995215311, |
|
"grad_norm": 0.39453125, |
|
"learning_rate": 1.3230127623545064e-05, |
|
"loss": 0.7512, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 8.516746411483254, |
|
"grad_norm": 0.439453125, |
|
"learning_rate": 1.2408167443311214e-05, |
|
"loss": 0.7481, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 8.564593301435407, |
|
"grad_norm": 0.53125, |
|
"learning_rate": 1.1610875691161915e-05, |
|
"loss": 0.7625, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 8.61244019138756, |
|
"grad_norm": 0.396484375, |
|
"learning_rate": 1.083847690782972e-05, |
|
"loss": 0.7542, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 8.660287081339712, |
|
"grad_norm": 0.388671875, |
|
"learning_rate": 1.0091188623457415e-05, |
|
"loss": 0.7658, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 8.708133971291867, |
|
"grad_norm": 0.451171875, |
|
"learning_rate": 9.369221296335006e-06, |
|
"loss": 0.7581, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 8.75598086124402, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 8.672778253628621e-06, |
|
"loss": 0.7485, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 8.803827751196172, |
|
"grad_norm": 0.400390625, |
|
"learning_rate": 8.002055634117578e-06, |
|
"loss": 0.7544, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 8.851674641148325, |
|
"grad_norm": 0.373046875, |
|
"learning_rate": 7.357242332955916e-06, |
|
"loss": 0.76, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 8.89952153110048, |
|
"grad_norm": 0.498046875, |
|
"learning_rate": 6.738519948473976e-06, |
|
"loss": 0.7572, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 8.947368421052632, |
|
"grad_norm": 0.388671875, |
|
"learning_rate": 6.146062731035129e-06, |
|
"loss": 0.7559, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 8.995215311004785, |
|
"grad_norm": 0.373046875, |
|
"learning_rate": 5.580037533961546e-06, |
|
"loss": 0.7578, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 8.995215311004785, |
|
"eval_loss": 2.52113676071167, |
|
"eval_runtime": 0.2561, |
|
"eval_samples_per_second": 39.042, |
|
"eval_steps_per_second": 3.904, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 9.043062200956937, |
|
"grad_norm": 0.416015625, |
|
"learning_rate": 5.040603766543594e-06, |
|
"loss": 0.7518, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 9.090909090909092, |
|
"grad_norm": 0.375, |
|
"learning_rate": 4.527913349145441e-06, |
|
"loss": 0.7595, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 9.138755980861244, |
|
"grad_norm": 0.435546875, |
|
"learning_rate": 4.042110670419763e-06, |
|
"loss": 0.7471, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 9.186602870813397, |
|
"grad_norm": 0.3984375, |
|
"learning_rate": 3.5833325466437694e-06, |
|
"loss": 0.7538, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 9.23444976076555, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 3.1517081831876737e-06, |
|
"loss": 0.75, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 9.282296650717702, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 2.7473591381266708e-06, |
|
"loss": 0.7547, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 9.330143540669857, |
|
"grad_norm": 0.384765625, |
|
"learning_rate": 2.3703992880066638e-06, |
|
"loss": 0.764, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 9.37799043062201, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 2.0209347957732328e-06, |
|
"loss": 0.7584, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 9.425837320574162, |
|
"grad_norm": 0.390625, |
|
"learning_rate": 1.6990640808730696e-06, |
|
"loss": 0.7524, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 9.473684210526315, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 1.404877791536141e-06, |
|
"loss": 0.7543, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 9.52153110047847, |
|
"grad_norm": 0.50390625, |
|
"learning_rate": 1.1384587792465872e-06, |
|
"loss": 0.7579, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 9.569377990430622, |
|
"grad_norm": 0.380859375, |
|
"learning_rate": 8.998820754091531e-07, |
|
"loss": 0.7534, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 9.617224880382775, |
|
"grad_norm": 0.3828125, |
|
"learning_rate": 6.892148702183133e-07, |
|
"loss": 0.7522, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 9.665071770334928, |
|
"grad_norm": 0.462890625, |
|
"learning_rate": 5.065164937354428e-07, |
|
"loss": 0.7598, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 9.712918660287082, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 3.5183839917972697e-07, |
|
"loss": 0.7556, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 9.760765550239235, |
|
"grad_norm": 0.376953125, |
|
"learning_rate": 2.2522414843748618e-07, |
|
"loss": 0.7503, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 9.808612440191387, |
|
"grad_norm": 0.375, |
|
"learning_rate": 1.2670939979384512e-07, |
|
"loss": 0.7479, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 9.85645933014354, |
|
"grad_norm": 0.41015625, |
|
"learning_rate": 5.632189789027687e-08, |
|
"loss": 0.7581, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 9.904306220095695, |
|
"grad_norm": 0.376953125, |
|
"learning_rate": 1.4081465910975588e-08, |
|
"loss": 0.7526, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 9.952153110047847, |
|
"grad_norm": 0.4453125, |
|
"learning_rate": 0.0, |
|
"loss": 0.7589, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 9.952153110047847, |
|
"eval_loss": 2.515638828277588, |
|
"eval_runtime": 0.236, |
|
"eval_samples_per_second": 42.374, |
|
"eval_steps_per_second": 4.237, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 9.952153110047847, |
|
"step": 1040, |
|
"total_flos": 3.1750846888049377e+18, |
|
"train_loss": 2.1342261617000284, |
|
"train_runtime": 2535.4321, |
|
"train_samples_per_second": 26.287, |
|
"train_steps_per_second": 0.41 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 1040, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 100, |
|
"total_flos": 3.1750846888049377e+18, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |