{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994869163673679,
  "eval_steps": 500,
  "global_step": 974,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.4224665734985152,
      "learning_rate": 2.040816326530612e-06,
      "loss": 1.0622,
      "step": 1
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.5284465715139516,
      "learning_rate": 1.0204081632653061e-05,
      "loss": 0.9796,
      "step": 5
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.4800413465398026,
      "learning_rate": 2.0408163265306123e-05,
      "loss": 1.0297,
      "step": 10
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.2816340466714462,
      "learning_rate": 3.061224489795919e-05,
      "loss": 1.0176,
      "step": 15
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.28168990888908674,
      "learning_rate": 4.0816326530612245e-05,
      "loss": 0.9993,
      "step": 20
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.1553913296147897,
      "learning_rate": 5.102040816326531e-05,
      "loss": 0.9358,
      "step": 25
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.18142210930647196,
      "learning_rate": 6.122448979591838e-05,
      "loss": 0.9419,
      "step": 30
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.23481370895189654,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.8759,
      "step": 35
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.14980692548924152,
      "learning_rate": 8.163265306122449e-05,
      "loss": 0.883,
      "step": 40
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.133576680908321,
      "learning_rate": 9.183673469387756e-05,
      "loss": 0.862,
      "step": 45
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.12532996035673116,
      "learning_rate": 0.00010204081632653062,
      "loss": 0.9202,
      "step": 50
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.13932219083428563,
      "learning_rate": 0.00011224489795918367,
      "loss": 0.8846,
      "step": 55
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.12328088246202568,
      "learning_rate": 0.00012244897959183676,
      "loss": 0.8614,
      "step": 60
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.14214840640740986,
      "learning_rate": 0.0001326530612244898,
      "loss": 0.8456,
      "step": 65
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.14034095762109017,
      "learning_rate": 0.00014285714285714287,
      "loss": 0.8278,
      "step": 70
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.1562397541717682,
      "learning_rate": 0.0001530612244897959,
      "loss": 0.8268,
      "step": 75
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.13834727022919813,
      "learning_rate": 0.00016326530612244898,
      "loss": 0.8246,
      "step": 80
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.13195301400647858,
      "learning_rate": 0.00017346938775510205,
      "loss": 0.8858,
      "step": 85
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.1476813054333854,
      "learning_rate": 0.00018367346938775512,
      "loss": 0.8734,
      "step": 90
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.1326769165243531,
      "learning_rate": 0.00019387755102040816,
      "loss": 0.8431,
      "step": 95
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.11831952224955868,
      "learning_rate": 0.0001999974277115551,
      "loss": 0.8467,
      "step": 100
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.13613884130299994,
      "learning_rate": 0.00019996849098629418,
      "loss": 0.7846,
      "step": 105
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.13456966066319215,
      "learning_rate": 0.00019990741151022301,
      "loss": 0.8007,
      "step": 110
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.1679031230382061,
      "learning_rate": 0.0001998142089221534,
      "loss": 0.8864,
      "step": 115
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.1195588588744713,
      "learning_rate": 0.0001996889131894033,
      "loss": 0.8526,
      "step": 120
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.1316308906877641,
      "learning_rate": 0.00019953156459816179,
      "loss": 0.8104,
      "step": 125
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.1295896362357539,
      "learning_rate": 0.0001993422137405354,
      "loss": 0.8584,
      "step": 130
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.14146193582571115,
      "learning_rate": 0.00019912092149828174,
      "loss": 0.8828,
      "step": 135
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.15128325355835132,
      "learning_rate": 0.00019886775902323405,
      "loss": 0.8569,
      "step": 140
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.14649488912781936,
      "learning_rate": 0.00019858280771442385,
      "loss": 0.8549,
      "step": 145
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.18943998152343347,
      "learning_rate": 0.00019826615919190887,
      "loss": 0.8605,
      "step": 150
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.124667248916099,
      "learning_rate": 0.00019791791526731445,
      "loss": 0.8382,
      "step": 155
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.22445656429732966,
      "learning_rate": 0.00019753818791109828,
      "loss": 0.8163,
      "step": 160
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.14098918819116132,
      "learning_rate": 0.0001971270992165486,
      "loss": 0.778,
      "step": 165
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.13339319403763952,
      "learning_rate": 0.00019668478136052774,
      "loss": 0.8138,
      "step": 170
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.13961481566344255,
      "learning_rate": 0.0001962113765609735,
      "loss": 0.7989,
      "step": 175
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.13087704871428119,
      "learning_rate": 0.0001957070370311717,
      "loss": 0.8503,
      "step": 180
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.14182563419142846,
      "learning_rate": 0.00019517192493081565,
      "loss": 0.8253,
      "step": 185
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.12666210841941644,
      "learning_rate": 0.00019460621231386676,
      "loss": 0.8828,
      "step": 190
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.1271725385943482,
      "learning_rate": 0.00019401008107323455,
      "loss": 0.8586,
      "step": 195
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.13336647812691615,
      "learning_rate": 0.0001933837228822925,
      "loss": 0.8086,
      "step": 200
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.13715468934306543,
      "learning_rate": 0.0001927273391332499,
      "loss": 0.8562,
      "step": 205
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.13349689371469725,
      "learning_rate": 0.00019204114087239806,
      "loss": 0.8601,
      "step": 210
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.14685924776626536,
      "learning_rate": 0.00019132534873225323,
      "loss": 0.8373,
      "step": 215
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.1350372733252807,
      "learning_rate": 0.00019058019286061665,
      "loss": 0.8331,
      "step": 220
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.12803447380882063,
      "learning_rate": 0.00018980591284657535,
      "loss": 0.8365,
      "step": 225
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.1334208833129311,
      "learning_rate": 0.00018900275764346768,
      "loss": 0.8322,
      "step": 230
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.14702003545455958,
      "learning_rate": 0.0001881709854888372,
      "loss": 0.8217,
      "step": 235
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.3214308870564164,
      "learning_rate": 0.00018731086382140226,
      "loss": 0.8154,
      "step": 240
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.14661590768195695,
      "learning_rate": 0.00018642266919506644,
      "loss": 0.8471,
      "step": 245
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.15237243420118865,
      "learning_rate": 0.00018550668718999872,
      "loss": 0.795,
      "step": 250
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.1185694028090917,
      "learning_rate": 0.0001845632123208111,
      "loss": 0.8544,
      "step": 255
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.13250010062470685,
      "learning_rate": 0.0001835925479418637,
      "loss": 0.8608,
      "step": 260
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.11443256555837228,
      "learning_rate": 0.0001825950061497276,
      "loss": 0.7983,
      "step": 265
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.13687580544983835,
      "learning_rate": 0.00018157090768283678,
      "loss": 0.8172,
      "step": 270
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.13705867781023107,
      "learning_rate": 0.00018052058181836151,
      "loss": 0.8212,
      "step": 275
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.15563306524282045,
      "learning_rate": 0.00017944436626633623,
      "loss": 0.8411,
      "step": 280
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.14396760217664073,
      "learning_rate": 0.00017834260706107595,
      "loss": 0.8108,
      "step": 285
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.12647869153054622,
      "learning_rate": 0.00017721565844991643,
      "loss": 0.8062,
      "step": 290
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.14745297927170234,
      "learning_rate": 0.00017606388277931328,
      "loss": 0.8744,
      "step": 295
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.15053882715392,
      "learning_rate": 0.0001748876503783373,
      "loss": 0.8352,
      "step": 300
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.1317924804547838,
      "learning_rate": 0.00017368733943960276,
      "loss": 0.8384,
      "step": 305
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.12819173037979142,
      "learning_rate": 0.00017246333589766787,
      "loss": 0.8531,
      "step": 310
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.1312708124132867,
      "learning_rate": 0.00017121603330494544,
      "loss": 0.8638,
      "step": 315
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.1411696833691384,
      "learning_rate": 0.0001699458327051647,
      "loss": 0.8382,
      "step": 320
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.14606044120399994,
      "learning_rate": 0.00016865314250442398,
      "loss": 0.8168,
      "step": 325
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.1455530152362454,
      "learning_rate": 0.00016733837833987633,
      "loss": 0.8117,
      "step": 330
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.14309897141049993,
      "learning_rate": 0.00016600196294609045,
      "loss": 0.8108,
      "step": 335
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.14312668748781052,
      "learning_rate": 0.00016464432601912912,
      "loss": 0.8303,
      "step": 340
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.12579071761666902,
      "learning_rate": 0.0001632659040783897,
      "loss": 0.8476,
      "step": 345
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.13350400452418196,
      "learning_rate": 0.00016186714032625035,
      "loss": 0.8526,
      "step": 350
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.14761760265503343,
      "learning_rate": 0.00016044848450556787,
      "loss": 0.8192,
      "step": 355
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.15245475954345805,
      "learning_rate": 0.00015901039275507245,
      "loss": 0.8298,
      "step": 360
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.15994496269289082,
      "learning_rate": 0.00015755332746270572,
      "loss": 0.8106,
      "step": 365
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.14107023644999184,
      "learning_rate": 0.00015607775711694977,
      "loss": 0.8378,
      "step": 370
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.15210143490634426,
      "learning_rate": 0.00015458415615619484,
      "loss": 0.8185,
      "step": 375
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.11955663423142318,
      "learning_rate": 0.00015307300481619333,
      "loss": 0.8576,
      "step": 380
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.13750521942283506,
      "learning_rate": 0.00015154478897565045,
      "loss": 0.881,
      "step": 385
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.11425649554717676,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.8139,
      "step": 390
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.1481189219653055,
      "learning_rate": 0.00014843913458341645,
      "loss": 0.8009,
      "step": 395
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.1588720467782321,
      "learning_rate": 0.00014686269458911332,
      "loss": 0.8177,
      "step": 400
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.14436853208913628,
      "learning_rate": 0.00014527118688797963,
      "loss": 0.9043,
      "step": 405
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.14039483442002895,
      "learning_rate": 0.0001436651231956064,
      "loss": 0.8292,
      "step": 410
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.1409577227905237,
      "learning_rate": 0.00014204501990775533,
      "loss": 0.8476,
      "step": 415
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.12770965177289617,
      "learning_rate": 0.00014041139793432274,
      "loss": 0.8266,
      "step": 420
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.13851032533153496,
      "learning_rate": 0.00013876478253185183,
      "loss": 0.8543,
      "step": 425
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.15009000078609738,
      "learning_rate": 0.00013710570313464778,
      "loss": 0.8273,
      "step": 430
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.1490899855893196,
      "learning_rate": 0.0001354346931845492,
      "loss": 0.8624,
      "step": 435
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.11669282581529886,
      "learning_rate": 0.00013375228995941133,
      "loss": 0.8476,
      "step": 440
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.12486593160211,
      "learning_rate": 0.0001320590344003557,
      "loss": 0.84,
      "step": 445
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.140864742842846,
      "learning_rate": 0.00013035547093784186,
      "loss": 0.8387,
      "step": 450
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.12552125440968517,
      "learning_rate": 0.00012864214731661742,
      "loss": 0.8177,
      "step": 455
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.14254214034385443,
      "learning_rate": 0.00012691961441960238,
      "loss": 0.8087,
      "step": 460
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.12161604299239723,
      "learning_rate": 0.00012518842609076413,
      "loss": 0.8255,
      "step": 465
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.14279805864279152,
      "learning_rate": 0.00012344913895704097,
      "loss": 0.8161,
      "step": 470
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.13061903554890092,
      "learning_rate": 0.00012170231224937032,
      "loss": 0.8101,
      "step": 475
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.14103602925499836,
      "learning_rate": 0.00011994850762287989,
      "loss": 0.8304,
      "step": 480
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.1437471461141528,
      "learning_rate": 0.0001181882889762994,
      "loss": 0.859,
      "step": 485
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.12804439910080107,
      "learning_rate": 0.00011642222227065089,
      "loss": 0.8103,
      "step": 490
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.16321939750119213,
      "learning_rate": 0.00011465087534727587,
      "loss": 0.8931,
      "step": 495
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.15577747479169646,
      "learning_rate": 0.0001128748177452581,
      "loss": 0.8272,
      "step": 500
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.13365546906519096,
      "learning_rate": 0.00011109462051830017,
      "loss": 0.8933,
      "step": 505
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.1290045452111647,
      "learning_rate": 0.00010931085605111354,
      "loss": 0.8053,
      "step": 510
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.13767998743232984,
      "learning_rate": 0.00010752409787538,
      "loss": 0.8352,
      "step": 515
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.1183523368788714,
      "learning_rate": 0.00010573492048534515,
      "loss": 0.7793,
      "step": 520
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.12680023137301807,
      "learning_rate": 0.00010394389915310149,
      "loss": 0.7862,
      "step": 525
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.1468717080766619,
      "learning_rate": 0.00010215160974362223,
      "loss": 0.8945,
      "step": 530
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.1340750536857251,
      "learning_rate": 0.00010035862852960387,
      "loss": 0.8677,
      "step": 535
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.1839710754750199,
      "learning_rate": 9.856553200617805e-05,
      "loss": 0.8004,
      "step": 540
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.13546348015830512,
      "learning_rate": 9.677289670555169e-05,
      "loss": 0.8346,
      "step": 545
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.13059522257925926,
      "learning_rate": 9.49812990116353e-05,
      "loss": 0.8545,
      "step": 550
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.1339705849021452,
      "learning_rate": 9.319131497471894e-05,
      "loss": 0.8575,
      "step": 555
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.11764292685223814,
      "learning_rate": 9.140352012625537e-05,
      "loss": 0.8284,
      "step": 560
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.13114039777564312,
      "learning_rate": 8.961848929381026e-05,
      "loss": 0.8509,
      "step": 565
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.13510854771345257,
      "learning_rate": 8.783679641623845e-05,
      "loss": 0.8578,
      "step": 570
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.15637793864418187,
      "learning_rate": 8.605901435914607e-05,
      "loss": 0.8106,
      "step": 575
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.12883059386856624,
      "learning_rate": 8.428571473069775e-05,
      "loss": 0.8364,
      "step": 580
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.1462312921265624,
      "learning_rate": 8.25174676978282e-05,
      "loss": 0.7912,
      "step": 585
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.14749765374112217,
      "learning_rate": 8.075484180291701e-05,
      "loss": 0.8091,
      "step": 590
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.12928243297252306,
      "learning_rate": 7.899840378098588e-05,
      "loss": 0.8456,
      "step": 595
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.13914653439360944,
      "learning_rate": 7.724871837747707e-05,
      "loss": 0.8543,
      "step": 600
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.14536772205298842,
      "learning_rate": 7.550634816667142e-05,
      "loss": 0.7879,
      "step": 605
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.16444943986829566,
      "learning_rate": 7.377185337080442e-05,
      "loss": 0.8295,
      "step": 610
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.15156151416294053,
      "learning_rate": 7.204579167993881e-05,
      "loss": 0.8328,
      "step": 615
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.11700169890112103,
      "learning_rate": 7.032871807265096e-05,
      "loss": 0.8549,
      "step": 620
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.14385917799771358,
      "learning_rate": 6.862118463758943e-05,
      "loss": 0.8116,
      "step": 625
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.12908350111171227,
      "learning_rate": 6.69237403959624e-05,
      "loss": 0.822,
      "step": 630
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.14851267167990456,
      "learning_rate": 6.52369311250116e-05,
      "loss": 0.8039,
      "step": 635
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.12710830010354282,
      "learning_rate": 6.356129918252927e-05,
      "loss": 0.8156,
      "step": 640
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.1425733932027878,
      "learning_rate": 6.189738333247432e-05,
      "loss": 0.8118,
      "step": 645
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.15619373129124595,
      "learning_rate": 6.024571857174443e-05,
      "loss": 0.8597,
      "step": 650
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.13569977800267843,
      "learning_rate": 5.860683595815893e-05,
      "loss": 0.8106,
      "step": 655
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.12464446293377444,
      "learning_rate": 5.698126243970845e-05,
      "loss": 0.8121,
      "step": 660
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.13451779337168288,
      "learning_rate": 5.536952068512608e-05,
      "loss": 0.795,
      "step": 665
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.12915761361111974,
      "learning_rate": 5.3772128915834184e-05,
      "loss": 0.9113,
      "step": 670
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.12809195814074534,
      "learning_rate": 5.218960073932122e-05,
      "loss": 0.8646,
      "step": 675
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.1278348440067524,
      "learning_rate": 5.062244498400228e-05,
      "loss": 0.8477,
      "step": 680
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.13317787598393002,
      "learning_rate": 4.907116553561607e-05,
      "loss": 0.8133,
      "step": 685
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.11667818988278275,
      "learning_rate": 4.753626117521103e-05,
      "loss": 0.7861,
      "step": 690
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.14046555978563427,
      "learning_rate": 4.601822541877291e-05,
      "loss": 0.7955,
      "step": 695
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.14732626535083013,
      "learning_rate": 4.451754635854517e-05,
      "loss": 0.8456,
      "step": 700
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.13263814935125381,
      "learning_rate": 4.303470650609325e-05,
      "loss": 0.8043,
      "step": 705
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.1360692214502702,
      "learning_rate": 4.1570182637163155e-05,
      "loss": 0.8857,
      "step": 710
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.1398967005930706,
      "learning_rate": 4.0124445638384366e-05,
      "loss": 0.7555,
      "step": 715
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.1408656242005034,
      "learning_rate": 3.869796035586625e-05,
      "loss": 0.8046,
      "step": 720
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.13431365357395633,
      "learning_rate": 3.7291185445736444e-05,
      "loss": 0.8074,
      "step": 725
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.12962057427444676,
      "learning_rate": 3.590457322666997e-05,
      "loss": 0.8509,
      "step": 730
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.1352493946183552,
      "learning_rate": 3.453856953445557e-05,
      "loss": 0.8446,
      "step": 735
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.11494147585476418,
      "learning_rate": 3.319361357864663e-05,
      "loss": 0.817,
      "step": 740
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.12332246306931878,
      "learning_rate": 3.187013780134291e-05,
      "loss": 0.843,
      "step": 745
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.13071315641894352,
      "learning_rate": 3.05685677381475e-05,
      "loss": 0.85,
      "step": 750
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.1382161424220368,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.8112,
      "step": 755
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.13955515168937047,
      "learning_rate": 2.8032811545345294e-05,
      "loss": 0.8608,
      "step": 760
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.1771167231447207,
      "learning_rate": 2.679944073443158e-05,
      "loss": 0.8045,
      "step": 765
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.14193864210665846,
      "learning_rate": 2.5589606012863963e-05,
      "loss": 0.8664,
      "step": 770
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.13840297934079954,
      "learning_rate": 2.4403696377371142e-05,
      "loss": 0.8663,
      "step": 775
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.13536851743943085,
      "learning_rate": 2.324209313207736e-05,
      "loss": 0.858,
      "step": 780
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.12646539460553902,
      "learning_rate": 2.210516976590179e-05,
      "loss": 0.8391,
      "step": 785
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.15367864492686661,
      "learning_rate": 2.099329183247126e-05,
      "loss": 0.8189,
      "step": 790
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.13297540559237797,
      "learning_rate": 1.9906816832584253e-05,
      "loss": 0.784,
      "step": 795
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.13099877023338075,
      "learning_rate": 1.8846094099263912e-05,
      "loss": 0.8318,
      "step": 800
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.14050104223112714,
      "learning_rate": 1.781146468543765e-05,
      "loss": 0.7681,
      "step": 805
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.1284097349682222,
      "learning_rate": 1.6803261254278636e-05,
      "loss": 0.8185,
      "step": 810
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.15940118109165669,
      "learning_rate": 1.582180797224507e-05,
      "loss": 0.8209,
      "step": 815
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.14361251131573077,
      "learning_rate": 1.4867420404851307e-05,
      "loss": 0.8281,
      "step": 820
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.1171536350981247,
      "learning_rate": 1.3940405415204416e-05,
      "loss": 0.8409,
      "step": 825
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.12744294436872022,
      "learning_rate": 1.30410610653389e-05,
      "loss": 0.8649,
      "step": 830
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.13903914731619177,
      "learning_rate": 1.2169676520381168e-05,
      "loss": 0.834,
      "step": 835
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.17232697048230508,
      "learning_rate": 1.1326531955574526e-05,
      "loss": 0.8537,
      "step": 840
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.1358777303761771,
      "learning_rate": 1.0511898466194903e-05,
      "loss": 0.8097,
      "step": 845
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.12398803923306301,
      "learning_rate": 9.726037980385738e-06,
      "loss": 0.7775,
      "step": 850
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.13477267750481708,
      "learning_rate": 8.969203174940654e-06,
      "loss": 0.8016,
      "step": 855
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.11581854986662919,
      "learning_rate": 8.24163739406062e-06,
      "loss": 0.8478,
      "step": 860
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.14736720617074886,
      "learning_rate": 7.543574571111655e-06,
      "loss": 0.844,
      "step": 865
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.14132183623868166,
      "learning_rate": 6.875239153408542e-06,
      "loss": 0.7973,
      "step": 870
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.1527267616049184,
      "learning_rate": 6.236846030048604e-06,
      "loss": 0.8063,
      "step": 875
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.12562228277842963,
      "learning_rate": 5.6286004628186675e-06,
      "loss": 0.8484,
      "step": 880
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.13601236333451983,
      "learning_rate": 5.0506980201973974e-06,
      "loss": 0.8285,
      "step": 885
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.145710784777074,
      "learning_rate": 4.503324514474483e-06,
      "loss": 0.8706,
      "step": 890
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.14032071629891896,
      "learning_rate": 3.986655942006579e-06,
      "loss": 0.8291,
      "step": 895
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.16354220706871617,
      "learning_rate": 3.5008584266294386e-06,
      "loss": 0.8065,
      "step": 900
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.14754573731673057,
      "learning_rate": 3.0460881662442763e-06,
      "loss": 0.872,
      "step": 905
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.13523433944792257,
      "learning_rate": 2.622491382595693e-06,
      "loss": 0.8479,
      "step": 910
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.13840943427588331,
      "learning_rate": 2.2302042742571193e-06,
      "loss": 0.8314,
      "step": 915
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.12888668453211496,
      "learning_rate": 1.869352972839067e-06,
      "loss": 0.8115,
      "step": 920
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.1620074090938547,
      "learning_rate": 1.5400535024342022e-06,
      "loss": 0.8037,
      "step": 925
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.15116632265694227,
      "learning_rate": 1.2424117423122328e-06,
      "loss": 0.8315,
      "step": 930
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.12857560053281283,
      "learning_rate": 9.765233928766493e-07,
      "loss": 0.7992,
      "step": 935
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.14514383958462998,
      "learning_rate": 7.42473944894384e-07,
      "loss": 0.8109,
      "step": 940
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.13037326046264752,
      "learning_rate": 5.403386520079323e-07,
      "loss": 0.7781,
      "step": 945
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.12930160005915564,
      "learning_rate": 3.701825065392184e-07,
      "loss": 0.8515,
      "step": 950
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.12470339296313583,
      "learning_rate": 2.320602185927001e-07,
      "loss": 0.7733,
      "step": 955
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.14117795257501795,
      "learning_rate": 1.2601619846444035e-07,
      "loss": 0.867,
      "step": 960
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.13083613982172085,
      "learning_rate": 5.208454236296234e-08,
      "loss": 0.8299,
      "step": 965
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.14645664746534057,
      "learning_rate": 1.0289021446308056e-08,
      "loss": 0.8721,
      "step": 970
    },
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 2192.2486,
      "eval_samples_per_second": 3.163,
      "eval_steps_per_second": 0.791,
      "step": 974
    },
    {
      "epoch": 1.0,
      "step": 974,
      "total_flos": 1.053899276419072e+16,
      "train_loss": 0.8385560501527493,
      "train_runtime": 17932.7933,
      "train_samples_per_second": 3.477,
      "train_steps_per_second": 0.054
    }
  ],
  "logging_steps": 5,
  "max_steps": 974,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.053899276419072e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}