{
  "best_metric": 1.1562458276748657,
  "best_model_checkpoint": "output/madonna/checkpoint-1561",
  "epoch": 7.0,
  "global_step": 1561,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0001370283488463827,
      "loss": 2.5046,
      "step": 5
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013651425439773298,
      "loss": 2.4348,
      "step": 10
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00013566028939181128,
      "loss": 2.5907,
      "step": 15
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00013447072741690948,
      "loss": 2.4827,
      "step": 20
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013295152152507697,
      "loss": 2.4145,
      "step": 25
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001311102744406261,
      "loss": 2.1817,
      "step": 30
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00012895620051300705,
      "loss": 2.3325,
      "step": 35
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001265000796044546,
      "loss": 2.0885,
      "step": 40
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00012375420314317213,
      "loss": 2.1847,
      "step": 45
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00012073231261202464,
      "loss": 2.1728,
      "step": 50
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00011744953078056836,
      "loss": 2.0993,
      "step": 55
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00011392228602455959,
      "loss": 2.3687,
      "step": 60
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001101682301116794,
      "loss": 1.9625,
      "step": 65
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001062061498649071,
      "loss": 2.0389,
      "step": 70
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00010205587314561534,
      "loss": 2.098,
      "step": 75
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.773816962688569e-05,
      "loss": 2.1281,
      "step": 80
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.327464685361516e-05,
      "loss": 1.9415,
      "step": 85
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.868764210957135e-05,
      "loss": 2.3485,
      "step": 90
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.400011063253747e-05,
      "loss": 2.1515,
      "step": 95
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.923551073696397e-05,
      "loss": 1.8548,
      "step": 100
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.441768641901993e-05,
      "loss": 1.969,
      "step": 105
    },
    {
      "epoch": 0.5,
      "learning_rate": 6.957074803153629e-05,
      "loss": 2.0221,
      "step": 110
    },
    {
      "epoch": 0.52,
      "learning_rate": 6.471895162599185e-05,
      "loss": 2.0988,
      "step": 115
    },
    {
      "epoch": 0.54,
      "learning_rate": 5.9886577565364115e-05,
      "loss": 1.7345,
      "step": 120
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.509780901531541e-05,
      "loss": 1.9466,
      "step": 125
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.037661092179389e-05,
      "loss": 2.1125,
      "step": 130
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.574661008069485e-05,
      "loss": 2.0488,
      "step": 135
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.123097689976244e-05,
      "loss": 2.0849,
      "step": 140
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.685230944444331e-05,
      "loss": 1.8891,
      "step": 145
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.263252034797391e-05,
      "loss": 1.8136,
      "step": 150
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.8592727151649647e-05,
      "loss": 1.9213,
      "step": 155
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.475314662405725e-05,
      "loss": 1.9865,
      "step": 160
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.113299358814034e-05,
      "loss": 2.2742,
      "step": 165
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.7750384762408767e-05,
      "loss": 1.8535,
      "step": 170
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.4622248097508915e-05,
      "loss": 2.1606,
      "step": 175
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.1764238061872434e-05,
      "loss": 1.7923,
      "step": 180
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.190657300387505e-06,
      "loss": 1.8476,
      "step": 185
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.91438505814559e-06,
      "loss": 1.9976,
      "step": 190
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.9468127274594585e-06,
      "loss": 1.9706,
      "step": 195
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.2977868407019207e-06,
      "loss": 1.982,
      "step": 200
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.975559794252193e-06,
      "loss": 1.9318,
      "step": 205
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.867485501471999e-07,
      "loss": 2.035,
      "step": 210
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.363015221117509e-07,
      "loss": 1.9653,
      "step": 215
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.7473811683140093e-08,
      "loss": 1.8408,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.924851655960083,
      "eval_runtime": 14.2025,
      "eval_samples_per_second": 22.672,
      "eval_steps_per_second": 2.887,
      "step": 222
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.7440342903137207,
      "eval_runtime": 17.6835,
      "eval_samples_per_second": 17.361,
      "eval_steps_per_second": 2.205,
      "step": 224
    },
    {
      "epoch": 1.0,
      "learning_rate": 6.746689293153874e-09,
      "loss": 2.1578,
      "step": 225
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.427415035313232e-07,
      "loss": 1.867,
      "step": 230
    },
    {
      "epoch": 1.05,
      "learning_rate": 8.147449052190903e-07,
      "loss": 1.7004,
      "step": 235
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.7199452243268996e-06,
      "loss": 1.8748,
      "step": 240
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.9538929687704672e-06,
      "loss": 1.7175,
      "step": 245
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.510522695790453e-06,
      "loss": 1.9134,
      "step": 250
    },
    {
      "epoch": 1.14,
      "learning_rate": 6.382182826501689e-06,
      "loss": 1.8779,
      "step": 255
    },
    {
      "epoch": 1.16,
      "learning_rate": 8.559673257059505e-06,
      "loss": 1.9084,
      "step": 260
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.1032290581565944e-05,
      "loss": 1.7949,
      "step": 265
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.3787880704424101e-05,
      "loss": 1.7769,
      "step": 270
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.681289858352526e-05,
      "loss": 1.724,
      "step": 275
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.009247481060283e-05,
      "loss": 1.8311,
      "step": 280
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.3610488701478854e-05,
      "loss": 1.7348,
      "step": 285
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.734964753692934e-05,
      "loss": 1.768,
      "step": 290
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.129157156466198e-05,
      "loss": 1.7488,
      "step": 295
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.541688434458052e-05,
      "loss": 1.9316,
      "step": 300
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.970530799324656e-05,
      "loss": 1.7607,
      "step": 305
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.413576285936533e-05,
      "loss": 1.6681,
      "step": 310
    },
    {
      "epoch": 1.41,
      "learning_rate": 4.8686471140343875e-05,
      "loss": 2.065,
      "step": 315
    },
    {
      "epoch": 1.43,
      "learning_rate": 5.333506393059682e-05,
      "loss": 1.848,
      "step": 320
    },
    {
      "epoch": 1.45,
      "learning_rate": 5.805869117540521e-05,
      "loss": 1.8802,
      "step": 325
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.283413398985365e-05,
      "loss": 1.7551,
      "step": 330
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.763791879074358e-05,
      "loss": 1.7544,
      "step": 335
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.244643268047132e-05,
      "loss": 1.793,
      "step": 340
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.723603951570171e-05,
      "loss": 1.7917,
      "step": 345
    },
    {
      "epoch": 1.56,
      "learning_rate": 8.19831960903064e-05,
      "loss": 1.63,
      "step": 350
    },
    {
      "epoch": 1.58,
      "learning_rate": 8.666456786147141e-05,
      "loss": 2.0457,
      "step": 355
    },
    {
      "epoch": 1.61,
      "learning_rate": 9.125714365012444e-05,
      "loss": 2.0322,
      "step": 360
    },
    {
      "epoch": 1.63,
      "learning_rate": 9.5738348751875e-05,
      "loss": 1.687,
      "step": 365
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00010008615590247305,
      "loss": 1.9025,
      "step": 370
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00010427919355233763,
      "loss": 1.7503,
      "step": 375
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00010829685091793463,
      "loss": 1.7749,
      "step": 380
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011211937929362609,
      "loss": 1.6852,
      "step": 385
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011572798912599424,
      "loss": 1.8013,
      "step": 390
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011910494237347491,
      "loss": 1.5069,
      "step": 395
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00012223363969730684,
      "loss": 1.5859,
      "step": 400
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00012509870205521334,
      "loss": 1.5894,
      "step": 405
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00012768604629674322,
      "loss": 1.9085,
      "step": 410
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00012998295438868449,
      "loss": 1.8982,
      "step": 415
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00013197813593027427,
      "loss": 1.9507,
      "step": 420
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00013366178365091335,
      "loss": 1.7504,
      "step": 425
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013502562161758715,
      "loss": 1.7843,
      "step": 430
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00013606294591503093,
      "loss": 1.6083,
      "step": 435
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00013676865759867644,
      "loss": 1.5864,
      "step": 440
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00013713928775840084,
      "loss": 1.8849,
      "step": 445
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.5412778854370117,
      "eval_runtime": 17.7637,
      "eval_samples_per_second": 17.282,
      "eval_steps_per_second": 2.195,
      "step": 448
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00013709110969999672,
      "loss": 1.6748,
      "step": 450
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0001366493352528768,
      "loss": 1.7921,
      "step": 455
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00013587006036997336,
      "loss": 1.6524,
      "step": 460
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00013475714997680845,
      "loss": 1.5413,
      "step": 465
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00013331612371205717,
      "loss": 1.5745,
      "step": 470
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.0001315541285521084,
      "loss": 1.4608,
      "step": 475
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.0001294799033646105,
      "loss": 1.637,
      "step": 480
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00012710373556680405,
      "loss": 1.5212,
      "step": 485
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00012443741010360104,
      "loss": 1.3742,
      "step": 490
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.00012149415099846082,
      "loss": 1.5598,
      "step": 495
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0001182885557669493,
      "loss": 1.5528,
      "step": 500
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.00011483652301826763,
      "loss": 1.3306,
      "step": 505
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00011115517360381999,
      "loss": 1.6203,
      "step": 510
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00010726276570389622,
      "loss": 1.5496,
      "step": 515
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00010317860427360839,
      "loss": 1.5886,
      "step": 520
    },
    {
      "epoch": 2.35,
      "learning_rate": 9.892294529719824e-05,
      "loss": 1.5832,
      "step": 525
    },
    {
      "epoch": 2.38,
      "learning_rate": 9.451689532557822e-05,
      "loss": 1.374,
      "step": 530
    },
    {
      "epoch": 2.4,
      "learning_rate": 8.998230679536645e-05,
      "loss": 1.615,
      "step": 535
    },
    {
      "epoch": 2.42,
      "learning_rate": 8.534166964859326e-05,
      "loss": 1.2303,
      "step": 540
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.061799979060907e-05,
      "loss": 1.5842,
      "step": 545
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.5834724939402e-05,
      "loss": 1.6827,
      "step": 550
    },
    {
      "epoch": 2.49,
      "learning_rate": 7.10155684324705e-05,
      "loss": 1.3135,
      "step": 555
    },
    {
      "epoch": 2.51,
      "learning_rate": 6.618443156752956e-05,
      "loss": 1.2597,
      "step": 560
    },
    {
      "epoch": 2.53,
      "learning_rate": 6.136527506059804e-05,
      "loss": 1.3309,
      "step": 565
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.658200020939097e-05,
      "loss": 1.3751,
      "step": 570
    },
    {
      "epoch": 2.58,
      "learning_rate": 5.185833035140678e-05,
      "loss": 1.4378,
      "step": 575
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.721769320463358e-05,
      "loss": 1.3661,
      "step": 580
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.268310467442181e-05,
      "loss": 1.4564,
      "step": 585
    },
    {
      "epoch": 2.65,
      "learning_rate": 3.827705470280179e-05,
      "loss": 1.2126,
      "step": 590
    },
    {
      "epoch": 2.67,
      "learning_rate": 3.4021395726391656e-05,
      "loss": 1.3859,
      "step": 595
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.9937234296103807e-05,
      "loss": 1.4162,
      "step": 600
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.6044826396180043e-05,
      "loss": 1.3521,
      "step": 605
    },
    {
      "epoch": 2.74,
      "learning_rate": 2.236347698173244e-05,
      "loss": 1.4423,
      "step": 610
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.8911444233050724e-05,
      "loss": 1.2658,
      "step": 615
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.5705849001539244e-05,
      "loss": 1.2766,
      "step": 620
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.2762589896399027e-05,
      "loss": 1.2592,
      "step": 625
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.0096264433195999e-05,
      "loss": 1.4557,
      "step": 630
    },
    {
      "epoch": 2.85,
      "learning_rate": 7.72009663538956e-06,
      "loss": 1.4331,
      "step": 635
    },
    {
      "epoch": 2.87,
      "learning_rate": 5.6458714478916046e-06,
      "loss": 1.5097,
      "step": 640
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.883876287942861e-06,
      "loss": 1.474,
      "step": 645
    },
    {
      "epoch": 2.91,
      "learning_rate": 2.4428500231915692e-06,
      "loss": 1.4667,
      "step": 650
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.32993963002664e-06,
      "loss": 1.3699,
      "step": 655
    },
    {
      "epoch": 2.96,
      "learning_rate": 5.506647471231935e-07,
      "loss": 1.4343,
      "step": 660
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.0889030000328246e-07,
      "loss": 1.3816,
      "step": 665
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.3440579175949097,
      "eval_runtime": 2.2108,
      "eval_samples_per_second": 142.485,
      "eval_steps_per_second": 18.093,
      "step": 669
    },
    {
      "epoch": 3.0,
      "learning_rate": 6.807332383425125e-09,
      "loss": 1.2189,
      "step": 670
    },
    {
      "epoch": 3.03,
      "learning_rate": 2.4492213937922245e-07,
      "loss": 1.2422,
      "step": 675
    },
    {
      "epoch": 3.05,
      "learning_rate": 8.22053756462089e-07,
      "loss": 1.3055,
      "step": 680
    },
    {
      "epoch": 3.07,
      "learning_rate": 1.735339816622185e-06,
      "loss": 1.4475,
      "step": 685
    },
    {
      "epoch": 3.09,
      "learning_rate": 2.980250746687993e-06,
      "loss": 1.2442,
      "step": 690
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.550612232394036e-06,
      "loss": 1.4277,
      "step": 695
    },
    {
      "epoch": 3.14,
      "learning_rate": 6.438635840777816e-06,
      "loss": 1.3777,
      "step": 700
    },
    {
      "epoch": 3.16,
      "learning_rate": 8.634957648029665e-06,
      "loss": 1.3328,
      "step": 705
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.1128684681215911e-05,
      "loss": 1.3339,
      "step": 710
    },
    {
      "epoch": 3.21,
      "learning_rate": 1.3907448943539908e-05,
      "loss": 1.2034,
      "step": 715
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.6957468755195406e-05,
      "loss": 1.1218,
      "step": 720
    },
    {
      "epoch": 3.25,
      "learning_rate": 2.0263617105584107e-05,
      "loss": 1.2544,
      "step": 725
    },
    {
      "epoch": 3.27,
      "learning_rate": 2.3809496677893602e-05,
      "loss": 1.1479,
      "step": 730
    },
    {
      "epoch": 3.3,
      "learning_rate": 2.757752117394104e-05,
      "loss": 1.1837,
      "step": 735
    },
    {
      "epoch": 3.32,
      "learning_rate": 3.1549002535941176e-05,
      "loss": 1.3242,
      "step": 740
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.570424363260841e-05,
      "loss": 1.1755,
      "step": 745
    },
    {
      "epoch": 3.36,
      "learning_rate": 4.002263594990341e-05,
      "loss": 1.2206,
      "step": 750
    },
    {
      "epoch": 3.39,
      "learning_rate": 4.448276180191424e-05,
      "loss": 1.219,
      "step": 755
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.9062500554940324e-05,
      "loss": 1.3741,
      "step": 760
    },
    {
      "epoch": 3.43,
      "learning_rate": 5.373913833794688e-05,
      "loss": 1.3206,
      "step": 765
    },
    {
      "epoch": 3.45,
      "learning_rate": 5.848948069526606e-05,
      "loss": 1.2842,
      "step": 770
    },
    {
      "epoch": 3.48,
      "learning_rate": 6.32899676228247e-05,
      "loss": 1.4653,
      "step": 775
    },
    {
      "epoch": 3.5,
      "learning_rate": 6.811679041736369e-05,
      "loss": 1.0829,
      "step": 780
    },
    {
      "epoch": 3.52,
      "learning_rate": 7.294600975911534e-05,
      "loss": 1.2317,
      "step": 785
    },
    {
      "epoch": 3.54,
      "learning_rate": 7.775367444229201e-05,
      "loss": 1.0955,
      "step": 790
    },
    {
      "epoch": 3.57,
      "learning_rate": 8.251594016452641e-05,
      "loss": 1.3158,
      "step": 795
    },
    {
      "epoch": 3.59,
      "learning_rate": 8.720918778610773e-05,
      "loss": 1.2593,
      "step": 800
    },
    {
      "epoch": 3.61,
      "learning_rate": 9.181014047249159e-05,
      "loss": 1.1672,
      "step": 805
    },
    {
      "epoch": 3.63,
      "learning_rate": 9.629597913909913e-05,
      "loss": 1.2407,
      "step": 810
    },
    {
      "epoch": 3.65,
      "learning_rate": 0.0001006444556258389,
      "loss": 1.2637,
      "step": 815
    },
    {
      "epoch": 3.68,
      "learning_rate": 0.00010483400304004644,
      "loss": 1.4097,
      "step": 820
    },
    {
      "epoch": 3.7,
      "learning_rate": 0.00010884384272058186,
      "loss": 1.3197,
      "step": 825
    },
    {
      "epoch": 3.72,
      "learning_rate": 0.00011265408729257823,
      "loss": 1.0697,
      "step": 830
    },
    {
      "epoch": 3.74,
      "learning_rate": 0.00011624583930172985,
      "loss": 1.079,
      "step": 835
    },
    {
      "epoch": 3.77,
      "learning_rate": 0.00011960128493892567,
      "loss": 1.2747,
      "step": 840
    },
    {
      "epoch": 3.79,
      "learning_rate": 0.0001227037823903907,
      "loss": 1.3586,
      "step": 845
    },
    {
      "epoch": 3.81,
      "learning_rate": 0.000125537944375147,
      "loss": 1.3766,
      "step": 850
    },
    {
      "epoch": 3.83,
      "learning_rate": 0.00012808971446044075,
      "loss": 1.2691,
      "step": 855
    },
    {
      "epoch": 3.86,
      "learning_rate": 0.00013034643677663536,
      "loss": 1.1551,
      "step": 860
    },
    {
      "epoch": 3.88,
      "learning_rate": 0.00013229691878581222,
      "loss": 1.3372,
      "step": 865
    },
    {
      "epoch": 3.9,
      "learning_rate": 0.00013393148679276723,
      "loss": 1.2608,
      "step": 870
    },
    {
      "epoch": 3.92,
      "learning_rate": 0.00013524203392308896,
      "loss": 1.1078,
      "step": 875
    },
    {
      "epoch": 3.95,
      "learning_rate": 0.00013622206033036527,
      "loss": 1.0532,
      "step": 880
    },
    {
      "epoch": 3.97,
      "learning_rate": 0.00013686670543310324,
      "loss": 1.1992,
      "step": 885
    },
    {
      "epoch": 3.99,
      "learning_rate": 0.00013717277202148013,
      "loss": 1.1418,
      "step": 890
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.3141199350357056,
      "eval_runtime": 2.2159,
      "eval_samples_per_second": 142.153,
      "eval_steps_per_second": 18.051,
      "step": 892
    },
    {
      "epoch": 4.01,
      "learning_rate": 0.000137138742114364,
      "loss": 1.0312,
      "step": 895
    },
    {
      "epoch": 4.04,
      "learning_rate": 0.00013676478448795875,
      "loss": 1.0096,
      "step": 900
    },
    {
      "epoch": 4.06,
      "learning_rate": 0.00013605275383873428,
      "loss": 1.1906,
      "step": 905
    },
    {
      "epoch": 4.08,
      "learning_rate": 0.00013500618158479363,
      "loss": 1.1664,
      "step": 910
    },
    {
      "epoch": 4.1,
      "learning_rate": 0.00013363025835129812,
      "loss": 1.0422,
      "step": 915
    },
    {
      "epoch": 4.13,
      "learning_rate": 0.00013193180822681808,
      "loss": 1.0758,
      "step": 920
    },
    {
      "epoch": 4.15,
      "learning_rate": 0.0001299192549182867,
      "loss": 1.3852,
      "step": 925
    },
    {
      "epoch": 4.17,
      "learning_rate": 0.0001276025799724176,
      "loss": 1.2089,
      "step": 930
    },
    {
      "epoch": 4.19,
      "learning_rate": 0.00012499327327079299,
      "loss": 1.1252,
      "step": 935
    },
    {
      "epoch": 4.22,
      "learning_rate": 0.00012210427604414836,
      "loss": 1.2521,
      "step": 940
    },
    {
      "epoch": 4.24,
      "learning_rate": 0.00011894991668848237,
      "loss": 1.2428,
      "step": 945
    },
    {
      "epoch": 4.26,
      "learning_rate": 0.00011554583970132328,
      "loss": 1.1082,
      "step": 950
    },
    {
      "epoch": 4.28,
      "learning_rate": 0.00011190892809059987,
      "loss": 1.0971,
      "step": 955
    },
    {
      "epoch": 4.3,
      "learning_rate": 0.00010805721964094202,
      "loss": 1.0657,
      "step": 960
    },
    {
      "epoch": 4.33,
      "learning_rate": 0.00010400981745270263,
      "loss": 1.0439,
      "step": 965
    },
    {
      "epoch": 4.35,
      "learning_rate": 9.978679519739164e-05,
      "loss": 1.1785,
      "step": 970
    },
    {
      "epoch": 4.37,
      "learning_rate": 9.540909755942318e-05,
      "loss": 1.1708,
      "step": 975
    },
    {
      "epoch": 4.39,
      "learning_rate": 9.089843635795119e-05,
      "loss": 1.0215,
      "step": 980
    },
    {
      "epoch": 4.42,
      "learning_rate": 8.627718286398852e-05,
      "loss": 1.1393,
      "step": 985
    },
    {
      "epoch": 4.44,
      "learning_rate": 8.156825684687972e-05,
      "loss": 1.0526,
      "step": 990
    },
    {
      "epoch": 4.46,
      "learning_rate": 7.679501290041993e-05,
      "loss": 0.9887,
      "step": 995
    },
    {
      "epoch": 4.48,
      "learning_rate": 7.198112461239741e-05,
      "loss": 0.8743,
      "step": 1000
    },
    {
      "epoch": 4.51,
      "learning_rate": 6.715046715204013e-05,
      "loss": 0.9096,
      "step": 1005
    },
    {
      "epoch": 4.53,
      "learning_rate": 6.232699885769075e-05,
      "loss": 1.2684,
      "step": 1010
    },
    {
      "epoch": 4.55,
      "learning_rate": 5.753464241199265e-05,
      "loss": 1.0412,
      "step": 1015
    },
    {
      "epoch": 4.57,
      "learning_rate": 5.279716619391422e-05,
      "loss": 0.9954,
      "step": 1020
    },
    {
      "epoch": 4.6,
      "learning_rate": 4.813806639606615e-05,
      "loss": 0.8753,
      "step": 1025
    },
    {
      "epoch": 4.62,
      "learning_rate": 4.358045049196426e-05,
      "loss": 0.8638,
      "step": 1030
    },
    {
      "epoch": 4.64,
      "learning_rate": 3.9146922631201556e-05,
      "loss": 1.0574,
      "step": 1035
    },
    {
      "epoch": 4.66,
      "learning_rate": 3.485947153092735e-05,
      "loss": 0.8752,
      "step": 1040
    },
    {
      "epoch": 4.69,
      "learning_rate": 3.073936141965131e-05,
      "loss": 1.0359,
      "step": 1045
    },
    {
      "epoch": 4.71,
      "learning_rate": 2.680702657425298e-05,
      "loss": 1.0701,
      "step": 1050
    },
    {
      "epoch": 4.73,
      "learning_rate": 2.3081969973255665e-05,
      "loss": 0.9777,
      "step": 1055
    },
    {
      "epoch": 4.75,
      "learning_rate": 1.9582666569008116e-05,
      "loss": 0.9861,
      "step": 1060
    },
    {
      "epoch": 4.78,
      "learning_rate": 1.632647165850912e-05,
      "loss": 1.0365,
      "step": 1065
    },
    {
      "epoch": 4.8,
      "learning_rate": 1.3329534807322873e-05,
      "loss": 0.9034,
      "step": 1070
    },
    {
      "epoch": 4.82,
      "learning_rate": 1.0606719753493918e-05,
      "loss": 1.0476,
      "step": 1075
    },
    {
      "epoch": 4.84,
      "learning_rate": 8.171530688706377e-06,
      "loss": 1.0288,
      "step": 1080
    },
    {
      "epoch": 4.87,
      "learning_rate": 6.036045282308682e-06,
      "loss": 1.1233,
      "step": 1085
    },
    {
      "epoch": 4.89,
      "learning_rate": 4.210854780381334e-06,
      "loss": 0.9158,
      "step": 1090
    },
    {
      "epoch": 4.91,
      "learning_rate": 2.705011476932832e-06,
      "loss": 1.0037,
      "step": 1095
    },
    {
      "epoch": 4.93,
      "learning_rate": 1.5259838177494246e-06,
      "loss": 1.279,
      "step": 1100
    },
    {
      "epoch": 4.96,
      "learning_rate": 6.796193595677371e-07,
      "loss": 1.0122,
      "step": 1105
    },
    {
      "epoch": 4.98,
      "learning_rate": 1.7011576827836676e-07,
      "loss": 1.058,
      "step": 1110
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0,
      "loss": 1.0917,
      "step": 1115
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.2071579694747925,
      "eval_runtime": 2.221,
      "eval_samples_per_second": 141.829,
      "eval_steps_per_second": 18.01,
      "step": 1115
    },
    {
      "epoch": 5.02,
      "learning_rate": 1.7011576827835916e-07,
      "loss": 0.9624,
      "step": 1120
    },
    {
      "epoch": 5.04,
      "learning_rate": 6.796193595677219e-07,
      "loss": 1.0903,
      "step": 1125
    },
    {
      "epoch": 5.07,
      "learning_rate": 1.5259838177494094e-06,
      "loss": 0.8195,
      "step": 1130
    },
    {
      "epoch": 5.09,
      "learning_rate": 2.705011476932809e-06,
      "loss": 0.9007,
      "step": 1135
    },
    {
      "epoch": 5.11,
      "learning_rate": 4.210854780381303e-06,
      "loss": 0.6803,
      "step": 1140
    },
    {
      "epoch": 5.13,
      "learning_rate": 6.036045282308651e-06,
      "loss": 0.9312,
      "step": 1145
    },
    {
      "epoch": 5.16,
      "learning_rate": 8.171530688706338e-06,
      "loss": 1.0329,
      "step": 1150
    },
    {
      "epoch": 5.18,
      "learning_rate": 1.0606719753493872e-05,
      "loss": 0.9101,
      "step": 1155
    },
    {
      "epoch": 5.2,
      "learning_rate": 1.3329534807322828e-05,
      "loss": 0.8229,
      "step": 1160
    },
    {
      "epoch": 5.22,
      "learning_rate": 1.6326471658509066e-05,
      "loss": 1.0693,
      "step": 1165
    },
    {
      "epoch": 5.25,
      "learning_rate": 1.9582666569008055e-05,
      "loss": 0.7779,
      "step": 1170
    },
    {
      "epoch": 5.27,
      "learning_rate": 2.3081969973255604e-05,
      "loss": 0.7962,
      "step": 1175
    },
    {
      "epoch": 5.29,
      "learning_rate": 2.680702657425292e-05,
      "loss": 0.9127,
      "step": 1180
    },
    {
      "epoch": 5.31,
      "learning_rate": 3.073936141965114e-05,
      "loss": 0.8227,
      "step": 1185
    },
    {
      "epoch": 5.34,
      "learning_rate": 3.4859471530927266e-05,
      "loss": 0.9535,
      "step": 1190
    },
    {
      "epoch": 5.36,
      "learning_rate": 3.914692263120148e-05,
      "loss": 0.7736,
      "step": 1195
    },
    {
      "epoch": 5.38,
      "learning_rate": 4.358045049196419e-05,
      "loss": 0.9111,
      "step": 1200
    },
    {
      "epoch": 5.4,
      "learning_rate": 4.813806639606595e-05,
      "loss": 0.9058,
      "step": 1205
    },
    {
      "epoch": 5.43,
      "learning_rate": 5.279716619391414e-05,
      "loss": 0.8259,
      "step": 1210
    },
    {
      "epoch": 5.45,
      "learning_rate": 5.753464241199256e-05,
      "loss": 0.9812,
      "step": 1215
    },
    {
      "epoch": 5.47,
      "learning_rate": 6.232699885769054e-05,
      "loss": 1.0264,
      "step": 1220
    },
    {
      "epoch": 5.49,
      "learning_rate": 6.715046715203992e-05,
      "loss": 0.8185,
      "step": 1225
    },
    {
      "epoch": 5.52,
      "learning_rate": 7.198112461239733e-05,
      "loss": 0.8403,
      "step": 1230
    },
    {
      "epoch": 5.54,
      "learning_rate": 7.679501290041973e-05,
      "loss": 0.914,
      "step": 1235
    },
    {
      "epoch": 5.56,
      "learning_rate": 8.15682568468795e-05,
      "loss": 0.9836,
      "step": 1240
    },
    {
      "epoch": 5.58,
      "learning_rate": 8.627718286398833e-05,
      "loss": 0.9734,
      "step": 1245
    },
    {
      "epoch": 5.61,
      "learning_rate": 9.089843635795102e-05,
      "loss": 0.8468,
      "step": 1250
    },
    {
      "epoch": 5.63,
      "learning_rate": 9.540909755942299e-05,
      "loss": 0.835,
      "step": 1255
    },
    {
      "epoch": 5.65,
      "learning_rate": 9.978679519739145e-05,
      "loss": 1.0896,
      "step": 1260
    },
    {
      "epoch": 5.67,
      "learning_rate": 0.00010400981745270244,
      "loss": 0.7718,
      "step": 1265
    },
    {
      "epoch": 5.7,
      "learning_rate": 0.00010805721964094184,
      "loss": 0.8329,
      "step": 1270
    },
    {
      "epoch": 5.72,
      "learning_rate": 0.0001119089280905997,
      "loss": 0.9622,
      "step": 1275
    },
    {
      "epoch": 5.74,
      "learning_rate": 0.0001155458397013233,
      "loss": 0.8208,
      "step": 1280
    },
    {
      "epoch": 5.76,
      "learning_rate": 0.00011894991668848222,
      "loss": 0.9137,
      "step": 1285
    },
    {
      "epoch": 5.78,
      "learning_rate": 0.00012210427604414823,
      "loss": 1.0149,
      "step": 1290
    },
    {
      "epoch": 5.81,
      "learning_rate": 0.000124993273270793,
      "loss": 0.9856,
      "step": 1295
    },
    {
      "epoch": 5.83,
      "learning_rate": 0.0001276025799724176,
      "loss": 0.9686,
      "step": 1300
    },
    {
      "epoch": 5.85,
      "learning_rate": 0.0001299192549182866,
      "loss": 0.9409,
      "step": 1305
    },
    {
      "epoch": 5.87,
      "learning_rate": 0.00013193180822681808,
      "loss": 1.214,
      "step": 1310
    },
    {
      "epoch": 5.9,
      "learning_rate": 0.00013363025835129815,
      "loss": 0.8486,
      "step": 1315
    },
    {
      "epoch": 5.92,
      "learning_rate": 0.00013500618158479366,
      "loss": 1.0083,
      "step": 1320
    },
    {
      "epoch": 5.94,
      "learning_rate": 0.00013605275383873428,
      "loss": 0.9284,
      "step": 1325
    },
    {
      "epoch": 5.96,
      "learning_rate": 0.00013676478448795875,
      "loss": 1.0467,
      "step": 1330
    },
    {
      "epoch": 5.99,
      "learning_rate": 0.00013713874211436402,
      "loss": 0.9702,
      "step": 1335
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.2323447465896606,
      "eval_runtime": 2.2212,
      "eval_samples_per_second": 141.817,
      "eval_steps_per_second": 18.008,
      "step": 1338
    },
    {
      "epoch": 6.01,
      "learning_rate": 0.00013717277202148013,
      "loss": 0.9361,
      "step": 1340
    },
    {
      "epoch": 6.03,
      "learning_rate": 0.00013686670543310324,
      "loss": 0.951,
      "step": 1345
    },
    {
      "epoch": 6.05,
      "learning_rate": 0.00013622206033036527,
      "loss": 0.7387,
      "step": 1350
    },
    {
      "epoch": 6.08,
      "learning_rate": 0.0001352420339230889,
      "loss": 0.8237,
      "step": 1355
    },
    {
      "epoch": 6.1,
      "learning_rate": 0.0001339314867927672,
      "loss": 0.8428,
      "step": 1360
    },
    {
      "epoch": 6.12,
      "learning_rate": 0.00013229691878581222,
      "loss": 0.8352,
      "step": 1365
    },
    {
      "epoch": 6.14,
      "learning_rate": 0.00013034643677663527,
      "loss": 0.8816,
      "step": 1370
    },
    {
      "epoch": 6.17,
      "learning_rate": 0.00012808971446044072,
      "loss": 0.781,
      "step": 1375
    },
    {
      "epoch": 6.19,
      "learning_rate": 0.00012553794437514699,
      "loss": 0.7146,
      "step": 1380
    },
    {
      "epoch": 6.21,
      "learning_rate": 0.0001227037823903906,
      "loss": 0.9361,
      "step": 1385
    },
    {
      "epoch": 6.23,
      "learning_rate": 0.00011960128493892572,
      "loss": 0.8889,
      "step": 1390
    },
    {
      "epoch": 6.26,
      "learning_rate": 0.00011624583930172982,
      "loss": 0.9329,
      "step": 1395
    },
    {
      "epoch": 6.28,
      "learning_rate": 0.0001126540872925782,
      "loss": 0.7853,
      "step": 1400
    },
    {
      "epoch": 6.3,
      "learning_rate": 0.00010884384272058193,
      "loss": 0.7188,
      "step": 1405
    },
    {
      "epoch": 6.32,
      "learning_rate": 0.00010483400304004653,
      "loss": 0.7662,
      "step": 1410
    },
    {
      "epoch": 6.35,
      "learning_rate": 0.00010064445562583886,
      "loss": 0.7705,
      "step": 1415
    },
    {
      "epoch": 6.37,
      "learning_rate": 9.629597913909932e-05,
      "loss": 0.9695,
      "step": 1420
    },
    {
      "epoch": 6.39,
      "learning_rate": 9.181014047249165e-05,
      "loss": 0.6245,
      "step": 1425
    },
    {
      "epoch": 6.41,
      "learning_rate": 8.72091877861077e-05,
      "loss": 0.9465,
      "step": 1430
    },
    {
      "epoch": 6.43,
      "learning_rate": 8.25159401645266e-05,
      "loss": 0.8094,
      "step": 1435
    },
    {
      "epoch": 6.46,
      "learning_rate": 7.775367444229211e-05,
      "loss": 0.8367,
      "step": 1440
    },
    {
      "epoch": 6.48,
      "learning_rate": 7.29460097591153e-05,
      "loss": 0.5786,
      "step": 1445
    },
    {
      "epoch": 6.5,
      "learning_rate": 6.81167904173639e-05,
      "loss": 0.8389,
      "step": 1450
    },
    {
      "epoch": 6.52,
      "learning_rate": 6.328996762282478e-05,
      "loss": 0.9198,
      "step": 1455
    },
    {
      "epoch": 6.55,
      "learning_rate": 5.848948069526602e-05,
      "loss": 0.8697,
      "step": 1460
    },
    {
      "epoch": 6.57,
      "learning_rate": 5.3739138337947083e-05,
      "loss": 0.805,
      "step": 1465
    },
    {
      "epoch": 6.59,
      "learning_rate": 4.90625005549404e-05,
      "loss": 0.7912,
      "step": 1470
    },
    {
      "epoch": 6.61,
      "learning_rate": 4.448276180191432e-05,
      "loss": 0.7832,
      "step": 1475
    },
    {
      "epoch": 6.64,
      "learning_rate": 4.0022635949903595e-05,
      "loss": 0.7555,
      "step": 1480
    },
    {
      "epoch": 6.66,
      "learning_rate": 3.570424363260848e-05,
      "loss": 0.88,
      "step": 1485
    },
    {
      "epoch": 6.68,
      "learning_rate": 3.1549002535941244e-05,
      "loss": 0.7924,
      "step": 1490
    },
    {
      "epoch": 6.7,
      "learning_rate": 2.75775211739412e-05,
      "loss": 0.7299,
      "step": 1495
    },
    {
      "epoch": 6.73,
      "learning_rate": 2.3809496677893663e-05,
      "loss": 0.9469,
      "step": 1500
    },
    {
      "epoch": 6.75,
      "learning_rate": 2.0263617105584168e-05,
      "loss": 0.6995,
      "step": 1505
    },
    {
      "epoch": 6.77,
      "learning_rate": 1.6957468755195542e-05,
      "loss": 0.8889,
      "step": 1510
    },
    {
      "epoch": 6.79,
      "learning_rate": 1.3907448943539954e-05,
      "loss": 0.8217,
      "step": 1515
    },
    {
      "epoch": 6.82,
      "learning_rate": 1.1128684681215955e-05,
      "loss": 0.7166,
      "step": 1520
    },
    {
      "epoch": 6.84,
      "learning_rate": 8.63495764802965e-06,
      "loss": 0.873,
      "step": 1525
    },
    {
      "epoch": 6.86,
      "learning_rate": 6.4386358407778465e-06,
      "loss": 0.906,
      "step": 1530
    },
    {
      "epoch": 6.88,
      "learning_rate": 4.550612232394066e-06,
      "loss": 0.9969,
      "step": 1535
    },
    {
      "epoch": 6.91,
      "learning_rate": 2.9802507466879855e-06,
      "loss": 0.6348,
      "step": 1540
    },
    {
      "epoch": 6.93,
      "learning_rate": 1.735339816622208e-06,
      "loss": 0.7365,
      "step": 1545
    },
    {
      "epoch": 6.95,
      "learning_rate": 8.220537564620966e-07,
      "loss": 0.9509,
      "step": 1550
    },
    {
      "epoch": 6.97,
      "learning_rate": 2.4492213937922245e-07,
      "loss": 0.8438,
      "step": 1555
    },
    {
      "epoch": 7.0,
      "learning_rate": 6.807332383425125e-09,
      "loss": 0.9895,
      "step": 1560
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.1562458276748657,
      "eval_runtime": 2.2177,
      "eval_samples_per_second": 142.037,
      "eval_steps_per_second": 18.036,
      "step": 1561
    }
  ],
  "max_steps": 1561,
  "num_train_epochs": 7,
  "total_flos": 1630331633664000.0,
  "trial_name": null,
  "trial_params": null
}