{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 711,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0001362351617440225,
      "loss": 4.199,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.000135270323488045,
      "loss": 4.6198,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001343054852320675,
      "loss": 4.3219,
      "step": 15
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00013334064697609002,
      "loss": 4.0312,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00013237580872011253,
      "loss": 4.3119,
      "step": 25
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00013141097046413501,
      "loss": 3.9227,
      "step": 30
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013044613220815753,
      "loss": 4.1775,
      "step": 35
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00012948129395218,
      "loss": 4.3598,
      "step": 40
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00012851645569620252,
      "loss": 3.7573,
      "step": 45
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00012755161744022503,
      "loss": 3.011,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00012658677918424754,
      "loss": 3.3466,
      "step": 55
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00012562194092827005,
      "loss": 3.3757,
      "step": 60
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00012465710267229254,
      "loss": 3.5864,
      "step": 65
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00012369226441631505,
      "loss": 3.8094,
      "step": 70
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00012272742616033753,
      "loss": 3.2495,
      "step": 75
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00012176258790436006,
      "loss": 3.4377,
      "step": 80
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00012079774964838257,
      "loss": 3.2708,
      "step": 85
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00011983291139240507,
      "loss": 3.7736,
      "step": 90
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00011886807313642758,
      "loss": 2.9204,
      "step": 95
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00011790323488045006,
      "loss": 3.4391,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00011693839662447257,
      "loss": 2.9807,
      "step": 105
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00011597355836849507,
      "loss": 3.1689,
      "step": 110
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00011500872011251758,
      "loss": 3.3144,
      "step": 115
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00011404388185654008,
      "loss": 3.5561,
      "step": 120
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00011307904360056259,
      "loss": 3.6431,
      "step": 125
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001121142053445851,
      "loss": 3.8902,
      "step": 130
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00011114936708860759,
      "loss": 3.0992,
      "step": 135
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001101845288326301,
      "loss": 2.7877,
      "step": 140
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001092196905766526,
      "loss": 3.9218,
      "step": 145
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00010825485232067511,
      "loss": 3.5637,
      "step": 150
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001072900140646976,
      "loss": 3.1522,
      "step": 155
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00010632517580872012,
      "loss": 3.4419,
      "step": 160
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00010536033755274261,
      "loss": 3.0554,
      "step": 165
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00010439549929676513,
      "loss": 3.4052,
      "step": 170
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00010343066104078762,
      "loss": 2.9866,
      "step": 175
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00010246582278481012,
      "loss": 2.5035,
      "step": 180
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00010150098452883263,
      "loss": 3.7511,
      "step": 185
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00010053614627285513,
      "loss": 2.7323,
      "step": 190
    },
    {
      "epoch": 0.27,
      "learning_rate": 9.957130801687764e-05,
      "loss": 2.8699,
      "step": 195
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.860646976090014e-05,
      "loss": 3.1837,
      "step": 200
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.764163150492265e-05,
      "loss": 1.8443,
      "step": 205
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.667679324894516e-05,
      "loss": 3.3378,
      "step": 210
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.571195499296765e-05,
      "loss": 3.3139,
      "step": 215
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.474711673699016e-05,
      "loss": 2.715,
      "step": 220
    },
    {
      "epoch": 0.32,
      "learning_rate": 9.378227848101265e-05,
      "loss": 2.2078,
      "step": 225
    },
    {
      "epoch": 0.32,
      "learning_rate": 9.281744022503517e-05,
      "loss": 3.6404,
      "step": 230
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.185260196905766e-05,
      "loss": 2.7474,
      "step": 235
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.088776371308017e-05,
      "loss": 2.7442,
      "step": 240
    },
    {
      "epoch": 0.34,
      "learning_rate": 8.992292545710267e-05,
      "loss": 2.7549,
      "step": 245
    },
    {
      "epoch": 0.35,
      "learning_rate": 8.895808720112517e-05,
      "loss": 3.1178,
      "step": 250
    },
    {
      "epoch": 0.36,
      "learning_rate": 8.799324894514768e-05,
      "loss": 3.5897,
      "step": 255
    },
    {
      "epoch": 0.37,
      "learning_rate": 8.702841068917018e-05,
      "loss": 3.613,
      "step": 260
    },
    {
      "epoch": 0.37,
      "learning_rate": 8.606357243319269e-05,
      "loss": 3.0409,
      "step": 265
    },
    {
      "epoch": 0.38,
      "learning_rate": 8.509873417721519e-05,
      "loss": 2.6512,
      "step": 270
    },
    {
      "epoch": 0.39,
      "learning_rate": 8.41338959212377e-05,
      "loss": 2.6139,
      "step": 275
    },
    {
      "epoch": 0.39,
      "learning_rate": 8.31690576652602e-05,
      "loss": 3.4506,
      "step": 280
    },
    {
      "epoch": 0.4,
      "learning_rate": 8.220421940928271e-05,
      "loss": 2.5725,
      "step": 285
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.123938115330519e-05,
      "loss": 3.4395,
      "step": 290
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.02745428973277e-05,
      "loss": 2.5777,
      "step": 295
    },
    {
      "epoch": 0.42,
      "learning_rate": 7.930970464135022e-05,
      "loss": 2.9152,
      "step": 300
    },
    {
      "epoch": 0.43,
      "learning_rate": 7.834486638537271e-05,
      "loss": 3.9138,
      "step": 305
    },
    {
      "epoch": 0.44,
      "learning_rate": 7.738002812939522e-05,
      "loss": 3.4747,
      "step": 310
    },
    {
      "epoch": 0.44,
      "learning_rate": 7.641518987341772e-05,
      "loss": 2.0438,
      "step": 315
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.545035161744023e-05,
      "loss": 3.9241,
      "step": 320
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.448551336146272e-05,
      "loss": 2.4667,
      "step": 325
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.352067510548523e-05,
      "loss": 3.0487,
      "step": 330
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.255583684950774e-05,
      "loss": 2.5293,
      "step": 335
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.159099859353024e-05,
      "loss": 2.4645,
      "step": 340
    },
    {
      "epoch": 0.49,
      "learning_rate": 7.062616033755275e-05,
      "loss": 2.3263,
      "step": 345
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.966132208157525e-05,
      "loss": 3.305,
      "step": 350
    },
    {
      "epoch": 0.5,
      "learning_rate": 6.869648382559776e-05,
      "loss": 2.4653,
      "step": 355
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.773164556962026e-05,
      "loss": 1.8414,
      "step": 360
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.676680731364275e-05,
      "loss": 2.8415,
      "step": 365
    },
    {
      "epoch": 0.52,
      "learning_rate": 6.580196905766526e-05,
      "loss": 2.773,
      "step": 370
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.483713080168776e-05,
      "loss": 2.7577,
      "step": 375
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.387229254571027e-05,
      "loss": 2.6964,
      "step": 380
    },
    {
      "epoch": 0.54,
      "learning_rate": 6.290745428973277e-05,
      "loss": 3.8508,
      "step": 385
    },
    {
      "epoch": 0.55,
      "learning_rate": 6.194261603375527e-05,
      "loss": 1.7801,
      "step": 390
    },
    {
      "epoch": 0.56,
      "learning_rate": 6.097777777777777e-05,
      "loss": 2.3334,
      "step": 395
    },
    {
      "epoch": 0.56,
      "learning_rate": 6.0012939521800284e-05,
      "loss": 1.3051,
      "step": 400
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.904810126582279e-05,
      "loss": 2.7872,
      "step": 405
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.808326300984529e-05,
      "loss": 2.2091,
      "step": 410
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.711842475386779e-05,
      "loss": 2.1466,
      "step": 415
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.6153586497890295e-05,
      "loss": 2.7545,
      "step": 420
    },
    {
      "epoch": 0.6,
      "learning_rate": 5.51887482419128e-05,
      "loss": 2.5816,
      "step": 425
    },
    {
      "epoch": 0.6,
      "learning_rate": 5.42239099859353e-05,
      "loss": 2.1573,
      "step": 430
    },
    {
      "epoch": 0.61,
      "learning_rate": 5.32590717299578e-05,
      "loss": 2.6483,
      "step": 435
    },
    {
      "epoch": 0.62,
      "learning_rate": 5.229423347398031e-05,
      "loss": 2.1141,
      "step": 440
    },
    {
      "epoch": 0.63,
      "learning_rate": 5.132939521800282e-05,
      "loss": 1.6248,
      "step": 445
    },
    {
      "epoch": 0.63,
      "learning_rate": 5.0364556962025315e-05,
      "loss": 2.6418,
      "step": 450
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.939971870604782e-05,
      "loss": 2.7324,
      "step": 455
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.8434880450070324e-05,
      "loss": 2.661,
      "step": 460
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.747004219409283e-05,
      "loss": 2.1676,
      "step": 465
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.6505203938115326e-05,
      "loss": 2.0641,
      "step": 470
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.554036568213783e-05,
      "loss": 2.6073,
      "step": 475
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.4575527426160335e-05,
      "loss": 3.2055,
      "step": 480
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.361068917018285e-05,
      "loss": 1.5895,
      "step": 485
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.2645850914205344e-05,
      "loss": 3.1821,
      "step": 490
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.168101265822785e-05,
      "loss": 1.7399,
      "step": 495
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.071617440225035e-05,
      "loss": 3.1152,
      "step": 500
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.975133614627286e-05,
      "loss": 1.5188,
      "step": 505
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.8786497890295355e-05,
      "loss": 2.0804,
      "step": 510
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.782165963431786e-05,
      "loss": 2.0614,
      "step": 515
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.6856821378340364e-05,
      "loss": 3.1566,
      "step": 520
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.5891983122362876e-05,
      "loss": 2.0499,
      "step": 525
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.492714486638537e-05,
      "loss": 2.0387,
      "step": 530
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.396230661040788e-05,
      "loss": 2.0698,
      "step": 535
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.299746835443038e-05,
      "loss": 2.5658,
      "step": 540
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.203263009845288e-05,
      "loss": 1.5012,
      "step": 545
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.1067791842475384e-05,
      "loss": 1.978,
      "step": 550
    },
    {
      "epoch": 0.78,
      "learning_rate": 3.0102953586497892e-05,
      "loss": 3.0437,
      "step": 555
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.9138115330520393e-05,
      "loss": 1.9812,
      "step": 560
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.8173277074542898e-05,
      "loss": 1.9784,
      "step": 565
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.72084388185654e-05,
      "loss": 1.9855,
      "step": 570
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.6243600562587903e-05,
      "loss": 2.5896,
      "step": 575
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.5278762306610408e-05,
      "loss": 2.4775,
      "step": 580
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.4313924050632912e-05,
      "loss": 2.4693,
      "step": 585
    },
    {
      "epoch": 0.83,
      "learning_rate": 2.3349085794655413e-05,
      "loss": 1.9928,
      "step": 590
    },
    {
      "epoch": 0.84,
      "learning_rate": 2.2384247538677918e-05,
      "loss": 3.0532,
      "step": 595
    },
    {
      "epoch": 0.84,
      "learning_rate": 2.1419409282700422e-05,
      "loss": 2.4624,
      "step": 600
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.0454571026722927e-05,
      "loss": 1.9622,
      "step": 605
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.9489732770745428e-05,
      "loss": 1.9914,
      "step": 610
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.8524894514767932e-05,
      "loss": 1.3641,
      "step": 615
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.7560056258790437e-05,
      "loss": 2.8009,
      "step": 620
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.659521800281294e-05,
      "loss": 3.0455,
      "step": 625
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.5630379746835442e-05,
      "loss": 2.418,
      "step": 630
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.4665541490857947e-05,
      "loss": 3.0566,
      "step": 635
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.370070323488045e-05,
      "loss": 1.9132,
      "step": 640
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.2735864978902954e-05,
      "loss": 2.4978,
      "step": 645
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.1771026722925457e-05,
      "loss": 2.6473,
      "step": 650
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.0806188466947961e-05,
      "loss": 2.4882,
      "step": 655
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.841350210970464e-06,
      "loss": 3.1199,
      "step": 660
    },
    {
      "epoch": 0.94,
      "learning_rate": 8.876511954992969e-06,
      "loss": 3.0693,
      "step": 665
    },
    {
      "epoch": 0.94,
      "learning_rate": 7.911673699015471e-06,
      "loss": 2.4433,
      "step": 670
    },
    {
      "epoch": 0.95,
      "learning_rate": 6.946835443037974e-06,
      "loss": 2.4399,
      "step": 675
    },
    {
      "epoch": 0.96,
      "learning_rate": 5.981997187060478e-06,
      "loss": 2.5172,
      "step": 680
    },
    {
      "epoch": 0.96,
      "learning_rate": 5.0171589310829815e-06,
      "loss": 3.0587,
      "step": 685
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.052320675105485e-06,
      "loss": 3.6936,
      "step": 690
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.0874824191279887e-06,
      "loss": 1.8324,
      "step": 695
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.1226441631504924e-06,
      "loss": 1.9274,
      "step": 700
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.1578059071729958e-06,
      "loss": 2.505,
      "step": 705
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.929676511954993e-07,
      "loss": 1.8712,
      "step": 710
    },
    {
      "epoch": 1.0,
      "step": 711,
      "total_flos": 371557269504000.0,
      "train_loss": 2.8279777153299497,
      "train_runtime": 196.8346,
      "train_samples_per_second": 3.612,
      "train_steps_per_second": 3.612
    }
  ],
  "max_steps": 711,
  "num_train_epochs": 1,
  "total_flos": 371557269504000.0,
  "trial_name": null,
  "trial_params": null
}