{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 10.0, |
|
"eval_steps": 500, |
|
"global_step": 1460, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.00684931506849315, |
|
"grad_norm": 4.9375, |
|
"learning_rate": 1.3698630136986302e-06, |
|
"loss": 3.0017, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03424657534246575, |
|
"grad_norm": 4.5625, |
|
"learning_rate": 6.849315068493151e-06, |
|
"loss": 3.0717, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.0684931506849315, |
|
"grad_norm": 5.40625, |
|
"learning_rate": 1.3698630136986302e-05, |
|
"loss": 3.002, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.10273972602739725, |
|
"grad_norm": 2.640625, |
|
"learning_rate": 2.0547945205479453e-05, |
|
"loss": 2.8518, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.136986301369863, |
|
"grad_norm": 1.734375, |
|
"learning_rate": 2.7397260273972603e-05, |
|
"loss": 2.6474, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.17123287671232876, |
|
"grad_norm": 1.59375, |
|
"learning_rate": 3.424657534246575e-05, |
|
"loss": 2.4285, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.2054794520547945, |
|
"grad_norm": 75.0, |
|
"learning_rate": 4.1095890410958905e-05, |
|
"loss": 2.2533, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.23972602739726026, |
|
"grad_norm": 1.84375, |
|
"learning_rate": 4.794520547945205e-05, |
|
"loss": 2.0584, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.273972602739726, |
|
"grad_norm": 0.80859375, |
|
"learning_rate": 5.479452054794521e-05, |
|
"loss": 1.9038, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.3082191780821918, |
|
"grad_norm": 0.65625, |
|
"learning_rate": 6.164383561643835e-05, |
|
"loss": 1.797, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.3424657534246575, |
|
"grad_norm": 0.5859375, |
|
"learning_rate": 6.84931506849315e-05, |
|
"loss": 1.6455, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3767123287671233, |
|
"grad_norm": 0.40625, |
|
"learning_rate": 7.534246575342466e-05, |
|
"loss": 1.5458, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.410958904109589, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 8.219178082191781e-05, |
|
"loss": 1.4771, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.4452054794520548, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 8.904109589041096e-05, |
|
"loss": 1.4206, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.4794520547945205, |
|
"grad_norm": 0.546875, |
|
"learning_rate": 9.58904109589041e-05, |
|
"loss": 1.3804, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.5136986301369864, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 0.00010273972602739728, |
|
"loss": 1.3471, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.547945205479452, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 0.00010958904109589041, |
|
"loss": 1.3165, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.5821917808219178, |
|
"grad_norm": 0.53515625, |
|
"learning_rate": 0.00011643835616438356, |
|
"loss": 1.2853, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.6164383561643836, |
|
"grad_norm": 0.3828125, |
|
"learning_rate": 0.0001232876712328767, |
|
"loss": 1.2863, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.6506849315068494, |
|
"grad_norm": 0.8828125, |
|
"learning_rate": 0.00013013698630136988, |
|
"loss": 1.267, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.684931506849315, |
|
"grad_norm": 0.74609375, |
|
"learning_rate": 0.000136986301369863, |
|
"loss": 1.2466, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.7191780821917808, |
|
"grad_norm": 1.0, |
|
"learning_rate": 0.00014383561643835618, |
|
"loss": 1.2444, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.7534246575342466, |
|
"grad_norm": 0.3671875, |
|
"learning_rate": 0.00015068493150684933, |
|
"loss": 1.2283, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.7876712328767124, |
|
"grad_norm": 0.3125, |
|
"learning_rate": 0.00015753424657534247, |
|
"loss": 1.2153, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.821917808219178, |
|
"grad_norm": 0.3984375, |
|
"learning_rate": 0.00016438356164383562, |
|
"loss": 1.2106, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.8561643835616438, |
|
"grad_norm": 0.330078125, |
|
"learning_rate": 0.00017123287671232877, |
|
"loss": 1.1939, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.8904109589041096, |
|
"grad_norm": 0.57421875, |
|
"learning_rate": 0.00017808219178082192, |
|
"loss": 1.1965, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.9246575342465754, |
|
"grad_norm": 0.515625, |
|
"learning_rate": 0.0001849315068493151, |
|
"loss": 1.1839, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.958904109589041, |
|
"grad_norm": 1.4453125, |
|
"learning_rate": 0.0001917808219178082, |
|
"loss": 1.1983, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.9931506849315068, |
|
"grad_norm": 0.470703125, |
|
"learning_rate": 0.00019863013698630139, |
|
"loss": 1.1808, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 2.487597942352295, |
|
"eval_runtime": 0.541, |
|
"eval_samples_per_second": 18.483, |
|
"eval_steps_per_second": 1.848, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 1.0273972602739727, |
|
"grad_norm": 0.396484375, |
|
"learning_rate": 0.00019999542705801296, |
|
"loss": 1.1618, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.0616438356164384, |
|
"grad_norm": 0.8125, |
|
"learning_rate": 0.00019997685019798912, |
|
"loss": 1.1531, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.095890410958904, |
|
"grad_norm": 0.390625, |
|
"learning_rate": 0.00019994398626371643, |
|
"loss": 1.1517, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.13013698630137, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.00019989683995157677, |
|
"loss": 1.1322, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.1643835616438356, |
|
"grad_norm": 0.318359375, |
|
"learning_rate": 0.0001998354179989585, |
|
"loss": 1.1413, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.1986301369863013, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 0.00019975972918329356, |
|
"loss": 1.1275, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.2328767123287672, |
|
"grad_norm": 0.54296875, |
|
"learning_rate": 0.00019966978432080316, |
|
"loss": 1.1261, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.2671232876712328, |
|
"grad_norm": 0.43359375, |
|
"learning_rate": 0.00019956559626495212, |
|
"loss": 1.1384, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.3013698630136985, |
|
"grad_norm": 2.4375, |
|
"learning_rate": 0.00019944717990461207, |
|
"loss": 1.1376, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.3356164383561644, |
|
"grad_norm": 0.423828125, |
|
"learning_rate": 0.00019931455216193382, |
|
"loss": 1.1376, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.36986301369863, |
|
"grad_norm": 0.55078125, |
|
"learning_rate": 0.000199167731989929, |
|
"loss": 1.14, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.404109589041096, |
|
"grad_norm": 0.44140625, |
|
"learning_rate": 0.00019900674036976173, |
|
"loss": 1.1281, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.4383561643835616, |
|
"grad_norm": 0.63671875, |
|
"learning_rate": 0.00019883160030775016, |
|
"loss": 1.1221, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.4726027397260273, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 0.00019864233683207906, |
|
"loss": 1.1217, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.5068493150684932, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 0.00019843897698922284, |
|
"loss": 1.1086, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.541095890410959, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 0.00019822154984008088, |
|
"loss": 1.1132, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.5753424657534247, |
|
"grad_norm": 0.314453125, |
|
"learning_rate": 0.0001979900864558242, |
|
"loss": 1.1094, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.6095890410958904, |
|
"grad_norm": 0.48046875, |
|
"learning_rate": 0.00019774461991345577, |
|
"loss": 1.1048, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.643835616438356, |
|
"grad_norm": 0.32421875, |
|
"learning_rate": 0.00019748518529108316, |
|
"loss": 1.0937, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.678082191780822, |
|
"grad_norm": 0.431640625, |
|
"learning_rate": 0.00019721181966290613, |
|
"loss": 1.1099, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.7123287671232876, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 0.00019692456209391846, |
|
"loss": 1.0998, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.7465753424657535, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 0.0001966234536343253, |
|
"loss": 1.0985, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.7808219178082192, |
|
"grad_norm": 0.455078125, |
|
"learning_rate": 0.00019630853731367713, |
|
"loss": 1.1036, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.8150684931506849, |
|
"grad_norm": 0.52734375, |
|
"learning_rate": 0.00019597985813472052, |
|
"loss": 1.0853, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 1.8493150684931505, |
|
"grad_norm": 0.30078125, |
|
"learning_rate": 0.0001956374630669672, |
|
"loss": 1.0958, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.8835616438356164, |
|
"grad_norm": 0.62890625, |
|
"learning_rate": 0.00019528140103998177, |
|
"loss": 1.0911, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 1.9178082191780823, |
|
"grad_norm": 0.5234375, |
|
"learning_rate": 0.00019491172293638968, |
|
"loss": 1.0812, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.952054794520548, |
|
"grad_norm": 0.44921875, |
|
"learning_rate": 0.0001945284815846057, |
|
"loss": 1.0777, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.9863013698630136, |
|
"grad_norm": 0.53515625, |
|
"learning_rate": 0.00019413173175128473, |
|
"loss": 1.0819, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": 2.4820380210876465, |
|
"eval_runtime": 0.5459, |
|
"eval_samples_per_second": 18.32, |
|
"eval_steps_per_second": 1.832, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 2.0205479452054793, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 0.00019372153013349523, |
|
"loss": 1.0712, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 2.0547945205479454, |
|
"grad_norm": 0.302734375, |
|
"learning_rate": 0.00019329793535061723, |
|
"loss": 1.0467, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.089041095890411, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 0.0001928610079359652, |
|
"loss": 1.0444, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.1232876712328768, |
|
"grad_norm": 0.376953125, |
|
"learning_rate": 0.00019241081032813772, |
|
"loss": 1.0393, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.1575342465753424, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 0.00019194740686209464, |
|
"loss": 1.0475, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 2.191780821917808, |
|
"grad_norm": 1.2578125, |
|
"learning_rate": 0.0001914708637599636, |
|
"loss": 1.0427, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 2.2260273972602738, |
|
"grad_norm": 1.5546875, |
|
"learning_rate": 0.00019098124912157632, |
|
"loss": 1.0486, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 2.26027397260274, |
|
"grad_norm": 0.458984375, |
|
"learning_rate": 0.00019047863291473717, |
|
"loss": 1.0412, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.2945205479452055, |
|
"grad_norm": 0.62109375, |
|
"learning_rate": 0.00018996308696522433, |
|
"loss": 1.0414, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 2.328767123287671, |
|
"grad_norm": 0.2890625, |
|
"learning_rate": 0.0001894346849465257, |
|
"loss": 1.033, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.363013698630137, |
|
"grad_norm": 0.328125, |
|
"learning_rate": 0.00018889350236931055, |
|
"loss": 1.0325, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.3972602739726026, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.00018833961657063885, |
|
"loss": 1.0499, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.4315068493150687, |
|
"grad_norm": 0.578125, |
|
"learning_rate": 0.0001877731067029096, |
|
"loss": 1.0361, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 2.4657534246575343, |
|
"grad_norm": 0.4921875, |
|
"learning_rate": 0.00018719405372254948, |
|
"loss": 1.0412, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"grad_norm": 0.298828125, |
|
"learning_rate": 0.00018660254037844388, |
|
"loss": 1.0435, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 2.5342465753424657, |
|
"grad_norm": 0.4296875, |
|
"learning_rate": 0.00018599865120011192, |
|
"loss": 1.0369, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.5684931506849313, |
|
"grad_norm": 0.31640625, |
|
"learning_rate": 0.00018538247248562674, |
|
"loss": 1.0298, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.602739726027397, |
|
"grad_norm": 0.3125, |
|
"learning_rate": 0.00018475409228928312, |
|
"loss": 1.0325, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.636986301369863, |
|
"grad_norm": 0.41015625, |
|
"learning_rate": 0.0001841136004090144, |
|
"loss": 1.0411, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 2.671232876712329, |
|
"grad_norm": 0.310546875, |
|
"learning_rate": 0.00018346108837355972, |
|
"loss": 1.0372, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.7054794520547945, |
|
"grad_norm": 0.384765625, |
|
"learning_rate": 0.00018279664942938447, |
|
"loss": 1.0364, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 2.73972602739726, |
|
"grad_norm": 0.279296875, |
|
"learning_rate": 0.00018212037852735486, |
|
"loss": 1.0237, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.7739726027397262, |
|
"grad_norm": 0.375, |
|
"learning_rate": 0.0001814323723091692, |
|
"loss": 1.0341, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.808219178082192, |
|
"grad_norm": 0.486328125, |
|
"learning_rate": 0.00018073272909354727, |
|
"loss": 1.0256, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.8424657534246576, |
|
"grad_norm": 0.287109375, |
|
"learning_rate": 0.00018002154886218033, |
|
"loss": 1.0347, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 2.8767123287671232, |
|
"grad_norm": 0.427734375, |
|
"learning_rate": 0.00017929893324544332, |
|
"loss": 1.0357, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.910958904109589, |
|
"grad_norm": 0.408203125, |
|
"learning_rate": 0.00017856498550787144, |
|
"loss": 1.029, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 2.9452054794520546, |
|
"grad_norm": 0.375, |
|
"learning_rate": 0.00017781981053340337, |
|
"loss": 1.0263, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.9794520547945207, |
|
"grad_norm": 0.474609375, |
|
"learning_rate": 0.00017706351481039284, |
|
"loss": 1.035, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_loss": 2.4995357990264893, |
|
"eval_runtime": 0.5513, |
|
"eval_samples_per_second": 18.139, |
|
"eval_steps_per_second": 1.814, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 3.0136986301369864, |
|
"grad_norm": 0.302734375, |
|
"learning_rate": 0.00017629620641639103, |
|
"loss": 1.006, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 3.047945205479452, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 0.00017551799500270198, |
|
"loss": 0.9868, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 3.0821917808219177, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 0.00017472899177871297, |
|
"loss": 0.9878, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 3.1164383561643834, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 0.00017392930949600217, |
|
"loss": 0.9897, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 3.1506849315068495, |
|
"grad_norm": 0.50390625, |
|
"learning_rate": 0.00017311906243222614, |
|
"loss": 0.9801, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 3.184931506849315, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 0.00017229836637478902, |
|
"loss": 0.9837, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 3.219178082191781, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 0.00017146733860429612, |
|
"loss": 0.9774, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 3.2534246575342465, |
|
"grad_norm": 0.59765625, |
|
"learning_rate": 0.00017062609787779403, |
|
"loss": 0.9918, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 3.287671232876712, |
|
"grad_norm": 0.310546875, |
|
"learning_rate": 0.00016977476441179992, |
|
"loss": 0.9844, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 3.3219178082191783, |
|
"grad_norm": 0.337890625, |
|
"learning_rate": 0.0001689134598651219, |
|
"loss": 0.9841, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 3.356164383561644, |
|
"grad_norm": 0.30078125, |
|
"learning_rate": 0.0001680423073214737, |
|
"loss": 0.993, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 3.3904109589041096, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 0.00016716143127188548, |
|
"loss": 0.9842, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 3.4246575342465753, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 0.00016627095759691362, |
|
"loss": 0.9925, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 3.458904109589041, |
|
"grad_norm": 0.474609375, |
|
"learning_rate": 0.0001653710135486518, |
|
"loss": 0.9822, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 3.493150684931507, |
|
"grad_norm": 0.3046875, |
|
"learning_rate": 0.00016446172773254629, |
|
"loss": 0.985, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 3.5273972602739727, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 0.00016354323008901776, |
|
"loss": 0.9937, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 3.5616438356164384, |
|
"grad_norm": 0.314453125, |
|
"learning_rate": 0.0001626156518748922, |
|
"loss": 0.9889, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 3.595890410958904, |
|
"grad_norm": 0.3046875, |
|
"learning_rate": 0.00016167912564464383, |
|
"loss": 0.9816, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 3.6301369863013697, |
|
"grad_norm": 0.314453125, |
|
"learning_rate": 0.0001607337852314527, |
|
"loss": 0.9873, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 3.6643835616438354, |
|
"grad_norm": 0.3046875, |
|
"learning_rate": 0.0001597797657280792, |
|
"loss": 0.9935, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 3.6986301369863015, |
|
"grad_norm": 0.287109375, |
|
"learning_rate": 0.00015881720346755905, |
|
"loss": 0.9863, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 3.732876712328767, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 0.00015784623600372042, |
|
"loss": 0.9782, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 3.767123287671233, |
|
"grad_norm": 0.375, |
|
"learning_rate": 0.00015686700209152738, |
|
"loss": 0.9779, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 3.8013698630136985, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 0.00015587964166725095, |
|
"loss": 0.9883, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 3.8356164383561646, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 0.00015488429582847192, |
|
"loss": 0.968, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 3.8698630136986303, |
|
"grad_norm": 0.478515625, |
|
"learning_rate": 0.00015388110681391725, |
|
"loss": 0.9858, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 3.904109589041096, |
|
"grad_norm": 0.294921875, |
|
"learning_rate": 0.0001528702179831338, |
|
"loss": 0.9668, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 3.9383561643835616, |
|
"grad_norm": 0.3203125, |
|
"learning_rate": 0.00015185177379600152, |
|
"loss": 0.9853, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 3.9726027397260273, |
|
"grad_norm": 0.302734375, |
|
"learning_rate": 0.00015082591979208976, |
|
"loss": 0.9796, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 2.5387372970581055, |
|
"eval_runtime": 0.5485, |
|
"eval_samples_per_second": 18.233, |
|
"eval_steps_per_second": 1.823, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 4.006849315068493, |
|
"grad_norm": 0.369140625, |
|
"learning_rate": 0.000149792802569859, |
|
"loss": 0.981, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 4.041095890410959, |
|
"grad_norm": 0.31640625, |
|
"learning_rate": 0.00014875256976571135, |
|
"loss": 0.9259, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 4.075342465753424, |
|
"grad_norm": 0.302734375, |
|
"learning_rate": 0.0001477053700328929, |
|
"loss": 0.9421, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 4.109589041095891, |
|
"grad_norm": 0.310546875, |
|
"learning_rate": 0.00014665135302025035, |
|
"loss": 0.9348, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 4.1438356164383565, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 0.00014559066935084588, |
|
"loss": 0.9353, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 4.178082191780822, |
|
"grad_norm": 0.314453125, |
|
"learning_rate": 0.00014452347060043237, |
|
"loss": 0.9267, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 4.212328767123288, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 0.00014344990927579268, |
|
"loss": 0.9451, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 4.2465753424657535, |
|
"grad_norm": 0.486328125, |
|
"learning_rate": 0.0001423701387929459, |
|
"loss": 0.9245, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 4.280821917808219, |
|
"grad_norm": 0.4765625, |
|
"learning_rate": 0.0001412843134552235, |
|
"loss": 0.9371, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 4.315068493150685, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 0.00014019258843121893, |
|
"loss": 0.9309, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 4.3493150684931505, |
|
"grad_norm": 0.4453125, |
|
"learning_rate": 0.0001390951197326134, |
|
"loss": 0.9426, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 4.383561643835616, |
|
"grad_norm": 0.474609375, |
|
"learning_rate": 0.00013799206419188103, |
|
"loss": 0.9359, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 4.417808219178082, |
|
"grad_norm": 0.396484375, |
|
"learning_rate": 0.00013688357943987732, |
|
"loss": 0.945, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 4.4520547945205475, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 0.0001357698238833126, |
|
"loss": 0.9341, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 4.486301369863014, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 0.0001346509566821153, |
|
"loss": 0.9454, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 4.52054794520548, |
|
"grad_norm": 0.341796875, |
|
"learning_rate": 0.00013352713772668765, |
|
"loss": 0.9414, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 4.554794520547945, |
|
"grad_norm": 0.431640625, |
|
"learning_rate": 0.00013239852761505626, |
|
"loss": 0.9429, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 4.589041095890411, |
|
"grad_norm": 0.40625, |
|
"learning_rate": 0.00013126528762992247, |
|
"loss": 0.947, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 4.623287671232877, |
|
"grad_norm": 0.3125, |
|
"learning_rate": 0.00013012757971561415, |
|
"loss": 0.9387, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 4.657534246575342, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 0.00012898556645494325, |
|
"loss": 0.9485, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 4.691780821917808, |
|
"grad_norm": 0.326171875, |
|
"learning_rate": 0.0001278394110459724, |
|
"loss": 0.9457, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 4.726027397260274, |
|
"grad_norm": 0.341796875, |
|
"learning_rate": 0.0001266892772786929, |
|
"loss": 0.9463, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 4.760273972602739, |
|
"grad_norm": 0.31640625, |
|
"learning_rate": 0.0001255353295116187, |
|
"loss": 0.9518, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 4.794520547945205, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 0.00012437773264829897, |
|
"loss": 0.9382, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 4.828767123287671, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 0.00012321665211375256, |
|
"loss": 0.9485, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 4.863013698630137, |
|
"grad_norm": 0.3203125, |
|
"learning_rate": 0.00012205225383082843, |
|
"loss": 0.9393, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 4.897260273972603, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 0.00012088470419649432, |
|
"loss": 0.9568, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 4.931506849315069, |
|
"grad_norm": 0.337890625, |
|
"learning_rate": 0.00011971417005805818, |
|
"loss": 0.9352, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 4.965753424657534, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 0.0001185408186893251, |
|
"loss": 0.9383, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 0.00011736481776669306, |
|
"loss": 0.9366, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 2.6037707328796387, |
|
"eval_runtime": 0.5451, |
|
"eval_samples_per_second": 18.346, |
|
"eval_steps_per_second": 1.835, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 5.034246575342466, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 0.00011618633534519141, |
|
"loss": 0.9013, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 5.068493150684931, |
|
"grad_norm": 0.31640625, |
|
"learning_rate": 0.00011500553983446527, |
|
"loss": 0.9034, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 5.102739726027397, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 0.00011382259997470899, |
|
"loss": 0.8925, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 5.136986301369863, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 0.00011263768481255264, |
|
"loss": 0.8901, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 5.171232876712328, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.00011145096367690444, |
|
"loss": 0.8945, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 5.205479452054795, |
|
"grad_norm": 0.42578125, |
|
"learning_rate": 0.00011026260615475333, |
|
"loss": 0.8961, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 5.239726027397261, |
|
"grad_norm": 0.4765625, |
|
"learning_rate": 0.00010907278206693395, |
|
"loss": 0.8911, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 5.273972602739726, |
|
"grad_norm": 0.3828125, |
|
"learning_rate": 0.00010788166144385888, |
|
"loss": 0.9018, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 5.308219178082192, |
|
"grad_norm": 0.51953125, |
|
"learning_rate": 0.00010668941450122055, |
|
"loss": 0.9046, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 5.342465753424658, |
|
"grad_norm": 0.54296875, |
|
"learning_rate": 0.0001054962116156667, |
|
"loss": 0.8964, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 5.376712328767123, |
|
"grad_norm": 0.50390625, |
|
"learning_rate": 0.00010430222330045304, |
|
"loss": 0.9071, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 5.410958904109589, |
|
"grad_norm": 0.4921875, |
|
"learning_rate": 0.0001031076201810762, |
|
"loss": 0.8981, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 5.445205479452055, |
|
"grad_norm": 0.3359375, |
|
"learning_rate": 0.00010191257297089052, |
|
"loss": 0.9054, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 5.47945205479452, |
|
"grad_norm": 0.337890625, |
|
"learning_rate": 0.00010071725244671282, |
|
"loss": 0.9061, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 5.513698630136986, |
|
"grad_norm": 0.4140625, |
|
"learning_rate": 9.952182942441733e-05, |
|
"loss": 0.905, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 5.5479452054794525, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 9.83264747345259e-05, |
|
"loss": 0.8981, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 5.582191780821918, |
|
"grad_norm": 0.416015625, |
|
"learning_rate": 9.713135919779515e-05, |
|
"loss": 0.8956, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 5.616438356164384, |
|
"grad_norm": 0.41015625, |
|
"learning_rate": 9.593665360080599e-05, |
|
"loss": 0.9116, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 5.6506849315068495, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 9.474252867155732e-05, |
|
"loss": 0.9002, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 5.684931506849315, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 9.354915505506839e-05, |
|
"loss": 0.8971, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 5.719178082191781, |
|
"grad_norm": 0.3359375, |
|
"learning_rate": 9.235670328899293e-05, |
|
"loss": 0.8988, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 5.7534246575342465, |
|
"grad_norm": 0.3359375, |
|
"learning_rate": 9.116534377924883e-05, |
|
"loss": 0.8922, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 5.787671232876712, |
|
"grad_norm": 0.32421875, |
|
"learning_rate": 8.997524677566627e-05, |
|
"loss": 0.8953, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 5.821917808219178, |
|
"grad_norm": 0.326171875, |
|
"learning_rate": 8.878658234765858e-05, |
|
"loss": 0.8986, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 5.8561643835616435, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 8.759952035991844e-05, |
|
"loss": 0.8949, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 5.890410958904109, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 8.641423044814374e-05, |
|
"loss": 0.9013, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 5.924657534246576, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 8.5230881994796e-05, |
|
"loss": 0.9133, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 5.958904109589041, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 8.404964410489485e-05, |
|
"loss": 0.9042, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 5.993150684931507, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 8.287068558185225e-05, |
|
"loss": 0.9051, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_loss": 2.6520962715148926, |
|
"eval_runtime": 0.5512, |
|
"eval_samples_per_second": 18.142, |
|
"eval_steps_per_second": 1.814, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 6.027397260273973, |
|
"grad_norm": 0.33203125, |
|
"learning_rate": 8.169417490335007e-05, |
|
"loss": 0.8764, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 6.061643835616438, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 8.052028019726371e-05, |
|
"loss": 0.8608, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 6.095890410958904, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 7.934916921763628e-05, |
|
"loss": 0.8474, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 6.13013698630137, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 7.818100932070546e-05, |
|
"loss": 0.8558, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 6.164383561643835, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 7.701596744098818e-05, |
|
"loss": 0.858, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 6.198630136986301, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 7.585421006742463e-05, |
|
"loss": 0.8568, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 6.232876712328767, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 7.469590321958662e-05, |
|
"loss": 0.8626, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 6.267123287671233, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 7.354121242395254e-05, |
|
"loss": 0.8685, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 6.301369863013699, |
|
"grad_norm": 0.3828125, |
|
"learning_rate": 7.239030269025311e-05, |
|
"loss": 0.8621, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 6.335616438356165, |
|
"grad_norm": 0.404296875, |
|
"learning_rate": 7.124333848789091e-05, |
|
"loss": 0.8687, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 6.36986301369863, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 7.010048372243698e-05, |
|
"loss": 0.8691, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 6.404109589041096, |
|
"grad_norm": 0.44140625, |
|
"learning_rate": 6.8961901712208e-05, |
|
"loss": 0.8651, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 6.438356164383562, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 6.782775516492771e-05, |
|
"loss": 0.8656, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 6.472602739726027, |
|
"grad_norm": 0.384765625, |
|
"learning_rate": 6.669820615447522e-05, |
|
"loss": 0.8683, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 6.506849315068493, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 6.5573416097724e-05, |
|
"loss": 0.8632, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 6.541095890410959, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 6.445354573147484e-05, |
|
"loss": 0.8693, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 6.575342465753424, |
|
"grad_norm": 0.341796875, |
|
"learning_rate": 6.333875508948593e-05, |
|
"loss": 0.8724, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 6.609589041095891, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 6.22292034796035e-05, |
|
"loss": 0.8659, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 6.6438356164383565, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 6.112504946099604e-05, |
|
"loss": 0.8674, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 6.678082191780822, |
|
"grad_norm": 0.390625, |
|
"learning_rate": 6.0026450821495536e-05, |
|
"loss": 0.8762, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 6.712328767123288, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 5.8933564555049105e-05, |
|
"loss": 0.8581, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 6.7465753424657535, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 5.784654683928391e-05, |
|
"loss": 0.8718, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 6.780821917808219, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 5.6765553013188766e-05, |
|
"loss": 0.8637, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 6.815068493150685, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 5.5690737554915604e-05, |
|
"loss": 0.8684, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 6.8493150684931505, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 5.462225405970401e-05, |
|
"loss": 0.8693, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 6.883561643835616, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 5.3560255217931785e-05, |
|
"loss": 0.871, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 6.917808219178082, |
|
"grad_norm": 0.369140625, |
|
"learning_rate": 5.2504892793295e-05, |
|
"loss": 0.8644, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 6.9520547945205475, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 5.145631760112022e-05, |
|
"loss": 0.8688, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 6.986301369863014, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 5.041467948681269e-05, |
|
"loss": 0.8676, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_loss": 2.7248690128326416, |
|
"eval_runtime": 0.5502, |
|
"eval_samples_per_second": 18.174, |
|
"eval_steps_per_second": 1.817, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 7.02054794520548, |
|
"grad_norm": 0.328125, |
|
"learning_rate": 4.9380127304442634e-05, |
|
"loss": 0.8501, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 7.054794520547945, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 4.835280889547351e-05, |
|
"loss": 0.8375, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 7.089041095890411, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 4.733287106763481e-05, |
|
"loss": 0.8297, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 7.123287671232877, |
|
"grad_norm": 0.375, |
|
"learning_rate": 4.6320459573942856e-05, |
|
"loss": 0.8336, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 7.157534246575342, |
|
"grad_norm": 0.365234375, |
|
"learning_rate": 4.531571909187197e-05, |
|
"loss": 0.832, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 7.191780821917808, |
|
"grad_norm": 0.376953125, |
|
"learning_rate": 4.431879320267972e-05, |
|
"loss": 0.8393, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 7.226027397260274, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 4.332982437088825e-05, |
|
"loss": 0.8425, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 7.260273972602739, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 4.2348953923925916e-05, |
|
"loss": 0.846, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 7.294520547945205, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 4.137632203193086e-05, |
|
"loss": 0.8396, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 7.328767123287671, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 4.041206768772022e-05, |
|
"loss": 0.836, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 7.363013698630137, |
|
"grad_norm": 0.365234375, |
|
"learning_rate": 3.9456328686927525e-05, |
|
"loss": 0.8442, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 7.397260273972603, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 3.850924160831115e-05, |
|
"loss": 0.8358, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 7.431506849315069, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 3.757094179423672e-05, |
|
"loss": 0.8381, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 7.465753424657534, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 3.6641563331336125e-05, |
|
"loss": 0.837, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 3.5721239031346066e-05, |
|
"loss": 0.8435, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 7.534246575342466, |
|
"grad_norm": 0.376953125, |
|
"learning_rate": 3.4810100412128747e-05, |
|
"loss": 0.8412, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 7.568493150684931, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 3.3908277678877445e-05, |
|
"loss": 0.8361, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 7.602739726027397, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 3.3015899705509734e-05, |
|
"loss": 0.84, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 7.636986301369863, |
|
"grad_norm": 0.369140625, |
|
"learning_rate": 3.21330940162508e-05, |
|
"loss": 0.8427, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 7.671232876712329, |
|
"grad_norm": 0.373046875, |
|
"learning_rate": 3.125998676740987e-05, |
|
"loss": 0.8443, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 7.705479452054795, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 3.0396702729352023e-05, |
|
"loss": 0.841, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 7.739726027397261, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 2.9543365268667867e-05, |
|
"loss": 0.8482, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 7.773972602739726, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 2.8700096330544012e-05, |
|
"loss": 0.8388, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 7.808219178082192, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 2.7867016421336776e-05, |
|
"loss": 0.8486, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 7.842465753424658, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 2.7044244591351232e-05, |
|
"loss": 0.8453, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 7.876712328767123, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 2.6231898417828603e-05, |
|
"loss": 0.8358, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 7.910958904109589, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 2.5430093988143778e-05, |
|
"loss": 0.8344, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 7.945205479452055, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 2.4638945883216235e-05, |
|
"loss": 0.8312, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 7.97945205479452, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 2.385856716113587e-05, |
|
"loss": 0.8291, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 2.766744613647461, |
|
"eval_runtime": 0.5415, |
|
"eval_samples_per_second": 18.469, |
|
"eval_steps_per_second": 1.847, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 8.013698630136986, |
|
"grad_norm": 0.369140625, |
|
"learning_rate": 2.3089069341006565e-05, |
|
"loss": 0.8283, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 8.047945205479452, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 2.2330562387009745e-05, |
|
"loss": 0.8203, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 8.082191780821917, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 2.1583154692689976e-05, |
|
"loss": 0.8221, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 8.116438356164384, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 2.08469530654652e-05, |
|
"loss": 0.8256, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 8.150684931506849, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 2.0122062711363532e-05, |
|
"loss": 0.8139, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 8.184931506849315, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 1.9408587219988805e-05, |
|
"loss": 0.8351, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 8.219178082191782, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 1.8706628549717452e-05, |
|
"loss": 0.8258, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 8.253424657534246, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 1.8016287013128018e-05, |
|
"loss": 0.8139, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 8.287671232876713, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 1.7337661262666294e-05, |
|
"loss": 0.8305, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 8.321917808219178, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 1.6670848276547334e-05, |
|
"loss": 0.8239, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 8.356164383561644, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 1.601594334489702e-05, |
|
"loss": 0.8252, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 8.39041095890411, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 1.5373040056134814e-05, |
|
"loss": 0.8194, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 8.424657534246576, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 1.474223028359939e-05, |
|
"loss": 0.8188, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 8.45890410958904, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 1.4123604172419713e-05, |
|
"loss": 0.8206, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 8.493150684931507, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 1.3517250126632986e-05, |
|
"loss": 0.8223, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 8.527397260273972, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 1.292325479655131e-05, |
|
"loss": 0.8257, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 8.561643835616438, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 1.2341703066379074e-05, |
|
"loss": 0.8239, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 8.595890410958905, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 1.1772678042082607e-05, |
|
"loss": 0.8173, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 8.63013698630137, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 1.1216261039514087e-05, |
|
"loss": 0.8266, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 8.664383561643836, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 1.0672531572791178e-05, |
|
"loss": 0.8237, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 8.698630136986301, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 1.0141567342934132e-05, |
|
"loss": 0.8335, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 8.732876712328768, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 9.623444226762035e-06, |
|
"loss": 0.8239, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 8.767123287671232, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 9.118236266049707e-06, |
|
"loss": 0.8308, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 8.801369863013699, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 8.626015656946895e-06, |
|
"loss": 0.8266, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 8.835616438356164, |
|
"grad_norm": 0.341796875, |
|
"learning_rate": 8.146852739661105e-06, |
|
"loss": 0.8248, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 8.86986301369863, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 7.6808159884057e-06, |
|
"loss": 0.8322, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 8.904109589041095, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 7.2279720016148244e-06, |
|
"loss": 0.823, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 8.938356164383562, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 6.788385492426053e-06, |
|
"loss": 0.825, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 8.972602739726028, |
|
"grad_norm": 0.357421875, |
|
"learning_rate": 6.36211927943271e-06, |
|
"loss": 0.8286, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_loss": 2.7898991107940674, |
|
"eval_runtime": 0.5501, |
|
"eval_samples_per_second": 18.177, |
|
"eval_steps_per_second": 1.818, |
|
"step": 1314 |
|
}, |
|
{ |
|
"epoch": 9.006849315068493, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 5.949234277706861e-06, |
|
"loss": 0.8231, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 9.04109589041096, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 5.549789490094304e-06, |
|
"loss": 0.8237, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 9.075342465753424, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 5.163841998782837e-06, |
|
"loss": 0.8125, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 9.10958904109589, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 4.79144695714504e-06, |
|
"loss": 0.8179, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 9.143835616438356, |
|
"grad_norm": 0.34375, |
|
"learning_rate": 4.432657581856525e-06, |
|
"loss": 0.8173, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 9.178082191780822, |
|
"grad_norm": 0.37109375, |
|
"learning_rate": 4.087525145291204e-06, |
|
"loss": 0.8186, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 9.212328767123287, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 3.7560989681941992e-06, |
|
"loss": 0.8233, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 9.246575342465754, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 3.4384264126337328e-06, |
|
"loss": 0.8147, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 9.280821917808218, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 3.1345528752329212e-06, |
|
"loss": 0.8076, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 9.315068493150685, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 2.8445217806824077e-06, |
|
"loss": 0.8259, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 9.349315068493151, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 2.5683745755348044e-06, |
|
"loss": 0.819, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 9.383561643835616, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 2.30615072228183e-06, |
|
"loss": 0.8218, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 9.417808219178083, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 2.057887693714988e-06, |
|
"loss": 0.8166, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 9.452054794520548, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 1.8236209675705274e-06, |
|
"loss": 0.815, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 9.486301369863014, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 1.6033840214595308e-06, |
|
"loss": 0.8287, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 9.520547945205479, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 1.397208328083921e-06, |
|
"loss": 0.8239, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 9.554794520547945, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 1.205123350738746e-06, |
|
"loss": 0.8238, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 9.58904109589041, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 1.0271565391018922e-06, |
|
"loss": 0.8231, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 9.623287671232877, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 8.633333253113995e-07, |
|
"loss": 0.8246, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 9.657534246575342, |
|
"grad_norm": 0.361328125, |
|
"learning_rate": 7.136771203310245e-07, |
|
"loss": 0.8131, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 9.691780821917808, |
|
"grad_norm": 0.3515625, |
|
"learning_rate": 5.782093106048159e-07, |
|
"loss": 0.8196, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 9.726027397260275, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 4.569492550008603e-07, |
|
"loss": 0.8097, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 9.76027397260274, |
|
"grad_norm": 0.36328125, |
|
"learning_rate": 3.49914282044872e-07, |
|
"loss": 0.8272, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 9.794520547945206, |
|
"grad_norm": 0.365234375, |
|
"learning_rate": 2.5711968744382974e-07, |
|
"loss": 0.8285, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 9.82876712328767, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 1.7857873190019192e-07, |
|
"loss": 0.8323, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 9.863013698630137, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 1.143026392168789e-07, |
|
"loss": 0.822, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 9.897260273972602, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 6.430059469334504e-08, |
|
"loss": 0.8329, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 9.931506849315069, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 2.8579743813006432e-08, |
|
"loss": 0.8213, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 9.965753424657533, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 7.145191222035497e-09, |
|
"loss": 0.8169, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"grad_norm": 0.35546875, |
|
"learning_rate": 0.0, |
|
"loss": 0.8185, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_loss": 2.793144464492798, |
|
"eval_runtime": 0.5503, |
|
"eval_samples_per_second": 18.172, |
|
"eval_steps_per_second": 1.817, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"step": 1460, |
|
"total_flos": 8.702314001108828e+17, |
|
"train_loss": 0.9981180969982931, |
|
"train_runtime": 8741.0304, |
|
"train_samples_per_second": 8.01, |
|
"train_steps_per_second": 0.167 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 1460, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 8.702314001108828e+17, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |